diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b740f5d433d49e009215021779cfc8a15f8a1a3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _amp_update_scale(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
+TORCH_API at::Tensor & _amp_update_scale_out(at::Tensor & out, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
+TORCH_API at::Tensor & _amp_update_scale_outf(const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..616da5b43851a739e2fc7ab57c93e1e5ddb5bb0f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _convert_indices_from_csr_to_coo(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false);
+TORCH_API at::Tensor & _convert_indices_from_csr_to_coo_out(at::Tensor & out, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false);
+TORCH_API at::Tensor & _convert_indices_from_csr_to_coo_outf(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_weight_to_int4pack_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_weight_to_int4pack_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..37476d620d3939d4da55093d27a22071eb3a9333
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_weight_to_int4pack_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _convert_weight_to_int4pack_cpu(const at::Tensor & self, int64_t innerKTiles);
+TORCH_API at::Tensor _convert_weight_to_int4pack_cuda(const at::Tensor & self, int64_t innerKTiles);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae4f8d7a69d7f7c642cf24c4e5e7b2c24cfad59c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_fft_r2c_ops.h>
+
+namespace at {
+
+
+// aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
+inline at::Tensor _fft_r2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
+    return at::_ops::_fft_r2c::call(self, dim, normalization, onesided);
+}
+
+// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _fft_r2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
+    return at::_ops::_fft_r2c_out::call(self, dim, normalization, onesided, out);
+}
+// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _fft_r2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) {
+    return at::_ops::_fft_r2c_out::call(self, dim, normalization, onesided, out);
+}
+
+}
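// --- Editor's sketch, not part of the diff: minimal usage of the _fft_r2c
// functions declared above, assuming a linked libtorch build. The helper name
// rfft_last_dim is hypothetical; _fft_r2c is the internal kernel behind
// torch.fft.rfft, where normalization=0 appears to mean "no scaling" and
// onesided=true keeps only the non-redundant half of the spectrum.
#include <ATen/ATen.h>

at::Tensor rfft_last_dim(const at::Tensor & signal) {
  // One-sided real-to-complex FFT over the last dimension.
  return at::_fft_r2c(signal, /*dim=*/{-1}, /*normalization=*/0, /*onesided=*/true);
}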
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b678ec3b329ae312e77441ff085d4bb8cbd83e1d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional<double> scale=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_mul_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_mul_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5d1e54a932f468761376f174a6ba0dd47a8aa901
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_mul_ops.h
@@ -0,0 +1,149 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_mul_Scalar {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_mul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, const at::Scalar & scalar);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_mul__Scalar {
+  using schema = void (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_mul_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_mul_List {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_mul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::TensorList other);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other);
+};
+
+struct TORCH_API _foreach_mul__List {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_mul_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()")
+  static void call(at::TensorList self, at::TensorList other);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other);
+};
+
+struct TORCH_API _foreach_mul_ScalarList {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_mul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_mul__ScalarList {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_mul_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_mul_Tensor {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_mul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, const at::Tensor & other);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other);
+};
+
+struct TORCH_API _foreach_mul__Tensor {
+  using schema = void (at::TensorList, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_mul_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> ()")
+  static void call(at::TensorList self, const at::Tensor & other);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other);
+};
+
+struct TORCH_API _foreach_mul_Scalar_out {
+  using schema = void (at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_mul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+};
+
+struct TORCH_API _foreach_mul_List_out {
+  using schema = void (at::TensorList, at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_mul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList other, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out);
+};
+
+struct TORCH_API _foreach_mul_ScalarList_out {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_mul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+};
+
+struct TORCH_API _foreach_mul_Tensor_out {
+  using schema = void (at::TensorList, const at::Tensor &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_mul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, const at::Tensor & other, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, at::TensorList out);
+};
+
+}} // namespace at::_ops
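// --- Editor's sketch, not part of the diff: usage of the _foreach_mul
// overloads declared above, assuming a linked libtorch build; scale_all is a
// hypothetical helper. The .Scalar overload multiplies every tensor in the
// list by one scalar in a single fused call and returns one tensor per input.
#include <ATen/ATen.h>
#include <vector>

std::vector<at::Tensor> scale_all(at::TensorList params, double factor) {
  return at::_foreach_mul(params, factor);  // out[i] = params[i] * factor
}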
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_gather_sparse_backward_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_gather_sparse_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..432ed1ae5599fd1890ec139ee8e5a3960da38781
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_gather_sparse_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _gather_sparse_backward(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c6c5c2e24e3dd11aba12b2647e91bf9aa6a99a5d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float);
+TORCH_API at::Tensor & _log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float);
+TORCH_API at::Tensor & _log_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_transpose.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_transpose.h
new file mode 100644
index 0000000000000000000000000000000000000000..ba819531604e2f2c81aedce6c94832dba6092ad2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_transpose.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_mps_convolution_transpose_ops.h>
+
+namespace at {
+
+
+// aten::_mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
+inline at::Tensor _mps_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::_mps_convolution_transpose::call(self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _mps_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::_mps_convolution_transpose::call(self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups);
+  }
+}
+
+// aten::_mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
+inline at::Tensor _mps_convolution_transpose_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::_mps_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _mps_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::_mps_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups);
+  }
+}
+
+// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _mps_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::_mps_convolution_transpose_out::call(self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _mps_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::_mps_convolution_transpose_out::call(self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out);
+  }
+}
+
+// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _mps_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
+    return at::_ops::_mps_convolution_transpose_out::call(self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _mps_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
+    return at::_ops::_mps_convolution_transpose_out::call(self, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out);
+  }
+}
+
+// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _mps_convolution_transpose_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::_mps_convolution_transpose_out::call(self, weight, padding, output_padding, stride, dilation, groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _mps_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::_mps_convolution_transpose_out::call(self, weight, padding, output_padding, stride, dilation, groups, out);
+  }
+}
+
+// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _mps_convolution_transpose_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
+    return at::_ops::_mps_convolution_transpose_out::call(self, weight, padding, output_padding, stride, dilation, groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _mps_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
+    return at::_ops::_mps_convolution_transpose_out::call(self, weight, padding, output_padding, stride, dilation, groups, out);
+  }
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5127ff1f0312f675412976861009a598591df696
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets_native.h
@@ -0,0 +1,20 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_ragged_idx_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_ragged_idx_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5127ff1f0312f675412976861009a598591df696
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_ragged_idx_native.h
@@ -0,0 +1,20 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_softmax_with_shape_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_softmax_with_shape_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b40d1a0b7cf70e4983fc8526cad31004ae25b21
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_softmax_with_shape_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor NestedTensor_softmax_dropout(const at::Tensor & self, const at::Tensor & query);
+TORCH_API at::Tensor NestedTensor_softmax_dropout_cuda(const at::Tensor & self, const at::Tensor & query);
+} // namespace native
+} // namespace at
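// --- Editor's sketch, not part of the diff. The *_dispatch.h headers above
// expose one backend's kernel under a per-dispatch-key namespace (at::cpu,
// at::cuda, at::meta, ...), bypassing the dispatcher. For example, the meta
// kernel only computes output metadata; assuming a linked libtorch build and
// an input allocated on the "meta" device:
#include <ATen/ATen.h>
#include <ATen/ops/_log_softmax_meta_dispatch.h>

at::Tensor log_softmax_shape_only(const at::Tensor & meta_input) {
  // Returns a tensor with the right sizes/dtype but no backing storage.
  return at::meta::_log_softmax(meta_input, /*dim=*/-1, /*half_to_float=*/false);
}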
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f12880fb1601e9cafb7fb1ef065c44bfd2e20c4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _nested_view_from_buffer_copy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets);
+TORCH_API at::Tensor & _nested_view_from_buffer_copy_outf(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d2307a1b2052fe98f504126758d8ce5b214af89
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_reshape_alias_ops.h>
+
+namespace at {
+
+
+// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
+inline at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
+    return at::_ops::_reshape_alias::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
+    return at::_ops::_reshape_alias::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
+  }
+}
+
+// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
+inline at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
+    return at::_ops::_reshape_alias::call(self, size, stride);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _reshape_alias(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
+    return at::_ops::_reshape_alias::call(self, size, stride);
+  }
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_ff_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_ff_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..91146292d51db2c63037f44a190074dcf7cbf067
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_ff_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sobol_engine_ff_ {
+  using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sobol_engine_ff_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..67cfde367693393d9872bb36fb3b9a19d2cb135a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sparse_broadcast_to_copy_ops.h>
+
+namespace at {
+
+
+// aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
+inline at::Tensor _sparse_broadcast_to_copy(const at::Tensor & self, at::IntArrayRef size) {
+    return at::_ops::_sparse_broadcast_to_copy::call(self, size);
+}
+
+// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_broadcast_to_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
+    return at::_ops::_sparse_broadcast_to_copy_out::call(self, size, out);
+}
+// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_broadcast_to_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::_sparse_broadcast_to_copy_out::call(self, size, out);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..01aab769ee9e990baa7720b039e7373a362177fa
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _sparse_log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self);
+TORCH_API at::Tensor & _sparse_log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..472dca29e201da308469aa62d075d64706863aa9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _standard_gamma(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_lstm_cell_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_lstm_cell_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e0b50d867984926bd1d685eb8fb2ec59ea1a63ec
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_lstm_cell_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy);
+} // namespace native
+} // namespace at
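// --- Editor's sketch, not part of the diff. The *_ops.h headers above (e.g.
// _sobol_engine_ff_ops.h) define one at::_ops record per operator overload;
// its static call() is the unboxed entry point that the public at:: wrapper
// forwards to, while name/overload_name/schema_str carry the registered
// schema. Assuming a linked libtorch build, the hypothetical helper below is
// equivalent to calling at::_sparse_broadcast_to_copy directly:
#include <ATen/ATen.h>

at::Tensor broadcast_sparse(const at::Tensor & self, at::IntArrayRef size) {
  // Same op as at::_sparse_broadcast_to_copy(self, size); the public wrapper
  // is a one-line forward to this call(), as in the Function.h files above.
  return at::_ops::_sparse_broadcast_to_copy::call(self, size);
}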
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csr.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csr.h
new file mode 100644
index 0000000000000000000000000000000000000000..18581b99cfbf714330c69c4accf2b49c4ac6e613
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csr.h
@@ -0,0 +1,34 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_to_sparse_csr_ops.h>
+
+namespace at {
+
+
+// aten::_to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _to_sparse_csr_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt) {
+    return at::_ops::_to_sparse_csr_out::call(self, dense_dim, out);
+}
+// aten::_to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _to_sparse_csr_outf(const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) {
+    return at::_ops::_to_sparse_csr_out::call(self, dense_dim, out);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_scaled_dot_attention_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_scaled_dot_attention_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..360977d215aabc1470651aa884b4f465bdb4f027
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_scaled_dot_attention_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _triton_scaled_dot_attention(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p=0.0);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3013f1670251ae69db15d9f1f0299763965d34f2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3ad933aa205ae5be495d8df2166ec176fcf56880
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcdiv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcdiv_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b4b38e8f3123842fef65e6eff9295c0ef0c9f2c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcmul_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+
+} // namespace cuda
+} // namespace at
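// --- Editor's sketch, not part of the diff. The addcdiv/addcmul functions
// declared above compute self + value * (tensor1 / tensor2) and
// self + value * (tensor1 * tensor2) elementwise, with value defaulting to 1.
// Assumes a linked libtorch build; adam_style_step is a hypothetical helper.
#include <ATen/ATen.h>

at::Tensor adam_style_step(const at::Tensor & param, const at::Tensor & grad,
                           const at::Tensor & denom, double step_size) {
  // param - step_size * grad / denom, in one fused kernel.
  return at::addcdiv(param, grad, denom, -step_size);
}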
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5ac45da86f56a0c3abd942b1085b29de0874229
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor atan2(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & atan2_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bartlett_window_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bartlett_window_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c6a3b89f60d47e63d8742eccd9b67f92dcc0920
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bartlett_window_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor bartlett_window(int64_t window_length, at::TensorOptions options={});
+TORCH_API at::Tensor bartlett_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor & bartlett_window_out(at::Tensor & out, int64_t window_length);
+TORCH_API at::Tensor & bartlett_window_outf(int64_t window_length, at::Tensor & out);
+TORCH_API at::Tensor bartlett_window(int64_t window_length, bool periodic, at::TensorOptions options={});
+TORCH_API at::Tensor bartlett_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor & bartlett_window_out(at::Tensor & out, int64_t window_length, bool periodic);
+TORCH_API at::Tensor & bartlett_window_outf(int64_t window_length, bool periodic, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..00e674925702e2dac42ec88ac9a2a73c25d7a0e2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API bilinear {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bilinear")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f290700d4ee6984e8e8e16c678957736ee92c600
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor bincount(const at::Tensor & self, const c10::optional<at::Tensor> & weights={}, int64_t minlength=0);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/can_cast.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/can_cast.h
new file mode 100644
index 0000000000000000000000000000000000000000..52cb6eafcdb06a633b037cc8d741660a84f13283
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/can_cast.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/can_cast_ops.h>
+
+namespace at {
+
+
+// aten::can_cast(ScalarType from, ScalarType to) -> bool
+inline bool can_cast(at::ScalarType from, at::ScalarType to) {
+    return at::_ops::can_cast::call(from, to);
+}
+
+}
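// --- Editor's sketch, not part of the diff; assumes a linked libtorch build
// and a hypothetical helper name. can_cast applies the same type-promotion
// rule as torch.can_cast, and the bartlett_window overloads above build a
// 1-D window tensor.
#include <ATen/ATen.h>

at::Tensor window_as(const at::Tensor & like, int64_t length) {
  at::Tensor w = at::bartlett_window(length, /*periodic=*/true,
                                     at::TensorOptions().dtype(at::kFloat));
  // Only convert when the promotion rule permits the cast.
  return at::can_cast(w.scalar_type(), like.scalar_type())
      ? w.to(like.scalar_type()) : w;
}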
+inline at::Tensor & col2im_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out); +} +namespace symint { + template ::value>> + at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out); + } +} + +// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & col2im_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out); +} +namespace symint { + template ::value>> + at::Tensor & col2im_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out); + } +} + +// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor +inline at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride); +} +namespace symint { + template ::value>> + at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride); + } +} + +// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor +inline at::Tensor col2im_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride); +} +namespace symint { + template ::value>> + at::Tensor col2im(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride); + } +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd.h new file mode 100644 index 0000000000000000000000000000000000000000..5fd2b33ad3bec2dd159277ef6f299c2ad0a1a761 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include 
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd.h new file mode 100644 index 0000000000000000000000000000000000000000..5fd2b33ad3bec2dd159277ef6f299c2ad0a1a761 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor +inline at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd::call(self, c10::fromIntArrayRefSlow(pad), value); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd::call(self, c10::fromIntArrayRefSlow(pad), value); + } +} + +// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor +inline at::Tensor constant_pad_nd_symint(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd::call(self, pad, value); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor constant_pad_nd(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd::call(self, pad, value); + } +} + +// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd_out::call(self, c10::fromIntArrayRefSlow(pad), value, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd_out::call(self, c10::fromIntArrayRefSlow(pad), value, out); + } +} + +// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & constant_pad_nd_outf(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out) { + return at::_ops::constant_pad_nd_out::call(self, c10::fromIntArrayRefSlow(pad), value, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & constant_pad_nd_outf(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out) { + return at::_ops::constant_pad_nd_out::call(self, c10::fromIntArrayRefSlow(pad), value, out); + } +} + +// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & constant_pad_nd_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd_out::call(self, pad, value, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd_out::call(self, pad, value, out); + } +} + +// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & constant_pad_nd_symint_outf(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) { + return at::_ops::constant_pad_nd_out::call(self, pad, value, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & constant_pad_nd_outf(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) { + return at::_ops::constant_pad_nd_out::call(self, pad, value, out); + } +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c9d90a7598728788716ce7128fffa1b4eb352782 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API constant_pad_nd { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::constant_pad_nd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value); +}; + +struct TORCH_API constant_pad_nd_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::constant_pad_nd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out); +}; + +}} // namespace at::_ops
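Each at::_ops struct pairs the schema metadata with two entry points: call() runs full dispatch, while redispatch() continues from an explicit DispatchKeySet. A sketch of the ordinary wrapper next to the op-struct route it expands to (values illustrative):

#include <ATen/ATen.h>

at::Tensor pad_example() {
  at::Tensor t = at::ones({2, 2});
  // High-level wrapper: pads the last dimension by one on each side -> shape {2, 4}.
  at::Tensor a = at::constant_pad_nd(t, {1, 1}, /*value=*/0);
  // Same operation through the operator struct, as the inline wrapper does it.
  at::Tensor b = at::_ops::constant_pad_nd::call(t, c10::fromIntArrayRefSlow({1, 1}), 0);
  return a + b;
}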
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/contiguous_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/contiguous_native.h new file mode 100644 index 0000000000000000000000000000000000000000..da924668246a048c6f89366f32f23ec31bf1813a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/contiguous_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor contiguous(const at::Tensor & self, at::MemoryFormat memory_format=MemoryFormat::Contiguous); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/copy_sparse_to_sparse_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/copy_sparse_to_sparse_native.h new file mode 100644 index 0000000000000000000000000000000000000000..19b1fa3459df6213c415764683f9fd8c3fd76ee8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/copy_sparse_to_sparse_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor copy_sparse_to_sparse(const at::Tensor & self, const at::Tensor & src, bool non_blocking=false); +TORCH_API at::Tensor & copy_sparse_to_sparse_out(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out); +TORCH_API at::Tensor & copy_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking=false); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diag.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diag.h new file mode 100644 index 0000000000000000000000000000000000000000..743e10ccdbddbf0e9de78265e31672d8d4dcc5e0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diag.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & diag_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::diag_out::call(self, diagonal, out); +} +// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & diag_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out) { + return at::_ops::diag_out::call(self, diagonal, out); +} + +// aten::diag(Tensor self, int diagonal=0) -> Tensor +inline at::Tensor diag(const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::diag::call(self, diagonal); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9d150fba82758fe5756c3e635548805b05ddd6ea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API diagonal { + using schema = at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diagonal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2); +}; + +struct TORCH_API diagonal_Dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, at::Dimname, at::Dimname, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diagonal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/dot_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/dot_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d7ae585ea1babbb29999cc39d2d722f9374eb169 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/dot_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & dot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor); +TORCH_API at::Tensor & dot_outf(const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b065f5df5d03361a5055acbfdf678398a4d6d4e2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & floor_divide_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor floor_divide_sparse(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & floor_divide_out_sparse_zerodim(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & floor_divide_sparse_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor floor_divide(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & floor_divide_Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & floor_divide_(at::Tensor & self, const at::Scalar & other); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/ge_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/ge_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e488b458e4648d336637dee2bd1939f4bd430a03 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/ge_meta_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0441115d0719dd7c5b5b0262d95330a5ae76b534 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt); + +} // namespace cuda +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gradient_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gradient_native.h new file mode 100644 index 0000000000000000000000000000000000000000..732b6c24f1c8c47faa3657b819bf0a3a8f45f74a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gradient_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, const c10::optional<at::Scalar> & spacing=c10::nullopt, c10::optional<int64_t> dim=c10::nullopt, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, c10::optional<int64_t> dim=c10::nullopt, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, c10::optional<int64_t> dim=c10::nullopt, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order=1); +} // namespace native +} // namespace at
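The paired *_out/*_outf spellings that recur in these dispatch headers differ only in argument order: *_out takes the destination first for C++ convenience, *_outf mirrors the schema with out trailing. Shown with the top-level at::ge wrappers (same convention as the at::meta::ge declarations above; assumes ATen/Functions.h is pulled in via ATen/ATen.h):

#include <ATen/ATen.h>

void ge_both_ways(const at::Tensor& a, at::Tensor& out) {
  at::ge_out(out, a, /*other=*/0.5);   // destination leads
  at::ge_outf(a, /*other=*/0.5, out);  // schema order, destination trails; same op underneath
}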
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8c26720867c36b21bdca8b0012c2e5f8ec8c5f07 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hypot_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hypot") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API hypot { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hypot") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hypot(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API hypot_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hypot_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hypot_(Tensor(a!)
self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8ecff044f5ef1dd8b7e23f2a67ca067d4e6d1b2b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor index_put(const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate=false); +TORCH_API at::Tensor & index_put_out(const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out); +TORCH_API at::Tensor & index_put_(at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate=false); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..40131cf854de239d045455a93d6dcbe2ef5c2f9a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor indices_copy(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1c7b8206515af1375770ce99df6ac17821cc3dd7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1c7b8206515af1375770ce99df6ac17821cc3dd7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API bool is_same_size(const at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b84a8eacdea28f4ebb03b86fc87489cc6a04fa20 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API isfinite { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isfinite") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isfinite(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..301705e7121f4ad6dea7871e597ac691557f1346 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau); +TORCH_API at::Tensor & linalg_householder_product_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tau); +TORCH_API at::Tensor & linalg_householder_product_outf(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out); + +} // namespace cuda +} // namespace at
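linalg_householder_product (LAPACK orgqr) rebuilds the explicit Q factor from the reflectors and tau values produced by geqrf. A sketch using the top-level at:: wrappers (the header above is only the CUDA dispatch slice of the same op):

#include <ATen/ATen.h>

at::Tensor q_from_geqrf(const at::Tensor& A) {
  auto [a, tau] = at::geqrf(A);                   // packed reflectors and scaling factors
  return at::linalg_householder_product(a, tau);  // Q with A.size(1) orthonormal columns
}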
pivots)") + static ::std::tuple call(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..20ad74a501feb52014995be8848b271366db65fa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor masked_select_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..39ca5332f208177b05ea7e723bd81c5b64f4ce74 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_power_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API matrix_power { + using schema = at::Tensor (const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::matrix_power") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "matrix_power(Tensor self, int n) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t n); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n); +}; + +struct TORCH_API matrix_power_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::matrix_power") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "matrix_power.out(Tensor self, int n, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/min_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/min_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..1cc78609dd56eb8fbaa9da543c0757f1634c5d53 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/min_meta.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_min_dim : public at::impl::MetaBase { + + template + struct TORCH_API precompute_out { + + precompute_out set_dim(int64_t value) { + static_assert(DIM == false, "dim already set"); + precompute_out ret; +ret.dim = value; +return ret; + } + + int64_t dim; + }; + using meta_return_ty = precompute_out ; + meta_return_ty meta(const at::Tensor & self, int64_t dim, bool keepdim); +}; + +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_backward_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..00f567354f39da503a3ac3bf0780df4eaff2afe7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_backward_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..00f567354f39da503a3ac3bf0780df4eaff2afe7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon); + +} // namespace cuda +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0eebfdbab373ff676f78cebd322af811c45de5f9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c6e78a879b24da91bbe3449c29046b565b934332 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & mkldnn_max_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API at::Tensor & mkldnn_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6334f19fdf81fd82e7b906485a044ed75231e1fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction); +TORCH_API at::Tensor & mse_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction); +TORCH_API at::Tensor & mse_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..66bb86cacf02e045452ccc9c02ba6ccf38aeee34 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor multi_margin_loss(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & multi_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & multi_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..b4504ef1c60a2482f8ac4d86f3871edd0b8f6a01 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_nll_loss_backward : public at::impl::MetaBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, at::OptionalTensorRef weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight); +}; + +} // namespace meta +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nuclear_norm_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nuclear_norm_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f97a30b1b4f0e09f07f50c4f1991ede9ad24592b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nuclear_norm_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor nuclear_norm(const at::Tensor & self, bool keepdim=false); +TORCH_API at::Tensor & nuclear_norm_out(at::Tensor & out, const at::Tensor & self, bool keepdim=false); +TORCH_API at::Tensor & nuclear_norm_outf(const at::Tensor & self, bool keepdim, at::Tensor & out); +TORCH_API at::Tensor nuclear_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & nuclear_norm_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & nuclear_norm_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/numpy_T_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/numpy_T_native.h new file mode 100644 index 0000000000000000000000000000000000000000..be123b40e0873f77378f8e545ecffc2671729137 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/numpy_T_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor numpy_T(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/pdist_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/pdist_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..66e1a0f74c99445ff59e1f00af048a8e1f4e6e70 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/pdist_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor pdist(const at::Tensor & self, double p=2); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9a81b814bde4a3c77f00dde1923e620230f02865 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & pixel_shuffle_out(at::Tensor & out, const at::Tensor & self, int64_t upscale_factor); +TORCH_API at::Tensor & pixel_shuffle_outf(const at::Tensor & self, int64_t upscale_factor, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_channel_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_channel_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..382ce43161562695db7a43e6e9f023e94df95c72 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_channel_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor quantize_per_channel(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype); + +} // namespace cpu +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg.h new file mode 100644 index 0000000000000000000000000000000000000000..eb4b0b6c1f04fdb952797123ae6ed19f5090677c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::rad2deg(Tensor self) -> Tensor +inline at::Tensor rad2deg(const at::Tensor & self) { + return at::_ops::rad2deg::call(self); +} + +// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & rad2deg_(at::Tensor & self) { + return at::_ops::rad2deg_::call(self); +} + +// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::rad2deg_out::call(self, out); +} +// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::rad2deg_out::call(self, out); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4465aa774fce171038b5677c963def97ae61a5bf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor replication_pad2d(const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor replication_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & replication_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor & replication_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/resize_as_sparse_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/resize_as_sparse_native.h new file mode 100644 index 0000000000000000000000000000000000000000..beb06b63ff4c927d7bad11dbc81e3e7ebff682f3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/resize_as_sparse_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor resize_as_sparse(const at::Tensor & self, const at::Tensor & the_template); +TORCH_API const at::Tensor & resize_as_sparse_out(const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out); +TORCH_API const at::Tensor & resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template); +TORCH_API const at::Tensor & resize_as_sparse_compressed_(const at::Tensor & self, const at::Tensor & the_template); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/resize_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/resize_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e1c425c52e027402ef25030d4645bcd064156a6d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/resize_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor resize_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +TORCH_API const at::Tensor & resize_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out); +TORCH_API const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +TORCH_API const at::Tensor & resize_cuda_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +TORCH_API const at::Tensor & resize_sparse_csr_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +TORCH_API const at::Tensor & resize__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +TORCH_API const at::Tensor & quantized_resize_cpu_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/rsub_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/rsub_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f901c4ab46aaf01cd2db626c1f645bcd54247b6a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/rsub_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API rsub_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rsub") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); +}; + +struct TORCH_API rsub_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rsub") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); +}; + +struct TORCH_API rsub_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rsub") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); +}; + +struct TORCH_API rsub_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rsub") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) 
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sinh_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sinh_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..620dd3533f039b353694f20b084dc2079fcad2e0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sinh_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API sinh {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sinh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sinh(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API sinh_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sinh_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sinh_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API sinh_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sinh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
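The three sinh schemas above show torchgen's standard surface for a unary op: functional (sinh), in-place (sinh_, the Tensor(a!) annotation marking the mutated argument), and out-variant (sinh.out). A sketch of each, assuming libtorch; illustrative only:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::randn({3});
      at::Tensor y = at::sinh(x);   // functional: allocates the result
      x.sinh_();                    // in-place: mutates x
      at::Tensor out = at::empty_like(x);
      at::sinh_out(out, x);         // out-variant: writes into a caller-provided tensor
      return 0;
    }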
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/softmax.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/softmax.h
new file mode 100644
index 0000000000000000000000000000000000000000..5eddf0d2e3139ce6d5c07ac7c44ba0ecee65a150
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/softmax.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/softmax_ops.h>
+
+namespace at {
+
+
+// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+inline at::Tensor softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+    return at::_ops::softmax_int::call(self, dim, dtype);
+}
+
+// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+    return at::_ops::softmax_int_out::call(self, dim, dtype, out);
+}
+// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & softmax_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+    return at::_ops::softmax_int_out::call(self, dim, dtype, out);
+}
+
+// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+    return at::_ops::softmax_Dimname::call(self, dim, dtype);
+}
+
+}
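The optional ScalarType argument in softmax.int casts the input before the op runs, which is the usual way to get float32 computation from half-precision inputs. A sketch, assuming libtorch; illustrative only:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor logits = at::randn({2, 5});
      // dtype=None (c10::nullopt) keeps the input dtype.
      at::Tensor p = at::softmax(logits, /*dim=*/1);
      // Passing a dtype upcasts first: computed in float even for half input.
      at::Tensor p32 = at::softmax(logits.to(at::kHalf), /*dim=*/1, at::kFloat);
      return p.sizes() == p32.sizes() ? 0 : 1;
    }

Note that softmax_out and softmax_outf bind the same softmax.int_out schema; they differ only in where the out argument sits in the C++ signature.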
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..921752fe249a75b96dae7e0b9cb20f8c87b65d74
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_chebyshev_polynomial_t {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_t")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor")
+  static at::Tensor call(const at::Tensor & x, const at::Tensor & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n);
+};
+
+struct TORCH_API special_chebyshev_polynomial_t_x_scalar {
+  using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_t")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor")
+  static at::Tensor call(const at::Scalar & x, const at::Tensor & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n);
+};
+
+struct TORCH_API special_chebyshev_polynomial_t_n_scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_t")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor")
+  static at::Tensor call(const at::Tensor & x, const at::Scalar & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n);
+};
+
+struct TORCH_API special_chebyshev_polynomial_t_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_t")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+};
+
+struct TORCH_API special_chebyshev_polynomial_t_x_scalar_out {
+  using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_t")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+};
+
+struct TORCH_API special_chebyshev_polynomial_t_n_scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_chebyshev_polynomial_t")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+};
+
+}} // namespace at::_ops
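The six schemas above are one functional form plus an out-variant for each way of passing the arguments (both tensors, Scalar x, or Scalar n); all evaluate the Chebyshev polynomial of the first kind T_n(x) elementwise. A sketch, assuming libtorch; illustrative only:

    #include <ATen/ATen.h>
    #include <cassert>

    int main() {
      at::Tensor x = at::linspace(-1, 1, 5);
      at::Tensor n = at::full({5}, 2.0);
      // Tensor/Tensor overload; T_2(x) = 2x^2 - 1.
      at::Tensor y = at::special_chebyshev_polynomial_t(x, n);
      // n_scalar overload: the same degree for every element.
      at::Tensor y2 = at::special_chebyshev_polynomial_t(x, at::Scalar(2.0));
      assert(at::allclose(y, y2));
      return 0;
    }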
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_u_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_u_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8264603f34c4956f76ef41a42bdd70ccc64657b9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_u_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor special_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
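Headers in the at::cpu namespace expose the kernel registered for the CPU dispatch key directly, skipping the dispatcher. That is cheaper but unchecked: the sketch below assumes CPU tensors, since no device guard runs at this level. Illustrative only:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::linspace(-1, 1, 5);
      at::Tensor n = at::full({5}, 1.0);
      // Same result as at::special_chebyshev_polynomial_u(x, n),
      // but calls the CPU kernel without a dispatcher round trip.
      at::Tensor y = at::cpu::special_chebyshev_polynomial_u(x, n);
      return y.numel() == 5 ? 0 : 1;
    }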
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes_copy_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f171a92083f52be998e063a1e06506af8db8ef1e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes_copy_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void split_with_sizes_copy_out(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out);
+TORCH_API void split_with_sizes_copy_out_cuda(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> split_with_sizes_copy_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0);
+} // namespace native
+} // namespace at
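split_with_sizes_copy is the copying counterpart of split_with_sizes: each returned chunk owns its storage instead of aliasing the input. A sketch, assuming libtorch; illustrative only:

    #include <ATen/ATen.h>
    #include <cassert>

    int main() {
      at::Tensor t = at::arange(10);
      // The split sizes must sum to the length of the split dimension (3 + 7 == 10).
      std::vector<at::Tensor> parts = at::split_with_sizes_copy(t, {3, 7}, /*dim=*/0);
      assert(parts.size() == 2);
      // Unlike split_with_sizes, the chunks do not alias t.
      assert(!parts[0].is_alias_of(t));
      return 0;
    }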
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sub_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sub_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..073a975dc47b02606018ea63e65c3125d8cb4c36
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sub_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sub_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sub_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d7b8ebad92e1d9b43c3902295e07ec903cf5f0c5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sub_native.h
@@ -0,0 +1,31 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/sub_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_sub_out : public at::meta::structured_sub_Tensor {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, const at::Tensor & out);
+};
+TORCH_API at::Tensor NestedTensor_sub_Tensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor sub_sparse(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sub_out_sparse(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & sub_sparse_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor sub_zerotensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor sub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sub_Scalar_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & sub_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+} // namespace native
+} // namespace at
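The at::meta kernels in sub_meta_dispatch.h run only shape, dtype, and broadcasting inference for sub; no arithmetic executes. They back meta-device tensors, which carry metadata but no storage. A sketch, assuming libtorch; illustrative only:

    #include <ATen/ATen.h>

    int main() {
      // Meta tensors have sizes and dtypes but no data.
      at::Tensor a = at::empty({2, 3}, at::TensorOptions().device(at::kMeta));
      at::Tensor b = at::empty({1, 3}, at::TensorOptions().device(at::kMeta));
      // Routed to the meta kernel: the {1, 3} operand broadcasts to {2, 3},
      // but no subtraction is actually performed.
      at::Tensor c = at::sub(a, b, /*alpha=*/1);
      return c.sizes() == at::IntArrayRef({2, 3}) ? 0 : 1;
    }

In sub_native.h above, structured_sub_out derives from at::meta::structured_sub_Tensor, which is the other half of the same structured kernel: the meta function and the per-device impls share one shape-checking path.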
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd15e5e4ce49c3e24714ed029884c1d6d06239ce
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void sym_constrain_range(const at::Scalar & size, c10::optional<int64_t> min=c10::nullopt, c10::optional<int64_t> max=c10::nullopt);
+
+} // namespace compositeexplicitautograd
+} // namespace at
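sym_constrain_range is a side-effect-only op (it returns void): under symbolic tracing it records a user-asserted range for a data-dependent value, and in eager mode it appears to do no more than validate the value against the bounds. A hedged sketch, assuming libtorch; illustrative only:

    #include <ATen/ATen.h>

    int main() {
      // Asserts that the runtime value 5 lies in [0, 10]; under tracing
      // this would instead add a range constraint on the symbolic value.
      at::sym_constrain_range(at::Scalar(5), /*min=*/0, /*max=*/10);
      return 0;
    }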