diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1dda8a7f900b507be662d7e3c4f40cc6227d10ba
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _cast_Short(const at::Tensor & self, bool non_blocking=false);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1caa0989425a2a374613794d0044f6edbbb60c29
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cdist_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cdist_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
+};
+
+struct TORCH_API _cdist_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cdist_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..86b2129dd6925056bff05b06bb90e366b3ea5811
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _dim_arange(const at::Tensor & like, int64_t dim);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized.h
new file mode 100644
index 0000000000000000000000000000000000000000..e4d2f2d518acf5834691de46e1bf1b8e8c46e68f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized.h
@@ -0,0 +1,113 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_empty_affine_quantized_ops.h>
+
+namespace at {
+
+
+// aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+}
+
+// aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_affine_quantized::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, scale, zero_point, memory_format);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_affine_quantized::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, scale, zero_point, memory_format);
+  }
+}
+
+// aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _empty_affine_quantized(c10::SymIntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+}
+
+// aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_affine_quantized::call(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _empty_affine_quantized(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_affine_quantized::call(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
+  }
+}
+
+// aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scale, zero_point, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _empty_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scale, zero_point, memory_format, out);
+  }
+}
+
+// aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_affine_quantized_outf(at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_empty_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scale, zero_point, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _empty_affine_quantized_outf(at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_empty_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scale, zero_point, memory_format, out);
+  }
+}
+
+// aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_affine_quantized_symint_out(at::Tensor & out, c10::SymIntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _empty_affine_quantized_out(at::Tensor & out, c10::SymIntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out);
+  }
+}
+
+// aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_affine_quantized_symint_outf(c10::SymIntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _empty_affine_quantized_outf(c10::SymIntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out);
+  }
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min.h
new file mode 100644
index 0000000000000000000000000000000000000000..883754a68fd9105cfea5fca164e6ece430bc4cf5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min.h
@@ -0,0 +1,82 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_clamp_min_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_clamp_min_Scalar::call(self, scalar);
+}
+
+// aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+inline void _foreach_clamp_min_(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_clamp_min__Scalar::call(self, scalar);
+}
+
+// aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_clamp_min_List::call(self, other);
+}
+
+// aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+inline void _foreach_clamp_min_(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_clamp_min__List::call(self, other);
+}
+
+// aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_clamp_min_ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+inline void _foreach_clamp_min_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_clamp_min__ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out);
+}
+// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_min_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+    return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out);
+}
+
+// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_clamp_min_List_out::call(self, other, out);
+}
+// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_min_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
+    return at::_ops::_foreach_clamp_min_List_out::call(self, other, out);
+}
+
+// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out);
+}
+// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_min_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+    return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..13b1f5ff77cf54134b9298ac643d456a71523926
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_exp_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_exp_outf(at::TensorList self, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..946bec271173fa5f4d2f0150300ac2ea24823d90
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_reciprocal {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_reciprocal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_reciprocal(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_reciprocal_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_reciprocal_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_reciprocal_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_reciprocal_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_reciprocal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_int_mm_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_int_mm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b4e90ef4752b129aaddc0264ca592c930c5efaa
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_int_mm_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _int_mm {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_int_mm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_int_mm(Tensor self, Tensor mat2) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & mat2);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2);
+};
+
+struct TORCH_API _int_mm_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_int_mm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_int_mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_is_zerotensor.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_is_zerotensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b19943212dbb95c3704410d38ae6415047fc206
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_is_zerotensor.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_is_zerotensor_ops.h>
+
+namespace at {
+
+
+// aten::_is_zerotensor(Tensor self) -> bool
+inline bool __dispatch__is_zerotensor(const at::Tensor & self) {
+    return at::_ops::_is_zerotensor::call(self);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8882cca8009f50385f669ab281f0472b1841b898
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _log_softmax {
+  using schema = at::Tensor (const at::Tensor &, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_log_softmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim, bool half_to_float);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float);
+};
+
+struct TORCH_API _log_softmax_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_log_softmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e4c28c720a8c0e349382fadc907d77d9018eab25
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}, bool need_weights=true, bool average_attn_weights=true, c10::optional<int64_t> mask_type=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..d09e8a602ac74a0eeb74fc962efedb95b89b7304
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_neg_view_copy_ops.h>
+
+namespace at {
+
+
+// aten::_neg_view_copy(Tensor self) -> Tensor
+inline at::Tensor _neg_view_copy(const at::Tensor & self) {
+    return at::_ops::_neg_view_copy::call(self);
+}
+
+// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _neg_view_copy_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::_neg_view_copy_out::call(self, out);
+}
+// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _neg_view_copy_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::_neg_view_copy_out::call(self, out);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_saturate_weight_to_fp16.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_saturate_weight_to_fp16.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c26dc5f58698eb73935bcb2d36fef27363971d0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_saturate_weight_to_fp16.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_saturate_weight_to_fp16_ops.h>
+
+namespace at {
+
+
+// aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor
+inline at::Tensor _saturate_weight_to_fp16(const at::Tensor & weight) {
+    return at::_ops::_saturate_weight_to_fp16::call(weight);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b7cdfc83ab50d0421bee0b5030d06b83a8d3264
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_backward_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward_cuda(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional<double> scale=c10::nullopt);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward_nested(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional<double> scale=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..086d41b79118a22f08d8bf035dbebf6e6d5bc457
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> slow_conv2d_backward_out_cpu(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> slow_conv2d_backward_out_cuda(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_output_mask_out_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> slow_conv2d_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> slow_conv2d_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_initialize_state.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_initialize_state.h
new file mode 100644
index 0000000000000000000000000000000000000000..385a33ea851b7d5372ed01434be6719f5e610788
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_initialize_state.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sobol_engine_initialize_state_ops.h>
+
+namespace at {
+
+
+// aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
+inline at::Tensor & _sobol_engine_initialize_state_(at::Tensor & self, int64_t dimension) {
+    return at::_ops::_sobol_engine_initialize_state_::call(self, dimension);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f0ff32bea01c1227e5edf0f6a6fe2858244411ca
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_softmax_int {
+  using schema = at::Tensor (const at::Tensor &, int64_t, c10::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_softmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype);
+};
+
+struct TORCH_API _sparse_softmax_Dimname {
+  using schema = at::Tensor (const at::Tensor &, at::Dimname, c10::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_softmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype);
+};
+
+struct TORCH_API _sparse_softmax {
+  using schema = at::Tensor (const at::Tensor &, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_softmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim, bool half_to_float);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float);
+};
+
+struct TORCH_API _sparse_softmax_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_softmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e7c83d3867e081b6e268ad172618daba246dc501
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _unique(const at::Tensor & self, bool sorted=true, bool return_inverse=false);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..deeafb0aef4846bfbe38fcb74feecdd52aeaf2b4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9572d6838897a04af24545f8e2a187af1cb4c128
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor _upsample_nearest_exact2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..97a07a59888e7fe4d2e02c72980a92249d7b6b03
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API void _validate_compressed_sparse_indices(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/abs_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/abs_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..fb0a3bcf28c239d269e2cef4acbc1b20c1fe81b2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/abs_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API abs {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API abs_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API abs_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..274c06cbdfd9cc33d1c91398527d071c108e7317
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor affine_grid_generator(const at::Tensor & theta, at::IntArrayRef size, bool align_corners);
+TORCH_API at::Tensor & affine_grid_generator_out_symint(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..74438d94d66c2f5675528ccb2bec006caec8100c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_aminmax : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/arctan2_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/arctan2_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b10f365d03b774d0904a0defab63a07f693f1090
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/arctan2_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor arctan2(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & arctan2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & arctan2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & arctan2_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..b840607c1e51bebf4a495db64bd66922baeaef4f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/as_strided_copy_ops.h>
+
+namespace at {
+
+
+// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+inline at::Tensor as_strided_copy(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_copy::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor as_strided_copy(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_copy::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
+  }
+}
+
+// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+inline at::Tensor as_strided_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_copy::call(self, size, stride, storage_offset);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor as_strided_copy(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_copy::call(self, size, stride, storage_offset);
+  }
+}
+
+// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & as_strided_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & as_strided_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
+  }
+}
+
+// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & as_strided_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset, at::Tensor & out) {
+    return at::_ops::as_strided_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & as_strided_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset, at::Tensor & out) {
+    return at::_ops::as_strided_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
+  }
+}
+
+// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & as_strided_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_copy_out::call(self, size, stride, storage_offset, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & as_strided_copy_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+    return at::_ops::as_strided_copy_out::call(self, size, stride, storage_offset, out);
+  }
+}
+
+// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & as_strided_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
+    return at::_ops::as_strided_copy_out::call(self, size, stride, storage_offset, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & as_strided_copy_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
+    return at::_ops::as_strided_copy_out::call(self, size, stride, storage_offset, out);
+  }
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..346c2b788c2fb4b7a590f79e08faa64c4cdde128
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API avg_pool3d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input);
+};
+
+struct TORCH_API avg_pool3d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..e3aac6c92edb5162fad9f9d7805aeb5820da8399
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API avg_pool3d_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, at::Tensor & out); +}; + +struct TORCH_API avg_pool3d { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..230ad6fd7ed338c576ffab97216cbf58aa5398e5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor bilinear(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional & bias={}); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1096fe9c0d7f7ff637183370eee3b4624775f321 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & binary_cross_entropy_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & binary_cross_entropy_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_native.h new file mode 100644 index 0000000000000000000000000000000000000000..42a10def65600bb8d0e75f715a62b7bb5e1f2a9a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_bitwise_right_shift_out : public at::meta::structured_bitwise_right_shift_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & bitwise_right_shift_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & bitwise_right_shift_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor bitwise_right_shift(const at::Scalar & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_right_shift_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cdist_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cdist_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..302b238af6ff5d3b7c9a008a35bfc57b7d5fc780 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cdist_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor cdist(const at::Tensor & x1, const at::Tensor & x2, double p=2, c10::optional compute_mode=c10::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cross_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cross_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..de9b36bbeda4da429980947514e60ba563d85f46 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cross_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
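// Minimal usage sketch for the composite cdist declared above (illustrative,
// not part of the generated header):
#include <ATen/ATen.h>

inline void cdist_demo() {
  at::Tensor x1 = at::randn({4, 3});
  at::Tensor x2 = at::randn({5, 3});
  at::Tensor d = at::cdist(x1, x2);  // p defaults to 2 (Euclidean distance)
  // d has shape {4, 5}; compute_mode left unset picks the heuristic default.
}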
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor cross(const at::Tensor & self, const at::Tensor & other, c10::optional dim=c10::nullopt); +TORCH_API at::Tensor & cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional dim=c10::nullopt); +TORCH_API at::Tensor & cross_outf(const at::Tensor & self, const at::Tensor & other, c10::optional dim, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..857a490cd2a051f30549884f1b728d0212aa18e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & diag_embed_out(at::Tensor & out, const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1); +TORCH_API at::Tensor & diag_embed_outf(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/elu_backward_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/elu_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7614c28971d57ad6ef15181805dbe18eec45e09b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/elu_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API elu_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, bool, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::elu_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) 
grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input); +}; + +struct TORCH_API elu_backward { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, bool, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::elu_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/erfinv_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/erfinv_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c4aea293eb0634655d184f74a54778f32b09d504 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/erfinv_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
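// Sketch of `call` vs. `redispatch` on the structs above (illustrative):
// `call` enters the dispatcher from the top, while `redispatch` continues
// below the keys already handled, which is how wrapper kernels forward on.
#include <ATen/ATen.h>

inline at::Tensor elu_backward_demo(const at::Tensor& grad_out,
                                    const at::Tensor& result) {
  return at::_ops::elu_backward::call(grad_out, /*alpha=*/1.0, /*scale=*/1.0,
                                      /*input_scale=*/1.0, /*is_result=*/true,
                                      result);
}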
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor erfinv(const at::Tensor & self); +TORCH_API at::Tensor & erfinv_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_native.h new file mode 100644 index 0000000000000000000000000000000000000000..69aa7cfda5ad4bd3d0a46e5848959b65fa7424dc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_exp2_out : public at::meta::structured_exp2 { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8553a6158d02dc5d7c1ff9b09dd19ce8c1b6a9e9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); + +} // namespace cuda +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..03fbe7e93d8080f207c6250ee03759aa575f3daf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
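// Sketch of the structured-kernel split used by structured_exp2_out above
// (illustrative): the inherited meta() step fixes the shape/dtype of `out`,
// then impl() writes the values; user code only calls ordinary functions.
#include <ATen/ATen.h>

inline void exp2_demo() {
  at::Tensor x = at::tensor({0.0f, 1.0f, 3.0f});
  at::Tensor out = at::empty_like(x);
  at::exp2_out(out, x);  // routes through the structured kernel; out == {1, 2, 8}
}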
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor fft_ihfft(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor fft_ihfft_symint(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor & fft_ihfft_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor & fft_ihfft_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out); +TORCH_API at::Tensor & fft_ihfft_symint_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor & fft_ihfft_symint_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft.h new file mode 100644 index 0000000000000000000000000000000000000000..be7f26e580c61557c861ad3153f20b13c71502a0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +inline at::Tensor fft_rfft(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_rfft(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm); + } +} + +// aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +inline at::Tensor fft_rfft_symint(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft::call(self, n, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_rfft(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft::call(self, n, dim, norm); + } +} + +// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_rfft_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_rfft_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft_out::call(self, n.has_value() ? 
c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_rfft_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_rfft_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_rfft_symint_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft_out::call(self, n, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_rfft_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft_out::call(self, n, dim, norm, out); + } +} + +// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_rfft_symint_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft_out::call(self, n, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_rfft_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft_out::call(self, n, dim, norm, out); + } +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..484326525aabb5dfe48376df5032187fa90c6067 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
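// Sketch of the at::symint:: selection pattern above (illustrative): the
// templates are constrained so a caller opts into the int64_t or c10::SymInt
// spelling explicitly, and both funnel into the same _ops::fft_rfft::call.
#include <ATen/ATen.h>

inline void fft_rfft_demo(const at::Tensor& signal) {
  at::Tensor a = at::fft_rfft(signal);                       // int64_t surface
  at::Tensor b = at::symint::fft_rfft<c10::SymInt>(signal);  // SymInt surface
}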
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor flatten(const at::Tensor & self, int64_t start_dim=0, int64_t end_dim=-1); +TORCH_API at::Tensor flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim); +TORCH_API at::Tensor flatten(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim); +TORCH_API at::Tensor flatten(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/floor.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/floor.h new file mode 100644 index 0000000000000000000000000000000000000000..91a5e249b2eaab5af25d079072c709b9ee8d998f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/floor.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::floor(Tensor self) -> Tensor +inline at::Tensor floor(const at::Tensor & self) { + return at::_ops::floor::call(self); +} + +// aten::floor_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & floor_(at::Tensor & self) { + return at::_ops::floor_::call(self); +} + +// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::floor_out::call(self, out); +} +// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::floor_out::call(self, out); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/histc_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/histc_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4da0b335eb8c166ae48ab8a60214370b16ef1d19 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/histc_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor histogram_histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & histogram_histc_out(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); +TORCH_API at::Tensor _histc_cuda(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0); +TORCH_API at::Tensor & _histc_out_cuda(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6f2218d28f5fe31a028ba6a829478f1cb2588da8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_index_add_cpu_out : public at::meta::structured_index_add { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, const at::Tensor & out); +}; +struct TORCH_API structured_index_add_cuda_out : public at::meta::structured_index_add { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, const at::Tensor & out); +}; +TORCH_API at::Tensor index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..724242bd5929ba52fe380f361cbdbe6cb32b9a6a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API indices_copy { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::indices_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "indices_copy(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API indices_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::indices_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "indices_copy.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..eb7f4052ee537e17340b969829ddd20e3a4ab4c4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e9b83024877bd1a3c895d79d5da61d991ff86f06 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9a0f37286cf8a364a7ce77c54dfd2e9dc4352b8e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
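// Minimal sketch for the isclose declarations above (illustrative, not part
// of the generated header):
#include <ATen/ATen.h>
#include <cmath>

inline void isclose_demo() {
  at::Tensor a = at::tensor({1.0, std::nan("")});
  at::Tensor b = at::tensor({1.0 + 1e-9, std::nan("")});
  at::Tensor m1 = at::isclose(a, b);                      // rtol=1e-05, atol=1e-08
  at::Tensor m2 = at::isclose(a, b, 1e-05, 1e-08, true);  // equal_nan: NaNs match
}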
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor lgamma(const at::Tensor & self); +TORCH_API at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & lgamma_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex.h new file mode 100644 index 0000000000000000000000000000000000000000..4c992fe4b0d58b8ddead0f529c499ff1f5ebe314 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info) +inline ::std::tuple linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false) { + return at::_ops::linalg_ldl_factor_ex::call(self, hermitian, check_errors); +} + +// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) +inline ::std::tuple linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false) { + return at::_ops::linalg_ldl_factor_ex_out::call(self, hermitian, check_errors, LD, pivots, info); +} +// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) +inline ::std::tuple linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) { + return at::_ops::linalg_ldl_factor_ex_out::call(self, hermitian, check_errors, LD, pivots, info); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0336f0b359be4b490e5e3fb87fbf465d91b1fccf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
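// Sketch of consuming the (LD, pivots, info) tuple declared above
// (illustrative; C++17 structured bindings):
#include <ATen/ATen.h>

inline void ldl_demo() {
  at::Tensor a = at::randn({3, 3});
  a = a + a.t();  // symmetrize so an LDL^T factorization makes sense
  auto [LD, pivots, info] = at::linalg_ldl_factor_ex(
      a, /*hermitian=*/false, /*check_errors=*/false);
  // info == 0 signals success; with check_errors=true a nonzero info
  // raises an error instead of being returned.
}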
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_vander(const at::Tensor & x, c10::optional N=c10::nullopt); +TORCH_API at::Tensor linalg_vander_symint(const at::Tensor & x, c10::optional N=c10::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..235acd95a149e73d0b8ea771b66f27f15c9cc0d6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API max_pool1d_with_indices { + using schema = ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::max_pool1d_with_indices") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mish_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mish_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..ff6b24434a4ca2e43ffc3f52c06d84ddc7b3bc40 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mish_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_mish : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..564e6bcbb7d23875e56372be1e9009ad0a6498dc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
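// Sketch for the (values, indices) schema above (illustrative):
#include <ATen/ATen.h>

inline void max_pool1d_demo() {
  at::Tensor x = at::randn({1, 2, 8});  // (N, C, L)
  auto [vals, idx] = at::max_pool1d_with_indices(x, /*kernel_size=*/2);
  // stride=[] in the schema means "default to kernel_size", so the pooled
  // length is 4 and idx holds the argmax position inside each window.
}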
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API nll_loss_forward_output { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, c10::SymInt, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nll_loss_forward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "output") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight); +}; + +struct TORCH_API nll_loss_forward { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, c10::SymInt); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nll_loss_forward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)") + static ::std::tuple call(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1cd3c3836c364a6d29bed6f8d96fa38fbb57f200 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
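// Sketch for the functional nll_loss_forward overload above (illustrative);
// the "output" overload instead writes into caller-provided output and
// total_weight tensors.
#include <ATen/ATen.h>

inline void nll_loss_forward_demo() {
  at::Tensor logp = at::log_softmax(at::randn({4, 3}), /*dim=*/1);
  at::Tensor target = at::tensor({0, 2, 1, 0}, at::kLong);
  auto [output, total_weight] = at::nll_loss_forward(
      logp, target, /*weight=*/{}, at::Reduction::Mean, /*ignore_index=*/-100);
}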
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & poisson_out(at::Tensor & out, const at::Tensor & self, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & poisson_outf(const at::Tensor & self, c10::optional generator, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..873278408f909df0623739937d9a66bb7d1c1244 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor poisson(const at::Tensor & self, c10::optional generator=c10::nullopt); + +} // namespace cpu +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_nll_loss.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_nll_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..44d81f07cda4107fc81d03afae931ed00e19964c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_nll_loss.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor +inline at::Tensor poisson_nll_loss(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) { + return at::_ops::poisson_nll_loss::call(input, target, log_input, full, eps, reduction); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/prod_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/prod_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fca9aa90ca696827834ca588e776fc1d82db1c82 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/prod_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
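// Minimal sketch for the poisson_nll_loss declaration above (illustrative):
#include <ATen/ATen.h>

inline void poisson_nll_demo() {
  at::Tensor log_rate = at::randn({8});
  at::Tensor target = at::poisson(at::rand({8}) * 4);  // uses the decl above
  at::Tensor loss = at::poisson_nll_loss(log_rate, target, /*log_input=*/true,
                                         /*full=*/false, /*eps=*/1e-8,
                                         at::Reduction::Mean);
}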
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & prod_outf(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional dtype, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_interleave_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_interleave_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..06e2c830aa815b246884db9e72568afbacbfb000 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_interleave_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor repeat_interleave(const at::Tensor & self, const at::Tensor & repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt); +TORCH_API at::Tensor repeat_interleave_symint(const at::Tensor & self, const at::Tensor & repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt); +TORCH_API at::Tensor repeat_interleave(const at::Tensor & self, int64_t repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt); +TORCH_API at::Tensor repeat_interleave_symint(const at::Tensor & self, c10::SymInt repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5c8ef8f162daf1c289c3a41d1f831c8b7a543bd1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
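// Sketch of the two `repeats` flavors declared above (illustrative):
#include <ATen/ATen.h>

inline void repeat_interleave_demo() {
  at::Tensor x = at::tensor({1, 2, 3}, at::kLong);
  at::Tensor a = at::repeat_interleave(x, 2);  // scalar repeats -> {1,1,2,2,3,3}
  at::Tensor r = at::tensor({1, 0, 2}, at::kLong);
  at::Tensor b = at::repeat_interleave(x, r);  // per-element repeats -> {1,3,3}
}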
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor resolve_conj(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..1176edc2208a7ef30b2265f30f8e60a194fdc232 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_scatter_reduce_two : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self); +}; + +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d9c8bfcdedc007857a313bdcd3cbccc003cf7842 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
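// Sketch matching the scatter_reduce.two meta() signature above (illustrative):
#include <ATen/ATen.h>

inline void scatter_reduce_demo() {
  at::Tensor self = at::zeros({3});
  at::Tensor index = at::tensor({0, 1, 1, 2}, at::kLong);
  at::Tensor src = at::ones({4});
  at::Tensor out = at::scatter_reduce(self, /*dim=*/0, index, src,
                                      /*reduce=*/"sum", /*include_self=*/true);
  // out == {1, 2, 1}: each src value is accumulated into the slot index names.
}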
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor sgn(const at::Tensor & self);
+TORCH_API at::Tensor & sgn_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sin_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sin_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac63d9165a803153e0026a52354a00088a654075
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sin_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_sin : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_backward_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4dbcf467b269036c6b2ed122506a6e27f6d4f3e5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/smooth_l1_loss_backward_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & smooth_l1_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta);
+TORCH_API at::Tensor & smooth_l1_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai.h
new file mode 100644
index 0000000000000000000000000000000000000000..8801492ac52b9086aa2eea2ddbe8144654a91f96
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::special_airy_ai(Tensor x) -> Tensor
+inline at::Tensor special_airy_ai(const at::Tensor & x) {
+    return at::_ops::special_airy_ai::call(x);
+}
+
+// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_airy_ai_out(at::Tensor & out, const at::Tensor & x) {
+    return at::_ops::special_airy_ai_out::call(x, out);
+}
+// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_airy_ai_outf(const at::Tensor & x, at::Tensor & out) {
+    return at::_ops::special_airy_ai_out::call(x, out);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d560c15ae37558b7f2cda7f3d060dfb6f82983d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor special_bessel_y1(const at::Tensor & self);
+TORCH_API at::Tensor & special_bessel_y1_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_bessel_y1_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_exp2_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_exp2_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a39c665ed569b2345d11e48f07cb685a0f66befc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_exp2_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor special_exp2(const at::Tensor & self);
+TORCH_API at::Tensor & special_exp2_out(const at::Tensor & self, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..91aade17f672c72cc3e068a6ac86977a12a5240f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_special_laguerre_polynomial_l_out : public at::meta::structured_special_laguerre_polynomial_l {
+void impl(const at::Tensor & x, const at::Tensor & n, const at::Tensor & out);
+};
+TORCH_API at::Tensor special_laguerre_polynomial_l(const at::Scalar & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_laguerre_polynomial_l_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+TORCH_API at::Tensor special_laguerre_polynomial_l(const at::Tensor & x, const at::Scalar & n);
+TORCH_API at::Tensor & special_laguerre_polynomial_l_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..16873a8ae66cffbc3a7fb7d730c857d951fbecaf
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor special_modified_bessel_k0(const at::Tensor & self);
+TORCH_API at::Tensor & special_modified_bessel_k0_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_modified_bessel_k0_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/std_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/std_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..351da27635300cf85268e5c1ef0078950783a4f6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/std_ops.h
@@ -0,0 +1,116 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API std {
+  using schema = at::Tensor (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std(Tensor self, bool unbiased=True) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, bool unbiased);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased);
+};
+
+struct TORCH_API std_dim {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim);
+};
+
+struct TORCH_API std_correction {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, const c10::optional<at::Scalar> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "correction")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim);
+};
+
+struct TORCH_API std_out {
+  using schema = at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out);
+};
+
+struct TORCH_API std_correction_out {
+  using schema = at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, const c10::optional<at::Scalar> &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "correction_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+};
+
+struct TORCH_API std_names_dim {
+  using schema = at::Tensor (const at::Tensor &, at::DimnameList, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_dim")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim);
+};
+
+struct TORCH_API std_names_out {
+  using schema = at::Tensor & (const at::Tensor &, at::DimnameList, bool, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out);
+};
+
+struct TORCH_API std_correction_names {
+  using schema = at::Tensor (const at::Tensor &, at::DimnameList, const c10::optional<at::Scalar> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "correction_names")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim);
+};
+
+struct TORCH_API std_correction_names_out {
+  using schema = at::Tensor & (const at::Tensor &, at::DimnameList, const c10::optional<at::Scalar> &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "correction_names_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sum_to_size_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sum_to_size_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1bea5d36509a11964e2bd283f4dc3607f63161a2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sum_to_size_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API sum_to_size {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sum_to_size")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sum_to_size(Tensor self, SymInt[] size) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a01be8a37682b3b42f4b9dc58c0783e82005f40
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_tanh : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3e1f7f8d37888d037869f3bcf8ee62ae9310ff4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value);
+TORCH_API at::Tensor & threshold_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value);
+TORCH_API at::Tensor & threshold_outf(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & threshold_(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value);
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/to_dense.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/to_dense.h
new file mode 100644
index 0000000000000000000000000000000000000000..591be3a19ac6a1814f2c0ee7577f83240686f85b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/to_dense.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/vander_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/vander_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..023ec2ab3df69325aed529e87568ed4576a63492
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/vander_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor vander(const at::Tensor & x, c10::optional<int64_t> N=c10::nullopt, bool increasing=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_copy_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_copy_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c1fac4b58f54721fccdd1678cf1a57067388a4a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_copy_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor view_as_complex_copy(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
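
For reference, the headers added above only declare generated operator entry points: functional, in-place, and out= variants, spread across dispatch namespaces such as at::cpu, at::cuda, at::meta, and the schema-level at::_ops structs. The following is a minimal usage sketch, not part of the diff: it assumes a standard libtorch build, and the file name and tensor values are purely illustrative.

// usage_sketch.cpp -- illustrative only; exercises declarations shown above.
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::rand({4});

  // Functional form from special_airy_ai.h: allocates and returns a new tensor.
  at::Tensor ai = at::special_airy_ai(x);

  // Out variants: special_airy_ai_out takes `out` first (C++ convention),
  // while special_airy_ai_outf takes it last (schema order). Per the header,
  // both forward to at::_ops::special_airy_ai_out::call(x, out).
  at::Tensor out = at::empty_like(x);
  at::special_airy_ai_out(out, x);
  at::special_airy_ai_outf(x, out);

  // Base overload of aten::std from std_ops.h:
  // "std(Tensor self, bool unbiased=True) -> Tensor".
  at::Tensor s = at::std(x, /*unbiased=*/false);

  // aten::threshold, declared above for the meta dispatch key and reachable
  // through the public at:: namespace.
  at::Tensor t = at::threshold(x, /*threshold=*/0.5, /*value=*/0.0);
  return 0;
}

The at::_ops structs (e.g. at::_ops::std_correction) are the canonical call sites that the public wrappers and the dispatcher's redispatch path route through; calling them directly is possible but rarely necessary in user code.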