diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2d978e6e54168c488dfd4b120b465229c7f3ee8a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _adaptive_avg_pool3d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_adaptive_avg_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self);
+};
+
+struct TORCH_API _adaptive_avg_pool3d_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_adaptive_avg_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cummax_helper.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cummax_helper.h
new file mode 100644
index 0000000000000000000000000000000000000000..ba9cb081d24770317792c0606d1ac69a7ca3c23a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cummax_helper.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cummax_helper_ops.h>
+
+namespace at {
+
+
+// aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
+inline void _cummax_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
+    return at::_ops::_cummax_helper::call(self, values, indices, dim);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange.h
new file mode 100644
index 0000000000000000000000000000000000000000..f0b55d220d60dbbf6b117a1691241deb44874fe2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_dim_arange_ops.h>
+
+namespace at {
+
+
+// aten::_dim_arange(Tensor like, int dim) -> Tensor
+inline at::Tensor _dim_arange(const at::Tensor & like, int64_t dim) {
+    return at::_ops::_dim_arange::call(like, dim);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil.h
new file mode 100644
index 0000000000000000000000000000000000000000..141968c355806bfa45dbfc7beabd1657d4f756ca
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_ceil_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_ceil(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_ceil(at::TensorList self) {
+    return at::_ops::_foreach_ceil::call(self);
+}
+
+// aten::_foreach_ceil_(Tensor(a!)[] self) -> ()
+inline void _foreach_ceil_(at::TensorList self) {
+    return at::_ops::_foreach_ceil_::call(self);
+}
+
+// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_ceil_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_ceil_out::call(self, out);
+}
+// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_ceil_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_ceil_out::call(self, out);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..63a1e65400988f524f074cdd9feb05414d5aeb6d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_sqrt(at::TensorList self);
+TORCH_API void _foreach_sqrt_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f3ffd14a9a5b504636034b1c7223fbba74599d05
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={});
+TORCH_API void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={});
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..05d92df30daf7040a848b0091b9bef2f078d971f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/_linalg_slogdet_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured__linalg_slogdet_out : public at::meta::structured__linalg_slogdet {
+void impl(const at::Tensor & A, const at::Tensor & sign, const at::Tensor & logabsdet, const at::Tensor & LU, const at::Tensor & pivots);
+};
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..61cf50e0b4955be2abc32984a5f40a1d33334985
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _masked_softmax_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_masked_softmax_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim);
+};
+
+struct TORCH_API _masked_softmax_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional<int64_t>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_masked_softmax_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_lengths.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_lengths.h
new file mode 100644
index 0000000000000000000000000000000000000000..db3a4dc72e30b9817337637a6f27383fc8067606
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_lengths.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_get_lengths_ops.h>
+
+namespace at {
+
+
+// aten::_nested_get_lengths(Tensor self) -> Tensor
+inline at::Tensor _nested_get_lengths(const at::Tensor & self) {
+    return at::_ops::_nested_get_lengths::call(self);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_draw_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_draw_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf852df3d33d500d764caed98957dc8d8d275bde
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_draw_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sobol_engine_draw {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t, const at::Tensor &, int64_t, int64_t, c10::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sobol_engine_draw")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..97bbe89c41f4fd672f410cd792ee0dc121411940
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_broadcast_to {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_broadcast_to")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef size);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h
new file mode 100644
index 0000000000000000000000000000000000000000..44c842b3e83868767d3331467575865473f36413
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h
@@ -0,0 +1,113 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_ops.h>
+
+namespace at {
+
+
+// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
+inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional<bool> is_coalesced=c10::nullopt) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional<bool> is_coalesced=c10::nullopt) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced);
+  }
+}
+
+// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
+inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory, is_coalesced);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory, is_coalesced);
+  }
+}
+
+// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
+inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional<bool> is_coalesced=c10::nullopt) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional<bool> is_coalesced=c10::nullopt) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced);
+  }
+}
+
+// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
+inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
+  }
+}
+
+// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced=c10::nullopt) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, is_coalesced, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced=c10::nullopt) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, is_coalesced, out);
+  }
+}
+
+// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced, at::Tensor & out) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, is_coalesced, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced, at::Tensor & out) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, is_coalesced, out);
+  }
+}
+
+// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced=c10::nullopt) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices, values, is_coalesced, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced=c10::nullopt) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices, values, is_coalesced, out);
+  }
+}
+
+// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_outf(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced, at::Tensor & out) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices, values, is_coalesced, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<bool> is_coalesced, at::Tensor & out) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices, values, is_coalesced, out);
+  }
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data.h
new file mode 100644
index 0000000000000000000000000000000000000000..c3465e6b605aa3da6028e32fc513e32e25c052c8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sparse_log_softmax_backward_data_ops.h>
+
+namespace at {
+
+
+// aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
+inline at::Tensor _sparse_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
+    return at::_ops::_sparse_log_softmax_backward_data::call(grad_output, output, dim, self);
+}
+
+// aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
+    return at::_ops::_sparse_log_softmax_backward_data_out::call(grad_output, output, dim, self, out);
+}
+// aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::_sparse_log_softmax_backward_data_out::call(grad_output, output, dim, self, out);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_spdiags.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_spdiags.h
new file mode 100644
index 0000000000000000000000000000000000000000..0c8bb5b0a77a017fbc1769eb541f924b2315c40b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_spdiags.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_spdiags_ops.h>
+
+namespace at {
+
+
+// aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
+inline at::Tensor _spdiags(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout=c10::nullopt) {
+    return at::_ops::_spdiags::call(diagonals, offsets, shape, layout);
+}
+
+// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _spdiags_out(at::Tensor & out, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout=c10::nullopt) {
+    return at::_ops::_spdiags_out::call(diagonals, offsets, shape, layout, out);
+}
+// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _spdiags_outf(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout, at::Tensor & out) {
+    return at::_ops::_spdiags_out::call(diagonals, offsets, shape, layout, out);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..df7c2724257217c81820c1c553ca43760a1bac9b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _test_string_default(const at::Tensor & dummy, c10::string_view a="\"'\\", c10::string_view b="\"'\\");
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..19743d58631af08944eef29a372472e2a180a648
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API at::Tensor & adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..57c0d6e74969cdbd57373b088e9dd2fcc9b22354
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+TORCH_API at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+TORCH_API at::Tensor & adaptive_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4ca942ce0a005a46c4c49f7e3b793faaacc7b660
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c094f1b32272fe3e61b2aca512f980bfb828b671
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API addbmm_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::addbmm_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha);
+};
+
+struct TORCH_API addbmm_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::addbmm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+};
+
+struct TORCH_API addbmm {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::addbmm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b7e946dedd4eaee429ecd2ffe0d068ddb3d7517a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API as_strided_copy {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::optional<c10::SymInt>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::as_strided_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset);
+};
+
+struct TORCH_API as_strided_copy_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::optional<c10::SymInt>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::as_strided_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1489472c1ada3d05bcb11c06ffb562551bd9b4f4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor bitwise_and(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_and_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ddd6fdc66988115293116a0f7d4496570e6683e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API channel_shuffle {
+  using schema = at::Tensor (const at::Tensor &, c10::SymInt);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::channel_shuffle")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "channel_shuffle(Tensor self, SymInt groups) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymInt groups);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups);
+};
+
+struct TORCH_API channel_shuffle_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymInt, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::channel_shuffle")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymInt groups, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..04e68385f32a2bdb237de7be7b302c762dc225f8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API choose_qparams_optimized {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t, int64_t, double, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::choose_qparams_optimized")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b5bcda0bd3e975d4785b134a61eefa50c282286d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor conv_depthwise3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation);
+TORCH_API at::Tensor conv_depthwise3d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cos_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cos_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..16bf9233c1daeef3db4864f43d60175023d0953d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cos_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor cos(const at::Tensor & self);
+TORCH_API at::Tensor & cos_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & cos_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & cos_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..15aed586fcbd8697086bb721ffa7f88b8d41f560
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & cudnn_affine_grid_generator_backward_out(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out);
+TORCH_API at::Tensor cudnn_affine_grid_generator_backward(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..508c61c06c2988d6a3e82ec9c3b5ec57cc381439
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API cudnn_convolution_transpose { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_convolution_transpose") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32); +}; + +struct TORCH_API cudnn_convolution_transpose_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, bool, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_convolution_transpose") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/dsplit.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/dsplit.h new file mode 100644 index 0000000000000000000000000000000000000000..e6b59564204f1b3e199774d71fc63041eb656316 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/dsplit.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] +inline ::std::vector dsplit(const at::Tensor & self, int64_t sections) { + return at::_ops::dsplit_int::call(self, sections); +} + +// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] +inline ::std::vector dsplit(const at::Tensor & self, at::IntArrayRef indices) { + return at::_ops::dsplit_array::call(self, indices); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/elu_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/elu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6d308a207af09ab4cda49ede0734464c36ef869c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/elu_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API elu_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::elu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out); +}; + +struct TORCH_API elu { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::elu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale); +}; + +struct TORCH_API elu_ { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::elu_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8d3064814bff9a44a32be48c30666b28f2f3116a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor embedding_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/empty_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/empty_native.h new file mode 100644 index 0000000000000000000000000000000000000000..40e5fa0e276039a4214f1e6f404efccd99331827 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/empty_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor 
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8d3064814bff9a44a32be48c30666b28f2f3116a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor embedding_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/empty_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/empty_native.h new file mode 100644 index 0000000000000000000000000000000000000000..40e5fa0e276039a4214f1e6f404efccd99331827 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/empty_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor empty_names(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +TORCH_API at::Tensor & empty_names_out(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out); +TORCH_API at::Tensor & empty_out(at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out); +TORCH_API at::Tensor empty_cpu(at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +TORCH_API at::Tensor empty_cuda(at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +TORCH_API at::Tensor empty_sparse(at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +TORCH_API at::Tensor empty_sparse_compressed(at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +TORCH_API at::Tensor empty_meta_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +TORCH_API at::Tensor empty_mkldnn(at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +TORCH_API at::Tensor empty_unknown_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt); +} // namespace native +} // namespace at
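
The per-backend empty_* kernels above are reached through the public at::empty factory; a minimal sketch (assuming a standard ATen build):

#include <ATen/ATen.h>
int main() {
  at::Tensor a = at::empty({2, 3}, at::dtype(at::kFloat).device(at::kCPU));
  at::Tensor b = at::empty_like(a);   // uninitialized, same options as `a`
}
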
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor eq(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & eq_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor eq(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & eq_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/flipud_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/flipud_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..09799ab979e6a1f2fdea5574e270c458517e37ae --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/flipud_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API flipud { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::flipud") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "flipud(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops
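
A minimal usage sketch for aten::flipud via the public wrapper (assuming a standard ATen build; values are illustrative):

#include <ATen/ATen.h>
int main() {
  at::Tensor m = at::arange(4).reshape({2, 2});  // [[0, 1], [2, 3]]
  at::Tensor f = at::flipud(m);                  // reverses dim 0: [[2, 3], [0, 1]]
}
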
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/frac_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/frac_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4252aa4d5b9782e823edad49d0244aa23952698f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/frac_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor frac(const at::Tensor & self); +TORCH_API at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & frac_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/full_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/full_native.h new file mode 100644 index 0000000000000000000000000000000000000000..739b3c73785cfa4df42ce8c601cda035851682ee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/full_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}); +TORCH_API at::Tensor & full_names_out(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out); +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}); +TORCH_API at::Tensor & full_out(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..e939c6bd8d6dbfae24a92ef37fefab90597bcd67 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/gather_backward_ops.h> + +namespace at { + + +// aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor +inline at::Tensor gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) { + return at::_ops::gather_backward::call(grad, self, dim, index, sparse_grad); +} + +}
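
A minimal usage sketch covering the frac and full entry points declared above (assuming a standard ATen build; values are illustrative):

#include <ATen/ATen.h>
int main() {
  at::Tensor x = at::tensor({1.5, -0.75});
  at::Tensor fr = at::frac(x);        // fractional part: {0.5, -0.75}
  at::Tensor f = at::full({2, 3}, 7); // integer fill_value yields a kLong tensor
  at::Tensor out = at::empty({2, 3});
  at::full_out(out, {2, 3}, 7.0);     // out-variant keeps out's float dtype
}
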
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/ge_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/ge_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ae541f9888e9ee6de2b631a2fcb8d98498db1c06 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/ge_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor ge(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ge_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fa915dc91bc9194e93fcf99f124570314d57201b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at
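
A minimal usage sketch for the ge comparison overloads declared above (assuming a standard ATen build; values are illustrative):

#include <ATen/ATen.h>
int main() {
  at::Tensor a = at::tensor({1.0, 2.0, 3.0});
  at::Tensor m1 = at::ge(a, 2.0);            // Scalar overload: {false, true, true}
  at::Tensor m2 = at::ge(a, at::ones({3}));  // elementwise Tensor overload
}
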
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/glu_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/glu_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..52e332cbfc5cc9156688923d780644a304475dda --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/glu_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor glu(const at::Tensor & self, int64_t dim=-1); +TORCH_API at::Tensor & glu_out(at::Tensor & out, const at::Tensor & self, int64_t dim=-1); +TORCH_API at::Tensor & glu_outf(const at::Tensor & self, int64_t dim, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/group_norm_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/group_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2ca9137024842060accfd8f1e64d35928c92ac19 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/group_norm_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API group_norm { + using schema = at::Tensor (const at::Tensor &, int64_t, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, double, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::group_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor") + static at::Tensor call(const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled); +}; + +}} // namespace at::_ops
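
A minimal usage sketch for aten::group_norm via the public wrapper (assuming a standard ATen build; the channel count must be divisible by num_groups):

#include <ATen/ATen.h>
int main() {
  at::Tensor input = at::randn({2, 6, 4, 4});  // N, C, H, W
  at::Tensor y = at::group_norm(input, /*num_groups=*/3);  // weight/bias default to None
}
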
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..8d24995abbbbd737fab026a9f4b3e4f86130e4cc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/hardswish_backward_ops.h> + +namespace at { + + +// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor +inline at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardswish_backward::call(grad_output, self); +} + +// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardswish_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardswish_backward_out::call(grad_output, self, out); +} +// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardswish_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardswish_backward_out::call(grad_output, self, out); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_native.h new file mode 100644 index 0000000000000000000000000000000000000000..dd7918ccbd17e4bb3da31aa151876c624007634f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/leaky_relu_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_leaky_relu_out : public at::meta::structured_leaky_relu { +void impl(const at::Tensor & self, const at::Scalar & negative_slope, const at::Tensor & out); +}; +TORCH_API at::Tensor leaky_relu_quantized_cpu(const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_out_quantized_cpu(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out); +TORCH_API at::Tensor & leaky_relu_quantized_cpu_(at::Tensor & self, const at::Scalar & negative_slope=0.01); +} // namespace native
} // namespace at
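
The structured kernel above backs the public at::leaky_relu; a minimal sketch (assuming a standard ATen build; values are illustrative):

#include <ATen/ATen.h>
int main() {
  at::Tensor x = at::tensor({-2.0, 3.0});
  at::Tensor y = at::leaky_relu(x, /*negative_slope=*/0.01);  // {-0.02, 3.0}
}
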
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma.h new file mode 100644 index 0000000000000000000000000000000000000000..2479d0374d78a5fcd345a49724aec689b757e878 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/lgamma_ops.h> + +namespace at { + + +// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::lgamma_out::call(self, out); +} +// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::lgamma_out::call(self, out); +} + +// aten::lgamma(Tensor self) -> Tensor +inline at::Tensor lgamma(const at::Tensor & self) { + return at::_ops::lgamma::call(self); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bc9263d7d6e73cd4579f7a17d26bcd2f4e3419e7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_inv(const at::Tensor & A); +TORCH_API at::Tensor & linalg_inv_out(const at::Tensor & A, at::Tensor & out); +} // namespace native
} // namespace at
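
A minimal usage sketch for the lgamma wrappers above (assuming a standard ATen build; values are illustrative):

#include <ATen/ATen.h>
int main() {
  at::Tensor x = at::tensor({0.5, 2.0});
  at::Tensor y = at::lgamma(x);        // {log(sqrt(pi)), 0}
  at::Tensor out = at::empty_like(x);
  at::lgamma_out(out, x);              // out-variant declared above
}
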
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_pinv.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_pinv.h new file mode 100644 index 0000000000000000000000000000000000000000..8fa66a95ca653d209c0ab4f268b7b53872e75667 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_pinv.h @@ -0,0 +1,81 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/linalg_pinv_ops.h> + +namespace at { + + +// aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor +inline at::Tensor linalg_pinv(const at::Tensor & self, const c10::optional<at::Tensor> & atol={}, const c10::optional<at::Tensor> & rtol={}, bool hermitian=false) { + return at::_ops::linalg_pinv_atol_rtol_tensor::call(self, atol, rtol, hermitian); +} + +// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & atol={}, const c10::optional<at::Tensor> & rtol={}, bool hermitian=false) { + return at::_ops::linalg_pinv_atol_rtol_tensor_out::call(self, atol, rtol, hermitian, out); +} +// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_pinv_outf(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_pinv_atol_rtol_tensor_out::call(self, atol, rtol, hermitian, out); +} + +// aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor +inline at::Tensor linalg_pinv(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian=false) { + return at::_ops::linalg_pinv_atol_rtol_float::call(self, atol, rtol, hermitian); +} + +// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian=false) { + return at::_ops::linalg_pinv_atol_rtol_float_out::call(self, atol, rtol, hermitian, out); +} +// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_pinv_outf(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_pinv_atol_rtol_float_out::call(self, atol, rtol, hermitian, out); +} + +// aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor +inline at::Tensor linalg_pinv(const at::Tensor & self, double rcond, bool hermitian=false) { + return at::_ops::linalg_pinv::call(self, rcond, hermitian); +} + +// aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor +inline at::Tensor linalg_pinv(const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false) { + return at::_ops::linalg_pinv_rcond_tensor::call(self, rcond, hermitian); +} + +// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, double rcond, bool hermitian=false) { + return at::_ops::linalg_pinv_out::call(self, rcond, hermitian, out); +} +// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_pinv_outf(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_pinv_out::call(self, rcond, hermitian, out); +} + +// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false) { + return at::_ops::linalg_pinv_out_rcond_tensor::call(self, rcond, hermitian, out); +} +// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_pinv_outf(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_pinv_out_rcond_tensor::call(self, rcond, hermitian, out); +} + +}
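
A minimal usage sketch showing two of the linalg_pinv overloads above (assuming a standard ATen build; the double second argument selects the legacy rcond overload):

#include <ATen/ATen.h>
int main() {
  at::Tensor A = at::randn({4, 4});
  at::Tensor P = at::linalg_pinv(A);  // atol_rtol_tensor overload, atol/rtol default to None
  at::Tensor Q = at::linalg_pinv(A, /*rcond=*/1e-15, /*hermitian=*/false);
}
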
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7060dfe10932adac7251280360590b45d2a4fe7a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt); +TORCH_API at::Tensor & log_softmax_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2d587f1a4a38ad074d5e84659e00e39d9db55b83 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor logical_xor(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_xor_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/logit_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/logit_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8f79c0d5229b0e0a3892f937aca80b2403778577 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/logit_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor logit(const at::Tensor & self, c10::optional<double> eps=c10::nullopt); +TORCH_API at::Tensor & logit_out(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out); +TORCH_API at::Tensor & logit_(at::Tensor & self, c10::optional<double> eps=c10::nullopt); +} // namespace native
} // namespace at
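
A minimal usage sketch for logit as declared above (assuming a standard ATen build; values are illustrative):

#include <ATen/ATen.h>
int main() {
  at::Tensor p = at::tensor({0.25, 0.5});
  at::Tensor z = at::logit(p);                 // log(p / (1 - p))
  at::Tensor zc = at::logit(p, /*eps=*/1e-6);  // clamps p into [eps, 1 - eps] first
}
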
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9291e3eb2f4295d1cb6cea65d416ca6df50bf5cb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor & __lshift___Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __lshift___Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & __ilshift__(at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..04177f190653f7bf80c171f52a0f369d3659ddc6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..eb04f83190a57ebe8eb34d584f3648dc2c5830d1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_meta_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); + +} // namespace meta +} // namespace at
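
A minimal usage sketch for the in-place masked_fill_ variants declared above (assuming a standard ATen build; the mask is illustrative):

#include <ATen/ATen.h>
int main() {
  at::Tensor x = at::zeros({2, 2});
  at::Tensor mask = at::rand({2, 2}).gt(0.5);  // boolean mask
  x.masked_fill_(mask, 7.0);                   // fills masked positions in place
}
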
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8255fc7c95260d71f74c4aed976127ce57609d7f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool2d_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor max_unpooling2d_forward_cpu(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size); +TORCH_API at::Tensor & max_unpooling2d_forward_out_cpu(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor max_unpooling2d_forward_cuda(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size); +TORCH_API at::Tensor & max_unpooling2d_forward_out_cuda(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6244dbc3d042b5d1a0f52d34db577ce73960f091 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bd91134d5d49a9cb0707aea4c47b86ea3d32cc37 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor minimum(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & minimum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & minimum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace meta
} // namespace at
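
A minimal usage sketch for the elementwise maximum/minimum entry points above (assuming a standard ATen build; values are illustrative):

#include <ATen/ATen.h>
int main() {
  at::Tensor a = at::tensor({1.0, 4.0});
  at::Tensor b = at::tensor({3.0, 2.0});
  at::Tensor hi = at::maximum(a, b);  // {3, 4}
  at::Tensor lo = at::minimum(a, b);  // {1, 2}
}
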
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_forward.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_forward.h new file mode 100644 index 0000000000000000000000000000000000000000..ea15934a4fcb47c155cdef5607ea1fcac4a2b935 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_forward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/nll_loss2d_forward_ops.h> + +namespace at { + + +// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) { + return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) { + return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight); + } +} + +// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) { + return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) { + return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight); + } +} + +// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_symint_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) { + return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) { + return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight); + } +} + +// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) { + return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) { + return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight); + } +} + +// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) +inline ::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) { + return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + ::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) { + return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index); + } +} + +// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) +inline ::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) { + return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + ::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) { + return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index); + } +} + +}
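
A minimal usage sketch for the functional nll_loss2d_forward overload above (assuming a standard ATen build with C++17; shapes and values are illustrative):

#include <ATen/ATen.h>
int main() {
  at::Tensor logp = at::log_softmax(at::randn({2, 3, 4, 4}), /*dim=*/1);
  at::Tensor target = at::randint(0, 3, {2, 4, 4}, at::kLong);
  auto [output, total_weight] = at::nll_loss2d_forward(
      logp, target, /*weight=*/{}, at::Reduction::Mean, /*ignore_index=*/-100);
}
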
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/normal_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/normal_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cab11432b39f59e56aaf20b36b1073995ebe5128 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/normal_compositeexplicitautograd_dispatch.h @@ -0,0 +1,33 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor normal_functional(const at::Tensor & self, double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt); +TORCH_API at::Tensor & normal_out(at::Tensor & out, const at::Tensor & self, double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt); +TORCH_API at::Tensor & normal_outf(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out); +TORCH_API at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt, at::TensorOptions options={}); +TORCH_API at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +TORCH_API at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt, at::TensorOptions options={}); +TORCH_API at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +TORCH_API at::Tensor & normal_out(at::Tensor & out, double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt); +TORCH_API at::Tensor & normal_outf(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out); +TORCH_API at::Tensor & normal_symint_out(at::Tensor & out, double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt); +TORCH_API at::Tensor & normal_symint_outf(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out); + +} // namespace compositeexplicitautograd
} // namespace at
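
A minimal usage sketch for the tensor-creating normal overloads above (assuming a standard ATen build):

#include <ATen/ATen.h>
int main() {
  at::Tensor n = at::normal(/*mean=*/0.0, /*std=*/1.0, /*size=*/{3, 3});
  at::Tensor out = at::empty({3, 3});
  at::normal_out(out, 0.0, 1.0, {3, 3});  // out-variant, default generator
}
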
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/randint_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/randint_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7c1e6fb918769531481536dd0c2be2114d17ba40 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/randint_compositeexplicitautograd_dispatch.h @@ -0,0 +1,54 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong); +TORCH_API at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +TORCH_API at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong); +TORCH_API at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +TORCH_API at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size); +TORCH_API at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, at::Tensor & out); +TORCH_API at::Tensor & randint_symint_out(at::Tensor & out, c10::SymInt high, c10::SymIntArrayRef size); +TORCH_API at::Tensor & randint_symint_outf(c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out); +TORCH_API at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong); +TORCH_API at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +TORCH_API at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong); +TORCH_API at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +TORCH_API at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator); +TORCH_API at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out); +TORCH_API at::Tensor & randint_symint_out(at::Tensor & out, c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator); +TORCH_API at::Tensor & randint_symint_outf(c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out); +TORCH_API at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong); +TORCH_API at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +TORCH_API at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size,
at::TensorOptions options=at::kLong); +TORCH_API at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +TORCH_API at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size); +TORCH_API at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, at::Tensor & out); +TORCH_API at::Tensor & randint_symint_out(at::Tensor & out, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size); +TORCH_API at::Tensor & randint_symint_outf(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out); +TORCH_API at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong); +TORCH_API at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +TORCH_API at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong); +TORCH_API at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +TORCH_API at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator); +TORCH_API at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out); +TORCH_API at::Tensor & randint_symint_out(at::Tensor & out, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator); +TORCH_API at::Tensor & randint_symint_outf(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out); + +} // namespace compositeexplicitautograd
} // namespace at
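
A minimal usage sketch for the randint overloads above (assuming a standard ATen build; high is exclusive):

#include <ATen/ATen.h>
int main() {
  at::Tensor r1 = at::randint(/*high=*/10, {2, 3});        // dtype defaults to kLong
  at::Tensor r2 = at::randint(/*low=*/5, /*high=*/10, {4});
  at::Tensor out = at::empty({2, 3}, at::kLong);
  at::randint_out(out, 10, {2, 3});                        // out-variant
}
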
output_size=None) -> Tensor") + static at::Tensor call(const at::Tensor & repeats, c10::optional output_size); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, c10::optional output_size); +}; + +struct TORCH_API repeat_interleave_self_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::repeat_interleave") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "self_Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & repeats, c10::optional dim, c10::optional output_size); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & repeats, c10::optional dim, c10::optional output_size); +}; + +struct TORCH_API repeat_interleave_self_int { + using schema = at::Tensor (const at::Tensor &, c10::SymInt, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::repeat_interleave") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "self_int") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymInt repeats, c10::optional dim, c10::optional output_size); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt repeats, c10::optional dim, c10::optional output_size); +}; + +struct TORCH_API repeat_interleave_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::repeat_interleave") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & repeats, c10::optional output_size, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, c10::optional output_size, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj_native.h new file mode 100644 index 0000000000000000000000000000000000000000..622eb60bbaad17251c961744cc38dbee34c9b773 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/resolve_conj_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor resolve_conj(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..38f4925d4e1b591f7596a9eff97b97c3c2e4327c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor rrelu_with_noise(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & rrelu_with_noise_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & rrelu_with_noise_outf(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional generator, at::Tensor & out); +TORCH_API at::Tensor & rrelu_with_noise_(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional generator=c10::nullopt); + +} // namespace cpu +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/set_data_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/set_data_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..86bf5fdc01b197b6ae761db42f17bd450baf3a54 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/set_data_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/set_data_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/set_data_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..86bf5fdc01b197b6ae761db42f17bd450baf3a54 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/set_data_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API set_data { + using schema = void (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set_data") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set_data(Tensor(a!) self, Tensor new_data) -> ()") + static void call(at::Tensor & self, const at::Tensor & new_data); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & new_data); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/silu_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/silu_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..45a88633d4859db8af09942404e1cab13f163c5b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/silu_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor silu(const at::Tensor & self); +TORCH_API at::Tensor & silu_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & silu_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & silu_(at::Tensor & self); + +} // namespace cpu
} // namespace at
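
A minimal usage sketch for the silu entry points above (assuming a standard ATen build):

#include <ATen/ATen.h>
int main() {
  at::Tensor x = at::randn({4});
  at::Tensor y = at::silu(x);  // x * sigmoid(x)
  at::silu_(x);                // in-place variant
}
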
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1e2df79aa1f4a0c3533e9282f215498df8eb2816
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1);
+TORCH_API at::Tensor slow_conv_dilated2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1));
+
+} // namespace cuda
+} // namespace at
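Note how `slow_conv_dilated2d` carries its bias as `c10::optional<at::Tensor>`, so `{}` means "no bias", and how the `_symint` twin swaps `at::IntArrayRef` for `c10::SymIntArrayRef` to support symbolic shapes. A hedged sketch through the public `at::` wrapper (the header above is only the CUDA slice of the same signature; this runs on CPU):

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor input  = at::randn({1, 3, 16, 16});  // NCHW
  at::Tensor weight = at::randn({8, 3, 3, 3});    // OIHW

  // bias is optional: {} skips it. With padding 2 and dilation 2 on a 3x3
  // kernel, the effective receptive field is 5x5, so H and W are preserved.
  at::Tensor out = at::slow_conv_dilated2d(
      input, weight, /*kernel_size=*/{3, 3}, /*bias=*/{},
      /*stride=*/{1, 1}, /*padding=*/{2, 2}, /*dilation=*/{2, 2});

  TORCH_CHECK(out.sizes() == (at::IntArrayRef{1, 8, 16, 16}));
  return 0;
}
```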
out) -> Tensor(a!)") + static const at::Tensor & call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out); + static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out); +}; + +struct TORCH_API sparse_resize_and_clear { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_resize_and_clear") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_sampled_addmm_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_sampled_addmm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7018e305788353f31b5c55ed3dae79544f4d8f21 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_sampled_addmm_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor sparse_sampled_addmm_sparse_csr_cpu(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & sparse_sampled_addmm_out_sparse_csr_cpu(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor sparse_sampled_addmm_sparse_csr_cuda(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & sparse_sampled_addmm_out_sparse_csr_cuda(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ceb9c2c5004cc337de0e7069e325fd01263842b1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ceb9c2c5004cc337de0e7069e325fd01263842b1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_laguerre_polynomial_l {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_laguerre_polynomial_l")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor")
+  static at::Tensor call(const at::Tensor & x, const at::Tensor & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n);
+};
+
+struct TORCH_API special_laguerre_polynomial_l_x_scalar {
+  using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_laguerre_polynomial_l")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor")
+  static at::Tensor call(const at::Scalar & x, const at::Tensor & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n);
+};
+
+struct TORCH_API special_laguerre_polynomial_l_n_scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_laguerre_polynomial_l")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor")
+  static at::Tensor call(const at::Tensor & x, const at::Scalar & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n);
+};
+
+struct TORCH_API special_laguerre_polynomial_l_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_laguerre_polynomial_l")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+};
+
+struct TORCH_API special_laguerre_polynomial_l_x_scalar_out {
+  using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_laguerre_polynomial_l")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+};
+
+struct TORCH_API special_laguerre_polynomial_l_n_scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_laguerre_polynomial_l")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9a9e1aee35c33e28250bf79860dddef8fcde2dd8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_xlog1py(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & special_xlog1py_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & special_xlog1py_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c5fec0e472580578b4e64f015e0b7a179cc079c4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor squeeze(const at::Tensor & self);
+TORCH_API at::Tensor & squeeze_(at::Tensor & self);
+TORCH_API at::Tensor squeeze(const at::Tensor & self, int64_t dim);
+TORCH_API at::Tensor & squeeze_(at::Tensor & self, int64_t dim);
+TORCH_API at::Tensor squeeze(const at::Tensor & self, at::IntArrayRef dim);
+TORCH_API at::Tensor & squeeze_(at::Tensor & self, at::IntArrayRef dim);
+
+} // namespace compositeexplicitautograd
+} // namespace at
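A `compositeexplicitautograd` header groups overloads that share one backend-agnostic implementation with an explicit autograd formula; `squeeze` above comes in no-dim, single-dim, and dim-list forms, each with an in-place `squeeze_`. Sketch:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::zeros({1, 3, 1, 4});

  TORCH_CHECK(at::squeeze(t).sizes() == (at::IntArrayRef{3, 4}));          // drop all size-1 dims
  TORCH_CHECK(at::squeeze(t, 0).sizes() == (at::IntArrayRef{3, 1, 4}));    // one dim only
  TORCH_CHECK(at::squeeze(t, {0, 2}).sizes() == (at::IntArrayRef{3, 4}));  // explicit dim list
  return 0;
}
```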
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/std.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/std.h
new file mode 100644
index 0000000000000000000000000000000000000000..e7502c1ab72e48d1091e440731887fec0abd1ebc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/std.h
@@ -0,0 +1,86 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/std_ops.h>
+
+namespace at {
+
+
+// aten::std(Tensor self, bool unbiased=True) -> Tensor
+inline at::Tensor std(const at::Tensor & self, bool unbiased) {
+    return at::_ops::std::call(self, unbiased);
+}
+
+// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
+inline at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) {
+    return at::_ops::std_dim::call(self, dim, unbiased, keepdim);
+}
+
+// aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
+inline at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false) {
+    return at::_ops::std_correction::call(self, dim, correction, keepdim);
+}
+
+// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) {
+    return at::_ops::std_out::call(self, dim, unbiased, keepdim, out);
+}
+// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
+    return at::_ops::std_out::call(self, dim, unbiased, keepdim, out);
+}
+
+// aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false) {
+    return at::_ops::std_correction_out::call(self, dim, correction, keepdim, out);
+}
+// aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
+    return at::_ops::std_correction_out::call(self, dim, correction, keepdim, out);
+}
+
+// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+inline at::Tensor std(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) {
+    return at::_ops::std_names_dim::call(self, dim, unbiased, keepdim);
+}
+
+// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) {
+    return at::_ops::std_names_out::call(self, dim, unbiased, keepdim, out);
+}
+// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_outf(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
+    return at::_ops::std_names_out::call(self, dim, unbiased, keepdim, out);
+}
+
+// aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
+inline at::Tensor std(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false) {
+    return at::_ops::std_correction_names::call(self, dim, correction, keepdim);
+}
+
+// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false) {
+    return at::_ops::std_correction_names_out::call(self, dim, correction, keepdim, out);
+}
+// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & std_outf(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
+    return at::_ops::std_correction_names_out::call(self, dim, correction, keepdim, out);
+}
+
+}
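`std.h` keeps both generations of the API visible: the legacy `unbiased` boolean overloads and the newer `correction` overloads, where `correction` is an optional Scalar δ and the variance denominator is `N - δ` (`c10::nullopt` means the default δ = 1). A sketch of the equivalence; `at::Scalar(1)` is spelled explicitly to steer overload resolution away from the bool form:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::randn({3, 5});

  // Legacy overload: unbiased=true is Bessel's correction (divide by N-1)...
  at::Tensor a = at::std(t, /*dim=*/{1}, /*unbiased=*/true, /*keepdim=*/false);

  // ...which the correction overload expresses as correction=1.
  at::Tensor b = at::std(t, /*dim=*/{1}, at::Scalar(1), /*keepdim=*/false);

  TORCH_CHECK(at::allclose(a, b));
  return 0;
}
```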
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..127a5223fb25c64ae11a21e35b6c43eec6a1e3c9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_upsample_bicubic2d_backward : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e40608a17924f371a08b7d97304c1d5fb65b6786
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor upsample_bicubic2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor upsample_bicubic2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bicubic2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bicubic2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+TORCH_API at::Tensor & upsample_bicubic2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bicubic2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
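Every shape-taking operator in these headers now has a `_symint` twin over `c10::SymIntArrayRef`, letting tracing and compile backends pass symbolic sizes through the identical C++ surface; under eager execution a SymInt just wraps a concrete integer. Sketch:

```cpp
#include <ATen/ATen.h>
#include <vector>

int main() {
  at::Tensor img = at::randn({1, 3, 8, 8});

  at::Tensor up = at::upsample_bicubic2d(img, /*output_size=*/{16, 16},
                                         /*align_corners=*/false);

  // Eager SymInts are plain ints under the hood, so this matches exactly.
  std::vector<c10::SymInt> size{16, 16};
  at::Tensor up2 = at::upsample_bicubic2d_symint(img, size,
                                                 /*align_corners=*/false);

  TORCH_CHECK(at::equal(up, up2));
  return 0;
}
```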
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_backward_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..601466059a93230790b0f68959d6c4458719b891
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_backward_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor upsample_nearest2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & upsample_nearest2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b35319b6b3a9eed962f1b5adc3e1a6845a18f17
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_nearest2d_meta.h>
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor upsample_nearest2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors);
+struct TORCH_API structured_upsample_nearest2d_out_cpu : public at::meta::structured_upsample_nearest2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & out);
+};
+struct TORCH_API structured_upsample_nearest2d_out_cuda : public at::meta::structured_upsample_nearest2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & out);
+};
+TORCH_API at::Tensor upsample_nearest2d_quantized_cpu(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+} // namespace native
+} // namespace at
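The first declaration in `upsample_nearest2d_native.h` is the `.vec` convenience overload: the caller supplies either an explicit `output_size` or per-axis `scale_factors`, not both. A sketch of the two routes through the public wrapper (the results match here because a 2x scale selects the same nearest-neighbor indices as the explicit doubled size):

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor img = at::randn({1, 1, 4, 4});

  // Route 1: explicit output size.
  at::Tensor a = at::upsample_nearest2d(img, /*output_size=*/{8, 8});

  // Route 2: scale factors only, size left unset (the .vec overload).
  const double scales[] = {2.0, 2.0};
  at::Tensor b = at::upsample_nearest2d(img, /*output_size=*/c10::nullopt,
                                        /*scale_factors=*/at::ArrayRef<double>(scales));

  TORCH_CHECK(at::equal(a, b));
  return 0;
}
```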
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/values.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/values.h
new file mode 100644
index 0000000000000000000000000000000000000000..80ee3cc0e282cdfb2f72f1f7e76b864cdda9985d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/values.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/values_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/values_copy_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/values_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..270ad5ac36dd634fc95ea445b0faee503c762202
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/values_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & values_copy_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor values_copy(const at::Tensor & self);
+} // namespace native
+} // namespace at