diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc9099cb356e4911015355a8b6b7ed3dbe67e0b8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _cholesky_solve_helper(const at::Tensor & self, const at::Tensor & A, bool upper);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight.h
new file mode 100644
index 0000000000000000000000000000000000000000..5dc6e802701a5d9e4b5ab9cc315ede95805d3a80
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cudnn_rnn_flatten_weight_ops.h>
+
+namespace at {
+
+
+// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
+inline at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+    return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+    return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
+  }
+}
+
+// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
+inline at::Tensor _cudnn_rnn_flatten_weight_symint(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+    return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+    return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
+  }
+}
+
+// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+    return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+    return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+  }
+}
+
+// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
+    return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
+    return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+  }
+}
+
+// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _cudnn_rnn_flatten_weight_symint_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+    return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
+    return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+  }
+}
+
+// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _cudnn_rnn_flatten_weight_symint_outf(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
+    return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
+    return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
+  }
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_dense_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_dense_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..31ffaca726d6fc061def1597b1468746f9afc35b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_dense_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _embedding_bag_dense_backward_out_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out);
+TORCH_API at::Tensor _embedding_bag_dense_backward_cpu(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1);
+TORCH_API at::Tensor _embedding_bag_dense_backward_cuda(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2bfb7862cd1a3c3f826a782f7ff89debaa18920f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erfc_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erfc_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e88579040af34e53524b8c83a5f698841f8e4054
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erfc_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_erfc(at::TensorList self);
+TORCH_API void _foreach_erfc_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tan_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tan_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f0ac47d9763ba3b3979615fc175b3c473466fc5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tan_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_tan {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_tan")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_tan(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_tan_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_tan_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_tan_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_tan_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_tan")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd028fe732a5de98e98a937ad577a34c3c5541f3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_trunc_out(at::TensorList self, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_trunc_slow(at::TensorList self);
+TORCH_API void foreach_tensor_trunc_slow_(at::TensorList self);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_trunc_cuda(at::TensorList self);
+TORCH_API void foreach_tensor_trunc_cuda_(at::TensorList self);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_zero_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_zero_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..035a0c9c7956bcf29f5a8fe6c0a121a568a390de
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_zero_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_zero_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_zero_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_zero_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_zero_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_zero")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+struct TORCH_API _foreach_zero {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_zero")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_zero(Tensor[] self) -> Tensor[] self_out")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b36bba1c41a9978d510b739a62b65d9e089fcf64
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/_log_softmax_backward_data_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_log_softmax_backward_cpu_out : public at::meta::structured__log_softmax_backward_data {
+void impl(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, const at::Tensor & out);
+};
+struct TORCH_API structured_log_softmax_backward_cuda_out : public at::meta::structured__log_softmax_backward_data {
+void impl(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e125441d32ff5a72d7f5d03b52587c050520dd41
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _masked_softmax(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> mask_type=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ced73ac79993a2dab989280be77d865b74663bfd
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_training(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_training_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_training_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0b6e78326190847067f92681a0a8407048f1102b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_ops.h
@@ -0,0 +1,72 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _native_batch_norm_legit {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, at::Tensor &, at::Tensor &, bool, double, double);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps);
+};
+
+struct TORCH_API _native_batch_norm_legit_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, at::Tensor &, at::Tensor &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd);
+};
+
+struct TORCH_API _native_batch_norm_legit_no_stats {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, bool, double, double);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "no_stats")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps);
+};
+
+struct TORCH_API _native_batch_norm_legit_no_stats_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "no_stats_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd);
+};
+
+struct TORCH_API _native_batch_norm_legit_functional {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const at::Tensor &, const at::Tensor &, bool, double, double);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_native_batch_norm_legit_functional")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..30ea7e17aa2513ab6850a40abb075d9bea4c8c6e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _nested_get_offsets {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_get_offsets")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_get_offsets(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_select_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_select_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..dec6d1557ac0e89375d895076274315a22b30dbd
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_select_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _nested_select_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b5af539581b805482ab84f4509e6358fd2c01a82
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _nested_tensor_from_tensor_list {
+  using schema = at::Tensor (at::TensorList, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_tensor_from_tensor_list")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API _nested_tensor_from_tensor_list_out {
+  using schema = at::Tensor & (at::TensorList, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_tensor_from_tensor_list")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer.h
new file mode 100644
index 0000000000000000000000000000000000000000..5075b13005a17023ee8041880b4e4580e60cd4d4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_view_from_buffer_ops.h>
+
+namespace at {
+
+
+// aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor(a)
+inline at::Tensor _nested_view_from_buffer(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
+    return at::_ops::_nested_view_from_buffer::call(self, nested_size, nested_strides, offsets);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed618ecdc69b69d8259999a08cbad17d3b30c99d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_scaled_mm_ops.h>
+
+namespace at {
+
+
+// aten::_scaled_mm(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> _scaled_mm(const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias={}, c10::optional<at::ScalarType> out_dtype=c10::nullopt, const c10::optional<at::Tensor> & scale_a={}, const c10::optional<at::Tensor> & scale_b={}, const c10::optional<at::Tensor> & scale_result={}, bool use_fast_accum=false) {
+    return at::_ops::_scaled_mm::call(self, mat2, bias, out_dtype, scale_a, scale_b, scale_result, use_fast_accum);
+}
+
+// aten::_scaled_mm.out(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False, Tensor(a!) out, Tensor(b!) out_amax) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _scaled_mm_out(at::Tensor & out, at::Tensor & out_amax, const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias={}, c10::optional<at::ScalarType> out_dtype=c10::nullopt, const c10::optional<at::Tensor> & scale_a={}, const c10::optional<at::Tensor> & scale_b={}, const c10::optional<at::Tensor> & scale_result={}, bool use_fast_accum=false) {
+    return at::_ops::_scaled_mm_out::call(self, mat2, bias, out_dtype, scale_a, scale_b, scale_result, use_fast_accum, out, out_amax);
+}
+// aten::_scaled_mm.out(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False, Tensor(a!) out, Tensor(b!) out_amax) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _scaled_mm_outf(const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias, c10::optional<at::ScalarType> out_dtype, const c10::optional<at::Tensor> & scale_a, const c10::optional<at::Tensor> & scale_b, const c10::optional<at::Tensor> & scale_result, bool use_fast_accum, at::Tensor & out, at::Tensor & out_amax) {
+    return at::_ops::_scaled_mm_out::call(self, mat2, bias, out_dtype, scale_a, scale_b, scale_result, use_fast_accum, out, out_amax);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..99e05ac9240ef956b390826958fead1aba15b167
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_csr_tensor_unsafe {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_csr_tensor_unsafe")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..4cb993d70d4143aff90075579995dee130b16542
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_mm_reduce_impl_backward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const at::Tensor &, ::std::array<bool,2>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_mm_reduce_impl_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_spdiags_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_spdiags_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b094ab1ddf8183e3f902c4d9bbd55acb9682de8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_spdiags_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _spdiags_out(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout, at::Tensor & out);
+TORCH_API at::Tensor spdiags(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5cd8c8ec891b1f27c03b6eabd4f2a93f8b74a63
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _standard_gamma {
+  using schema = at::Tensor (const at::Tensor &, c10::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_standard_gamma")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_standard_gamma(Tensor self, Generator? generator=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::optional<at::Generator> generator);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator);
+};
+
+struct TORCH_API _standard_gamma_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_standard_gamma")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..e1bb0c47150bc56c5345fd83e49b8cf4ff0cf251
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_thnn_fused_lstm_cell_backward_ops.h>
+
+namespace at {
+
+
+// aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
+    return at::_ops::_thnn_fused_lstm_cell_backward::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_trilinear_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_trilinear_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4368ffd655f67ff5ab139f2e63af036ceea94394
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_trilinear_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _trilinear_out(at::Tensor & out, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim=1);
+TORCH_API at::Tensor & _trilinear_outf(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..030398bd5e8f63436e17f398d7313aba0c9c853e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _upsample_bilinear2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int4pack_mm_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int4pack_mm_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b66c1efd1dc5342716d4aff2d6dbaa37a098c03
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int4pack_mm_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _weight_int4pack_mm(const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..704afa7feae422312edd657f28d1c72bc4b838d9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_out(const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> weight_norm_cpu(const at::Tensor & v, const at::Tensor & g, int64_t dim=0);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> weight_norm_cuda(const at::Tensor & v, const at::Tensor & g, int64_t dim=0);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..37f7c864c2ae13e0fe3b5d63e3b021b5a2af823f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c06cb2ca0849804337b848ed07ce882341dba3b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/affine_grid_generator_ops.h>
+
+namespace at {
+
+
+// aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor
+inline at::Tensor affine_grid_generator(const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
+    return at::_ops::affine_grid_generator::call(theta, c10::fromIntArrayRefSlow(size), align_corners);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor affine_grid_generator(const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
+    return at::_ops::affine_grid_generator::call(theta, c10::fromIntArrayRefSlow(size), align_corners);
+  }
+}
+
+// aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor
+inline at::Tensor affine_grid_generator_symint(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners) {
+    return at::_ops::affine_grid_generator::call(theta, size, align_corners);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor affine_grid_generator(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners) {
+    return at::_ops::affine_grid_generator::call(theta, size, align_corners);
+  }
+}
+
+// aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & affine_grid_generator_out(at::Tensor & out, const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
+    return at::_ops::affine_grid_generator_out::call(theta, c10::fromIntArrayRefSlow(size), align_corners, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & affine_grid_generator_out(at::Tensor & out, const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
+    return at::_ops::affine_grid_generator_out::call(theta, c10::fromIntArrayRefSlow(size), align_corners, out);
+  }
+}
+
+// aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & affine_grid_generator_outf(const at::Tensor & theta, at::IntArrayRef size, bool align_corners, at::Tensor & out) { + return at::_ops::affine_grid_generator_out::call(theta, c10::fromIntArrayRefSlow(size), align_corners, out); +} +namespace symint { + template ::value>> + at::Tensor & affine_grid_generator_outf(const at::Tensor & theta, at::IntArrayRef size, bool align_corners, at::Tensor & out) { + return at::_ops::affine_grid_generator_out::call(theta, c10::fromIntArrayRefSlow(size), align_corners, out); + } +} + +// aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & affine_grid_generator_symint_out(at::Tensor & out, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners) { + return at::_ops::affine_grid_generator_out::call(theta, size, align_corners, out); +} +namespace symint { + template ::value>> + at::Tensor & affine_grid_generator_out(at::Tensor & out, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners) { + return at::_ops::affine_grid_generator_out::call(theta, size, align_corners, out); + } +} + +// aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & affine_grid_generator_symint_outf(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out) { + return at::_ops::affine_grid_generator_out::call(theta, size, align_corners, out); +} +namespace symint { + template ::value>> + at::Tensor & affine_grid_generator_outf(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out) { + return at::_ops::affine_grid_generator_out::call(theta, size, align_corners, out); + } +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..41a16c447a2a50a14002ccc3e2f63fa13ddb2ce0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
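// ---- [editor's note] usage sketch; illustrative only, not part of the generated diff ----
// affine_grid_generator has plain-int and `_symint` entry points that funnel into
// the same at::_ops record; the int form widens via c10::fromIntArrayRefSlow.
// Typical 2D call (assuming <ATen/ATen.h>):
//
//   at::Tensor theta = at::randn({1, 2, 3});              // N x 2 x 3 affine params
//   at::Tensor grid  = at::affine_grid_generator(theta, {1, 1, 4, 4},
//                                                /*align_corners=*/false);
//   // grid: {1, 4, 4, 2} sampling coordinates for grid_sample
// -----------------------------------------------------------------------------------------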
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple aminmax(const at::Tensor & self, c10::optional dim=c10::nullopt, bool keepdim=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/arccos.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/arccos.h new file mode 100644 index 0000000000000000000000000000000000000000..4669f4d1ee112147dd9660833422906aabf197ea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/arccos.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::arccos(Tensor self) -> Tensor +inline at::Tensor arccos(const at::Tensor & self) { + return at::_ops::arccos::call(self); +} + +// aten::arccos_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & arccos_(at::Tensor & self) { + return at::_ops::arccos_::call(self); +} + +// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & arccos_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::arccos_out::call(self, out); +} +// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & arccos_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::arccos_out::call(self, out); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/arcsin_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/arcsin_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f471e939e41cc21e86918702fe798d38f4d9b6bd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/arcsin_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API arcsin { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arcsin") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arcsin(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API arcsin_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arcsin_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arcsin_(Tensor(a!) 
self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API arcsin_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arcsin") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/argwhere_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/argwhere_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..30dfe3e6224d1e596e2b7c6a5111f183df0d028b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/argwhere_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor argwhere(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/baddbmm_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/baddbmm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..15780c45c6c484e6942271bf3b48e8e8ce48e5cb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/baddbmm_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
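// ---- [editor's note] usage sketch; illustrative only, not part of the generated diff ----
// The *_ops.h structs above are the dispatcher records behind the public API:
// at::arcsin(t) lowers to at::_ops::arcsin::call(t), while kernels re-enter the
// dispatcher via ::redispatch with an explicit DispatchKeySet. Tuple-returning
// ops such as aminmax unpack cleanly with structured bindings:
//
//   at::Tensor t = at::rand({3, 4});                 // [0, 1) keeps asin defined
//   at::Tensor a = at::_ops::arcsin::call(t);        // same entry as at::arcsin(t)
//   auto [mn, mx] = at::aminmax(t, /*dim=*/1, /*keepdim=*/false);
// -----------------------------------------------------------------------------------------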
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API baddbmm { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::baddbmm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha); +}; + +struct TORCH_API baddbmm_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::baddbmm_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha); +}; + +struct TORCH_API baddbmm_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::baddbmm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..f3aa8c92d7ce0e8378374239fc19e52ee178149c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor +inline at::Tensor batch_norm(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, bool cudnn_enabled) { + return at::_ops::batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c6f6e60f2199de4a5bb6c14efe5dde78e5e4cffa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor bernoulli(const at::Tensor & self, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & bernoulli_out(const at::Tensor & self, c10::optional generator, at::Tensor & out); +TORCH_API at::Tensor bernoulli(const at::Tensor & self, const at::Tensor & p, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & bernoulli_Tensor_out(const at::Tensor & self, const at::Tensor & p, c10::optional generator, at::Tensor & out); +TORCH_API at::Tensor & bernoulli_(at::Tensor & self, const at::Tensor & p, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & bernoulli_float_out(const at::Tensor & self, double p, c10::optional generator, at::Tensor & out); +TORCH_API at::Tensor & bernoulli_(at::Tensor & self, double p=0.5, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor bernoulli(const at::Tensor & self, double p, c10::optional generator=c10::nullopt); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/eye_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/eye_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1524e9b37caeadfe3d165e83ff6e4d4d6432c549 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/eye_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor eye(int64_t n, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}); +TORCH_API at::Tensor & eye_out_cpu(int64_t n, at::Tensor & out); +TORCH_API at::Tensor & eye_out_cuda(int64_t n, at::Tensor & out); +TORCH_API at::Tensor eye(int64_t n, int64_t m, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}); +TORCH_API at::Tensor & eye_out_cpu(int64_t n, int64_t m, at::Tensor & out); +TORCH_API at::Tensor & eye_out_cuda(int64_t n, int64_t m, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_ops.h new file mode 100644 index 
0000000000000000000000000000000000000000..5de31e13b13a9de84316a42b3d0533246cecd019 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fake_quantize_per_channel_affine_cachemask { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fake_quantize_per_channel_affine_cachemask") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)") + static ::std::tuple call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); +}; + +struct TORCH_API fake_quantize_per_channel_affine_cachemask_out { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fake_quantize_per_channel_affine_cachemask") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3a918173b1fe0aadebe2d77c28e658e8c879ac7a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor fake_quantize_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2.h new file mode 100644 index 0000000000000000000000000000000000000000..fd8be03e251b18c255f5df3de1f6173c2a246e8e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +inline at::Tensor fft_rfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_rfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } +} + +// aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor +inline at::Tensor fft_rfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2::call(self, s, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_rfft2(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2::call(self, s, dim, norm); + } +} + +// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_rfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_rfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_rfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft2_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_rfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft2_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_rfft2_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_rfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2_out::call(self, s, dim, norm, out); + } +} + +// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
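// ---- [editor's note] usage sketch; illustrative only, not part of the generated diff ----
// fft_rfft2 mirrors torch.fft.rfft2: optional output shape `s`, transform dims,
// and an optional normalization string. Sketch (assuming <ATen/ATen.h>):
//
//   at::Tensor x = at::randn({8, 8});
//   at::Tensor X = at::fft_rfft2(x);                              // all defaults
//   at::Tensor Y = at::fft_rfft2(x, c10::nullopt, {-2, -1}, "ortho");
//   // last dim has 8/2 + 1 = 5 complex bins
// -----------------------------------------------------------------------------------------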
+inline at::Tensor & fft_rfft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft2_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_rfft2_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft2_out::call(self, s, dim, norm, out); + } +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a93af795ba6130deb5c016110ec8d823fe028b72 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor fmod(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & fmod_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..75706f0c730b7db8b517effadf90dee28de1f3ba --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fractional_max_pool3d_output { + using schema = ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fractional_max_pool3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "output") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); +}; + +struct TORCH_API fractional_max_pool3d { + using schema = ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fractional_max_pool3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gather_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gather_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a79a92cec390de19ec2b126aa1dabc27bd717b0e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gather_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API gather_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gather") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); +}; + +struct TORCH_API gather { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gather") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad); +}; + +struct TORCH_API gather_dimname_out { + using schema = at::Tensor & (const at::Tensor &, at::Dimname, const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gather") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); +}; + +struct TORCH_API gather_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gather") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cf3f261dcd2c9e65f308e42572ccfe7977b7dcdc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
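// ---- [editor's note] usage sketch; illustrative only, not part of the generated diff ----
// gather (declared above) picks values along `dim` using an int64 index tensor of
// the same rank. Sketch (assuming <ATen/ATen.h>):
//
//   at::Tensor src   = at::arange(12).reshape({3, 4});
//   at::Tensor index = at::zeros({3, 2}, at::kLong);      // selects column 0 twice
//   at::Tensor g     = at::gather(src, /*dim=*/1, index); // shape {3, 2}
// -----------------------------------------------------------------------------------------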
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..87d3f521d623b05e3ae74ba8e93887ccd05a1831 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple geqrf(const at::Tensor & self); +TORCH_API ::std::tuple geqrf_out(at::Tensor & a, at::Tensor & tau, const at::Tensor & self); +TORCH_API ::std::tuple geqrf_outf(const at::Tensor & self, at::Tensor & a, at::Tensor & tau); + +} // namespace cuda +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1d241244cf13dd3cace15c009af908c9f32a1ca4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple grid_sampler_3d_backward_out(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1); +TORCH_API ::std::tuple grid_sampler_3d_backward_cpu(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask); +TORCH_API ::std::tuple grid_sampler_3d_backward_cuda(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_native.h 
b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ce9a5ab12b2a0bff4c7ed9d14a09f61989ddde0e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor huber_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +TORCH_API at::Tensor & huber_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..226b16f7db572867a0ce671890820697a0e9b52e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::indices_copy(Tensor self) -> Tensor +inline at::Tensor indices_copy(const at::Tensor & self) { + return at::_ops::indices_copy::call(self); +} + +// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & indices_copy_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::indices_copy_out::call(self, out); +} +// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & indices_copy_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::indices_copy_out::call(self, out); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e06a867a3f36a53486cbb1c50d2a9a72ab62924d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
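// ---- [editor's note] usage sketch; illustrative only, not part of the generated diff ----
// huber_loss_backward takes the incoming gradient, the forward inputs, the
// reduction enum (an int64_t, see at::Reduction), and the delta threshold:
//
//   at::Tensor input  = at::randn({4});
//   at::Tensor target = at::randn({4});
//   at::Tensor gi = at::huber_loss_backward(at::ones({}), input, target,
//                                           at::Reduction::Mean, /*delta=*/1.0);
// -----------------------------------------------------------------------------------------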
+#include + +namespace at { + +namespace cpu { + +TORCH_API bool is_set_to(const at::Tensor & self, const at::Tensor & tensor); + +} // namespace cpu +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2912880a9d3e0cc7432101940632e1326bb65185 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_cross_out : public at::meta::structured_linalg_cross { +void impl(const at::Tensor & self, const at::Tensor & other, int64_t dim, const at::Tensor & out); +}; +TORCH_API at::Tensor linalg_cross_zerotensor(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1); +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1a69ee8bb5e223ce3c801ccef4aaf147aa9e3d1c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_lstsq { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_lstsq") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)") + static ::std::tuple call(const at::Tensor & self, const at::Tensor & b, c10::optional rcond, c10::optional driver); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, c10::optional rcond, c10::optional driver); +}; + +struct TORCH_API linalg_lstsq_out { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_lstsq") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) 
singular_values)") + static ::std::tuple call(const at::Tensor & self, const at::Tensor & b, c10::optional rcond, c10::optional driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, c10::optional rcond, c10::optional driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor.h new file mode 100644 index 0000000000000000000000000000000000000000..9487988ddaf2fd27412a2d0932c96ff27e1df053 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots) +inline ::std::tuple linalg_lu_factor(const at::Tensor & A, bool pivot=true) { + return at::_ops::linalg_lu_factor::call(A, pivot); +} + +// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots) +inline ::std::tuple linalg_lu_factor_out(at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot=true) { + return at::_ops::linalg_lu_factor_out::call(A, pivot, LU, pivots); +} +// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots) +inline ::std::tuple linalg_lu_factor_outf(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) { + return at::_ops::linalg_lu_factor_out::call(A, pivot, LU, pivots); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log10_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log10_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..44104f6392cfe2c63087a513ad0d320e91b89d9e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log10_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_log10 : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ddbba218b0c8d5e636c57fed91cc53c1a8f183a5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
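// ---- [editor's note] usage sketch; illustrative only, not part of the generated diff ----
// linalg_lu_factor returns (LU, pivots); linalg_lstsq returns a four-tuple of
// (solution, residuals, rank, singular_values). Sketch (assuming <ATen/ATen.h>):
//
//   at::Tensor A = at::randn({4, 4});
//   auto [LU, pivots] = at::linalg_lu_factor(A);          // pivot defaults to true
//   at::Tensor b = at::randn({4, 2});
//   auto [sol, res, rank, sv] = at::linalg_lstsq(A, b);   // rcond/driver default
// -----------------------------------------------------------------------------------------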
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log_sigmoid_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_sigmoid_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); +}; + +struct TORCH_API log_sigmoid_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_sigmoid_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..52225f1698ab5f577980a103947dc9577c883cc0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
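// ---- [editor's note] usage sketch; illustrative only, not part of the generated diff ----
// log_sigmoid_backward consumes the `buffer` saved by the forward pass:
//
//   at::Tensor x = at::randn({4});
//   auto [out, buffer] = at::log_sigmoid_forward(x);
//   at::Tensor gi = at::log_sigmoid_backward(at::ones({4}), x, buffer);
// -----------------------------------------------------------------------------------------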
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor masked_scatter(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +TORCH_API at::Tensor & masked_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source); +TORCH_API at::Tensor & masked_scatter_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7ed9a0f0c5deee9f6990b053c7610f6e2562dde2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/matmul_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor matmul(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cb106bc85639df0a72a06927a60a95d2d0990068 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
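// ---- [editor's note] usage sketch; illustrative only, not part of the generated diff ----
// masked_scatter copies values from `source` into `self` wherever `mask` is true;
// matmul (above) is CompositeImplicitAutograd, i.e. it decomposes into other ops
// and needs no per-backend kernel. Sketch (assuming <ATen/ATen.h>):
//
//   at::Tensor self   = at::zeros({2, 3});
//   at::Tensor mask   = self < 1;                         // all-true bool mask
//   at::Tensor source = at::arange(6).to(at::kFloat);
//   at::Tensor r = at::masked_scatter(self, mask, source);
// -----------------------------------------------------------------------------------------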
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..a1c594c5ca07efae46db49d085b1831448ce2319 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & mse_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + return at::_ops::mse_loss_backward_grad_input::call(grad_output, self, target, reduction, grad_input); +} +// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & mse_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) { + return at::_ops::mse_loss_backward_grad_input::call(grad_output, self, target, reduction, grad_input); +} + +// aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor +inline at::Tensor mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + return at::_ops::mse_loss_backward::call(grad_output, self, target, reduction); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6cc719c973ec0d2d3e9a38e95f79548da8caef34 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
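// ---- [editor's note] usage sketch; illustrative only, not part of the generated diff ----
// max_pool3d expects 5-D input (N, C, D, H, W); stride defaults to kernel_size:
//
//   at::Tensor x = at::randn({1, 2, 8, 8, 8});
//   at::Tensor y = at::max_pool3d(x, /*kernel_size=*/{2, 2, 2});  // {1, 2, 4, 4, 4}
// -----------------------------------------------------------------------------------------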
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API multilabel_margin_loss_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::multilabel_margin_loss_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input); +}; + +struct TORCH_API multilabel_margin_loss_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::multilabel_margin_loss_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/neg_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/neg_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b446677f12be9af27fffd74a179ec1b449cd9aca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/neg_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
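// ---- [editor's note] usage sketch; illustrative only, not part of the generated diff ----
// multilabel_margin_loss_backward reuses the `is_target` buffer produced by the
// forward op. Sketch (assuming <ATen/ATen.h>; target holds class ids, -1 ends
// each row's list):
//
//   at::Tensor input  = at::randn({3, 5});
//   at::Tensor target = at::zeros({3, 5}, at::kLong);
//   auto [loss, is_target] =
//       at::multilabel_margin_loss_forward(input, target, at::Reduction::Mean);
//   at::Tensor gi = at::multilabel_margin_loss_backward(
//       at::ones({}), input, target, at::Reduction::Mean, is_target);
// -----------------------------------------------------------------------------------------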
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/neg_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/neg_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b446677f12be9af27fffd74a179ec1b449cd9aca
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/neg_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API neg {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::neg")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "neg(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API neg_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::neg_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "neg_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API neg_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::neg")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/pinverse.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/pinverse.h
new file mode 100644
index 0000000000000000000000000000000000000000..d0aef083294b90db38b0067ec528382c476bfa0a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/pinverse.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
+inline at::Tensor pinverse(const at::Tensor & self, double rcond=1e-15) {
+    return at::_ops::pinverse::call(self, rcond);
+}
+
+}
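// --- Editor's illustration (not part of the generated diff) ---
// at::pinverse computes the Moore-Penrose pseudoinverse; rcond truncates small
// singular values (default 1e-15, per the declaration above). A minimal sketch,
// with a hypothetical function name and shapes:
#include <ATen/ATen.h>

void pinverse_example() {
  at::Tensor a = at::randn({3, 5});
  at::Tensor pinv_default = at::pinverse(a);        // rcond = 1e-15
  at::Tensor pinv_loose   = at::pinverse(a, 1e-6);  // more aggressive truncation
  // For a full-rank matrix, a.matmul(pinv_default) is approximately the identity.
}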
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/prelu_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/prelu_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..4987293fa35db02fb7aa8a969204965942551de8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/prelu_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API prelu {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::prelu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "prelu(Tensor self, Tensor weight) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & weight);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight);
+};
+
+}} // namespace at::_ops
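// --- Editor's illustration (not part of the generated diff) ---
// A minimal usage sketch for aten::prelu as declared above (hypothetical
// function name and shapes; the weight may hold one shared slope or one slope
// per channel):
#include <ATen/ATen.h>

void prelu_example() {
  at::Tensor x = at::randn({2, 3, 8, 8});
  at::Tensor w = at::full({3}, 0.25);  // one negative slope per channel
  at::Tensor y = at::prelu(x, w);      // y = x >= 0 ? x : w * x, broadcast over channels
}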
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/quantile.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/quantile.h
new file mode 100644
index 0000000000000000000000000000000000000000..b8e1002fcacd38d90d8d272b0948f9f9d6b39a9b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/quantile.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
+inline at::Tensor quantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
+    return at::_ops::quantile::call(self, q, dim, keepdim, interpolation);
+}
+
+// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & quantile_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
+    return at::_ops::quantile_out::call(self, q, dim, keepdim, interpolation, out);
+}
+// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & quantile_outf(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
+    return at::_ops::quantile_out::call(self, q, dim, keepdim, interpolation, out);
+}
+
+// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
+inline at::Tensor quantile(const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
+    return at::_ops::quantile_scalar::call(self, q, dim, keepdim, interpolation);
+}
+
+// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & quantile_out(at::Tensor & out, const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
+    return at::_ops::quantile_scalar_out::call(self, q, dim, keepdim, interpolation, out);
+}
+// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & quantile_outf(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
+    return at::_ops::quantile_scalar_out::call(self, q, dim, keepdim, interpolation, out);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..79f48d087e742a5b74064bf507689da86f482dd5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+TORCH_API at::Tensor randn_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format);
+TORCH_API at::Tensor & randn_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+TORCH_API at::Tensor & randn_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/round_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/round_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..64dd0589dd1a27c7e30c43c0bb40c03d021ee017
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/round_meta.h
@@ -0,0 +1,32 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_round : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self);
+};
+struct TORCH_API structured_round_decimals : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self, int64_t decimals);
+};
+
+} // namespace meta
+} // namespace at
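// --- Editor's illustration (not part of the generated diff) ---
// A minimal usage sketch for the two quantile overloads declared above
// (hypothetical function name and shapes):
#include <ATen/ATen.h>

void quantile_example() {
  at::Tensor x = at::randn({4, 10});
  // Scalar-q overload with defaults: median over all elements.
  at::Tensor med = at::quantile(x, 0.5);
  // Tensor-q overload along a dimension, with the interpolation mode spelled out.
  at::Tensor qs  = at::tensor({0.25, 0.75});
  at::Tensor out = at::quantile(x, qs, /*dim=*/1, /*keepdim=*/false, /*interpolation=*/"linear");
}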
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/round_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/round_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..07695faecbd2b71bf922674e1db7ad923a29bc02
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/round_native.h
@@ -0,0 +1,32 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_round_out : public at::meta::structured_round {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+TORCH_API at::Tensor round_sparse(const at::Tensor & self);
+TORCH_API at::Tensor & round_sparse_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & round_sparse_(at::Tensor & self);
+TORCH_API at::Tensor round_sparse_csr(const at::Tensor & self);
+TORCH_API at::Tensor & round_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & round_sparse_csr_(at::Tensor & self);
+struct TORCH_API structured_round_decimals_out : public at::meta::structured_round_decimals {
+void impl(const at::Tensor & self, int64_t decimals, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
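// --- Editor's illustration (not part of the generated diff) ---
// structured_round_out/structured_round_decimals_out above are the native halves
// of structured kernels: meta() (declared in round_meta.h earlier in this diff)
// validates shapes and configures the TensorIterator, while impl() computes the
// result. User code reaches this pair through the ordinary functional/out API.
// A hedged sketch (hypothetical function name):
#include <ATen/ATen.h>

void round_example() {
  at::Tensor x  = at::randn({4});
  at::Tensor r0 = at::round(x);                  // nearest integer
  at::Tensor r2 = at::round(x, /*decimals=*/2);  // round to 2 decimal places
  at::Tensor out = at::empty_like(x);
  at::round_out(out, x);                         // out variant, preallocated
}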
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/scalar_tensor_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/scalar_tensor_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3b725fdc15087daef5bee4ab5e03481fbc96a2b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/scalar_tensor_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API scalar_tensor {
+  using schema = at::Tensor (const at::Scalar &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::scalar_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(const at::Scalar & s, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API scalar_tensor_out {
+  using schema = at::Tensor & (const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::scalar_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Scalar & s, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sort_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sort_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..84ebdf73753208691c30df3b29d97441edb697c9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sort_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, c10::optional<bool> stable, int64_t dim=-1, bool descending=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::optional<bool> stable, int64_t dim=-1, bool descending=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices);
+
+} // namespace cpu
+} // namespace at
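// --- Editor's illustration (not part of the generated diff) ---
// A minimal usage sketch for the sort overload declared above (hypothetical
// function name and shapes). stable=true preserves the relative order of equal
// elements; dim and descending match the declared defaults:
#include <ATen/ATen.h>

void sort_example() {
  at::Tensor x = at::randn({3, 8});
  auto [values, indices] = at::sort(x, /*stable=*/true, /*dim=*/-1, /*descending=*/false);
}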
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..b6ed2d12c7d6b5ad04e0ee4ab5b8bbaf4c89f711
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_special_i1e : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e51c50a1a056fb7b8602952fd7720b11f5feced
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_log_ndtr(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..eefb8220d9a92ccbc1d4049f4684f0fae3e925f1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_special_ndtri : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d184797c64a87c74da40e1c90050ef5bfb1bc816
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
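// --- Editor's illustration (not part of the generated diff) ---
// A minimal usage sketch for the special functions declared in the headers
// above (hypothetical function name and shapes):
#include <ATen/ATen.h>

void special_functions_example() {
  at::Tensor x = at::randn({5});
  at::Tensor log_cdf = at::special_log_ndtr(x);  // log of the standard normal CDF
  at::Tensor p = at::rand({5});                  // probabilities in [0, 1)
  at::Tensor z = at::special_ndtri(p);           // inverse of the normal CDF
  at::Tensor n = at::full({5}, 3.0);             // polynomial degree, per element
  at::Tensor u = at::special_shifted_chebyshev_polynomial_u(x, n);
}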
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/t_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/t_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..716aa691739115faa77f02a8f5ceb07613731fea
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/t_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API t {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::t")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "t(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API t_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::t_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "t_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+}} // namespace at::_ops
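// --- Editor's illustration (not part of the generated diff) ---
// aten::t returns a transposed *view*: the Tensor(a) -> Tensor(a) annotation in
// the schema above means the output aliases the input, so writes through the
// result are visible in the original. A minimal sketch (hypothetical name):
#include <ATen/ATen.h>

void t_example() {
  at::Tensor m  = at::randn({2, 3});
  at::Tensor mt = at::t(m);  // view with sizes {3, 2}, shares storage with m
  m.t_();                    // in-place variant, declared as t_ above
}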
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_backward_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ad9b24df15c7d6cba076fb028d130ec5c4e1c6f3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_chunk_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_chunk_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6badf5a999fd1313c6a3e9530c6f188778ded0f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_chunk_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> unsafe_chunk(const at::Tensor & self, int64_t chunks, int64_t dim=0);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf7b6a464aa6c2e9842baa1d1e0459f7f73ee90b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor vstack(at::TensorList tensors);
+TORCH_API at::Tensor & vstack_out(at::TensorList tensors, at::Tensor & out);
+} // namespace native
+} // namespace at
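// --- Editor's illustration (not part of the generated diff) ---
// A minimal usage sketch for aten::vstack as declared above: it concatenates
// tensors row-wise (along dim 0). Hypothetical function name and shapes:
#include <ATen/ATen.h>

void vstack_example() {
  at::Tensor a = at::randn({2, 4});
  at::Tensor b = at::randn({3, 4});
  at::Tensor stacked = at::vstack({a, b});  // result has shape {5, 4}
}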