diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_scalar_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_scalar_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2eb0e9cf8e875ad4c91adbf66cb1a868334dfc4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_scalar_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _assert_scalar(const at::Scalar & self, c10::string_view assert_msg);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..65c323cc8cefdd0bab4574f458f4970b7c54687b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _backward(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Char.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Char.h
new file mode 100644
index 0000000000000000000000000000000000000000..01fc2e40b9996c6694e7297d6d5c6dcff9ee02b9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Char.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cast_Char_ops.h>
+
+namespace at {
+
+
+// aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor
+inline at::Tensor _cast_Char(const at::Tensor & self, bool non_blocking=false) {
+    return at::_ops::_cast_Char::call(self, non_blocking);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_debug_has_internal_overlap.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_debug_has_internal_overlap.h
new file mode 100644
index 0000000000000000000000000000000000000000..b190cedb62c83dbe2ba61714ea7691c09c503547
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_debug_has_internal_overlap.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_debug_has_internal_overlap_ops.h>
+
+namespace at {
+
+
+// aten::_debug_has_internal_overlap(Tensor self) -> int
+inline int64_t _debug_has_internal_overlap(const at::Tensor & self) {
+    return at::_ops::_debug_has_internal_overlap::call(self);
+}
+
+}
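A minimal sketch of how the Function.h entry points above are invoked from plain C++ (values are illustrative; only the operators declared above plus standard ATen factories are assumed):

    #include <ATen/ATen.h>

    void demo_cast_and_overlap() {
      at::Tensor t = at::zeros({4});
      // Functional form of Tensor::to(at::kChar); non_blocking defaults to false.
      at::Tensor c = at::_cast_Char(t);
      // An expanded view aliases one element across four positions, so the
      // debug helper reports internal overlap as a small integer code.
      at::Tensor e = at::zeros({1}).expand({4});
      int64_t overlap = at::_debug_has_internal_overlap(e);
      (void)c; (void)overlap;
    }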
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sigmoid_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sigmoid_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b68e7a8e08e3a904427aabc12c4a3ca08c57025c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sigmoid_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_sigmoid(at::TensorList self);
+TORCH_API void _foreach_sigmoid_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sin_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sin_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0cc334a241742652aac1268b99917e736fabaf99
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sin_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_sin_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_sin_outf(at::TensorList self, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
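The _foreach_* kernels above operate on whole tensor lists in one dispatcher call. A sketch against the public wrappers (which route to the at::cuda entry points automatically for CUDA tensors):

    #include <ATen/ATen.h>
    #include <vector>

    void demo_foreach(std::vector<at::Tensor>& xs) {
      // Out-of-place: returns one result tensor per input tensor.
      std::vector<at::Tensor> ys = at::_foreach_sigmoid(xs);
      // In-place variant mutates the inputs.
      at::_foreach_sigmoid_(xs);
    }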
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam.h
new file mode 100644
index 0000000000000000000000000000000000000000..09a067936d524f0a6fdbd4263c0e935fb346c560
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam.h
@@ -0,0 +1,63 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_fused_adam_ops.h>
+
+namespace at {
+
+
+// aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
+inline void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+}
+
+// aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
+inline void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam__tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+}
+
+// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
+}
+// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
+    return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
+}
+
+// aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
+inline ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+}
+
+// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
+}
+// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
+    return at::_ops::_fused_adam_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
+}
+
+// aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
+inline ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam_tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+}
+
+}
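A sketch of driving the in-place overload declared above. The state layout mirrors torch.optim.Adam, and max_exp_avg_sqs may be an empty list when amsgrad is false; the hyperparameter values are illustrative:

    #include <ATen/ATen.h>
    #include <vector>

    void adam_step(std::vector<at::Tensor>& params,
                   std::vector<at::Tensor>& grads,
                   std::vector<at::Tensor>& exp_avgs,
                   std::vector<at::Tensor>& exp_avg_sqs,
                   std::vector<at::Tensor>& state_steps) {
      at::_fused_adam_(params, grads, exp_avgs, exp_avg_sqs,
                       /*max_exp_avg_sqs=*/{}, state_steps,
                       /*lr=*/1e-3, /*beta1=*/0.9, /*beta2=*/0.999,
                       /*weight_decay=*/0.0, /*eps=*/1e-8,
                       /*amsgrad=*/false, /*maximize=*/false);
    }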
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..15d142633a32b3072b29d5f68e84b36d077b4b14
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
+TORCH_API at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
+TORCH_API at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_logcumsumexp.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_logcumsumexp.h
new file mode 100644
index 0000000000000000000000000000000000000000..2beb0efa43fd34b9aaef8f8b75e0fc5b5bf76a89
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_logcumsumexp.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_logcumsumexp_ops.h>
+
+namespace at {
+
+
+// aten::_logcumsumexp(Tensor self, int dim) -> Tensor
+inline at::Tensor _logcumsumexp(const at::Tensor & self, int64_t dim) {
+    return at::_ops::_logcumsumexp::call(self, dim);
+}
+
+// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _logcumsumexp_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
+    return at::_ops::_logcumsumexp_out::call(self, dim, out);
+}
+// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _logcumsumexp_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
+    return at::_ops::_logcumsumexp_out::call(self, dim, out);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca7754b6390232be7b4de7a501900444b5cc6000
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _nested_sum_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::OptionalIntArrayRef, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_sum_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_size_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_size_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a74ed53df883269012f4e91419dbf693f52c3c9f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_size_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _nested_tensor_size_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor _nested_tensor_size(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9bf73f0261d58cbb5f6042fb85a35a33071f01b3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_test_autograd_multiple_dispatch_ops.h>
+
+namespace at {
+
+
+// aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
+inline at::Tensor _test_autograd_multiple_dispatch(const at::Tensor & self) {
+    return at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self);
+}
+
+// aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
+inline at::Tensor _test_autograd_multiple_dispatch(const at::Tensor & self, bool b) {
+    return at::_ops::_test_autograd_multiple_dispatch_ntonly::call(self, b);
+}
+
+// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _test_autograd_multiple_dispatch_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::call(self, out);
+}
+// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _test_autograd_multiple_dispatch_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::call(self, out);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_gru_cell_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_gru_cell_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..55be257872606a14eb4dfa16639dce066b3e6aac
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_gru_cell_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb0cff50b91ef5d5db51c55fabc7769b0f1d2931
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, bool sorted=true, bool return_inverse=false, bool return_counts=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_outf(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+
+} // namespace compositeexplicitautograd
+} // namespace at
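The out= declarations above are dispatcher plumbing behind the public at::_unique2, which returns (values, inverse_indices, counts). A minimal sketch:

    #include <ATen/ATen.h>

    void demo_unique2() {
      at::Tensor x = at::randint(0, 3, {8});
      auto [values, inverse, counts] =
          at::_unique2(x, /*sorted=*/true, /*return_inverse=*/true, /*return_counts=*/true);
    }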
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..568d6549322c294b14cc0d4e51fd6153ec36b61c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b659eb7774b4d603a85050e440fcbb3662cfd7bc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _validate_sparse_compressed_tensor_args {
+  using schema = void (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Layout);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_validate_sparse_compressed_tensor_args")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()")
+  static void call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d7b046897e899255bd90b9543d469ea1c1549372
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _weight_norm_interface {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_weight_norm_interface")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & v, const at::Tensor & g, int64_t dim);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim);
+};
+
+struct TORCH_API _weight_norm_interface_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_weight_norm_interface")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/alias.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/alias.h
new file mode 100644
index 0000000000000000000000000000000000000000..86e7498b172fcaeeef2aae2dcf40ea75f7ec3aa4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/alias.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/alias_ops.h>
+
+namespace at {
+
+
+// aten::alias(Tensor(a) self) -> Tensor(a)
+inline at::Tensor alias(const at::Tensor & self) {
+    return at::_ops::alias::call(self);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_meta_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..eaf63befae1df14f90a63bac6e8edbc622b7df07
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor argmax(const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & argmax_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & argmax_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
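The at::meta declarations compute only output metadata (shape and dtype) for the structured argmax kernel; ordinary code calls the public op and lets the dispatcher route. A sketch:

    #include <ATen/ATen.h>

    void demo_argmax() {
      at::Tensor x = at::rand({2, 3});
      at::Tensor flat = at::argmax(x);                      // dim omitted: reduce over all elements
      at::Tensor rows = at::argmax(x, /*dim=*/1, /*keepdim=*/false);
    }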
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_left_shift_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_left_shift_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c952a1ba1ae6ae7547b6fd202321777a1862d3d7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_left_shift_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor bitwise_left_shift(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_left_shift_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_left_shift_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_left_shift_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b86c0b04f9829fd18c3261d00294e062d6d16007
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_right_shift_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6dce56d7a063284b260be84593e773512f406aa6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/clamp_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_clamp_out : public at::meta::structured_clamp {
+void impl(const at::Tensor & self, at::OptionalScalarRef min, at::OptionalScalarRef max, const at::Tensor & out);
+};
+TORCH_API at::Tensor clamp_quantized_cpu(const at::Tensor & self, const c10::optional<at::Scalar> & min=c10::nullopt, const c10::optional<at::Scalar> & max=c10::nullopt);
+struct TORCH_API structured_clamp_Tensor_out : public at::meta::structured_clamp_Tensor {
+void impl(const at::Tensor & self, at::OptionalTensorRef min, at::OptionalTensorRef max, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
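clamp is a structured op with two optional bounds; either may be omitted. A sketch of the public API these kernels implement:

    #include <ATen/ATen.h>

    void demo_clamp() {
      at::Tensor x = at::randn({4});
      at::Tensor both = at::clamp(x, /*min=*/-1.0, /*max=*/1.0);
      // Only a lower bound; the Scalar-overload max defaults to nullopt.
      at::Tensor floor_only = at::clamp(x, /*min=*/0.0, /*max=*/c10::nullopt);
    }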
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/conj_physical_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/conj_physical_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..88b83d7df31e9579506d4275a838801410e1304b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/conj_physical_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API conj_physical {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conj_physical")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conj_physical(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API conj_physical_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conj_physical")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+struct TORCH_API conj_physical_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conj_physical_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conj_physical_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+}} // namespace at::_ops
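conj_physical materializes the conjugate, unlike Tensor::conj(), which only sets the conjugate bit on a view. A sketch:

    #include <ATen/ATen.h>

    void demo_conj() {
      at::Tensor z = at::randn({2}, at::kComplexFloat);
      at::Tensor lazy = z.conj();                  // view; lazy.is_conj() is true, no data copied
      at::Tensor real_copy = at::conj_physical(z); // new tensor with the imaginary part negated
    }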
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose3d_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose3d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b98c4b61e7347580b7c4e722af53cc2282fdd9f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose3d_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor conv_transpose3d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1));
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_backward_overrideable_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_backward_overrideable_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c70ed644c82d626c7f1bc45c87b6d6b8b738e05e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_backward_overrideable_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out_symint(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/corrcoef_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/corrcoef_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd3732a5f394acc1dce48683fe76a6d57ea78a2a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/corrcoef_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor corrcoef(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
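corrcoef is composite-implicit: it decomposes into other ops, so autograd works through it without a dedicated backward. Usage sketch:

    #include <ATen/ATen.h>

    void demo_corrcoef() {
      // Rows are variables, columns are observations, as in torch.corrcoef.
      at::Tensor obs = at::rand({3, 10});
      at::Tensor corr = at::corrcoef(obs);  // 3x3 correlation matrix
    }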
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diff_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diff_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5de0601dbddaf1f443cc2aa700a6c0efa6065b02
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/diff_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API diff {
+  using schema = at::Tensor (const at::Tensor &, int64_t, int64_t, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diff")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append);
+};
+
+struct TORCH_API diff_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, int64_t, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diff")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append, at::Tensor & out);
+};
+
+}} // namespace at::_ops
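A sketch of the public at::diff these schemas describe:

    #include <ATen/ATen.h>

    void demo_diff() {
      at::Tensor x = at::arange(5);          // [0, 1, 2, 3, 4]
      at::Tensor d1 = at::diff(x);           // first differences: [1, 1, 1, 1]
      at::Tensor d2 = at::diff(x, /*n=*/2);  // second differences: [0, 0, 0]
    }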
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/empty_permuted_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/empty_permuted_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2c11d323806b0b4f4afe8428c345682c740e9fd7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/empty_permuted_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor empty_permuted_symint(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & empty_permuted_out_symint(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/erf_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/erf_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6f9e40ad683707a39e441274f9d4eaa800f3ac0b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/erf_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor erf(const at::Tensor & self);
+TORCH_API at::Tensor & erf_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a7139b4ea8a50acde3f4808cfac4aabfe61a8f0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor expm1(const at::Tensor & self);
+TORCH_API at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & expm1_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fill_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fill_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9332df138892de6340f2da918ec6683dae7eef08
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fill_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & fill_(at::Tensor & self, const at::Scalar & value);
+TORCH_API at::Tensor & fill_(at::Tensor & self, const at::Tensor & value);
+
+} // namespace cuda
+} // namespace at
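The at::cpu declarations above are CPU dispatch-key entry points; normal code calls the public op and lets the dispatcher route to them, e.g.:

    #include <ATen/ATen.h>

    void demo_expm1() {
      at::Tensor x = at::rand({3});
      at::Tensor y = at::expm1(x);  // numerically stable exp(x) - 1
      x.expm1_();                   // in-place variant
    }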
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fmin_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fmin_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2704b39f184b620509594d67052fd03f9fd20663
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fmin_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor fmin(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmin_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmin_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_meta.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..94e27813e0342bf8af4dd6a93c6f835ca718db96
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_fmod_Tensor : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..72acce599b2bb8d0616f4f7e45eca601a8e58c4a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API hsplit_int {
+  using schema = ::std::vector<at::Tensor> (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hsplit")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]")
+  static ::std::vector<at::Tensor> call(const at::Tensor & self, int64_t sections);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections);
+};
+
+struct TORCH_API hsplit_array {
+  using schema = ::std::vector<at::Tensor> (const at::Tensor &, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hsplit")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "array")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]")
+  static ::std::vector<at::Tensor> call(const at::Tensor & self, at::IntArrayRef indices);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices);
+};
+
+}} // namespace at::_ops
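Both hsplit overloads above are callable through at::hsplit; a sketch:

    #include <ATen/ATen.h>
    #include <vector>

    void demo_hsplit() {
      at::Tensor m = at::rand({4, 6});
      // int overload: three equal column blocks of width 2.
      std::vector<at::Tensor> thirds = at::hsplit(m, 3);
      // array overload: split before columns 1 and 4.
      std::vector<at::Tensor> pieces = at::hsplit(m, {1, 4});
    }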
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cuda_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..77db0b79ae45859727929e1c65b36d2a56155830
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..950ddc5ffa8a174799ec617ae10372aafd0fc9bf
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_ops.h
@@ -0,0 +1,72 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API index_copy_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out);
+};
+
+struct TORCH_API index_copy_ {
+  using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_copy_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source);
+};
+
+struct TORCH_API index_copy {
+  using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source);
+};
+
+struct TORCH_API index_copy__dimname {
+  using schema = at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_copy_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source);
+};
+
+struct TORCH_API index_copy_dimname {
+  using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source);
+};
+
+}} // namespace at::_ops
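A sketch of the in-place overload registered above (the index tensor must be int64):

    #include <ATen/ATen.h>

    void demo_index_copy() {
      at::Tensor dst = at::zeros({5});
      at::Tensor idx = at::arange(2) * 2;  // indices [0, 2]
      at::Tensor src = at::ones({2});
      dst.index_copy_(0, idx, src);        // dst is now [1, 0, 1, 0, 0]
    }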
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce.h
new file mode 100644
index 0000000000000000000000000000000000000000..096df260c47151873ba140ee9eb0a41403d1e6d7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/index_reduce_ops.h>
+
+namespace at {
+
+
+// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) {
+    return at::_ops::index_reduce_out::call(self, dim, index, source, reduce, include_self, out);
+}
+// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) {
+    return at::_ops::index_reduce_out::call(self, dim, index, source, reduce, include_self, out);
+}
+
+// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
+inline at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) {
+    return at::_ops::index_reduce::call(self, dim, index, source, reduce, include_self);
+}
+
+}
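index_reduce scatters source elements into self along dim and combines collisions with the named reduction ("prod", "mean", "amax", or "amin"). A sketch:

    #include <ATen/ATen.h>

    void demo_index_reduce() {
      at::Tensor self = at::ones({3});
      at::Tensor idx = at::zeros({2}, at::kLong);  // both sources land at index 0
      at::Tensor src = at::full({2}, 2.0);
      at::Tensor r = at::index_reduce(self, /*dim=*/0, idx, src, "prod",
                                      /*include_self=*/true);  // r[0] == 1 * 2 * 2 == 4
    }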
self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +}; + +struct TORCH_API index_copy_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +}; + +}} // namespace at::_ops diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..096df260c47151873ba140ee9eb0a41403d1e6d7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) { + return at::_ops::index_reduce_out::call(self, dim, index, source, reduce, include_self, out); +} +// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) { + return at::_ops::index_reduce_out::call(self, dim, index, source, reduce, include_self, out); +} + +// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor +inline at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) { + return at::_ops::index_reduce::call(self, dim, index, source, reduce, include_self); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..771318844ccbb86af2de4dbbf4226a3450314e72 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh.h new file mode 100644 index 0000000000000000000000000000000000000000..e858d16e4c578365eac3e3e00007b1b9f49b1876 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lift_fresh(Tensor(a) self) -> Tensor(a) +inline at::Tensor lift_fresh(const at::Tensor & self) { + return at::_ops::lift_fresh::call(self); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7903efd51345cce8adc6ff4af99bfda9969d9a96 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only 
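The difference between `index_copy` and `index_reduce` declared above is overwrite versus accumulate. A minimal sketch, assuming a linked libtorch build (illustration only):

#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::zeros({5});
  at::Tensor index = at::arange(2) * 2;  // rows 0 and 2, dtype long
  at::Tensor source = at::ones({2});
  // index_copy writes source rows into self at `index` along dim 0.
  at::Tensor copied = at::index_copy(self, 0, index, source);
  // index_reduce accumulates instead; "prod" multiplies into self
  // (include_self defaults to true).
  at::Tensor reduced = at::index_reduce(self, 0, index, source, "prod");
  return 0;
}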
#includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor lift_fresh_copy(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..68e584f5d72134d39ef594f700239b646753150e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor lift_fresh(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c3f6308c6c4e7c3dd92b8c147442ed98db9807f6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eig_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_eig {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_eig")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API linalg_eig_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_eig")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..4c098d179f1b3feea6955056343c0d9ab5aaf94e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_lu {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_lu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & A, bool pivot);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot);
+};
+
+struct TORCH_API linalg_lu_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, bool, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_lu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U);
+};
+
+}} // namespace at::_ops
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_slogdet {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_slogdet")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & A);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A);
+};
+
+struct TORCH_API linalg_slogdet_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_slogdet")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1bdd65d71e9f3854e027ef620e6b3991991fa494
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false);
+TORCH_API at::Tensor & linalg_solve_triangular_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false);
+TORCH_API at::Tensor & linalg_solve_triangular_outf(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_svdvals.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_svdvals.h
new file mode 100644
index 0000000000000000000000000000000000000000..f2e7e81d78d3f6acf83817c19aa37fc29002a866
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_svdvals.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/linalg_svdvals_ops.h>
+
+namespace at {
+
+
+// aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
+inline at::Tensor linalg_svdvals(const at::Tensor & A, c10::optional<c10::string_view> driver=c10::nullopt) {
+    return at::_ops::linalg_svdvals::call(A, driver);
+}
+
+// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_svdvals_out(at::Tensor & out, const at::Tensor & A, c10::optional<c10::string_view> driver=c10::nullopt) {
+    return at::_ops::linalg_svdvals_out::call(A, driver, out);
+}
+// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_svdvals_outf(const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out) {
+    return at::_ops::linalg_svdvals_out::call(A, driver, out);
+}
+
+}
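The `_out`/`_outf` pair above follows the convention used throughout these headers: `_out` takes the output tensor first for convenience, `_outf` keeps the schema's argument order, and both dispatch to the same `at::_ops` entry. A minimal sketch, assuming a linked libtorch build and C++17 (illustration only):

#include <ATen/ATen.h>

int main() {
  at::Tensor A = at::eye(4);
  auto [sign, logabsdet] = at::linalg_slogdet(A);  // (1, 0) for the identity
  at::Tensor s = at::linalg_svdvals(A);            // singular values, all ones here
  at::Tensor out = at::empty({4});
  at::linalg_svdvals_out(out, A);                  // out-first wrapper, default driver
  return 0;
}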
+struct TORCH_API linalg_svdvals {
+  using schema = at::Tensor (const at::Tensor &, c10::optional<c10::string_view>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_svdvals")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & A, c10::optional<c10::string_view> driver);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::optional<c10::string_view> driver);
+};
+
+struct TORCH_API linalg_svdvals_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::optional<c10::string_view>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_svdvals")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_add_relu_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_add_relu_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d9a508dfb5b2d7533be88a4901c595681d66cc65
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_add_relu_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API miopen_convolution_add_relu {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Scalar> &, const c10::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_convolution_add_relu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ec662c3cc0025ecec01ea8c8d67456b968d807c4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & mkldnn_max_pool3d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+TORCH_API at::Tensor mkldnn_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/multiply.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/multiply.h
new file mode 100644
index 0000000000000000000000000000000000000000..fa2d655c88b7b03545cc2e712ac90be7deb85062
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/multiply.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/multiply_ops.h>
+
+namespace at {
+
+
+// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor multiply(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::multiply_Tensor::call(self, other);
+}
+
+// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & multiply_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::multiply_out::call(self, other, out);
+}
+// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & multiply_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::multiply_out::call(self, other, out);
+}
+
+// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor multiply(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::multiply_Scalar::call(self, other);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nanmedian_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nanmedian_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d333c663d6e66c359871ef41ad46cdcf6224a62c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nanmedian_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & nanmedian_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & nanmedian_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> nanmedian(const at::Tensor & self, int64_t dim, bool keepdim=false);
+
+} // namespace compositeexplicitautograd
+} // namespace at
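`multiply` is the alias of `mul` and shows all three wrapper shapes in one header: functional, out-first, and schema-order. A minimal sketch, assuming a linked libtorch build (illustration only):

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::ones({2, 2});
  at::Tensor b = at::full({2, 2}, 3.0f);
  at::Tensor out = at::empty({2, 2});
  at::multiply_out(out, a, b);          // out-first convenience wrapper
  at::multiply_outf(a, b, out);         // schema-order variant, same dispatch
  at::Tensor c = at::multiply(a, 2.0);  // Scalar overload
  return 0;
}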
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/one_hot.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/one_hot.h
new file mode 100644
index 0000000000000000000000000000000000000000..6617cfa53c19f659054645b55cd13feaed9dd610
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/one_hot.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/one_hot_ops.h>
+
+namespace at {
+
+
+// aten::one_hot(Tensor self, int num_classes=-1) -> Tensor
+inline at::Tensor one_hot(const at::Tensor & self, int64_t num_classes=-1) {
+    return at::_ops::one_hot::call(self, num_classes);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bfa7c81872489faadb894545b47cc4dd56be8a60
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & permute_copy_out(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out);
+TORCH_API at::Tensor permute_copy(const at::Tensor & self, at::IntArrayRef dims);
+} // namespace native
+} // namespace at
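A minimal sketch of the `one_hot` wrapper declared above, assuming a linked libtorch build (illustration only):

#include <ATen/ATen.h>

int main() {
  at::Tensor labels = at::arange(3);  // [0, 1, 2], dtype long
  // num_classes=-1 (the default) infers the class count from the data.
  at::Tensor oh = at::one_hot(labels, /*num_classes=*/-1);  // 3x3 identity pattern
  return oh.size(1) == 3 ? 0 : 1;
}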
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & quantize_per_channel_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype);
+TORCH_API at::Tensor & quantize_per_channel_outf(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..805558d8427bff383ef6f595d27e927f9c02bb6b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/rad2deg_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API rad2deg {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rad2deg")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rad2deg(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API rad2deg_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rad2deg_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rad2deg_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API rad2deg_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rad2deg")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/random_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/random_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b487d0689da26128e61450d4cd71c2db80aeda9a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/random_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & random_(at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & random_(at::Tensor & self, int64_t to, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & random_(at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..7cf53b66cacf2495a866b5f24e8afe1201ecffe6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor reflection_pad2d_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad2d_backward_out_cpu(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input);
+TORCH_API at::Tensor reflection_pad2d_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad2d_backward_out_cuda(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a5395ed964a9709a66d32f4523c5a761fdf8cb75
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor replication_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & replication_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & replication_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input);
+TORCH_API at::Tensor & replication_pad1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & replication_pad1d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..389c5c0bf4811e95f8c99fd4abee2a6cf4b79289
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor reshape_symint(const at::Tensor & self, c10::SymIntArrayRef shape);
+TORCH_API at::Tensor reshape_nested_symint(const at::Tensor & self, c10::SymIntArrayRef shape);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd3c813413edf2daf879bf709548fd9ca7c74f6f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/row_indices_ops.h>
+
+namespace at {
+
+
+
+}
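The three `random_` overloads in the CPU dispatch header above cover bounded, half-bounded, and full-range filling, and surface as `Tensor::random_` in the public API. A minimal sketch, assuming a linked libtorch build (illustration only):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::empty({4}, at::kLong);
  t.random_(10);      // uniform over [0, 10)
  t.random_(5, 10);   // uniform over [5, 10)
  t.random_();        // full range of the dtype
  return 0;
}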
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/softplus.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/softplus.h
new file mode 100644
index 0000000000000000000000000000000000000000..9d449fa9f88a03a2cb4cb246b857695a0a06ec67
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/softplus.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/softplus_ops.h>
+
+namespace at {
+
+
+// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & softplus_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20) {
+    return at::_ops::softplus_out::call(self, beta, threshold, out);
+}
+// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & softplus_outf(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
+    return at::_ops::softplus_out::call(self, beta, threshold, out);
+}
+
+// aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
+inline at::Tensor softplus(const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20) {
+    return at::_ops::softplus::call(self, beta, threshold);
+}
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sort_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sort_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0944e03bce1c7e7d8200c50d223fad7fffbcd06a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sort_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, int64_t dim=-1, bool descending=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim=-1, bool descending=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices);
+
+} // namespace compositeexplicitautograd
+} // namespace at
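A minimal sketch of the `softplus` and `sort` wrappers above, assuming a linked libtorch build and C++17 (illustration only):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({5});
  // softplus(x) = (1/beta) * log(1 + exp(beta * x)); switches to a linear
  // pass-through once beta * x exceeds `threshold`, for numerical stability.
  at::Tensor y = at::softplus(x, /*beta=*/1, /*threshold=*/20);
  // sort returns (values, indices) along the last dim by default.
  auto [values, indices] = at::sort(x, /*dim=*/-1, /*descending=*/false);
  return 0;
}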
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options);
+TORCH_API at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options);
+TORCH_API at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f3a70a0ba1c92a6549f14ec22439402276d98965
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_i0e {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_i0e")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_i0e(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API special_i0e_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_i0e")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sspaddmm_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sspaddmm_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..567b38160dd4f59d63f9081e3f01ffe9aec28409
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/sspaddmm_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor sspaddmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+
+} // namespace compositeimplicitautograd
+} // namespace at
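A minimal sketch of the `special_i0e` wrapper declared above, assuming a linked libtorch build (illustration only):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::linspace(0, 5, 6);
  // i0e is the exponentially scaled modified Bessel function of order zero:
  // i0e(x) = exp(-|x|) * i0(x), which avoids overflow for large |x|.
  at::Tensor y = at::special_i0e(x);
  return 0;
}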
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor sum(const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & sum_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_backward_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f5ab73bf80ea349e9b73f09fe9b85866340797e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API tanh_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::tanh_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input);
+};
+
+struct TORCH_API tanh_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::tanh_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "tanh_backward(Tensor grad_output, Tensor output) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & output);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output);
+};
+
+}} // namespace at::_ops
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..39e78e259acff33e937d9222f764db84ade6b85d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_native.h
@@ -0,0 +1,34 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/tanh_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_tanh_out : public at::meta::structured_tanh {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+TORCH_API at::Tensor NestedTensor_tanh(const at::Tensor & self);
+TORCH_API at::Tensor & NestedTensor_tanh_(at::Tensor & self);
+TORCH_API at::Tensor tanh_sparse(const at::Tensor & self);
+TORCH_API at::Tensor & tanh_sparse_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & tanh_sparse_(at::Tensor & self);
+TORCH_API at::Tensor tanh_sparse_csr(const at::Tensor & self);
+TORCH_API at::Tensor & tanh_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & tanh_sparse_csr_(at::Tensor & self);
+TORCH_API at::Tensor mkldnn_tanh(const at::Tensor & self);
+TORCH_API at::Tensor & mkldnn_tanh_(at::Tensor & self);
+TORCH_API at::Tensor tanh_quantized_cpu(const at::Tensor & self);
+} // namespace native
+} // namespace at
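`tanh_backward` reconstructs the gradient from the saved forward output rather than the input, since d tanh(x)/dx = 1 - tanh(x)^2. A minimal sketch, assuming a linked libtorch build (illustration only):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({3});
  at::Tensor y = at::tanh(x);
  at::Tensor grad_out = at::ones_like(y);
  // Computes grad_out * (1 - y * y) from the forward result y.
  at::Tensor grad_in = at::tanh_backward(grad_out, y);
  return 0;
}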
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & to_padded_tensor_out_symint(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor NestedTensor_to_padded_tensor_generic(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt);
+TORCH_API at::Tensor NestedTensor_to_padded_tensor_cuda(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_compositeimplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..30fe07181bba8cd6093401257cb5664f86e9ca6d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor transpose(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a0a2a8940231af0711f7ba4aac4f14df21ab651
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_split_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> unsafe_split(const at::Tensor & self, int64_t split_size, int64_t dim=0);
+TORCH_API void unsafe_split_Tensor_out_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_copy_native.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..cbbf9831b39046acfe2ea5c041c87eb1ec42fbd4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & unsqueeze_copy_out(const at::Tensor & self, int64_t dim, at::Tensor & out);
+TORCH_API at::Tensor unsqueeze_copy(const at::Tensor & self, int64_t dim);
+} // namespace native
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/view_copy_compositeexplicitautograd_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/view_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7827b3e7ad7c2aae9690cb28e325c447effc406b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/view_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & view_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size);
+TORCH_API at::Tensor & view_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out);
+TORCH_API at::Tensor & view_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size);
+TORCH_API at::Tensor & view_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out);
+TORCH_API at::Tensor & view_copy_out(at::Tensor & out, const at::Tensor & self, at::ScalarType dtype);
+TORCH_API at::Tensor & view_copy_outf(const at::Tensor & self, at::ScalarType dtype, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/xlogy_cpu_dispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/xlogy_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9010f9c905d49bf2d3438b5968b1a0db8b7db231
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/xlogy_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor xlogy(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & xlogy_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & xlogy_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
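A minimal sketch of the `xlogy` family declared above, assuming a linked libtorch build (illustration only):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::zeros({3});
  at::Tensor y = at::rand({3});
  // xlogy(x, y) = x * log(y), with the convention 0 * log(0) == 0,
  // so a zero x never produces NaN even where y is zero.
  at::Tensor z = at::xlogy(x, y);  // all zeros here
  at::xlogy_(x, y);                // in-place variant on x
  return 0;
}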