diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_backward_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..fac61cb39cce792d58ef7482e6a0cf38150330a8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _adaptive_avg_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor adaptive_avg_pool2d_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API at::Tensor adaptive_avg_pool2d_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale.h
new file mode 100644
index 0000000000000000000000000000000000000000..72b8a8a8ce7707baaca79e8b1d071dd638bc86ad
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h>
+
+namespace at {
+
+
+// aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
+inline void _amp_foreach_non_finite_check_and_unscale_(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self, found_inf, inv_scale);
+}
+
+// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
+inline void _amp_foreach_non_finite_check_and_unscale_out(at::TensorList out, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
+}
+// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
+inline void _amp_foreach_non_finite_check_and_unscale_outf(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
+}
+
+// aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)
+inline ::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self, found_inf, inv_scale);
+}
+
+}
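Usage sketch for the in-place overload above -- illustrative only, assuming the standard ATen C++ API; the helper name unscale_grads is hypothetical, and in stock PyTorch the kernel behind this op is registered for CUDA tensors:

#include <ATen/ATen.h>
#include <vector>

// Unscale gradients in place and record whether any value was inf/NaN --
// the core step behind torch.cuda.amp.GradScaler.unscale_().
void unscale_grads(std::vector<at::Tensor>& grads, double scale) {
  auto opts = grads[0].options().dtype(at::kFloat);
  at::Tensor found_inf = at::zeros({1}, opts);              // set to 1.0 on overflow
  at::Tensor inv_scale = at::full({1}, 1.0 / scale, opts);
  // at::TensorList is an ArrayRef<Tensor>, so the vector binds directly.
  at::_amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale);
}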
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_efficientzerotensor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_efficientzerotensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c11184c08a704120101dd1331b616ed41035b3f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_efficientzerotensor.h
@@ -0,0 +1,113 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_efficientzerotensor_ops.h>
+
+namespace at {
+
+
+// aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::_efficientzerotensor::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::_efficientzerotensor::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+}
+
+// aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor _efficientzerotensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::_efficientzerotensor::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _efficientzerotensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::_efficientzerotensor::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
+  }
+}
+
+// aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::_efficientzerotensor::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _efficientzerotensor(c10::SymIntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::_efficientzerotensor::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+}
+
+// aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::_efficientzerotensor::call(size, dtype, layout, device, pin_memory);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _efficientzerotensor(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::_efficientzerotensor::call(size, dtype, layout, device, pin_memory);
+  }
+}
+
+// aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _efficientzerotensor_out(at::Tensor & out, at::IntArrayRef size) {
+    return at::_ops::_efficientzerotensor_out::call(c10::fromIntArrayRefSlow(size), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _efficientzerotensor_out(at::Tensor & out, at::IntArrayRef size) {
+    return at::_ops::_efficientzerotensor_out::call(c10::fromIntArrayRefSlow(size), out);
+  }
+}
+
+// aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _efficientzerotensor_outf(at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::_efficientzerotensor_out::call(c10::fromIntArrayRefSlow(size), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _efficientzerotensor_outf(at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::_efficientzerotensor_out::call(c10::fromIntArrayRefSlow(size), out);
+  }
+}
+
+// aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _efficientzerotensor_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
+    return at::_ops::_efficientzerotensor_out::call(size, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _efficientzerotensor_out(at::Tensor & out, c10::SymIntArrayRef size) {
+    return at::_ops::_efficientzerotensor_out::call(size, out);
+  }
+}
+
+// aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _efficientzerotensor_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
+    return at::_ops::_efficientzerotensor_out::call(size, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _efficientzerotensor_outf(c10::SymIntArrayRef size, at::Tensor & out) {
+    return at::_ops::_efficientzerotensor_out::call(size, out);
+  }
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_abs.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_abs.h
new file mode 100644
index 0000000000000000000000000000000000000000..d32a492b20587ec24a1b3021c6def522ec24449b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_abs.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_abs_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_abs(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_abs(at::TensorList self) {
+    return at::_ops::_foreach_abs::call(self);
+}
+
+// aten::_foreach_abs_(Tensor(a!)[] self) -> ()
+inline void _foreach_abs_(at::TensorList self) {
+    return at::_ops::_foreach_abs_::call(self);
+}
+
+// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_abs_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_abs_out::call(self, out);
+}
+// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_abs_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_abs_out::call(self, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_copy_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_copy_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4e771d91d551427ff180544295459fcfe8451912
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_copy_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API void _foreach_copy_(at::TensorList self, at::TensorList src, bool non_blocking=false);
+
+} // namespace cpu
+} // namespace at
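Usage sketch for the _foreach_abs overloads above (illustrative, assuming the standard ATen C++ API; the foreach ops take a TensorList and apply the op per tensor, fusing the loop on supported backends):

#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<at::Tensor> xs = {at::randn({3}), at::randn({5})};
  std::vector<at::Tensor> ys = at::_foreach_abs(xs);  // functional: returns new tensors
  at::_foreach_abs_(xs);                              // in-place variant, returns void
}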
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5643f3400bda609b773d5819f7cdacbd7f554ff
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _grid_sampler_2d_cpu_fallback {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_grid_sampler_2d_cpu_fallback")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+};
+
+struct TORCH_API _grid_sampler_2d_cpu_fallback_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_grid_sampler_2d_cpu_fallback")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_cts_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_cts_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4fd5817fab7e3954e1e978115c5eedac7a34ed92
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_cts_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _histogramdd_from_bin_cts_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API at::Tensor & _histogramdd_from_bin_cts_outf(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_det_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_det_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1190f28416307678d4fccfea9b222e863f19599
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_det_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured__linalg_det : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & A);
+};
+
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3aa0fbc39d445de7e925603361b0d84cb250bae1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet(const at::Tensor & A);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..7bdf71c32d726a21dcef54c3eadf01172aeacb01
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured__log_softmax : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, int64_t dim, bool half_to_float);
+};
+
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_size.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_size.h
new file mode 100644
index 0000000000000000000000000000000000000000..26c3cb7d09f9e5a4db47c2aa480bedf119973556
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_size.h
@@ -0,0 +1,34 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_tensor_size_ops.h>
+
+namespace at {
+
+
+// aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nested_tensor_size_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::_nested_tensor_size_out::call(self, out);
+}
+// aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nested_tensor_size_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::_nested_tensor_size_out::call(self, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_backward_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e5c98a361604ee2b37fba4c04409e88335f44a23
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_backward_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _pdist_backward(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_backward_data_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_backward_data_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b2ba80b1d0de68b1f2ebd0779ce9080c37b9ac3d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_backward_data_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/_softmax_backward_data_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_softmax_backward_cpu_out : public at::meta::structured__softmax_backward_data {
+void impl(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_softmax_backward_cuda_out : public at::meta::structured__softmax_backward_data {
+void impl(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, const at::Tensor & grad_input);
+};
+TORCH_API at::Tensor nested_softmax_backward(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..66b85fa5ffb5c9ef5c890e0d6e2d0c8fa1bfd439
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _standard_gamma_out(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor _s_gamma_cpu(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor _s_gamma_cuda(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_multi_head_attention_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_multi_head_attention_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c0301d24bd7f8c588f9528e68623a102d07cd6f5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_multi_head_attention_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _triton_multi_head_attention {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_triton_multi_head_attention")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask);
+};
+
+struct TORCH_API _triton_multi_head_attention_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_triton_multi_head_attention")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)")
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional & mask, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional & mask, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_coo_tensor_args_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_coo_tensor_args_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9d2454dbe676fda88b707c7a3975a2364d6359cd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_coo_tensor_args_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API void _validate_sparse_coo_tensor_args(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional is_coalesced=c10::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/amax_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/amax_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dd0ee782b9fa05b4c7eefa8a0583a597b64af230 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/amax_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor amax(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
+TORCH_API at::Tensor & amax_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
+TORCH_API at::Tensor & amax_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..22b4ea0e59578490324a98226411fb6656036975
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..00e674925702e2dac42ec88ac9a2a73c25d7a0e2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API bilinear {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bilinear")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor")
bias=None) -> Tensor") + static at::Tensor call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional & bias); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional & bias); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f2dae0a7159fc7de701647512a5b17bcbd27e06b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor binary_cross_entropy_with_logits(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, const c10::optional & pos_weight={}, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & binary_cross_entropy_with_logits_out(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, const c10::optional & pos_weight, int64_t reduction, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_native.h new file mode 100644 index 0000000000000000000000000000000000000000..268e2b6462bb99e73fbdc8bf07e4c6a2eca32c00 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_bitwise_or_out : public at::meta::structured_bitwise_or_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor bitwise_or(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & bitwise_or_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & bitwise_or_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor bitwise_or(const at::Scalar & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_or_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor.h new file mode 100644 index 0000000000000000000000000000000000000000..c2722f90131b990cb56986b7ec06709491ff8856 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor.h @@ -0,0 +1,67 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::bitwise_xor.Tensor_out(Tensor self, 
+inline at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::bitwise_xor_Tensor_out::call(self, other, out);
+}
+// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bitwise_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::bitwise_xor_Tensor_out::call(self, other, out);
+}
+
+// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::bitwise_xor_Scalar_out::call(self, other, out);
+}
+// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bitwise_xor_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::bitwise_xor_Scalar_out::call(self, other, out);
+}
+
+// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor bitwise_xor(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::bitwise_xor_Scalar::call(self, other);
+}
+
+// aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
+inline at::Tensor bitwise_xor(const at::Scalar & self, const at::Tensor & other) {
+    return at::_ops::bitwise_xor_Scalar_Tensor::call(self, other);
+}
+
+// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor bitwise_xor(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::bitwise_xor_Tensor::call(self, other);
+}
+
+// aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
+    return at::_ops::bitwise_xor_Scalar_Tensor_out::call(self, other, out);
+}
+// aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bitwise_xor_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::bitwise_xor_Scalar_Tensor_out::call(self, other, out);
+}
+
+}
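Usage sketch covering the overload families above (illustrative, assuming the standard ATen C++ API; note the generated convention that _out takes the output first while _outf takes it last):

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::randint(0, 8, {4}, at::kInt);
  at::Tensor b = at::randint(0, 8, {4}, at::kInt);
  at::Tensor c = at::bitwise_xor(a, b);      // Tensor ^ Tensor overload
  at::Tensor d = at::bitwise_xor(a, 3);      // Tensor ^ Scalar overload
  at::Tensor out = at::empty_like(a);
  at::bitwise_xor_out(out, a, b);            // writes into out
  at::bitwise_xor_outf(a, b, out);           // same op, out passed last
}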
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bucketize_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bucketize_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..42e164390f65dd1d129c0abca008bd525456f2f7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bucketize_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor bucketize(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false);
+TORCH_API at::Tensor & bucketize_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false);
+TORCH_API at::Tensor & bucketize_outf(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out);
+TORCH_API at::Tensor bucketize(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3aa45802c9e6dad3c7611df2d44375fcd4f37a31
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0);
+TORCH_API at::Tensor constant_pad_nd_symint(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0);
+TORCH_API at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0);
+TORCH_API at::Tensor & constant_pad_nd_outf(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & constant_pad_nd_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0);
+TORCH_API at::Tensor & constant_pad_nd_symint_outf(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose3d.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose3d.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc850690c01177524b594eb99e66440365442e36
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose3d.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/conv_transpose3d_ops.h>
+
+namespace at {
+
+
+// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor
+inline at::Tensor conv_transpose3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) {
+    return at::_ops::conv_transpose3d_input::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), groups, c10::fromIntArrayRefSlow(dilation));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor conv_transpose3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) {
+    return at::_ops::conv_transpose3d_input::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), groups, c10::fromIntArrayRefSlow(dilation));
+  }
+}
+
+// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor
+inline at::Tensor conv_transpose3d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1)) {
+    return at::_ops::conv_transpose3d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor conv_transpose3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1)) {
+    return at::_ops::conv_transpose3d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
+  }
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5baf7411c5443c9dd7b4b83f392147f4f0d6d9ee
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups);
+TORCH_API at::Tensor & convolution_out_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out);
+} // namespace native
+} // namespace at
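Usage sketch for the conv_transpose3d wrapper above (illustrative, assuming the standard ATen C++ API; shapes are made up for the example). Each spatial output dim follows out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + output_padding + 1:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({1, 8, 4, 4, 4});   // [N, C_in, D, H, W]
  at::Tensor w = at::randn({8, 4, 3, 3, 3});   // [C_in, C_out/groups, kD, kH, kW]
  at::Tensor y = at::conv_transpose3d(x, w, /*bias=*/{}, /*stride=*/{2, 2, 2},
                                      /*padding=*/{1, 1, 1}, /*output_padding=*/{1, 1, 1});
  // y: [1, 4, 8, 8, 8] -- (4-1)*2 - 2*1 + 1*(3-1) + 1 + 1 = 8 per spatial dim
}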
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/count_nonzero_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/count_nonzero_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..32c2a9b1106d73f0ea7c20be136feaf400543bef
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/count_nonzero_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor count_nonzero(const at::Tensor & self, at::IntArrayRef dim);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..04b171804ab4414037a201d8d45d6b2d2d9947ac
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor cumsum(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & cumsum_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor & cumsum_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a918173b1fe0aadebe2d77c28e658e8c879ac7a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fake_quantize_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2ee6e61c5f8d5dfa8cfb69d4f78261caf9e0f205
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fmax_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fmax_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c51ba2002c4422ab5f3279397efb82e76e312ad
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fmax_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor fmax(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmax_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmax_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7ea591768bd37f66e5918f6dd021d352a5cd2941
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/full_like.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/full_like.h
new file mode 100644
index 0000000000000000000000000000000000000000..c30360fdb4d026c1c765674d38f333cb11f98eff
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/full_like.h
@@ -0,0 +1,43 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/full_like_ops.h>
+
+namespace at {
+
+
+// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::full_like::call(self, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::full_like::call(self, fill_value, dtype, layout, device, pin_memory, memory_format);
+}
+
+// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_like_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::full_like_out::call(self, fill_value, memory_format, out);
+}
+// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & full_like_outf(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::full_like_out::call(self, fill_value, memory_format, out);
+}
+
+}
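Usage sketch for the full_like overloads above (illustrative, assuming the standard ATen C++ API; neg_inf_like is a hypothetical helper):

#include <ATen/ATen.h>
#include <limits>

// Same shape/device/layout as ref, every element -inf, dtype forced to float.
at::Tensor neg_inf_like(const at::Tensor& ref) {
  return at::full_like(ref, -std::numeric_limits<double>::infinity(),
                       ref.options().dtype(at::kFloat));
}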
zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..60dac9a81337d7a39ab6b108aeef0d4f8a5e838e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor gelu(const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_out(at::Tensor & out, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_outf(const at::Tensor & self, c10::string_view approximate, at::Tensor & out); +TORCH_API at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate="none"); + +} // namespace cuda +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6455a6cee6b06135ac38ff201b2b5c6d06edda89 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
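As an illustrative aside, a minimal usage sketch for the gelu entry points declared just above (function name, shapes, and values are ours, not part of the headers; assumes a CUDA-enabled libtorch build):

#include <ATen/ATen.h>

// Exercises the kernels declared in gelu_cuda_dispatch.h.
void gelu_sketch() {
  at::Tensor x = at::randn({4, 4}, at::kCUDA);        // a CUDA input routes to the cuda:: kernels
  at::Tensor y = at::gelu(x, /*approximate=*/"tanh"); // functional variant
  at::Tensor out = at::empty_like(x);
  at::gelu_out(out, x);                               // out-variant writes the default ("none") result into `out`
}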
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & grid_sampler_3d_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +TORCH_API at::Tensor & grid_sampler_3d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh.h new file mode 100644 index 0000000000000000000000000000000000000000..764c572659fac897a22fc6622b873ea006f9d14d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_out::call(self, min_val, max_val, out); +} +// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) { + return at::_ops::hardtanh_out::call(self, min_val, max_val, out); +} + +// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor +inline at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh::call(self, min_val, max_val); +} + +// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) +inline at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_::call(self, min_val, max_val); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c1fbab04d81ecfc8374798d790f8f897df3e0fb4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
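A minimal sketch of the hardtanh wrappers declared above (tensor values illustrative):

#include <ATen/ATen.h>

void hardtanh_sketch() {
  at::Tensor x = at::randn({8});
  at::Tensor y = at::hardtanh(x, /*min_val=*/-0.5, /*max_val=*/0.5); // clamp to [-0.5, 0.5]
  at::hardtanh_(x);                                                  // in-place, default range [-1, 1]
}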
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> histogram(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight={}, bool density=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight={}, bool density=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> histogram(const at::Tensor & self, int64_t bins=100, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, int64_t bins=100, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); + +} // namespace cpu +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..8b8b3afc2f1f5d95c8dc5e7814c1a8b5e50ac9f7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_igamma : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace meta +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/im2col.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/im2col.h new file mode 100644 index 0000000000000000000000000000000000000000..bab54186b8a3ced89328464f8d800ba5e3d7303b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/im2col.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col_out::call(self, kernel_size, dilation, padding, stride, out); +} +// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::im2col_out::call(self, kernel_size, dilation, padding, stride, out); +} + +// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor +inline at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b950db631fa9823335a5089f05b332c84120c8d0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..fd884d762101fded87d988f8f24dc7c9582a3104 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/inverse_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API inverse { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::inverse") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "inverse(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API inverse_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::inverse") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "inverse.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9a0f37286cf8a364a7ce77c54dfd2e9dc4352b8e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor lgamma(const at::Tensor & self); +TORCH_API at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & lgamma_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4f803b9a9fd41aeee20374f382d5f0de95ab70e0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
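The lgamma declarations above illustrate the naming convention these generated headers use throughout: `_out` takes the output tensor first, `_outf` takes it last, and both forward to the same op. A minimal sketch (tensor values illustrative):

#include <ATen/ATen.h>

void lgamma_sketch() {
  at::Tensor x = at::rand({5}) + 0.1;
  at::Tensor out = at::empty_like(x);
  at::lgamma_out(out, x);  // out-first ordering
  at::lgamma_outf(x, out); // out-last ("f") ordering, same kernel
}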
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond=c10::nullopt, c10::optional<c10::string_view> driver=c10::nullopt); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_outf(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values); + +} // namespace cpu +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_qr_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_qr_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4a9c7783b58ab10d868a3e81a771bb580a219b43 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_qr_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_qr_out : public at::meta::structured_linalg_qr { +void impl(const at::Tensor & A, c10::string_view mode, const at::Tensor & Q, const at::Tensor & R); +}; +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_ex.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_ex.h new file mode 100644 index 0000000000000000000000000000000000000000..fe0750d5a5d0085fd18b99e94aa2a3b1ebe902dd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_ex.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info) +inline ::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) { + return at::_ops::linalg_solve_ex::call(A, B, left, check_errors); +} + +// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info) +inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out(at::Tensor & result, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) { + return at::_ops::linalg_solve_ex_out::call(A, B, left, check_errors, result, info); +} +// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) 
info) +inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_outf(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) { + return at::_ops::linalg_solve_ex_out::call(A, B, left, check_errors, result, info); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0336f0b359be4b490e5e3fb87fbf465d91b1fccf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor linalg_vander(const at::Tensor & x, c10::optional<int64_t> N=c10::nullopt); +TORCH_API at::Tensor linalg_vander_symint(const at::Tensor & x, c10::optional<c10::SymInt> N=c10::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a3c01286e3b06e962ebc6de15323b7c1283c6ade --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_max_pool2d_with_indices_out_cpu : public at::meta::structured_max_pool2d_with_indices { +void impl(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & out, const at::Tensor & indices); +}; +struct TORCH_API structured_max_pool2d_with_indices_out_cuda : public at::meta::structured_max_pool2d_with_indices { +void impl(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & out, const at::Tensor & indices); +}; +} // namespace native
+} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mish_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mish_native.h new file mode 100644 index 0000000000000000000000000000000000000000..eaa436bfbcd5658131655b4de497cfc5c35734fa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mish_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + 
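A minimal usage sketch for linalg_solve_ex declared above: it solves A X = B and reports LAPACK-style status in `info` instead of throwing (sizes illustrative; the sketch is ours, not part of the headers):

#include <ATen/ATen.h>
#include <tuple>

void solve_ex_sketch() {
  at::Tensor A = at::randn({3, 3});
  at::Tensor B = at::randn({3, 2});
  auto [X, info] = at::linalg_solve_ex(A, B, /*left=*/true, /*check_errors=*/false);
  // `info` holds 0 on success; a nonzero value flags a singular input.
}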
+namespace at { +namespace native { +struct TORCH_API structured_mish_out : public at::meta::structured_mish { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_backward_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..cdc16f33d0f151d6aae9b9eedd8dfff0d4a0ddc7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_backward_out(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6); +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_backward(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..69891ac818f712ba34e2599885ce2c5c6a9891c2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & multilabel_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) { + return at::_ops::multilabel_margin_loss_backward_grad_input::call(grad_output, self, target, reduction, is_target, grad_input); +} +// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & multilabel_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) { + return at::_ops::multilabel_margin_loss_backward_grad_input::call(grad_output, self, target, reduction, is_target, grad_input); +} + +// aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor +inline at::Tensor multilabel_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) { + return at::_ops::multilabel_margin_loss_backward::call(grad_output, self, target, reduction, is_target); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..814a88d116834f88290c3c8f9fe4f1ab8942bc2f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out_symint(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> layer_norm_backward_cpu(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask); +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> layer_norm_backward_cuda(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask); +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> layer_norm_backward_nested(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..40af820f1cab913de99c93fccb62bfdec0097eb9 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor nll_loss_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor & nll_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor & nll_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input); +TORCH_API at::Tensor & nll_loss_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor & nll_loss_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c2f6d08ac1f6e9848c01dd4fdce651450553a37a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor orgqr(const at::Tensor & self, const at::Tensor & input2); +TORCH_API at::Tensor & orgqr_out(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out); +} // namespace native
+} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/prod_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/prod_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fca9aa90ca696827834ca588e776fc1d82db1c82 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/prod_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py 
from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt); +TORCH_API at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt); +TORCH_API at::Tensor & prod_outf(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out); + +} // namespace meta
+} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool3d.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool3d.h new file mode 100644 index 0000000000000000000000000000000000000000..5a0296517f0a2d1cad7501dacc55991152ed9d92 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool3d.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor +inline at::Tensor quantized_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::quantized_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode); +} + +// aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantized_max_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::quantized_max_pool3d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out); +} +// aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & quantized_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::quantized_max_pool3d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..db4db85c90ae8c33c7554157eaaa3cca5b5a4b14 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor reciprocal(const at::Tensor & self); +TORCH_API at::Tensor & reciprocal_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & reciprocal_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & reciprocal_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7a73c91f8aeb33d3f59ba055786f0c2452a66e08 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor row_indices_default(const at::Tensor & self); +TORCH_API at::Tensor row_indices_sparse_csr(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9ca3e1ffa90e2ae830644b69c8c6fef7f4e4e2ec --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
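A usage sketch for quantized_max_pool3d above; the input must already be quantized, and backend coverage for this op varies by build, so treat the scale and zero point as illustrative assumptions:

#include <ATen/ATen.h>

void quantized_max_pool3d_sketch() {
  at::Tensor x = at::randn({1, 2, 4, 4, 4});
  at::Tensor qx = at::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/0, at::kQUInt8);
  at::Tensor pooled = at::quantized_max_pool3d(qx, /*kernel_size=*/{2, 2, 2});
}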
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src); +TORCH_API at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..b12d2ca913121e2ecf9bfac216a0b5eefa6ba4df --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sgn_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_sgn : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta
+} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fb29448070f8236a7714864cb707b6b254475943 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor sinc(const at::Tensor & self); +TORCH_API at::Tensor & sinc_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & sinc_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sinc_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2c5865bee332ec9332e96e27e54445a4381b8551 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
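The meta:: declarations above (sinc here) are what enable shape and dtype inference without real data: running the op on a "meta" tensor executes only the meta function. A minimal sketch (names illustrative):

#include <ATen/ATen.h>

void sinc_meta_sketch() {
  at::Tensor t = at::empty({16}, at::TensorOptions().device(at::kMeta));
  at::Tensor r = at::sinc(t); // dispatches to the meta kernel; r is a data-less meta tensor of shape {16}
}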
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor slow_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0); +TORCH_API at::Tensor slow_conv3d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)); +TORCH_API at::Tensor & slow_conv3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0); +TORCH_API at::Tensor & slow_conv3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor & slow_conv3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0)); +TORCH_API at::Tensor & slow_conv3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sort_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sort_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..84ebdf73753208691c30df3b29d97441edb697c9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sort_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
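A minimal sketch for the slow_conv3d composite wrapper above (NCDHW input; shapes are illustrative, and kernel_size must match the weight's trailing dimensions):

#include <ATen/ATen.h>

void slow_conv3d_sketch() {
  at::Tensor input = at::randn({1, 3, 8, 8, 8});  // N, C, D, H, W
  at::Tensor weight = at::randn({4, 3, 3, 3, 3}); // out_channels, in_channels, kD, kH, kW
  at::Tensor out = at::slow_conv3d(input, weight, /*kernel_size=*/{3, 3, 3});
}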
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, c10::optional<bool> stable, int64_t dim=-1, bool descending=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::optional<bool> stable, int64_t dim=-1, bool descending=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_and_clear_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_and_clear_native.h new file mode 100644 index 0000000000000000000000000000000000000000..718cd921048c9b2c2e9c092dc1b8edc13cc1263a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_and_clear_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor sparse_resize_and_clear(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); +TORCH_API const at::Tensor & sparse_resize_and_clear_out(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out); +TORCH_API const at::Tensor & sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); +} // namespace native
+} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ab8120bd229c623244c5ee02841608ef2dfd8dcc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_bessel_j1(const at::Tensor & self); +TORCH_API at::Tensor & special_bessel_j1_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_bessel_j1_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_entr_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_entr_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..01e0b2c9c547a43f14ecd56a622222f6bdd0109b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_entr_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_entr { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_entr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_entr(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_entr_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_entr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..eefb8220d9a92ccbc1d4049f4684f0fae3e925f1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_ndtri : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta
+} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_polygamma.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_polygamma.h new file mode 100644 index 0000000000000000000000000000000000000000..b459a97f6e71dd47f9d87cbec00cfaf3767a3bb1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_polygamma.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::special_polygamma(int n, Tensor self) -> Tensor +inline at::Tensor special_polygamma(int64_t n, const at::Tensor & self) { + return at::_ops::special_polygamma::call(n, self); +} + +// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_polygamma_out(at::Tensor & out, int64_t n, const at::Tensor & self) { + return at::_ops::special_polygamma_out::call(n, self, out); +} +// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & special_polygamma_outf(int64_t n, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_polygamma_out::call(n, self, out); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9d5f6b9ffa5e3a8e2f795902f56d4335c5d67b0f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_backward.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..a6da4e6952c4148dd49bd12cface256bb5af4bd8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor +inline at::Tensor to_mkldnn_backward(const at::Tensor & grad, const at::Tensor & input) { + return at::_ops::to_mkldnn_backward::call(grad, input); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unbind_copy.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unbind_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..3e67a975f91a710c73920e557d42e00cb458b944 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unbind_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[] +inline ::std::vector<at::Tensor> unbind_copy(const at::Tensor & self, int64_t dim=0) { + return at::_ops::unbind_copy_int::call(self, dim); +} + +// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> () +inline void unbind_copy_out(at::TensorList out, const at::Tensor & self, int64_t dim=0) { + return at::_ops::unbind_copy_int_out::call(self, dim, out); +} +// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] 
out) -> () +inline void unbind_copy_outf(const at::Tensor & self, int64_t dim, at::TensorList out) { + return at::_ops::unbind_copy_int_out::call(self, dim, out); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..38be86e2559e8a0409d6994f52f96f42333bbaae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor +inline at::Tensor unfold_backward(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + return at::_ops::unfold_backward::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor unfold_backward(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + return at::_ops::unfold_backward::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step); + } +} + +// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor +inline at::Tensor unfold_backward_symint(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor unfold_backward(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step); + } +} + +// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out); + } +} + +// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) 
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..38be86e2559e8a0409d6994f52f96f42333bbaae
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/unfold_backward_ops.h>
+
+namespace at {
+
+
+// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
+inline at::Tensor unfold_backward(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor unfold_backward(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step);
+  }
+}
+
+// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
+inline at::Tensor unfold_backward_symint(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor unfold_backward(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step);
+  }
+}
+
+// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
+  }
+}
+
+// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & unfold_backward_outf(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
+    return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & unfold_backward_outf(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
+    return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
+  }
+}
+
+// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & unfold_backward_symint_out(at::Tensor & out, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward_out::call(grad_in, input_sizes, dim, size, step, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward_out::call(grad_in, input_sizes, dim, size, step, out);
+  }
+}
+
+// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & unfold_backward_symint_outf(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
+    return at::_ops::unfold_backward_out::call(grad_in, input_sizes, dim, size, step, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & unfold_backward_outf(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
+    return at::_ops::unfold_backward_out::call(grad_in, input_sizes, dim, size, step, out);
+  }
+}
+
+}
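The eight unfold_backward overloads above differ only in SymInt-ness and output handling; the op itself is the adjoint of Tensor::unfold. A minimal sketch under illustrative shapes (assumes ATen/ATen.h):

#include <ATen/ATen.h>

// t.unfold(1, /*size=*/2, /*step=*/1) maps shape {2,5} -> {2,4,2};
// unfold_backward scatter-adds a gradient of that unfolded shape back
// onto the original input shape, accumulating over overlapping windows.
void unfold_backward_sketch() {
  at::Tensor t = at::rand({2, 5});
  at::Tensor unfolded = t.unfold(1, 2, 1);            // shape {2, 4, 2}
  at::Tensor grad_unfolded = at::ones_like(unfolded);
  at::Tensor grad_input = at::unfold_backward(grad_unfolded, {2, 5}, /*dim=*/1, /*size=*/2, /*step=*/1);
  // grad_input has shape {2, 5}
}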
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_chunk_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_chunk_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6badf5a999fd1313c6a3e9530c6f188778ded0f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_chunk_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> unsafe_chunk(const at::Tensor & self, int64_t chunks, int64_t dim=0);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_backward_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..5cc58b1d564821edb0ebc56699d97ae907edaa1c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_upsample_nearest2d_backward : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+} // namespace meta
+} // namespace at
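The unsafe_chunk declaration above is only the native:: implementation signature, but it sits behind the public at::unsafe_chunk wrapper. A sketch (illustrative values; per the PyTorch docs, the "unsafe" prefix roughly means the outputs skip the in-place-modification safety tracking that plain chunk applies):

#include <ATen/ATen.h>
#include <vector>

// Split 9 elements into 3 chunks of 3 along dim 0.
void unsafe_chunk_sketch() {
  at::Tensor t = at::arange(9);
  std::vector<at::Tensor> parts = at::unsafe_chunk(t, /*chunks=*/3, /*dim=*/0);
  // parts.size() == 3; each chunk has shape {3}
}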
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d.h
new file mode 100644
index 0000000000000000000000000000000000000000..da7d4a4460f077b90284a9b247d1a0b1e5079b05
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d.h
@@ -0,0 +1,113 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/upsample_trilinear3d_ops.h>
+
+namespace at {
+
+
+// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+inline at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+    return at::_ops::upsample_trilinear3d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+    return at::_ops::upsample_trilinear3d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
+  }
+}
+
+// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+inline at::Tensor upsample_trilinear3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+    return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+    return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors);
+  }
+}
+
+// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out);
+  }
+}
+
+// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+    return at::_ops::upsample_trilinear3d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+    return at::_ops::upsample_trilinear3d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out);
+  }
+}
+
+// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & upsample_trilinear3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
+  }
+}
+
+// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & upsample_trilinear3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+    return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+    return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
+  }
+}
+
+// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+inline at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w);
+  }
+}
+
+// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+inline at::Tensor upsample_trilinear3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor upsample_trilinear3d(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w);
+  }
+}
+
+}
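To round off the upsampling hunk, a sketch of the fixed-size functional overload above (illustrative 5-D NCDHW shapes; assumes ATen/ATen.h):

#include <ATen/ATen.h>

// Trilinear upsampling of a {N,C,D,H,W} tensor from 2x4x4 to 4x8x8.
// The optional scales default to c10::nullopt, so the kernel infers
// them from the ratio of output_size to the input's spatial sizes.
void upsample_trilinear3d_sketch() {
  at::Tensor input = at::rand({1, 1, 2, 4, 4});
  at::Tensor out = at::upsample_trilinear3d(input, {4, 8, 8}, /*align_corners=*/false);
  // out.sizes() == {1, 1, 4, 8, 8}
}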