diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2d978e6e54168c488dfd4b120b465229c7f3ee8a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _adaptive_avg_pool3d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_adaptive_avg_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self);
+};
+
+struct TORCH_API _adaptive_avg_pool3d_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_adaptive_avg_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..de606b9fbf58001178e402e172dc46edb2436b47
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
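+// Usage sketch (illustrative comment, not emitted by torchgen; assumes a CPU
+// build): _addmm_activation fuses addmm with an activation epilogue, computing
+// beta*self + alpha*(mat1 @ mat2) followed by ReLU, or GELU when use_gelu=true.
+//
+//   at::Tensor bias = at::zeros({2, 2});
+//   at::Tensor a = at::rand({2, 3});
+//   at::Tensor b = at::rand({3, 2});
+//   at::Tensor y = at::cpu::_addmm_activation(bias, a, b, /*beta=*/1, /*alpha=*/1, /*use_gelu=*/false);
+//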
+TORCH_API at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false);
+TORCH_API at::Tensor & _addmm_activation_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false);
+TORCH_API at::Tensor & _addmm_activation_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a55f4c0c9c01b5890a78123a8ca2d87674983e50
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/_addmm_activation_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_addmm_activation_out_cpu : public at::meta::structured__addmm_activation {
+void impl(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, const at::Tensor & out);
+};
+struct TORCH_API structured_addmm_activation_out_cuda : public at::meta::structured__addmm_activation {
+void impl(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_backward_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..770ecb497aaaad010b2a7276a84e71ff6e35a01d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _backward {
+  using schema = void (const at::Tensor &, at::TensorList, const c10::optional<at::Tensor> &, c10::optional<bool>, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()")
+  static void call(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..944890c75838e2fd4374aeeafb40c77a874c5399
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_mode.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_mode.h
new file mode 100644
index 0000000000000000000000000000000000000000..2944b8817228e5f7eef0eb42a614129a5872cc72
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_mode.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_convolution_mode_ops.h>
+
+namespace at {
+
+
+// aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor
+inline at::Tensor _convolution_mode(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::_convolution_mode::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), padding, c10::fromIntArrayRefSlow(dilation), groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _convolution_mode(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::_convolution_mode::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), padding, c10::fromIntArrayRefSlow(dilation), groups);
+  }
+}
+
+// aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor
+inline at::Tensor _convolution_mode_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::_convolution_mode::call(input, weight, bias, stride, padding, dilation, groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _convolution_mode(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::_convolution_mode::call(input, weight, bias, stride, padding, dilation, groups);
+  }
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c498537f98a1d734b7c6989526f42bae8b7daa15
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API int64_t _cufft_get_plan_cache_size(at::DeviceIndex device_index);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5b0c8e928406df7865cc8e503f1e4bfd8be8c3c3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::optional<int64_t> max_seqlen_q, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp=false, c10::optional<double> scale=c10::nullopt, const c10::optional<at::Tensor> & causal_diagonal={}, const c10::optional<at::Tensor> & seqlen_k={});
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized.h
new file mode 100644
index 0000000000000000000000000000000000000000..ea05b2c52de71dc2dd0e5a18a7fea25357c57ae6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized.h
@@ -0,0 +1,113 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_empty_per_channel_affine_quantized_ops.h>
+
+namespace at {
+
+
+// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_per_channel_affine_quantized::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_per_channel_affine_quantized::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+}
+
+// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_per_channel_affine_quantized::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_per_channel_affine_quantized::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
+  }
+}
+
+// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _empty_per_channel_affine_quantized(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+}
+
+// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _empty_per_channel_affine_quantized(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
+  }
+}
+
+// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_per_channel_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_per_channel_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _empty_per_channel_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_per_channel_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, memory_format, out);
+  }
+}
+
+// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_per_channel_affine_quantized_outf(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_empty_per_channel_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _empty_per_channel_affine_quantized_outf(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_empty_per_channel_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, memory_format, out);
+  }
+}
+
+// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_per_channel_affine_quantized_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales, zero_points, axis, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _empty_per_channel_affine_quantized_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales, zero_points, axis, memory_format, out);
+  }
+}
+
+// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_per_channel_affine_quantized_symint_outf(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales, zero_points, axis, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _empty_per_channel_affine_quantized_outf(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales, zero_points, axis, memory_format, out);
+  }
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_euclidean_dist.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_euclidean_dist.h
new file mode 100644
index 0000000000000000000000000000000000000000..b350e32aef33da455cd371108a2c293b3a1b95c0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_euclidean_dist.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_euclidean_dist_ops.h>
+
+namespace at {
+
+
+// aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor
+inline at::Tensor _euclidean_dist(const at::Tensor & x1, const at::Tensor & x2) {
+    return at::_ops::_euclidean_dist::call(x1, x2);
+}
+
+// aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _euclidean_dist_out(at::Tensor & out, const at::Tensor & x1, const at::Tensor & x2) {
+    return at::_ops::_euclidean_dist_out::call(x1, x2, out);
+}
+// aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _euclidean_dist_outf(const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) {
+    return at::_ops::_euclidean_dist_out::call(x1, x2, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0758ebb4eac18683897072174139779648c12629
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_clamp_max_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, at::TensorList other);
+TORCH_API void _foreach_clamp_max_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_clamp_max_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..cd4d54f6a93bf07a1544fcf524ac000ed29dcec8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
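+// Usage sketch (illustrative comment, not emitted by torchgen): each operator
+// struct exposes the registered schema plus static call/redispatch entry points.
+// call() goes through the dispatcher; redispatch() takes an explicit
+// DispatchKeySet and is used by kernels that need to re-enter the dispatcher.
+// A hypothetical caller:
+//
+//   std::vector<at::Tensor> outs = at::_ops::_foreach_exp::call(tensors);
+//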
+struct TORCH_API _foreach_exp {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_exp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_exp(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_exp_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_exp_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_exp_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_exp_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_exp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log10_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log10_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cdb71d893575167705245a191da35f724a8e1ef9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log10_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_log10(at::TensorList self);
+TORCH_API void _foreach_log10_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tanh_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tanh_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b261fc131cb71b27ac2448fac375de2f33c6c06
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tanh_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_tanh {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_tanh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_tanh(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_tanh_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_tanh_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_tanh_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_tanh_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_tanh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_for_size.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_for_size.h
new file mode 100644
index 0000000000000000000000000000000000000000..3baf9b39b8dfcd040684f7e6415528ef4b286a16
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_for_size.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_functional_sym_constrain_range_for_size_ops.h>
+
+namespace at {
+
+
+// aten::_functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor
+inline at::Tensor _functional_sym_constrain_range_for_size(const at::Scalar & size, c10::optional<int64_t> min, c10::optional<int64_t> max, const at::Tensor & dep_token) {
+    return at::_ops::_functional_sym_constrain_range_for_size::call(size, min, max, dep_token);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..aa91848397008a42ce309455a1625c7c52110e89
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _fw_primal(const at::Tensor & self, int64_t level);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_has_compatible_shallow_copy_type_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_has_compatible_shallow_copy_type_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ea20d4091fb7a7bb62eb0f008fda4974fe33389a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_has_compatible_shallow_copy_type_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API bool _has_compatible_shallow_copy_type(const at::Tensor & self, const at::Tensor & from);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_compositeexplicitautogradnonfunctional_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6782dbc7fb49e1d14e0983ab335d4000f6b364a4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
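+// Usage sketch (illustrative comment only): _linalg_slogdet is the kernel
+// behind torch.linalg.slogdet; it returns (sign, logabsdet, LU, pivots),
+// where the last two carry the factorization reused by the backward pass.
+// A hypothetical caller through the public ATen API:
+//
+//   auto [sign, logabsdet, LU, pivots] = at::_linalg_slogdet(A);
+//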
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet(const at::Tensor & A);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..db3bcec2901141a19dc6f51f6e7267cdd2220528
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _make_dual_copy_out(at::Tensor & out, const at::Tensor & primal, const at::Tensor & tangent, int64_t level);
+TORCH_API at::Tensor & _make_dual_copy_outf(const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..55754cf1bac3245a075d856af87828a72d40b98e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
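+// Usage sketch (illustrative comment only): _make_dual pairs a primal tensor
+// with a tangent for forward-mode AD; the schema's Tensor(a) annotation means
+// the result aliases the primal. A hypothetical caller inside an active
+// forward-AD level:
+//
+//   at::Tensor dual = at::_ops::_make_dual::call(primal, tangent, /*level=*/0);
+//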
+struct TORCH_API _make_dual {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_make_dual")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & primal, const at::Tensor & tangent, int64_t level);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_draw_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_draw_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf852df3d33d500d764caed98957dc8d8d275bde
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_draw_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sobol_engine_draw {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t, const at::Tensor &, int64_t, int64_t, c10::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sobol_engine_draw")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_linear_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_linear_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b030bce65f11cbcbe3e3b7cef38beb0d6cb22838
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_linear_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_semi_structured_linear {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, c10::optional<c10::string_view>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_semi_structured_linear")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const c10::optional<at::Tensor> & bias, c10::optional<c10::string_view> activation);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const c10::optional<at::Tensor> & bias, c10::optional<c10::string_view> activation);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sparse_matmul_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sparse_matmul_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8fe13e9be8a8a7fd454c42e9086be6ab256daaa7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sparse_matmul_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_sparse_matmul {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_sparse_matmul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API _sparse_sparse_matmul_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_sparse_matmul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_backward_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ea78d987886d672740ce2d1c7e08ffc965acc5dd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_backward_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_out(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward_cuda(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b4516bf8904e1553e9cce2b806919e35ee9343ba
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/adaptive_max_pool2d_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_adaptive_max_pool2d_backward_out_cpu : public at::meta::structured_adaptive_max_pool2d_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_adaptive_max_pool2d_backward_out_cuda : public at::meta::structured_adaptive_max_pool2d_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9bcb852633608c23608b99fb457032332ff03b85
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor batch_norm_backward_elemt(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_elemt_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_elemt_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f750d87abae951267a775c1adf3cfff629b8542e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_elemt_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor batch_norm_elemt(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps);
+TORCH_API at::Tensor & batch_norm_elemt_out(at::Tensor & out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps);
+TORCH_API at::Tensor & batch_norm_elemt_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..96424b9faaacf5e6d12e4fc914e06625fb27de90
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_meta_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & bernoulli_(at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & bernoulli_(at::Tensor & self, double p=0.5, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd751d5b67fa2fc4dec286e3dc6d5e323313a62f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/binary_cross_entropy_with_logits_ops.h>
+
+namespace at {
+
+
+// aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
+inline at::Tensor binary_cross_entropy_with_logits(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & pos_weight={}, int64_t reduction=at::Reduction::Mean) {
+    return at::_ops::binary_cross_entropy_with_logits::call(self, target, weight, pos_weight, reduction);
+}
+
+// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & binary_cross_entropy_with_logits_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & pos_weight={}, int64_t reduction=at::Reduction::Mean) {
+    return at::_ops::binary_cross_entropy_with_logits_out::call(self, target, weight, pos_weight, reduction, out);
+}
+// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & binary_cross_entropy_with_logits_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) {
+    return at::_ops::binary_cross_entropy_with_logits_out::call(self, target, weight, pos_weight, reduction, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binomial.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binomial.h
new file mode 100644
index 0000000000000000000000000000000000000000..f0a3eb2d1fdde291fa0df689203fe682063f2852
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binomial.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/binomial_ops.h>
+
+namespace at {
+
+
+// aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
+inline at::Tensor binomial(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator=c10::nullopt) {
+    return at::_ops::binomial::call(count, prob, generator);
+}
+
+// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & binomial_out(at::Tensor & out, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator=c10::nullopt) {
+    return at::_ops::binomial_out::call(count, prob, generator, out);
+}
+// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & binomial_outf(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator, at::Tensor & out) {
+    return at::_ops::binomial_out::call(count, prob, generator, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..42a10def65600bb8d0e75f715a62b7bb5e1f2a9a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/bitwise_right_shift_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_bitwise_right_shift_out : public at::meta::structured_bitwise_right_shift_Tensor {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+TORCH_API at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & bitwise_right_shift_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_right_shift_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor bitwise_right_shift(const at::Scalar & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_right_shift_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a624b4e2dabd55b0086d5d64e4a1a5bc69664dfa
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor cholesky(const at::Tensor & self, bool upper=false);
+TORCH_API at::Tensor & cholesky_out(const at::Tensor & self, bool upper, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..53e8b1502f9c757594c7e02938720593df2b5870
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
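+// Usage sketch (illustrative comment only): the functional and out= variants
+// share the "aten::cholesky" name and differ by overload_name. A hypothetical
+// call on a positive-definite matrix:
+//
+//   at::Tensor A = at::rand({3, 3});
+//   A = A.matmul(A.t()) + 3 * at::eye(3);   // make A positive definite
+//   at::Tensor L = at::_ops::cholesky::call(A, /*upper=*/false);
+//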
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API cholesky_out { + using schema = at::Tensor & (const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cholesky") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, bool upper, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out); +}; + +struct TORCH_API cholesky { + using schema = at::Tensor (const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cholesky") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cholesky(Tensor self, bool upper=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, bool upper); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/clip_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/clip_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a62d2b2ad8f0924b278c0b9acb5c3a99cca22ae1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/clip_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor clip(const at::Tensor & self, const c10::optional & min=c10::nullopt, const c10::optional & max=c10::nullopt); +TORCH_API at::Tensor & clip_out(const at::Tensor & self, const c10::optional & min, const c10::optional & max, at::Tensor & out); +TORCH_API at::Tensor & clip_(at::Tensor & self, const c10::optional & min=c10::nullopt, const c10::optional & max=c10::nullopt); +TORCH_API at::Tensor clip(const at::Tensor & self, const c10::optional & min={}, const c10::optional & max={}); +TORCH_API at::Tensor & clip_out(const at::Tensor & self, const c10::optional & min, const c10::optional & max, at::Tensor & out); +TORCH_API at::Tensor & clip_(at::Tensor & self, const c10::optional & min={}, const c10::optional & max={}); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/concatenate.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/concatenate.h new file mode 100644 index 0000000000000000000000000000000000000000..82023be2c77bbe6fed6785f42143ad2f879df368 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/concatenate.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor +inline at::Tensor concatenate(at::TensorList tensors, int64_t dim=0) { + return at::_ops::concatenate::call(tensors, dim); 
+} + +// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & concatenate_out(at::Tensor & out, at::TensorList tensors, int64_t dim=0) { + return at::_ops::concatenate_out::call(tensors, dim, out); +} +// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & concatenate_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) { + return at::_ops::concatenate_out::call(tensors, dim, out); +} + +// aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor +inline at::Tensor concatenate(at::TensorList tensors, at::Dimname dim) { + return at::_ops::concatenate_names::call(tensors, dim); +} + +// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & concatenate_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim) { + return at::_ops::concatenate_names_out::call(tensors, dim, out); +} +// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & concatenate_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out) { + return at::_ops::concatenate_names_out::call(tensors, dim, out); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6e1d10c6a8e0e333dccceadcb4c33eafa71d0492 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor cosh(const at::Tensor & self); +TORCH_API at::Tensor & cosh_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & cosh_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & cosh_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cov_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cov_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..83b807f429a08a6750c7356997d841df1f88e553 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cov_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
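+// Usage sketch for aten::cov declared below (illustrative): `correction`,
+// `fweights`, and `aweights` are keyword-only in the schema. Assumes
+// <ATen/ATen.h>.
+//
+//   at::Tensor x  = at::randn({2, 4});              // 2 variables, 4 observations
+//   at::Tensor c  = at::cov(x);                     // 2x2 sample covariance (correction=1)
+//   at::Tensor c0 = at::cov(x, /*correction=*/0);   // population covariance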
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API cov { + using schema = at::Tensor (const at::Tensor &, int64_t, const c10::optional &, const c10::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cov") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t correction, const c10::optional & fweights, const c10::optional & aweights); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t correction, const c10::optional & fweights, const c10::optional & aweights); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b234d1c6c651e39ce3665788f5f94be4c3cdb935 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple cudnn_batch_norm_backward(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, const at::Tensor & reserveSpace); + +} // namespace cuda +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/div_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/div_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7d17a4be1d2af72360d110a36905eb38c023a3b2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/div_ops.h @@ -0,0 +1,149 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
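+// Usage sketch for the div overloads declared below (illustrative): the
+// *_mode variants take an optional rounding mode ("trunc" or "floor");
+// passing c10::nullopt keeps true division. Assumes <ATen/ATen.h>.
+//
+//   at::Tensor a = at::full({1}, -7.0);
+//   at::Tensor b = at::full({1},  2.0);
+//   at::div(a, b);             // -> -3.5  (true division)
+//   at::div(a, b, "trunc");    // -> -3.   (round toward zero)
+//   at::div(a, b, "floor");    // -> -4.   (round toward -infinity)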
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API div_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Tensor(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API div__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API div_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API div_Tensor_mode { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_mode") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode); +}; + +struct TORCH_API div__Tensor_mode { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_mode") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? 
rounding_mode) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode); +}; + +struct TORCH_API div_out_mode { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out_mode") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode, at::Tensor & out); +}; + +struct TORCH_API div_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Scalar(Tensor self, Scalar other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API div__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API div_Scalar_mode { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_mode") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other, c10::optional rounding_mode); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional rounding_mode); +}; + +struct TORCH_API div__Scalar_mode { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_mode") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div_.Scalar_mode(Tensor(a!) 
self, Scalar other, *, str? rounding_mode) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other, c10::optional rounding_mode); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, c10::optional rounding_mode); +}; + +struct TORCH_API div_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API div_Scalar_mode_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_mode_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, c10::optional rounding_mode, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional rounding_mode, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_bag_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_bag_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..45ea8ea2f65380a8265f1bb483da9a50e4d09c16 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_bag_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API embedding_bag { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::embedding_bag") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? 
per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset); +}; + +struct TORCH_API embedding_bag_padding_idx { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::embedding_bag") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "padding_idx") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, c10::optional padding_idx); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, c10::optional padding_idx); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/erf_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/erf_native.h new file mode 100644 index 0000000000000000000000000000000000000000..06d065ba684412703fb23b6e44b030fab7b99231 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/erf_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_erf_out : public at::meta::structured_erf { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor erf_sparse(const at::Tensor & self); +TORCH_API at::Tensor & erf_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & erf_sparse_(at::Tensor & self); +TORCH_API at::Tensor erf_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & erf_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & erf_sparse_csr_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fmax.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fmax.h new file mode 100644 index 0000000000000000000000000000000000000000..672569e3f03a1e37302aea16ea848c542c9f831f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fmax.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py 
from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fmax(Tensor self, Tensor other) -> Tensor +inline at::Tensor fmax(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmax::call(self, other); +} + +// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fmax_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmax_out::call(self, other, out); +} +// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fmax_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::fmax_out::call(self, other, out); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_native.h new file mode 100644 index 0000000000000000000000000000000000000000..49aed94e12452bf3b12a74468114e2be1efe6e5a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +TORCH_API at::Tensor fmod(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & fmod_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & fmod_(at::Tensor & self, const at::Scalar & other); +struct TORCH_API structured_fmod_out : public at::meta::structured_fmod_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/glu_jvp_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/glu_jvp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e189038097f903385fa20ee7fc0097659c4b78ca --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/glu_jvp_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & glu_jvp_out(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out); +TORCH_API at::Tensor glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink.h new file mode 100644 index 0000000000000000000000000000000000000000..8a4c0618ef0f7e98dbb88fdf2cf6219f938a1c1a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) 
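+// Usage sketch for hardshrink and hardshrink_out in this header
+// (illustrative): entries with |x| <= lambd are zeroed, the rest pass
+// through unchanged. Assumes <ATen/ATen.h>.
+//
+//   at::Tensor x = at::full({1}, 0.3);
+//   at::hardshrink(x);                  // -> 0.   (|0.3| <= 0.5, the default lambd)
+//   at::hardshrink(x, /*lambd=*/0.25);  // -> 0.3  (kept: |0.3| > 0.25)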
+inline at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5) { + return at::_ops::hardshrink_out::call(self, lambd, out); +} +// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) { + return at::_ops::hardshrink_out::call(self, lambd, out); +} + +// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor +inline at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5) { + return at::_ops::hardshrink::call(self, lambd); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f894d1b0cc209549f95060ce8d2a94b08ceabb54 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a12333ade3b0ffd94d8705486b9fdcc1f71928a7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
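+// Usage sketch for the im2col kernels declared above (illustrative): unfolds
+// 3x3 patches of an NCHW input into columns. Assumes <ATen/ATen.h>.
+//
+//   at::Tensor x = at::randn({1, 3, 8, 8});
+//   at::Tensor cols = at::im2col(x, /*kernel_size=*/{3, 3}, /*dilation=*/{1, 1},
+//                                /*padding=*/{0, 0}, /*stride=*/{1, 1});
+//   // cols has shape [1, 3*3*3, 6*6] == [1, 27, 36]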
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor & index_copy_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_native.h new file mode 100644 index 0000000000000000000000000000000000000000..31c952f6e2e2c3f9210e77892867ead25b650192 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_index_reduce_cpu_out : public at::meta::structured_index_reduce { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, const at::Tensor & out); +}; +struct TORCH_API structured_index_reduce_cuda_out : public at::meta::structured_index_reduce { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a4e7e3a6ae918f437a65a7efa32ebca8ad052dd9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
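+// Usage sketch for index_copy declared above (illustrative): copies rows of
+// `source` into `self` at the positions in `index` along the given dim; the
+// integer-dim overload shown here lives alongside the Dimname one. Assumes
+// <ATen/ATen.h>.
+//
+//   at::Tensor self   = at::zeros({5, 3});
+//   at::Tensor index  = at::tensor({0, 4}, at::kLong);
+//   at::Tensor source = at::ones({2, 3});
+//   at::Tensor result = at::index_copy(self, 0, index, source);  // rows 0 and 4 become ones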
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isreal(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/item_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/item_native.h new file mode 100644 index 0000000000000000000000000000000000000000..90652f887a66b19fcfe472e21ef635d17b47c00c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/item_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Scalar item(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e06853c7cd10ba29e532f96be0e71d2765c4d6e7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
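+// Usage sketch for isreal and item declared above (illustrative): `item`
+// extracts the value of a single-element tensor as an at::Scalar. Assumes
+// <ATen/ATen.h>.
+//
+//   at::Tensor t = at::arange(4.0);         // {0., 1., 2., 3.}
+//   at::Scalar s = t.sum().item();          // at::Scalar holding 6.0
+//   double v     = t.sum().item<double>();  // typed extraction
+//   at::Tensor r = at::isreal(t);           // all true for a floating-point tensor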
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple linalg_lu_factor(const at::Tensor & A, bool pivot=true); +TORCH_API ::std::tuple linalg_lu_factor_out(at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot=true); +TORCH_API ::std::tuple linalg_lu_factor_outf(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..46de2e38358492cbaf9c528f244d348bb43616e8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_lu_factor_ex : public at::impl::MetaBase { + + + void meta(const at::Tensor & A, bool pivot, bool check_errors); +}; + +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/log.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/log.h new file mode 100644 index 0000000000000000000000000000000000000000..b00546b8509551a84d9f1b632a659739433d05fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/log.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::log(Tensor self) -> Tensor +inline at::Tensor log(const at::Tensor & self) { + return at::_ops::log::call(self); +} + +// aten::log_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & log_(at::Tensor & self) { + return at::_ops::log_::call(self); +} + +// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::log_out::call(self, out); +} +// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::log_out::call(self, out); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f7312d370d55e8dab6efea20fd6c723ba311252b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
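+// Usage sketch for linalg_lu_factor declared above (illustrative): returns
+// the packed LU factors and pivot indices. Assumes <ATen/ATen.h> and <tuple>.
+//
+//   at::Tensor A = at::randn({4, 4});
+//   at::Tensor LU, pivots;
+//   std::tie(LU, pivots) = at::linalg_lu_factor(A);      // pivot=true by default
+//   at::linalg_lu_factor_out(LU, pivots, A);             // pre-allocated variant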
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API log_normal_ { + using schema = at::Tensor & (at::Tensor &, double, double, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_normal_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, double mean, double std, c10::optional generator); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean, double std, c10::optional generator); +}; + +struct TORCH_API log_normal_out { + using schema = at::Tensor & (const at::Tensor &, double, double, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_normal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, double mean, double std, c10::optional generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional generator, at::Tensor & out); +}; + +struct TORCH_API log_normal { + using schema = at::Tensor (const at::Tensor &, double, double, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_normal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, double mean, double std, c10::optional generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional generator); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c345b4a6bbc98cbece296a63424140b648e79373 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
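+// Usage sketch for log_normal_ declared below (illustrative): fills `self`
+// in place with samples whose logarithm is normally distributed; note the
+// schema defaults are mean=1, std=2. Assumes <ATen/ATen.h>.
+//
+//   at::Tensor t = at::empty({3});
+//   t.log_normal_();                           // mean=1.0, std=2.0
+//   t.log_normal_(/*mean=*/0.0, /*std=*/1.0);  // standard log-normal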
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API masked_fill__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::masked_fill_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +}; + +struct TORCH_API masked_fill_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::masked_fill") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +}; + +struct TORCH_API masked_fill__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::masked_fill_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +}; + +struct TORCH_API masked_fill_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::masked_fill") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +}; + +struct TORCH_API masked_fill_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::masked_fill") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out); +}; + +struct TORCH_API masked_fill_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::masked_fill") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6eb548130c5cf2cfc2f9bc972db8ab7e44bd0059 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ec662c3cc0025ecec01ea8c8d67456b968d807c4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & mkldnn_max_pool3d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); +TORCH_API at::Tensor mkldnn_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/new_zeros_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/new_zeros_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b90683d02e99303a580d20bde87b654bfb283005 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/new_zeros_compositeexplicitautograd_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor new_zeros(const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={}); +TORCH_API at::Tensor new_zeros(const at::Tensor & self, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor new_zeros_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={}); +TORCH_API at::Tensor new_zeros_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor & new_zeros_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size); +TORCH_API at::Tensor & new_zeros_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out); +TORCH_API at::Tensor & new_zeros_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size); +TORCH_API at::Tensor & new_zeros_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/polygamma_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/polygamma_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e58f09a9924da56a1271ac7297079fa5b33a206e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/polygamma_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_polygamma_out : public at::meta::structured_polygamma { +void impl(int64_t n, const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor & polygamma_(at::Tensor & self, int64_t n); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like.h new file mode 100644 index 0000000000000000000000000000000000000000..66acc78b2b067a051cb7f85ac6b10cf7b8a7610b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like.h @@ -0,0 +1,201 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor
+inline at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+  return at::_ops::randint_like::call(self, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::randint_like::call(self, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+}
+
+// aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor randint_like(const at::Tensor & self, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+  return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor randint_like(const at::Tensor & self, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
+  }
+}
+
+// aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt high, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+  return at::_ops::randint_like::call(self, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor randint_like(const at::Tensor & self, c10::SymInt high, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::randint_like::call(self, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+}
+
+// aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+  return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor randint_like(const at::Tensor & self, c10::SymInt high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
+  }
+}
+
+// aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+  return at::_ops::randint_like_low_dtype::call(self, low, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::randint_like_low_dtype::call(self, low, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+}
+
+// aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+  return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
+  }
+}
+
+// aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+  return at::_ops::randint_like_low_dtype::call(self, low, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor randint_like(const at::Tensor & self, c10::SymInt low, c10::SymInt high, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::randint_like_low_dtype::call(self, low, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+}
+
+// aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+  return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor randint_like(const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
+  }
+}
+
+// aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+  return at::_ops::randint_like_out::call(self, high, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::randint_like_out::call(self, high, memory_format, out);
+  }
+}
+
+// aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & randint_like_outf(const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+  return at::_ops::randint_like_out::call(self, high, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & randint_like_outf(const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::randint_like_out::call(self, high, memory_format, out);
+  }
+}
+
+// aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & randint_like_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymInt high, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+  return at::_ops::randint_like_out::call(self, high, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, c10::SymInt high, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::randint_like_out::call(self, high, memory_format, out);
+  }
+}
+
+// aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & randint_like_symint_outf(const at::Tensor & self, c10::SymInt high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+  return at::_ops::randint_like_out::call(self, high, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & randint_like_outf(const at::Tensor & self, c10::SymInt high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::randint_like_out::call(self, high, memory_format, out);
+  }
+}
+
+// aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+  return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+  }
+}
+
+// aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & randint_like_outf(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+  return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & randint_like_outf(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+  }
+}
+
+// aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & randint_like_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+  return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+  }
+}
+
+// aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & randint_like_symint_outf(const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+  return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & randint_like_outf(const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
+  }
+}
+
+}
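Editor's note: a minimal caller-side sketch of the overload family above (illustrative only; the demo function name is hypothetical, and this assumes a program linked against libtorch). The TensorOptions overload unpacks dtype/layout/device before reaching at::_ops::randint_like::call, while the explicit-optionals overload passes each c10::optional slot through unchanged:

    #include <ATen/ATen.h>

    void randint_like_demo() {
      at::Tensor self = at::zeros({2, 3});
      // TensorOptions-style overload.
      at::Tensor a = at::randint_like(self, /*high=*/10, self.options().dtype(at::kLong));
      // Explicit-optionals overload (low_dtype variant): every slot spelled out.
      at::Tensor b = at::randint_like(self, /*low=*/5, /*high=*/10,
                                      at::kLong, c10::nullopt, c10::nullopt,
                                      c10::nullopt, c10::nullopt);
    }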
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/remainder_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/remainder_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7bd383a5a8f6273f1841237147cd44c7b199e566
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/remainder_ops.h
@@ -0,0 +1,105 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API remainder_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::remainder")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+struct TORCH_API remainder_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::remainder")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "remainder.Scalar(Tensor self, Scalar other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API remainder__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::remainder_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API remainder_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::remainder")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API remainder_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::remainder")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "remainder.Tensor(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API remainder__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::remainder_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API remainder_Scalar_Tensor {
+  using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::remainder")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Scalar & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other);
+};
+
+struct TORCH_API remainder_Scalar_Tensor_out {
+  using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::remainder")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
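Editor's note: a caller-side sketch (illustrative; assumes libtorch). Each _ops struct above pairs the operator's schema metadata with static call/redispatch entry points, and the public at::remainder wrappers forward to ::call:

    #include <ATen/ATen.h>
    #include <ATen/ops/remainder_ops.h>

    void remainder_demo() {
      at::Tensor a = at::arange(6, at::kFloat);
      at::Tensor r1 = at::remainder(a, 4.0);                     // public wrapper (Scalar overload)
      at::Tensor r2 = at::_ops::remainder_Scalar::call(a, 4.0);  // same dispatcher entry, invoked directly
    }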
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4465aa774fce171038b5677c963def97ae61a5bf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor replication_pad2d(const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor replication_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & replication_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & replication_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out);
+TORCH_API at::Tensor & replication_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & replication_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
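Editor's note: a usage sketch for the declarations above (illustrative; calling the at::cuda:: flavor directly assumes a CUDA build, so this goes through the generic at:: entry, which picks the backend from the input). Padding order for 2-D replication padding is {left, right, top, bottom}:

    #include <ATen/ATen.h>

    void replication_pad_demo() {
      at::Tensor x = at::rand({1, 1, 4, 4});
      // Edge values are repeated outward; result shape is {1, 1, 8, 6}.
      at::Tensor y = at::replication_pad2d(x, {1, 1, 2, 2});
    }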
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/resize.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/resize.h
new file mode 100644
index 0000000000000000000000000000000000000000..2121dc2824a1674c7ed70e9c007d093c0ac59f3f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/resize.h
@@ -0,0 +1,105 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/resize_ops.h>
+
+namespace at {
+
+
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::resize_::call(self, c10::fromIntArrayRefSlow(size), memory_format);
+  }
+}
+
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  const at::Tensor & resize_(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::resize_::call(self, size, memory_format);
+  }
+}
+
+// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+  return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out);
+  }
+}
+
+// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline const at::Tensor & resize_outf(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
+  return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  const at::Tensor & resize_outf(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
+    return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out);
+  }
+}
+
+// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline const at::Tensor & resize_symint_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+  return at::_ops::resize_out::call(self, size, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::resize_out::call(self, size, memory_format, out);
+  }
+}
+
+// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline const at::Tensor & resize_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
+  return at::_ops::resize_out::call(self, size, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  const at::Tensor & resize_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
+    return at::_ops::resize_out::call(self, size, memory_format, out);
+  }
+}
+
+// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor resize(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+  return at::_ops::resize::call(self, c10::fromIntArrayRefSlow(size), memory_format);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor resize(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::resize::call(self, c10::fromIntArrayRefSlow(size), memory_format);
+  }
+}
+
+// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor resize_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+  return at::_ops::resize::call(self, size, memory_format);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor resize(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::resize::call(self, size, memory_format);
+  }
+}
+
+}
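Editor's note: a sketch of how the at::symint:: templates above are meant to be used (illustrative, caller-side code; not part of the header). The std::enable_if_t guard keys each overload on an explicit template argument rather than on deduction, so generic code can select the IntArrayRef or SymIntArrayRef flavor of the same op:

    #include <ATen/ATen.h>
    #include <ATen/ops/resize.h>
    #include <vector>

    void resize_demo(const at::Tensor & t) {
      at::symint::resize_<int64_t>(t, {4, 4});     // concrete integer sizes
      std::vector<c10::SymInt> sizes = {c10::SymInt(4), c10::SymInt(4)};
      at::symint::resize_<c10::SymInt>(t, sizes);  // symbolic sizes (SymIntArrayRef)
    }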
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scatter.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scatter.h
new file mode 100644
index 0000000000000000000000000000000000000000..e745cac7818056eec5ece0de0deed2d5e66a1fca
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scatter.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/scatter_ops.h>
+
+namespace at {
+
+
+// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
+inline at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
+  return at::_ops::scatter_src::call(self, dim, index, src);
+}
+
+// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
+  return at::_ops::scatter_src_out::call(self, dim, index, src, out);
+}
+// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
+  return at::_ops::scatter_src_out::call(self, dim, index, src, out);
+}
+
+// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
+inline at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
+  return at::_ops::scatter_value::call(self, dim, index, value);
+}
+
+// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
+  return at::_ops::scatter_value_out::call(self, dim, index, value, out);
+}
+// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
+  return at::_ops::scatter_value_out::call(self, dim, index, value, out);
+}
+
+// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
+inline at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
+  return at::_ops::scatter_reduce::call(self, dim, index, src, reduce);
+}
+
+// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
+  return at::_ops::scatter_reduce_out::call(self, dim, index, src, reduce, out);
+}
+// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) {
+  return at::_ops::scatter_reduce_out::call(self, dim, index, src, reduce, out);
+}
+
+// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
+inline at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
+  return at::_ops::scatter_value_reduce::call(self, dim, index, value, reduce);
+}
+
+// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
+  return at::_ops::scatter_value_reduce_out::call(self, dim, index, value, reduce, out);
+}
+// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) {
+  return at::_ops::scatter_value_reduce_out::call(self, dim, index, value, reduce, out);
+}
+
+// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
+inline at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
+  return at::_ops::scatter_dimname_src::call(self, dim, index, src);
+}
+
+// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
+inline at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
+  return at::_ops::scatter_dimname_value::call(self, dim, index, value);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..1176edc2208a7ef30b2265f30f8e60a194fdc232
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_scatter_reduce_two : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self);
+};
+
+} // namespace meta
+} // namespace at
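Editor's note: a caller-side sketch of the scatter family above (illustrative; the demo names are hypothetical). scatter.src writes src values into a copy of self at positions given by index along dim; the value overload broadcasts one Scalar; the reduce overloads combine with "add" or "multiply" instead of overwriting:

    #include <ATen/ATen.h>

    void scatter_demo() {
      at::Tensor self  = at::zeros({3, 4});
      at::Tensor src   = at::ones({1, 4});
      at::Tensor index = at::zeros({1, 4}, at::kLong);            // target row 0
      at::Tensor a = at::scatter(self, /*dim=*/0, index, src);    // src overload
      at::Tensor b = at::scatter(self, 0, index, /*value=*/2.5);  // value overload
      at::Tensor c = at::scatter(self, 0, index, src, "add");     // reduce overload
    }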
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..312e2cc9753e03daf126579019b7173ca6ce4fee
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_bessel_j0 {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_bessel_j0")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_bessel_j0(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API special_bessel_j0_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_bessel_j0")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
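Editor's note: a sketch of what these Operator.h structs are for (illustrative; assumes the ATen dispatcher is available at runtime). The name/overload_name members let code locate the schema in the dispatcher, and `call` is the statically-typed entry the public API uses:

    #include <ATen/ATen.h>
    #include <ATen/core/dispatch/Dispatcher.h>
    #include <ATen/ops/special_bessel_j0_ops.h>

    void bessel_j0_demo() {
      // Runtime lookup by (name, overload_name), as generated code does:
      auto handle = c10::Dispatcher::singleton().findSchemaOrThrow(
          at::_ops::special_bessel_j0::name, at::_ops::special_bessel_j0::overload_name);
      // Statically-typed path; equivalent to at::special_bessel_j0(x):
      at::Tensor y = at::_ops::special_bessel_j0::call(at::rand({3}));
      (void)handle; (void)y;
    }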
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a942097b4f90295027b67145b06d4207cc292794
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor special_expit(const at::Tensor & self);
+TORCH_API at::Tensor & special_expit_out(const at::Tensor & self, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b9c948b04b9f25d6627e1f3e5d35b06790cefb9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..5c20a02a5a3d94700bdfc3ca635e854bd44f83d0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_special_xlog1py : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/square_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/square_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..82e8a9f3909816ffca6d42ea8319030a2f9064b8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/square_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor square(const at::Tensor & self);
+TORCH_API at::Tensor & square_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & square_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & square_(at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
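Editor's note on the dispatch key in the header above (illustrative sketch; demo name hypothetical): CompositeImplicitAutograd ops like square have no per-device kernel; they decompose into other ATen ops, so autograd works through the decomposition:

    #include <ATen/ATen.h>

    void square_demo() {
      at::Tensor x = at::arange(4, at::kFloat);
      at::Tensor y = at::square(x);       // functional form
      at::Tensor out = at::empty_like(x);
      at::square_out(out, x);             // out= form declared above
    }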
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sub_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sub_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..295e9658d8ffe3b812227696874143f853593190
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sub_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+
+} // namespace cuda
+} // namespace at
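Editor's note: a sketch of the alpha convention in the declarations above (illustrative; this runs through the generic at:: entry on any backend, while the at::cuda:: symbols here are the CUDA-key bindings of the same schema). sub computes self - alpha * other, with alpha defaulting to 1:

    #include <ATen/ATen.h>

    void sub_demo() {
      at::Tensor a = at::full({2}, 10.0);
      at::Tensor b = at::full({2}, 3.0);
      at::Tensor r = at::sub(a, b, /*alpha=*/2);  // elementwise 10 - 2*3 = 4
    }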
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/t_copy_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/t_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6a17431bca56efdd5c9a32ef140839760e070546
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/t_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API t_copy {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::t_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "t_copy(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API t_copy_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::t_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3acd9f5400fb5913fac572cf46b29bc31b25942
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor upsample_linear1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input);
+TORCH_API at::Tensor & upsample_linear1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_linear1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
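Editor's note: a shape-level sketch of the backward signature above (illustrative; it uses the generic at:: entry rather than the CUDA-key symbols, which assume a CUDA build). output_size is the upsampled length and input_size is the full {N, C, W} input shape, which the backward needs to invert the interpolation:

    #include <ATen/ATen.h>

    void upsample_linear1d_backward_demo() {
      at::Tensor grad_out = at::rand({1, 1, 8});
      at::Tensor grad_in = at::upsample_linear1d_backward(
          grad_out, /*output_size=*/{8}, /*input_size=*/{1, 1, 4},
          /*align_corners=*/false, /*scales=*/c10::nullopt);  // grad_in: {1, 1, 4}
    }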
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..277cefbb493de29a1efbd0df258b26625a81814a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API upsample_nearest3d_vec {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, c10::optional<c10::ArrayRef<double>>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<c10::ArrayRef<double>> scale_factors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<c10::ArrayRef<double>> scale_factors);
+};
+
+struct TORCH_API upsample_nearest3d_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::optional<double>, c10::optional<double>, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+};
+
+struct TORCH_API upsample_nearest3d {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::optional<double>, c10::optional<double>, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..188579f0915744a3d727213296e8785efa056711
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor view_as_complex(const at::Tensor & self);
+
+} // namespace meta
+} // namespace at
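Editor's closing note: an illustrative sketch for the meta-dispatch declaration above (the at::meta:: symbol computes output metadata only; the generic entry does the actual work). view_as_complex reinterprets a float tensor whose last dimension has size 2 as a complex view, without copying:

    #include <ATen/ATen.h>

    void view_as_complex_demo() {
      at::Tensor x = at::zeros({4, 2});       // (real, imag) pairs along the last dim
      at::Tensor z = at::view_as_complex(x);  // shape {4}, dtype kComplexFloat, shares storage
    }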