diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_aminmax.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_aminmax.h
new file mode 100644
index 0000000000000000000000000000000000000000..88773895f648af77f87c04603169791099e92ca4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_aminmax.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_aminmax_ops.h>
+
+namespace at {
+
+
+// aten::_aminmax(Tensor self) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> _aminmax(const at::Tensor & self) {
+    return at::_ops::_aminmax::call(self);
+}
+
+// aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> _aminmax(const at::Tensor & self, int64_t dim, bool keepdim=false) {
+    return at::_ops::_aminmax_dim::call(self, dim, keepdim);
+}
+
+// aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self) {
+    return at::_ops::_aminmax_out::call(self, out0, out1);
+}
+// aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_outf(const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::_aminmax_out::call(self, out0, out1);
+}
+
+// aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+    return at::_ops::_aminmax_dim_out::call(self, dim, keepdim, out0, out1);
+}
+// aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::_aminmax_dim_out::call(self, dim, keepdim, out0, out1);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b9d1783dd580fffbf9291a098f37243ff63ccd01
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _amp_update_scale(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
+TORCH_API at::Tensor & _amp_update_scale_out(const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out);
+TORCH_API at::Tensor & _amp_update_scale_cuda_(at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision.h
new file mode 100644
index 0000000000000000000000000000000000000000..5100a9527dd43f1d7d9728118fb035b37efe28a2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_autocast_to_reduced_precision_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8272599eb8fc95c315f0af85dd2c954cc22ef0de
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_outf(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f2c222b11bb8dd66556b2554bc0b53e0468950bd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_fill_mem_eff_dropout_mask_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_fill_mem_eff_dropout_mask_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fb855f64fc0e254c13c73c4386adc540cdd89f54
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_fill_mem_eff_dropout_mask_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & _fill_mem_eff_dropout_mask_(at::Tensor & self, double dropout_p, int64_t seed, int64_t offset);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8bfd3e0005d275acc723524d8c73dcc96ffabd6f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_ceil_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_ceil(at::TensorList self);
+TORCH_API void _foreach_ceil_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c223519b05ca78fb02d1f673e4d660e188e6bf27
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_sign_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_sign_outf(at::TensorList self, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7a18517b2ea77a857284aa80c2635fc9185ba855
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}, c10::optional<bool> is_coalesced=c10::nullopt);
+TORCH_API at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced);
+TORCH_API at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}, c10::optional<bool> is_coalesced=c10::nullopt);
+TORCH_API at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1955fcea4b6bcdba2301e7fa1f689e2fd6b55fa7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={});
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5f4b8f57cf62069bbce8159bb496c70f8de1884
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _unsafe_index_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const c10::List<c10::optional<at::Tensor>> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unsafe_index")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..20b57e40ebda9a1febd4cac8032f8d1720ec1db9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _upsample_bilinear2d_aa_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, c10::optional<double>, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_bilinear2d_aa_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+};
+
+struct TORCH_API _upsample_bilinear2d_aa_backward {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, c10::optional<double>, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_bilinear2d_aa_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/absolute.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/absolute.h
new file mode 100644
index 0000000000000000000000000000000000000000..af1407d5540201b623577e71cfef087a1a3cd0d3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/absolute.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/absolute_ops.h>
+
+namespace at {
+
+
+// aten::absolute(Tensor self) -> Tensor
+inline at::Tensor absolute(const at::Tensor & self) {
+    return at::_ops::absolute::call(self);
+}
+
+// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & absolute_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::absolute_out::call(self, out);
+}
+// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & absolute_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::absolute_out::call(self, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool1d.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool1d.h
new file mode 100644
index 0000000000000000000000000000000000000000..4561b19b04878e9a1aa6ae75781d1fc046f357a5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool1d.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/adaptive_avg_pool1d_ops.h>
+
+namespace at {
+
+
+// aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
+inline at::Tensor adaptive_avg_pool1d(const at::Tensor & self, at::IntArrayRef output_size) {
+    return at::_ops::adaptive_avg_pool1d::call(self, output_size);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9149d75e1d5c56f5c1aa0c3ee9e38e0cdb598256
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/alias_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/alias_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4ad23ce0577d552682209502825545fe9cfd7ee2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/alias_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor alias(const at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/alias_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/alias_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a20e02eebd39c50c13fdf01200488d1a686a7c8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/alias_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor alias(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/align_as_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/align_as_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..98eb8b4c0e26e9fd654618cce8836909fad429e2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/align_as_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor align_as(const at::Tensor & self, const at::Tensor & other);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..18c982408091b18fc34d41973d1c0d5111a8fccc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+TORCH_API at::Tensor & avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+TORCH_API at::Tensor & avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8cccc5161ee51d88e2db99dca34ff66462da036f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, double eps);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_outf(const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dc846df532aa72be19847983739af117f43b06b3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & bincount_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & weights={}, int64_t minlength=0);
+TORCH_API at::Tensor & bincount_outf(const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/chalf_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/chalf_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8f78aed615cf219d8b460818f08f5809da2dab63
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/chalf_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor chalf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices.h
new file mode 100644
index 0000000000000000000000000000000000000000..afd71cf7dbe0d98f61816cbbf960e19ab16231d3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/col_indices_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_overrideable.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_overrideable.h
new file mode 100644
index 0000000000000000000000000000000000000000..3e35a0b60b7365880db89c9a012ca7eaced59836
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_overrideable.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/convolution_overrideable_ops.h>
+
+namespace at {
+
+
+// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor
+inline at::Tensor convolution_overrideable(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
+    return at::_ops::convolution_overrideable::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor convolution_overrideable(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
+    return at::_ops::convolution_overrideable::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups);
+  }
+}
+
+// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor
+inline at::Tensor convolution_overrideable_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
+    return at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor convolution_overrideable(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
+    return at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
+  }
+}
+
+// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & convolution_overrideable_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
+    return at::_ops::convolution_overrideable_out::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & convolution_overrideable_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
+    return at::_ops::convolution_overrideable_out::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, out);
+  }
+}
+
+// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & convolution_overrideable_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) {
+    return at::_ops::convolution_overrideable_out::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & convolution_overrideable_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) {
+    return at::_ops::convolution_overrideable_out::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, out);
+  }
+}
+
+// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & convolution_overrideable_symint_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
+    return at::_ops::convolution_overrideable_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & convolution_overrideable_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
+    return at::_ops::convolution_overrideable_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
+  }
+}
+
+// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & convolution_overrideable_symint_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out) {
+    return at::_ops::convolution_overrideable_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & convolution_overrideable_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out) {
+    return at::_ops::convolution_overrideable_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
+  }
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/copy_sparse_to_sparse_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/copy_sparse_to_sparse_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca037317e17597b98d5beab0a3b3289a81de2a86
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/copy_sparse_to_sparse_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API copy_sparse_to_sparse_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::copy_sparse_to_sparse_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & src, bool non_blocking);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking);
+};
+
+struct TORCH_API copy_sparse_to_sparse_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::copy_sparse_to_sparse")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out);
+};
+
+struct TORCH_API copy_sparse_to_sparse {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::copy_sparse_to_sparse")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & src, bool non_blocking);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..83e1529706a96cb86da5350e5bf5b67116e533fe
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> cummin(const at::Tensor & self, int64_t dim);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> cummin_outf(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a17055d0fab509731c4b4e50e4d5f857ce8241d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq.h
@@ -0,0 +1,43 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/fft_fftfreq_ops.h>
+
+namespace at {
+
+
+// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor fft_fftfreq(int64_t n, double d=1.0, at::TensorOptions options={}) {
+    return at::_ops::fft_fftfreq::call(n, d, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor fft_fftfreq(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::fft_fftfreq::call(n, d, dtype, layout, device, pin_memory);
+}
+
+// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_fftfreq_out(at::Tensor & out, int64_t n, double d=1.0) {
+    return at::_ops::fft_fftfreq_out::call(n, d, out);
+}
+// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_fftfreq_outf(int64_t n, double d, at::Tensor & out) {
+    return at::_ops::fft_fftfreq_out::call(n, d, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftn_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftn_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1505dcbffb9cab84c909ee7bcf0784c06defbf5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftn_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fft_ifftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_ifftn_symint_out(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/glu_backward_jvp_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/glu_backward_jvp_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c700e1dd0ce229eee62095a607669df3697a7e3b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/glu_backward_jvp_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API glu_backward_jvp {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::glu_backward_jvp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim);
+};
+
+struct TORCH_API glu_backward_jvp_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::glu_backward_jvp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..819e120a26567a7bc6bb961cadca5b7e1efb3816
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd);
+TORCH_API at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd);
+TORCH_API at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b91f096e0a72beb6fc978452c7af98ffd27fd1fd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor hardsigmoid(const at::Tensor & self);
+TORCH_API at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & hardsigmoid_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..551e1851e842e972401ac83b1e57bfc1fe0be40e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/leaky_relu_backward_ops.h>
+
+namespace at {
+
+
+// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
+    return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input);
+}
+// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) {
+    return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input);
+}
+
+// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
+inline at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
+    return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f4abedf4910d3717b83d1631888a7bd4ce7d39c4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_householder_product {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_householder_product")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_householder_product(Tensor input, Tensor tau) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & tau);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau);
+};
+
+struct TORCH_API linalg_householder_product_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_householder_product")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca902f8f5b869ea54fc1e7a13324af3b4d779f7a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex(const at::Tensor & A, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f3b8f083891e0d6b1642fb7765af6dff484b8707
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_ldl_factor_ex { + using schema = ::std::tuple (const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_ldl_factor_ex") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)") + static ::std::tuple call(const at::Tensor & self, bool hermitian, bool check_errors); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors); +}; + +struct TORCH_API linalg_ldl_factor_ex_out { + using schema = ::std::tuple (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_ldl_factor_ex") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)") + static ::std::tuple call(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1f75cb31fecaa314e08a514e3f1bbc1ee258c761 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
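A hedged usage sketch for the `linalg_ldl_factor_ex` operator whose schema structs appear above; the 3x3 matrix is illustrative and a linked libtorch is assumed:

```cpp
#include <ATen/ATen.h>
#include <tuple>

int main() {
  // Build a symmetric matrix; hermitian=false treats it as symmetric.
  at::Tensor a = at::randn({3, 3});
  a = a + a.transpose(0, 1);

  at::Tensor LD, pivots, info;
  std::tie(LD, pivots, info) =
      at::linalg_ldl_factor_ex(a, /*hermitian=*/false, /*check_errors=*/false);

  // info == 0 signals a successful factorization; with check_errors=true
  // a failed factorization would raise instead of reporting through info.
  return 0;
}
```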
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor logcumsumexp(const at::Tensor & self, at::Dimname dim); +TORCH_API at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim); +TORCH_API at::Tensor & logcumsumexp_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6f29c969c6691dac334b74e332fd6b509f1210fb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logical_xor(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_xor_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_xor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logit_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logit_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4d2dd1db3c1f095871172d7cfb77c10fb1899c76 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logit_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor logit(const at::Tensor & self, c10::optional eps=c10::nullopt); +TORCH_API at::Tensor & logit_out(at::Tensor & out, const at::Tensor & self, c10::optional eps=c10::nullopt); +TORCH_API at::Tensor & logit_outf(const at::Tensor & self, c10::optional eps, at::Tensor & out); +TORCH_API at::Tensor & logit_(at::Tensor & self, c10::optional eps=c10::nullopt); + +} // namespace cpu +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..903c4ffaec4e5e93f105b542f89ea92cd526c2a3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
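To illustrate the `logit` overloads declared just above (the public at:: entry points mirror the at::cpu:: ones), a small sketch with illustrative values:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor p = at::rand({5});                   // values in (0, 1)
  at::Tensor y = at::logit(p);                    // log(p / (1 - p)), eps unset
  at::Tensor y_eps = at::logit(p, /*eps=*/1e-6);  // inputs clamped to [eps, 1 - eps]

  at::Tensor out = at::empty_like(p);
  at::logit_out(out, p, /*eps=*/1e-6);            // out-variant
  return 0;
}
```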
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API __lshift___Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__lshift__") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__lshift__.Scalar(Tensor self, Scalar other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API __lshift___Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__lshift__") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__lshift__.Tensor(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API __ilshift___Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__ilshift__") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API __ilshift___Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__ilshift__") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API __lshift___Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__lshift__") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API __lshift___Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__lshift__") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5f5d690fbef2c8ffc4b27e0dd37ebddda29766e9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..524be7c0cce029725d9783d3d9d3023099944f88 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
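The `__lshift__` family above is normally reached through Tensor methods; a brief sketch (integer values chosen for clarity, libtorch assumed):

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::tensor({1, 2, 4}, at::kLong);

  at::Tensor shifted = x.__lshift__(2);   // aten::__lshift__.Scalar -> {4, 8, 16}
  x.__ilshift__(1);                       // in-place aten::__ilshift__ -> x is {2, 4, 8}

  // From maximum_meta_dispatch.h above: elementwise maximum of two tensors.
  at::Tensor m = at::maximum(x, shifted); // -> {4, 8, 16}
  return 0;
}
```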
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & mkldnn_max_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API at::Tensor & mkldnn_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/narrow.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/narrow.h new file mode 100644 index 0000000000000000000000000000000000000000..5e22996339f96fa8bb686e2353729b949eb82c62 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/narrow.h @@ -0,0 +1,69 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a) +inline at::Tensor narrow(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) { + return at::_ops::narrow::call(self, dim, start, length); +} +namespace symint { + template ::value>> + at::Tensor narrow(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) { + return at::_ops::narrow::call(self, dim, start, length); + } +} + +// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a) +inline at::Tensor narrow_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) { + return at::_ops::narrow::call(self, dim, start, length); +} +namespace symint { + template ::value>> + at::Tensor narrow(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) { + return at::_ops::narrow::call(self, dim, start, length); + } +} + +// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a) +inline at::Tensor narrow(const at::Tensor & self, int64_t dim, const at::Tensor & start, int64_t length) { + return at::_ops::narrow_Tensor::call(self, dim, start, length); +} +namespace symint { + template ::value>> + at::Tensor narrow(const at::Tensor & self, int64_t dim, const at::Tensor & start, int64_t length) { + return at::_ops::narrow_Tensor::call(self, dim, start, length); + } +} + +// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a) +inline at::Tensor narrow_symint(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) { + return at::_ops::narrow_Tensor::call(self, dim, start, length); +} +namespace symint { + template ::value>> + at::Tensor narrow(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) { + return at::_ops::narrow_Tensor::call(self, dim, start, length); + } +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/pinverse_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/pinverse_native.h new file mode 100644 index 0000000000000000000000000000000000000000..117965384d5be1f161aca3a2cddd908174d3d4fe --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/pinverse_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor pinverse(const at::Tensor & self, double rcond=1e-15); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/positive_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/positive_native.h new file mode 100644 index 0000000000000000000000000000000000000000..165a50b2a7f2319cb81538c172d6540aaec77bdf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/positive_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor positive(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/resize_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/resize_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..81f016a20055c774b95437c0d4f23ff5c8f43ffe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/resize_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional memory_format=c10::nullopt); +TORCH_API const at::Tensor & resize__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format=c10::nullopt); + +} // namespace cuda +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7aa4a8869b42c464189203e26cca32e2faf43fd6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
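A short sketch of the `narrow` overloads from narrow.h earlier above, including the Tensor-start variant; shapes are illustrative:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::arange(12).reshape({3, 4});

  // Rows 1..2 (start=1, length=2) along dim 0; the result is a view of t.
  at::Tensor rows = at::narrow(t, /*dim=*/0, /*start=*/1, /*length=*/2);

  // aten::narrow.Tensor accepts a 0-dim integer tensor as the start.
  at::Tensor start = at::scalar_tensor(1, at::kLong);
  at::Tensor rows2 = at::narrow(t, /*dim=*/0, start, /*length=*/2);
  return 0;
}
```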
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API rnn_relu_input { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rnn_relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +}; + +struct TORCH_API rnn_relu_data { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rnn_relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "data") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scalar_tensor_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scalar_tensor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f93bd89e71256026d4cc1af36efa2bcaf3f91c88 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scalar_tensor_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor scalar_tensor(const at::Scalar & s, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}); +TORCH_API at::Tensor & scalar_tensor_out(const at::Scalar & s, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_backward_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c2ef5788cd0e0fca89f6ce2dc8429084382b0af3 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API sigmoid_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sigmoid_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input); +}; + +struct TORCH_API sigmoid_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sigmoid_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & output); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sin_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sin_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b38bdb546022fd0aa72dd89cd460c2d9fdf6fab5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sin_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
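For the `sigmoid_backward` schema structs above, a minimal sketch; the derivative is phrased in terms of the forward output, which is why the op takes `output` rather than the original input:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({8});
  at::Tensor output = at::sigmoid(x);          // forward result
  at::Tensor grad_output = at::ones_like(x);   // incoming gradient

  // grad_input = grad_output * output * (1 - output)
  at::Tensor grad_input = at::sigmoid_backward(grad_output, output);
  return 0;
}
```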
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor sin(const at::Tensor & self); +TORCH_API at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sin_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..11073b6027ff602cbe5b4c88355e2cfa2dc84544 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_sinc : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/slice_copy_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/slice_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ea7dcd2ffd363a03ef72d3ff758d052650527354 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/slice_copy_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API slice_copy_Tensor { + using schema = at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, c10::SymInt); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::slice_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step); +}; + +struct TORCH_API slice_copy_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, c10::optional, c10::optional, c10::SymInt, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::slice_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/slogdet_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/slogdet_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4569985bdfd9f15f89cf61640e5bc414de0187a8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/slogdet_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple slogdet(const at::Tensor & self); +TORCH_API ::std::tuple slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & self); +TORCH_API ::std::tuple slogdet_outf(const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d.h new file mode 100644 index 0000000000000000000000000000000000000000..a04cbedd321675434cf7530ae7b6bb88ec13ad68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out); + } +} + +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation), out); + } +} + +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & slow_conv_transpose2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); + } +} + +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv_transpose2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); + } +} + +// aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor +inline at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose2d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation)); +} +namespace symint { + template ::value>> + at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose2d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(dilation)); + } +} + +// aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor +inline at::Tensor slow_conv_transpose2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); +} +namespace symint { + template ::value>> + at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); + } +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6044e0969ba6848205c5ea7813907ac565c76c64 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
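A hedged sketch of the functional `slow_conv_transpose2d` declared above; shapes are illustrative, and the weight layout is (in_channels, out_channels, kH, kW) as usual for transposed convolution:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor input  = at::randn({1, 3, 8, 8});   // NCHW
  at::Tensor weight = at::randn({3, 6, 3, 3});   // (in, out, kH, kW)
  at::Tensor bias   = at::zeros({6});

  at::Tensor out = at::slow_conv_transpose2d(
      input, weight, /*kernel_size=*/{3, 3}, bias,
      /*stride=*/{2, 2}, /*padding=*/{1, 1},
      /*output_padding=*/{1, 1}, /*dilation=*/{1, 1});

  // Spatial size: (8 - 1) * 2 - 2 * 1 + 1 * (3 - 1) + 1 + 1 = 16,
  // so out has shape {1, 6, 16, 16}.
  return 0;
}
```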
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API soft_margin_loss_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::soft_margin_loss_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input); +}; + +struct TORCH_API soft_margin_loss_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::soft_margin_loss_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5857a620b802fee09cf49216dfa7afd1a9c70d09 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API sparse_csc_tensor_ccol_row_value_size { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_csc_tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ccol_row_value_size") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor") + static at::Tensor call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API sparse_csc_tensor_ccol_row_value { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_csc_tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ccol_row_value") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor") + static at::Tensor call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c380004c13e6ab26e01caea07ffd2214528af014 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
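The `sparse_csc_tensor` overloads above are factory functions; a small sketch building a 2x2 identity in CSC layout (the at::tensor helpers and the explicit TensorOptions are one plausible way to call it, assuming a linked libtorch):

```cpp
#include <ATen/ATen.h>

int main() {
  // CSC: ccol_indices has size (cols + 1); column j's entries live at
  // row_indices[ccol[j] : ccol[j + 1]].
  at::Tensor ccol = at::tensor({0, 1, 2}, at::kLong);
  at::Tensor row  = at::tensor({0, 1}, at::kLong);
  at::Tensor vals = at::tensor({1.0, 1.0});

  at::Tensor m = at::sparse_csc_tensor(
      ccol, row, vals, /*size=*/{2, 2},
      at::TensorOptions().dtype(at::kDouble));
  return 0;
}
```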
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_erf_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_erf_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..34da262406840e4ff06e1bb0dc8ee2aeb6d1e668
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_erf_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_erf {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_erf")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_erf(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API special_erf_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_erf")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_expm1_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_expm1_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c3c3b83389343c1198cb84baddcba7fd9dc8d71e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_expm1_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
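In practice `special_erf` is a one-liner; a sketch of the entry points whose schemas appear above:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::linspace(-2, 2, /*steps=*/9);
  at::Tensor e = at::special_erf(x);     // same result as at::erf(x)

  at::Tensor out = at::empty_like(x);
  at::special_erf_out(out, x);           // out-variant
  return 0;
}
```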
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_expm1 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_expm1") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_expm1(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_expm1_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_expm1") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_native.h new file mode 100644 index 0000000000000000000000000000000000000000..91aade17f672c72cc3e068a6ac86977a12a5240f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_laguerre_polynomial_l_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_special_laguerre_polynomial_l_out : public at::meta::structured_special_laguerre_polynomial_l { +void impl(const at::Tensor & x, const at::Tensor & n, const at::Tensor & out); +}; +TORCH_API at::Tensor special_laguerre_polynomial_l(const at::Scalar & x, const at::Tensor & n); +TORCH_API at::Tensor & special_laguerre_polynomial_l_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out); +TORCH_API at::Tensor special_laguerre_polynomial_l(const at::Tensor & x, const at::Scalar & n); +TORCH_API at::Tensor & special_laguerre_polynomial_l_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_log1p.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_log1p.h new file mode 100644 index 0000000000000000000000000000000000000000..06d83f91b0e0d7a56ec1ec5f7caa5940d9f8ebd7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_log1p.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::special_log1p(Tensor self) -> Tensor +inline at::Tensor special_log1p(const at::Tensor & self) { + return at::_ops::special_log1p::call(self); +} + +// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
out) -> Tensor(a!)
+inline at::Tensor & special_log1p_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_log1p_out::call(self, out); +} +// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_log1p_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_log1p_out::call(self, out); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..59a36708fc82a865b6621aab1f23f72e8659db16 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor special_modified_bessel_k0(const at::Tensor & self); +TORCH_API at::Tensor & special_modified_bessel_k0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_modified_bessel_k0_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..00ca6dfc42be8d1c47723a96f188a37f2a0d3d41 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
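And for special_log1p.h above, a minimal sketch; `special_log1p` is the numerically stable log(1 + x), an alias of at::log1p:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::tensor({1e-8, 0.5, 1.0});
  at::Tensor y = at::special_log1p(x);   // stable log(1 + x), accurate even for tiny x

  at::Tensor out = at::empty_like(x);
  at::special_log1p_out(out, x);         // out-variant
  return 0;
}
```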
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor squeeze(const at::Tensor & self, at::Dimname dim); +TORCH_API at::Tensor & squeeze_(at::Tensor & self, at::Dimname dim); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/std_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/std_native.h new file mode 100644 index 0000000000000000000000000000000000000000..21ec0c340a5fc312cef691f5699e09b74b4e93da --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/std_native.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor std(const at::Tensor & self, bool unbiased=true); +TORCH_API at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased=true, bool keepdim=false); +TORCH_API at::Tensor & std_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out); +TORCH_API at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false); +TORCH_API at::Tensor & std_out(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, at::Tensor & out); +TORCH_API at::Tensor std_quantized_cpu(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false); +TORCH_API at::Tensor & std_out_quantized_cpu(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim, at::Tensor & out); +TORCH_API at::Tensor std(const at::Tensor & self, at::DimnameList dim, bool unbiased=true, bool keepdim=false); +TORCH_API at::Tensor & std_out(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out); +TORCH_API at::Tensor std(const at::Tensor & self, at::DimnameList dim, const c10::optional & correction=c10::nullopt, bool keepdim=false); +TORCH_API at::Tensor & std_out(const at::Tensor & self, at::DimnameList dim, const c10::optional & correction, bool keepdim, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sub_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sub_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..c38c77a7466ea23882bf22a7ab0dc92b9f295cde --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sub_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_sub_Tensor : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); +}; + +} // namespace native +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h new file mode 100644 index 
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sub_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sub_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..c38c77a7466ea23882bf22a7ab0dc92b9f295cde
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sub_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_sub_Tensor : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd15e5e4ce49c3e24714ed029884c1d6d06239ce
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void sym_constrain_range(const at::Scalar & size, c10::optional<int64_t> min=c10::nullopt, c10::optional<int64_t> max=c10::nullopt);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/to_padded_tensor_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/to_padded_tensor_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e5bc416681bb4351183515ae5e52989af255928e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/to_padded_tensor_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & to_padded_tensor_out(at::Tensor & out, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt);
+TORCH_API at::Tensor & to_padded_tensor_outf(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor & to_padded_tensor_symint_out(at::Tensor & out, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size=c10::nullopt);
+TORCH_API at::Tensor & to_padded_tensor_symint_outf(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/trunc_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/trunc_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c36aad0d9191c31e729785b461ceb6e2293b7774
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/trunc_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor trunc(const at::Tensor & self);
+TORCH_API at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & trunc_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
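trunc_meta_dispatch.h declares the meta-key entry points for `trunc`. Meta tensors carry shape and dtype but no storage, so these functions perform shape inference only; a minimal sketch (assuming a recent libtorch):

#include <ATen/ATen.h>

int main() {
  at::Tensor m = at::empty({2, 3}, at::TensorOptions().device(at::kMeta));
  at::Tensor r = at::trunc(m);  // dispatches to the meta kernel: no data is touched
  // r.sizes() == {2, 3}; reading r's values would be an error
}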
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/uniform_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/uniform_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..54010e4be4a12db9cb53128dc1836da99ee956b6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/uniform_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API uniform_ {
+  using schema = at::Tensor & (at::Tensor &, double, double, c10::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::uniform_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, double from, double to, c10::optional<at::Generator> generator);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double from, double to, c10::optional<at::Generator> generator);
+};
+
+struct TORCH_API uniform_out {
+  using schema = at::Tensor & (const at::Tensor &, double, double, c10::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::uniform")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator, at::Tensor & out);
+};
+
+struct TORCH_API uniform {
+  using schema = at::Tensor (const at::Tensor &, double, double, c10::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::uniform")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator);
+};
+
+}} // namespace at::_ops
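The three structs above are the unbound operator handles for `aten::uniform`; in user code the in-place variant normally surfaces as a Tensor method. A sketch (assuming a recent libtorch):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::empty({4, 4});
  // "uniform_(Tensor(a!) self, float from=0, float to=1, ...)" as a method:
  t.uniform_(-1.0, 1.0);  // fill t in place with samples from U(-1, 1)
}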
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim_consecutive_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim_consecutive_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..490355337a43062f3a94e191f301819104d09f22
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim_consecutive_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive(const at::Tensor & self, int64_t dim, bool return_inverse=false, bool return_counts=false);
+
+} // namespace cuda
+} // namespace at
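The header above only exposes the CUDA binding; the generic `at::unique_dim_consecutive` dispatches by device and collapses *consecutive* duplicates along `dim` without a global sort. A sketch (assuming a recent libtorch):

#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<int64_t> v = {1, 1, 2, 2, 3, 1};
  at::Tensor t = at::tensor(v);
  auto [values, inverse, counts] = at::unique_dim_consecutive(
      t, /*dim=*/0, /*return_inverse=*/true, /*return_counts=*/true);
  // values == {1, 2, 3, 1}; the trailing 1 survives because only runs merge
}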
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5b486ca6a64aee9ce4fc725e99f77571160f7011
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_bilinear2d_meta.h>
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor upsample_bilinear2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<c10::ArrayRef<double>> scale_factors);
+struct TORCH_API structured_upsample_bilinear2d_out_cpu : public at::meta::structured_upsample_bilinear2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & out);
+};
+struct TORCH_API structured_upsample_bilinear2d_out_cuda : public at::meta::structured_upsample_bilinear2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & out);
+};
+TORCH_API at::Tensor upsample_bilinear2d_quantized_cpu(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..97ed9255d742b6dd9e5d6c0486d8648411187dcd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/upsample_linear1d_backward_ops.h>
+
+namespace at {
+
+
+// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
+  }
+}
+
+// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
+  }
+}
+
+// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_linear1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
+  }
+}
+
+// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_linear1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
+  }
+}
+
+// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
+inline at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales);
+  }
+}
+
+// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
+inline at::Tensor upsample_linear1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+    return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
+  }
+}
+
+}
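To see these backward wrappers in context: the plain `IntArrayRef` overload converts its sizes through `c10::fromIntArrayRefSlow` before reaching the same SymInt-typed op as the `_symint` variant, so concrete shapes and symbolic shapes share one schema. A sketch (assuming a recent libtorch):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({1, 1, 4});
  at::Tensor y = at::upsample_linear1d(x, /*output_size=*/{8}, /*align_corners=*/false);
  at::Tensor gy = at::ones_like(y);
  // Gradient w.r.t. x; input_size must spell out the full {N, C, W} shape
  at::Tensor gx = at::upsample_linear1d_backward(gy, {8}, {1, 1, 4}, /*align_corners=*/false);
  // gx.sizes() == {1, 1, 4}
}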
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..df2813236356f394861bcbb2b3df47986a7e622c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_upsample_nearest2d : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, at::ArrayRef<int64_t> output_size, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/values_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/values_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..053c463044e25f5405d6639686127f43531cef4e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/values_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API values {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::values")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "values(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
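`aten::values` (schema `values(Tensor(a) self) -> Tensor(a)`) returns an alias of the value buffer of a sparse COO tensor and requires the tensor to be coalesced first. A sketch (assuming a recent libtorch):

#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<int64_t> idx = {0, 2};
  std::vector<double> val = {3.0, 4.0};
  at::Tensor indices = at::tensor(idx).reshape({1, 2});  // sparse_dim x nnz
  at::Tensor sp = at::sparse_coo_tensor(indices, at::tensor(val), {4}).coalesce();
  at::Tensor vals = sp.values();  // shape {2}, aliases sp's storage
}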
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/var_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/var_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..849c6afe71f1cb47599300cf95410fe53ff88860
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/var_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor var(const at::Tensor & self, bool unbiased=true);
+TORCH_API at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased=true, bool keepdim=false);
+TORCH_API at::Tensor & var_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out);
+TORCH_API at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & var_out(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+TORCH_API at::Tensor var(const at::Tensor & self, at::DimnameList dim, bool unbiased=true, bool keepdim=false);
+TORCH_API at::Tensor & var_out(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out);
+TORCH_API at::Tensor var(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & var_out(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0547319778f01264a924783f72fad970d45c0136
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API view_as_complex {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view_as_complex")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view_as_complex(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
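`view_as_complex` likewise returns an alias (note the `Tensor(a)` annotations in its schema): a float tensor whose last dimension has size 2 is reinterpreted as (real, imag) pairs. A closing sketch (assuming a recent libtorch):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({3, 2});       // contiguous float, last dim == 2
  at::Tensor z = at::view_as_complex(x);  // complex64, shape {3}, no copy
  // Writing through z mutates x, since both share storage
}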