diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d6acbd4972bdf06ae449c45678f44a7e16a4d6aa
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor & _adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor & _adaptive_avg_pool3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size);
+TORCH_API at::Tensor & _adaptive_avg_pool3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..89b1aaa425c9a0129aadd77bc11baf157b0c9274
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_out(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_tensor(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ffcef31e54815605a54bec452b624c799c59e19
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max.h
@@ -0,0 +1,82 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_clamp_max_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_clamp_max_Scalar::call(self, scalar);
+}
+
+// aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+inline void _foreach_clamp_max_(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_clamp_max__Scalar::call(self, scalar);
+}
+
+// aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_clamp_max_List::call(self, other);
+}
+
+// aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+inline void _foreach_clamp_max_(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_clamp_max__List::call(self, other);
+}
+
+// aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_clamp_max_ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+inline void _foreach_clamp_max_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_clamp_max__ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out);
+}
+// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_max_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+    return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out);
+}
+
+// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_clamp_max_List_out::call(self, other, out);
+}
+// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_max_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
+    return at::_ops::_foreach_clamp_max_List_out::call(self, other, out);
+}
+
+// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out);
+}
+// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_max_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+    return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor.h
new file mode 100644
index 0000000000000000000000000000000000000000..838bf614254cf92ab95c8b0d9f4dd6ac0bf3079c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_floor_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_floor(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_floor(at::TensorList self) {
+    return at::_ops::_foreach_floor::call(self);
+}
+
+// aten::_foreach_floor_(Tensor(a!)[] self) -> ()
+inline void _foreach_floor_(at::TensorList self) {
+    return at::_ops::_foreach_floor_::call(self);
+}
+
+// aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_floor_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_floor_out::call(self, out);
+}
+// aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_floor_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_floor_out::call(self, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb5de3828be11e3c959ec24e9d1738ab7da69c15
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_sinh_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_sinh_outf(at::TensorList self, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2fd88432faf5900bb0e46d5b3910985bf36581f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c93da9b82d72ec728aa031e4985b6a6231c30475
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}, bool need_weights=true, bool average_attn_weights=true, c10::optional<int64_t> mask_type=c10::nullopt);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention_outf(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_size_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_size_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a74ed53df883269012f4e91419dbf693f52c3c9f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_size_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _nested_tensor_size_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor _nested_tensor_size(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_softmax_with_shape_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_softmax_with_shape_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b40d1a0b7cf70e4983fc8526cad31004ae25b21
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_softmax_with_shape_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor NestedTensor_softmax_dropout(const at::Tensor & self, const at::Tensor & query);
+TORCH_API at::Tensor NestedTensor_softmax_dropout_cuda(const at::Tensor & self, const at::Tensor & query);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer.h
new file mode 100644
index 0000000000000000000000000000000000000000..5075b13005a17023ee8041880b4e4580e60cd4d4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_view_from_buffer_ops.h>
+
+namespace at {
+
+
+// aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor(a)
+inline at::Tensor _nested_view_from_buffer(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
+    return at::_ops::_nested_view_from_buffer::call(self, nested_size, nested_strides, offsets);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_new_zeros_with_same_feature_meta_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_new_zeros_with_same_feature_meta_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..70704558effc757f0850af7971add65f7b0ace20
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_new_zeros_with_same_feature_meta_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _new_zeros_with_same_feature_meta(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0);
+TORCH_API at::Tensor & _new_zeros_with_same_feature_meta_out(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c09e7f54ba192b7b8694bdedc5b957c853951fc2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride);
+TORCH_API at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4defabbd2bca9bf1fc997c447c44caa353d94abd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _segment_reduce_backward(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, const c10::optional<at::Scalar> & initial=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mask_projection_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mask_projection_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..420e3703d34a14b61e41a6251b2e8b84feb0580d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mask_projection_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_mask_projection {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_mask_projection")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches);
+};
+
+struct TORCH_API _sparse_mask_projection_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_mask_projection")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d5976942a8760fa00b269000a1ad3b7d45605f8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API adaptive_max_pool2d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool2d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input);
+};
+
+struct TORCH_API adaptive_max_pool2d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool2d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5253848a8ebbe0e16d11e1ec1e6b14c1c79c949
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcdiv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcdiv_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/any.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/any.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4fd0c79dd1fd34319078c6843dd758aa8cdf4cd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/any.h
@@ -0,0 +1,81 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/any_ops.h>
+
+namespace at {
+
+
+// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
+inline at::Tensor any(const at::Tensor & self, int64_t dim, bool keepdim=false) {
+    return at::_ops::any_dim::call(self, dim, keepdim);
+}
+
+// aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor
+inline at::Tensor any(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false) {
+    return at::_ops::any_dims::call(self, dim, keepdim);
+}
+
+// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & any_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+    return at::_ops::any_out::call(self, dim, keepdim, out);
+}
+// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & any_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
+    return at::_ops::any_out::call(self, dim, keepdim, out);
+}
+
+// aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & any_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false) {
+    return at::_ops::any_dims_out::call(self, dim, keepdim, out);
+}
+// aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & any_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) {
+    return at::_ops::any_dims_out::call(self, dim, keepdim, out);
+}
+
+// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
+inline at::Tensor any(const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+    return at::_ops::any_dimname::call(self, dim, keepdim);
+}
+
+// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & any_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+    return at::_ops::any_dimname_out::call(self, dim, keepdim, out);
+}
+// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & any_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
+    return at::_ops::any_dimname_out::call(self, dim, keepdim, out);
+}
+
+// aten::any(Tensor self) -> Tensor
+inline at::Tensor any(const at::Tensor & self) {
+    return at::_ops::any::call(self);
+}
+
+// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & any_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::any_all_out::call(self, out);
+}
+// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & any_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::any_all_out::call(self, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/arctan_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/arctan_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..58d9b00c34891c0eb226df3135e66e0379a06361
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/arctan_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor arctan(const at::Tensor & self);
+TORCH_API at::Tensor & arctan_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & arctan_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & arctan_(at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2a8b5a3991108951ffe9f497cad0f34a079d539
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/avg_pool2d_ops.h>
+
+namespace at {
+
+
+// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt) {
+    return at::_ops::avg_pool2d_out::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
+}
+// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) {
+    return at::_ops::avg_pool2d_out::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
+}
+
+// aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
+inline at::Tensor avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt) {
+    return at::_ops::avg_pool2d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..346c2b788c2fb4b7a590f79e08faa64c4cdde128
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API avg_pool3d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input);
+};
+
+struct TORCH_API avg_pool3d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..60054dd6baba4124d234d32e8f7408ad10bc64f8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt);
+TORCH_API at::Tensor & avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt);
+TORCH_API at::Tensor & avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy.h
new file mode 100644
index 0000000000000000000000000000000000000000..c8f847e5dd22ee154e97c3c804bef8b009026b11
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/binary_cross_entropy_ops.h>
+
+namespace at {
+
+
+// aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
+inline at::Tensor binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+    return at::_ops::binary_cross_entropy::call(self, target, weight, reduction);
+}
+
+// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & binary_cross_entropy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+    return at::_ops::binary_cross_entropy_out::call(self, target, weight, reduction, out);
+}
+// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & binary_cross_entropy_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
+    return at::_ops::binary_cross_entropy_out::call(self, target, weight, reduction, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a103684b51adad393e222bd0a11fc9be04e8dd7a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor binary_cross_entropy_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & binary_cross_entropy_backward_out_cpu(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input);
+TORCH_API at::Tensor binary_cross_entropy_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & binary_cross_entropy_backward_out_cuda(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f14e8031a4df14ba78dc10750c241defa62fdd42
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_right_shift_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_right_shift_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_right_shift_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7dfabe75ec355ebe0fcd2c32063466b337ac1349
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API bmm {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bmm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bmm(Tensor self, Tensor mat2) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & mat2);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2);
+};
+
+struct TORCH_API bmm_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bmm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cat_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cat_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c9c9b7f890356bdfec6b91bd9eb821a5434d588
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/cat_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API cat {
+  using schema = at::Tensor (const at::ITensorListRef &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cat")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cat(Tensor[] tensors, int dim=0) -> Tensor")
+  static at::Tensor call(const at::ITensorListRef & tensors, int64_t dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::ITensorListRef & tensors, int64_t dim);
+};
+
+struct TORCH_API cat_out {
+  using schema = at::Tensor & (const at::ITensorListRef &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cat")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out);
+};
+
+struct TORCH_API cat_names {
+  using schema = at::Tensor (at::TensorList, at::Dimname);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cat")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cat.names(Tensor[] tensors, Dimname dim) -> Tensor")
+  static at::Tensor call(at::TensorList tensors, at::Dimname dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim);
+};
+
+struct TORCH_API cat_names_out {
+  using schema = at::Tensor & (at::TensorList, at::Dimname, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cat")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(at::TensorList tensors, at::Dimname dim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/col2im_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/col2im_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bfc57361725f0270bcd6bcb1da8f79bc5de28280
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/col2im_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor col2im_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & col2im_outf(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out);
+TORCH_API at::Tensor & col2im_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & col2im_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/col2im_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/col2im_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..072785412e57ce5e9cf8ffbbd686e194ca441004
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/col2im_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor col2im_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & col2im_outf(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out);
+TORCH_API at::Tensor & col2im_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & col2im_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a813a2cc31bd11100eba233e339362be78d31dd8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & detach_copy_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor detach_copy(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..857a490cd2a051f30549884f1b728d0212aa18e3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & diag_embed_out(at::Tensor & out, const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1);
+TORCH_API at::Tensor & diag_embed_outf(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..9a49dae6f7f78e508c42b9824903be44e5cd4071
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/diagonal_backward_ops.h>
+
+namespace at {
+
+
+// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
+inline at::Tensor diagonal_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor diagonal_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2);
+  }
+}
+
+// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
+inline at::Tensor diagonal_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor diagonal_backward(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
+  }
+}
+
+// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diagonal_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & diagonal_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
+  }
+}
+
+// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..9a49dae6f7f78e508c42b9824903be44e5cd4071
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/diagonal_backward_ops.h>
+
+namespace at {
+
+
+// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
+inline at::Tensor diagonal_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor diagonal_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2);
+  }
+}
+
+// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
+inline at::Tensor diagonal_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor diagonal_backward(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
+  }
+}
+
+// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diagonal_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & diagonal_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
+  }
+}
+
+// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diagonal_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+    return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & diagonal_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+    return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
+  }
+}
+
+// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diagonal_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward_out::call(grad_output, input_sizes, offset, dim1, dim2, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & diagonal_backward_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+    return at::_ops::diagonal_backward_out::call(grad_output, input_sizes, offset, dim1, dim2, out);
+  }
+}
+
+// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diagonal_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+    return at::_ops::diagonal_backward_out::call(grad_output, input_sizes, offset, dim1, dim2, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & diagonal_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+    return at::_ops::diagonal_backward_out::call(grad_output, input_sizes, offset, dim1, dim2, out);
+  }
+}
+
+}
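The `at::symint` overloads above are selected by template parameter rather than by argument type: `T = int64_t` picks the `IntArrayRef` entry point, `T = c10::SymInt` the symbolic one. A minimal sketch with a concrete 3x3 gradient; the sizes are purely illustrative:

```cpp
#include <ATen/ATen.h>

void diagonal_backward_flavors() {
  at::Tensor grad = at::randn({3});  // gradient w.r.t. a length-3 diagonal
  // Plain IntArrayRef entry point.
  at::Tensor gi = at::diagonal_backward(grad, {3, 3}, /*offset=*/0, /*dim1=*/0, /*dim2=*/1);
  // Template-selected flavor: T=int64_t routes to the same overload.
  at::Tensor gi2 = at::symint::diagonal_backward<int64_t>(grad, {3, 3}, 0, 0, 1);
}
```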
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..4c46a3a0d4adf99f198377d053e280143b60c8f6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API digamma_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::digamma_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "digamma_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API digamma_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::digamma")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+struct TORCH_API digamma {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::digamma")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "digamma(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/empty_quantized_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/empty_quantized_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..021d2b3d9e04f7f9e6d4ca5aaeb053ef83ecb00e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/empty_quantized_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & empty_quantized_out(at::Tensor & out, at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+TORCH_API at::Tensor & empty_quantized_outf(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/empty_strided_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/empty_strided_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd3a1d44caf0941417c7f1b9c9d122e4c77a0336
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/empty_strided_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API empty_strided {
+  using schema = at::Tensor (c10::SymIntArrayRef, c10::SymIntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::empty_strided")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API empty_strided_out {
+  using schema = at::Tensor & (c10::SymIntArrayRef, c10::SymIntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::empty_strided")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out);
+};
+
+}} // namespace at::_ops
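Everything in empty_strided_ops.h is dispatcher plumbing: the `schema_str` is what gets registered, and `call()` is the unboxed entry point that `at::empty_strided` wraps. A sketch of driving the struct directly, assuming a 2x3 row-major layout; ordinary code would call `at::empty_strided` instead:

```cpp
#include <ATen/ATen.h>
#include <ATen/ops/empty_strided_ops.h>

void empty_strided_via_ops() {
  std::vector<c10::SymInt> size{2, 3}, stride{3, 1};
  // Direct use of the generated operator struct; the last three optionals
  // (layout, device, pin_memory) are left unset here.
  at::Tensor t = at::_ops::empty_strided::call(
      size, stride, at::kFloat, c10::nullopt, c10::nullopt, c10::nullopt);
}
```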
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fmod.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fmod.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c240eb517ba14d169acef3b2916cc62c4e816ae
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fmod.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/fmod_ops.h>
+
+namespace at {
+
+
+// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::fmod_Scalar_out::call(self, other, out);
+}
+// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmod_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::fmod_Scalar_out::call(self, other, out);
+}
+
+// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor fmod(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::fmod_Scalar::call(self, other);
+}
+
+// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::fmod_Tensor_out::call(self, other, out);
+}
+// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fmod_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::fmod_Tensor_out::call(self, other, out);
+}
+
+// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor fmod(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::fmod_Tensor::call(self, other);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..24777c6952927f7b3856beee681ff6d9e5c15982
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/gru_cell_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={});
+
+} // namespace compositeimplicitautograd
+} // namespace at
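`gru_cell`'s optional biases default to absent (`{}`), so the declaration above can be called with just the four required tensors. A minimal sketch with illustrative sizes (batch 3, input 10, hidden 20); the shapes are assumptions, not taken from the header:

```cpp
#include <ATen/ATen.h>

void gru_cell_example() {
  const int64_t batch = 3, input_size = 10, hidden_size = 20;
  at::Tensor input = at::randn({batch, input_size});
  at::Tensor hx    = at::randn({batch, hidden_size});
  // GRU packs reset/update/new gates, hence the 3x factor.
  at::Tensor w_ih  = at::randn({3 * hidden_size, input_size});
  at::Tensor w_hh  = at::randn({3 * hidden_size, hidden_size});
  at::Tensor h_next = at::gru_cell(input, hx, w_ih, w_hh);  // biases omitted
}
```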
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc89c9748a555a17c709f36e660fcd213c04b2ee
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_native.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor & index_fill_int_Scalar_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value);
+TORCH_API at::Tensor & index_fill_int_Tensor_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out);
+TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value);
+TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value);
+TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae24635a7bc3f11c1688bf2e39f0f7d96e9d4016
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_meta.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_index_reduce : public at::impl::MetaBase {
+
+  template <bool DIM = false>
+  struct TORCH_API precompute_out {
+
+    precompute_out<true> set_dim(int64_t value) {
+      static_assert(DIM == false, "dim already set");
+      precompute_out<true> ret;
+ret.dim = value;
+return ret;
+    }
+
+    int64_t dim;
+  };
+  using meta_return_ty = precompute_out <true>;
+  meta_return_ty meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self);
+};
+
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..fea8090dbaea3f9be8d5592cd2cb91ac7ac1d1b4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_linalg_cross : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & other, int64_t dim);
+};
+
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d6ab0718ad722452ddd7be8d0277a9e2f490193d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_householder_product_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau);
+TORCH_API at::Tensor & linalg_householder_product_out(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1728013043da66604f808b08dc921f5ca5afc218
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a24b989f78a64354cba3ba110fe7226ddd722539
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/linalg_lu_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_lu_out : public at::meta::structured_linalg_lu {
+void impl(const at::Tensor & A, bool pivot, const at::Tensor & P, const at::Tensor & L, const at::Tensor & U);
+};
+} // namespace native
+} // namespace at
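The structured-op pairing above (a `meta()` that validates shapes and allocates outputs, an `impl()` in linalg_lu_native.h that fills them) surfaces to users as a plain factorization call. A minimal sketch, assuming an arbitrary 4x4 input:

```cpp
#include <ATen/ATen.h>

void linalg_lu_example() {
  at::Tensor A = at::randn({4, 4});
  // Functional form: meta() sizes P, L, U and impl() computes them.
  auto [P, L, U] = at::linalg_lu(A, /*pivot=*/true);
  // Out-variant form writes into caller-provided tensors (resized as needed).
  at::Tensor P2 = at::empty({0}), L2 = at::empty({0}), U2 = at::empty({0});
  at::linalg_lu_out(P2, L2, U2, A, /*pivot=*/true);
}
```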
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f7670c35a1a443d727d9e5e8d51099e124d48a6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & linalg_matrix_exp_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & linalg_matrix_exp_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve.h
new file mode 100644
index 0000000000000000000000000000000000000000..1f3b23a937fbe665c8e3dd44cd1f1fc42e8a07fc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/linalg_solve_ops.h>
+
+namespace at {
+
+
+// aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
+inline at::Tensor linalg_solve(const at::Tensor & A, const at::Tensor & B, bool left=true) {
+    return at::_ops::linalg_solve::call(A, B, left);
+}
+
+// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_solve_out(at::Tensor & out, const at::Tensor & A, const at::Tensor & B, bool left=true) {
+    return at::_ops::linalg_solve_out::call(A, B, left, out);
+}
+// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_solve_outf(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) {
+    return at::_ops::linalg_solve_out::call(A, B, left, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..b4d895e76770670a078c71ffeb0f9f7a915372b6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_linalg_vector_norm : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+};
+
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b34d4cc730e2ad2e86142e4ca93cf73ea96e18ee
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & logical_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & logical_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..8ce0369196b963cecd976816e0770bbd1de57f4d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_logit_backward : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps);
+};
+
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a1159b5c8ad75de8f4e9f98d3ccadb49b490b483
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps=c10::nullopt);
+TORCH_API at::Tensor & logit_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps=c10::nullopt);
+TORCH_API at::Tensor & logit_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mT_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mT_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b6526e6604e20298e1c90b5391c7b5066ea72a7c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mT_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API mT {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mT")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mT(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..961e5a0e003ef8d5335e3e4578006ff3d3a97933
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/minimum_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_minimum_out : public at::meta::structured_minimum {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
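Operator.h headers like mT_ops.h above define one struct per overload whose `call()` is the unboxed entry point; `Tensor::mT()` forwards to it. A minimal sketch (driving the generated struct directly is unusual outside dispatcher code; normally one just calls the method):

```cpp
#include <ATen/ATen.h>
#include <ATen/ops/mT_ops.h>

void mT_via_ops() {
  at::Tensor x = at::randn({2, 3, 4});
  at::Tensor y = at::_ops::mT::call(x);  // same as x.mT(): swaps the last two dims
  // y.sizes() == {2, 4, 3}, as a view of x
}
```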
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3f12ef21fa98c5ff432e2a933d42d7a48d17939
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API miopen_batch_norm {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, bool, double, double);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_batch_norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon);
+};
+
+struct TORCH_API miopen_batch_norm_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_batch_norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_transpose_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_transpose_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2de30de84f78af4d86347ff90e7030400d26f9a3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_transpose_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & miopen_convolution_transpose_out_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out);
+TORCH_API at::Tensor miopen_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_backward_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd155b10e62df6fcbe23e4f342951d3121cdfe62
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_backward_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor multi_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & multi_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & multi_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/new_full_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/new_full_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e8677fc1cbb3f97c464bf3c9681369a32ec435d1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/new_full_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor new_full(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={});
+TORCH_API at::Tensor new_full(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor new_full_symint(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={});
+TORCH_API at::Tensor new_full_symint(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor & new_full_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value);
+TORCH_API at::Tensor & new_full_outf(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out);
+TORCH_API at::Tensor & new_full_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value);
+TORCH_API at::Tensor & new_full_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
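`new_full` keeps the source tensor's dtype and device unless overridden, which is why the dispatch header above carries both a packed `TensorOptions` overload and an unpacked-optionals overload. A short sketch; the sizes and fill values are arbitrary:

```cpp
#include <ATen/ATen.h>

void new_full_example() {
  at::Tensor base = at::zeros({2, 2}, at::kFloat);
  at::Tensor f = base.new_full({3, 3}, 7);               // 3x3 float tensor of 7s
  at::Tensor d = base.new_full({3, 3}, 7, at::kDouble);  // dtype overridden via options
}
```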
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward.h
new file mode 100644
index 0000000000000000000000000000000000000000..c0f391e99a488774d43f464ffd282b7582a91bbe
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_forward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/nll_loss_forward_ops.h>
+
+namespace at {
+
+
+// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
+    return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
+    return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
+  }
+}
+
+// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
+    return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
+    return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
+  }
+}
+
+// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_symint_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
+    return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
+    return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
+  }
+}
+
+// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
+    return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
+    return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
+  }
+}
+
+// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
+inline ::std::tuple<at::Tensor,at::Tensor> nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
+    return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor,at::Tensor> nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
+    return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
+  }
+}
+
+// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
+inline ::std::tuple<at::Tensor,at::Tensor> nll_loss_forward_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
+    return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor,at::Tensor> nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
+    return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
+  }
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_numpy_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_numpy_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0b1a2ff600d2752711597d3cfbea4fdb0c3b0493
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_numpy_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> nonzero_numpy(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
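nll_loss_forward.h above returns `(output, total_weight)` and exists in `int64_t` and `c10::SymInt` flavors of `ignore_index`. A minimal sketch with an illustrative batch of 4 over 10 classes:

```cpp
#include <ATen/ATen.h>

void nll_forward_example() {
  at::Tensor logp   = at::log_softmax(at::randn({4, 10}), /*dim=*/1);
  at::Tensor target = at::randint(0, 10, {4}, at::kLong);
  // weight is an absent optional here; ignore_index is SymInt in the schema
  // but plain int64_t at this C++ surface.
  auto [output, total_weight] = at::nll_loss_forward(
      logp, target, /*weight=*/{}, at::Reduction::Mean, /*ignore_index=*/-100);
}
```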
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/not_equal_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/not_equal_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..74d33fc47c6855b17860fba79d80ea6bceaa95f7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/not_equal_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API not_equal_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::not_equal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+struct TORCH_API not_equal_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::not_equal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "not_equal.Scalar(Tensor self, Scalar other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API not_equal_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::not_equal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API not_equal_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::not_equal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "not_equal.Tensor(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API not_equal__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::not_equal_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API not_equal__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::not_equal_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/ones_like_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/ones_like_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..eface803c598707906e98c687abc82d26919db1a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/ones_like_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API ones_like {
+  using schema = at::Tensor (const at::Tensor &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>, c10::optional<at::MemoryFormat>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ones_like")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format);
+};
+
+struct TORCH_API ones_like_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::optional<at::MemoryFormat>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ones_like")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..4f4ca49085d84f749d7f355dbecae0e5c0445500
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API orgqr {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::orgqr")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "orgqr(Tensor self, Tensor input2) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & input2);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2);
+};
+
+struct TORCH_API orgqr_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::orgqr")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/qr_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/qr_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dc07ceb6ef5ea301eeb1861537a820e91bad5ed4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/qr_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> qr(const at::Tensor & self, bool some=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & self, bool some=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> qr_outf(const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5a4b6fa7cfe4b79767c5c603841799abd572ac8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API renorm_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, int64_t, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::renorm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out);
+};
+
+struct TORCH_API renorm {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &, int64_t, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::renorm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm);
+};
+
+struct TORCH_API renorm_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &, int64_t, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::renorm_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm);
+};
+
+}} // namespace at::_ops
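`renorm` clamps the norm of each slice along `dim` to at most `maxnorm`, with `renorm_` as the in-place spelling registered above. A small sketch; the 3x5 shape and maxnorm of 1.0 are arbitrary:

```cpp
#include <ATen/ATen.h>

void renorm_example() {
  at::Tensor x = at::randn({3, 5});
  // Clamp each row (slice along dim 0) to L2 norm <= 1.0.
  at::Tensor y = at::renorm(x, /*p=*/2, /*dim=*/0, /*maxnorm=*/1.0);
  x.renorm_(2, 0, 1.0);  // same operation, in place
}
```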
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c4a4ee4822e042140f185268150cd019e4d3afcc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor replication_pad2d(const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor replication_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & replication_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & replication_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out);
+TORCH_API at::Tensor & replication_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & replication_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5989910f0ee4b47197c4ba09c559627852e453f4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API reshape {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reshape")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef shape);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef shape);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/rshift_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/rshift_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a65c6088d30f885a1fe777caf1959fc52e33e94
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/rshift_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & __rshift___Scalar_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor __rshift__(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & __irshift__(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & __rshift___Tensor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor __rshift__(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & __irshift__(at::Tensor & self, const at::Tensor & other);
+} // namespace native
+} // namespace at
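// ---------------------------------------------------------------------------
// Editor's illustration, not generated code: the SymInt[] parameter in the
// reshape schema above is still reachable with plain integer shapes from C++,
// since at::IntArrayRef converts to c10::SymIntArrayRef. Sketch only.
#include <ATen/ATen.h>

void reshape_demo() {
  at::Tensor x = at::arange(12);
  // The (a) alias annotation in the schema means y may share x's storage.
  at::Tensor y = at::reshape(x, {3, 4});
}
// ---------------------------------------------------------------------------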
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/rsub.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/rsub.h
new file mode 100644
index 0000000000000000000000000000000000000000..57292c4463b701830d28b0274805235e4cb5e4df
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/rsub.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/rsub_ops.h>
+
+namespace at {
+
+
+// aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+inline at::Tensor rsub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
+    return at::_ops::rsub_Tensor::call(self, other, alpha);
+}
+
+// aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+inline at::Tensor rsub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
+    return at::_ops::rsub_Scalar::call(self, other, alpha);
+}
+
+// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & rsub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
+    return at::_ops::rsub_Tensor_out::call(self, other, alpha, out);
+}
+// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & rsub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
+    return at::_ops::rsub_Tensor_out::call(self, other, alpha, out);
+}
+
+// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & rsub_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
+    return at::_ops::rsub_Scalar_out::call(self, other, alpha, out);
+}
+// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & rsub_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
+    return at::_ops::rsub_Scalar_out::call(self, other, alpha, out);
+}
+
+}
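// ---------------------------------------------------------------------------
// Editor's illustration, not generated code: rsub computes
// other - alpha * self (it backs Python's reflected subtraction). Sketch
// assuming libtorch.
#include <ATen/ATen.h>

void rsub_demo() {
  at::Tensor x = at::ones({2, 2});
  // Tensor overload: 5 - 2 * 1 == 3 in every element.
  at::Tensor y = at::rsub(x, at::full({2, 2}, 5.0), /*alpha=*/2);
  // Scalar overload of the same op: 10 - 1 == 9 everywhere.
  at::Tensor z = at::rsub(x, /*other=*/10.0, /*alpha=*/1);
}
// ---------------------------------------------------------------------------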
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6875effe199e73df53c80ddc5ef6b5596ee88e39
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/sinc_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_sinc_out : public at::meta::structured_sinc {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sinh.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sinh.h
new file mode 100644
index 0000000000000000000000000000000000000000..bad211f0f7bd33a6109a06a9da1e2e4c96361b8f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/sinh.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/sinh_ops.h>
+
+namespace at {
+
+
+// aten::sinh(Tensor self) -> Tensor
+inline at::Tensor sinh(const at::Tensor & self) {
+    return at::_ops::sinh::call(self);
+}
+
+// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & sinh_(at::Tensor & self) {
+    return at::_ops::sinh_::call(self);
+}
+
+// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::sinh_out::call(self, out);
+}
+// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::sinh_out::call(self, out);
+}
+
+}
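// ---------------------------------------------------------------------------
// Editor's illustration, not generated code: the usual trio generated for a
// unary op like sinh — functional, in-place, and out= forms. Sketch only.
#include <ATen/ATen.h>

void sinh_demo() {
  at::Tensor x = at::linspace(-1.0, 1.0, 5);
  at::Tensor y = at::sinh(x);      // functional, aten::sinh
  at::Tensor out = at::empty_like(x);
  at::sinh_out(out, x);            // out= form declared above
  x.sinh_();                       // in-place, aten::sinh_
}
// ---------------------------------------------------------------------------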
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..fadc104fc26fd72316635397bc7265a2c9810db5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/soft_margin_loss_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor soft_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & soft_margin_loss_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc8e07f3ff5e81f9e6c5a4b829152630170d2b8c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/special_bessel_j0_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_special_bessel_j0_out : public at::meta::structured_special_bessel_j0 {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b0a6e26aff1cea39c89e19d5036085b9d883281a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/special_i1_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_special_i1_out : public at::meta::structured_special_i1 {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
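// ---------------------------------------------------------------------------
// Editor's illustration, not generated code: soft_margin_loss as declared
// above, with the default mean reduction; targets are +/-1 labels. Sketch
// assuming libtorch.
#include <ATen/ATen.h>

void soft_margin_loss_demo() {
  at::Tensor input = at::randn({4});
  at::Tensor target = at::ones({4});  // +1 labels; -1 marks the other class
  // reduction defaults to at::Reduction::Mean per the declaration.
  at::Tensor loss = at::soft_margin_loss(input, target);
}
// ---------------------------------------------------------------------------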
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..397b8eebf6a5c193fe9edd8393c36566a4bc4bd6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_ndtri(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..72e37ccd2b5ca05e96d857585f13b3789e4be55f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value);
+TORCH_API at::Tensor & threshold_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value);
+TORCH_API at::Tensor & threshold_outf(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & threshold_(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8a274341b5a1e1360cc19e16ce1a155cf6da73c8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1);
+TORCH_API at::Tensor trapz(const at::Tensor & y, double dx=1, int64_t dim=-1);
+} // namespace native
+} // namespace at
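// ---------------------------------------------------------------------------
// Editor's illustration, not generated code: the sample-spacing overload of
// trapz declared above. For y = x sampled at unit spacing on [0, 4] the
// trapezoidal rule is exact, so the result is 8. Sketch only.
#include <ATen/ATen.h>

void trapz_demo() {
  at::Tensor y = at::arange(5).to(at::kDouble);            // 0, 1, 2, 3, 4
  at::Tensor area = at::trapz(y, /*dx=*/1.0, /*dim=*/-1);  // scalar tensor 8.0
}
// ---------------------------------------------------------------------------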
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices.h
new file mode 100644
index 0000000000000000000000000000000000000000..be8ee3dfe68c13c79375925550f4d54c8f008558
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices.h
@@ -0,0 +1,43 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/tril_indices_ops.h>
+
+namespace at {
+
+
+// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong) {
+    return at::_ops::tril_indices::call(row, col, offset, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::tril_indices::call(row, col, offset, dtype, layout, device, pin_memory);
+}
+
+// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & tril_indices_out(at::Tensor & out, int64_t row, int64_t col, int64_t offset=0) {
+    return at::_ops::tril_indices_out::call(row, col, offset, out);
+}
+// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & tril_indices_outf(int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
+    return at::_ops::tril_indices_out::call(row, col, offset, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/uniform_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/uniform_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c86e0483fe12b93729d646b76a46d758e8e76029
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/uniform_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & uniform_(at::Tensor & self, double from=0, double to=1, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
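// ---------------------------------------------------------------------------
// Editor's illustration, not generated code: the TensorOptions convenience
// overload of tril_indices above unpacks dtype/layout/device/pin_memory into
// the schema arguments, and uniform_ fills a tensor in place. Sketch assuming
// libtorch.
#include <ATen/ATen.h>

void tril_indices_demo() {
  // 2 x N tensor of (row, col) coordinates of the lower triangle.
  at::Tensor idx = at::tril_indices(3, 3, /*offset=*/0,
                                    at::TensorOptions().dtype(at::kLong));
  at::Tensor t = at::empty({3, 3});
  t.uniform_(0.0, 1.0);  // CPU path declared in the last header above
}
// ---------------------------------------------------------------------------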