Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Char_native.h +21 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_ops.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_frac_native.h +25 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_cpu_dispatch.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose.h +44 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_jagged_dummy_native.h +20 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets.h +30 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nnz_native.h +22 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_compositeimplicitautograd_dispatch.h +24 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_cuda_dispatch.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csc_native.h +24 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_meta_dispatch.h +28 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h +28 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_cpu_dispatch.h +25 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_cuda_dispatch.h +26 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/arctan_native.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_native.h +24 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/asin_cuda_dispatch.h +26 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_native.h +22 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/binomial_ops.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_cpu_dispatch.h +26 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_cpu_dispatch.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cummin.h +53 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_bag_ops.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_native.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_meta.h +27 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft2_native.h +22 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_cuda_dispatch.h +26 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_ops.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_ops.h +50 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h +25 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta.h +39 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_meta_dispatch.h +26 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/kron_native.h +22 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_ops.h +83 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_norm_compositeimplicitautograd_dispatch.h +28 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vector_norm_native.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linspace_meta_dispatch.h +24 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_cuda_dispatch.h +25 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mT_compositeimplicitautograd_dispatch.h +23 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward.h +30 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_native.h +21 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/median_cpu_dispatch.h +25 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mul.h +53 -0
- llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h +24 -0
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Char_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _cast_Char(const at::Tensor & self, bool non_blocking=false);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _embedding_bag_per_sample_weights_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_embedding_bag_per_sample_weights_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx);
+};
+
+struct TORCH_API _embedding_bag_per_sample_weights_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_embedding_bag_per_sample_weights_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out);
+};
+
+}} // namespace at::_ops
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_frac_native.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_frac_out(at::TensorList self, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_frac_slow(at::TensorList self);
+TORCH_API void foreach_tensor_frac_slow_(at::TensorList self);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_frac_cuda(at::TensorList self);
+TORCH_API void foreach_tensor_frac_cuda_(at::TensorList self);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_grid_sampler_2d_cpu_fallback_ops.h>
+
+namespace at {
+
+
+// aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
+inline at::Tensor _grid_sampler_2d_cpu_fallback(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+    return at::_ops::_grid_sampler_2d_cpu_fallback::call(input, grid, interpolation_mode, padding_mode, align_corners);
+}
+
+// aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _grid_sampler_2d_cpu_fallback_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+    return at::_ops::_grid_sampler_2d_cpu_fallback_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out);
+}
+// aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _grid_sampler_2d_cpu_fallback_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
+    return at::_ops::_grid_sampler_2d_cpu_fallback_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out);
+}
+
+}
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_cpu_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false);
+
+} // namespace cpu
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose.h
ADDED
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_mkldnn_transpose_ops.h>
+
+namespace at {
+
+
+// aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
+inline at::Tensor _mkldnn_transpose(const at::Tensor & self, int64_t dim0, int64_t dim1) {
+    return at::_ops::_mkldnn_transpose::call(self, dim0, dim1);
+}
+
+// aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
+inline at::Tensor & _mkldnn_transpose_(at::Tensor & self, int64_t dim0, int64_t dim1) {
+    return at::_ops::_mkldnn_transpose_::call(self, dim0, dim1);
+}
+
+// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _mkldnn_transpose_out(at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) {
+    return at::_ops::_mkldnn_transpose_out::call(self, dim0, dim1, out);
+}
+// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _mkldnn_transpose_outf(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
+    return at::_ops::_mkldnn_transpose_out::call(self, dim0, dim1, out);
+}
+
+}
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_jagged_dummy_native.h
ADDED
@@ -0,0 +1,20 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_get_offsets_ops.h>
+
+namespace at {
+
+
+// aten::_nested_get_offsets(Tensor self) -> Tensor
+inline at::Tensor _nested_get_offsets(const at::Tensor & self) {
+    return at::_ops::_nested_get_offsets::call(self);
+}
+
+}
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_nnz_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t _nnz_sparse(const at::Tensor & self);
+TORCH_API int64_t _nnz_sparse_csr(const at::Tensor & self);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={});
+TORCH_API at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+
+} // namespace compositeimplicitautograd
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_cuda_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={});
+
+} // namespace cuda
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_csc_native.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _to_sparse_csc_out(const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out);
+TORCH_API at::Tensor dense_to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt);
+TORCH_API at::Tensor coo_to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt);
+TORCH_API at::Tensor sparse_compressed_to_sparse_csc(const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_meta_dispatch.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _upsample_bilinear2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_bilinear2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _upsample_nearest_exact2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_cpu_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices);
+
+} // namespace cpu
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/adaptive_max_pool3d_ops.h>
+
+namespace at {
+
+
+// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) {
+    return at::_ops::adaptive_max_pool3d_out::call(self, output_size, out, indices);
+}
+// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
+    return at::_ops::adaptive_max_pool3d_out::call(self, output_size, out, indices);
+}
+
+// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
+    return at::_ops::adaptive_max_pool3d::call(self, output_size);
+}
+
+}
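For context, a short sketch (not part of the diff; assumes a linked libtorch build and C++17) of the functional overload declared above, which returns both the pooled values and the indices of the selected inputs:

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor x = at::rand({1, 1, 8, 8, 8});  // N, C, D, H, W
  // Returns (values, indices); indices record which input element won each output cell.
  auto [values, indices] = at::adaptive_max_pool3d(x, /*output_size=*/{2, 2, 2});
  // values and indices both have shape {1, 1, 2, 2, 2}.
  return 0;
}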
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_cuda_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+
+} // namespace cuda
+} // namespace at
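These at::cuda:: declarations are the CUDA-dispatched entry points for addmm, which computes beta * self + alpha * (mat1 @ mat2). A hedged sketch (not part of the diff) through the public dispatcher-routed wrapper; CPU tensors are used here, while CUDA tensors would route to the declarations above:

#include <ATen/ATen.h>

int main() {
  at::Tensor bias = at::zeros({2, 2});
  at::Tensor m1 = at::rand({2, 3});
  at::Tensor m2 = at::rand({3, 2});
  // result = beta * bias + alpha * (m1 @ m2); beta and alpha default to 1.
  at::Tensor result = at::addmm(bias, m1, m2, /*beta=*/1, /*alpha=*/1);
  return 0;
}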
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/arctan_native.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor arctan(const at::Tensor & self);
+TORCH_API at::Tensor & arctan_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & arctan_(at::Tensor & self);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided_native.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor as_strided_tensorimpl(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
+TORCH_API at::Tensor as_strided_tensorimpl_meta_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt);
+TORCH_API at::Tensor as_strided_qtensorimpl(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
+TORCH_API const at::Tensor & as_strided__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt);
+} // namespace native
+} // namespace at
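The declarations above back at::as_strided, which reinterprets a tensor's storage under an explicit size and stride without copying. A minimal sketch (not part of the diff; values illustrative):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::arange(6);  // [0, 1, 2, 3, 4, 5]
  // View the same storage as 2x2 with strides (3, 1): rows start 3 elements apart.
  at::Tensor v = at::as_strided(t, /*size=*/{2, 2}, /*stride=*/{3, 1});
  // v is [[0, 1], [3, 4]] and aliases t's storage; no data is copied.
  return 0;
}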
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/asin_cuda_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor asin(const at::Tensor & self);
+TORCH_API at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & asin_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out(const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> batch_norm_stats_cuda(const at::Tensor & input, double eps);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/binomial_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API binomial {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binomial")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator);
+};
+
+struct TORCH_API binomial_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binomial")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator, at::Tensor & out);
+};
+
+}} // namespace at::_ops
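Each Operator.h header like this one pins down an op's name, overload name, and schema string, and declares the unboxed call/redispatch entry points; the user-facing at:: wrappers are thin forwarders to call. A sketch (not part of the diff; assumes the usual full-header build where ATen/ATen.h pulls in both declarations):

#include <ATen/ATen.h>

int main() {
  at::Tensor count = at::full({3}, 10.0);
  at::Tensor prob  = at::full({3}, 0.5);
  // Public wrapper from ATen/Functions.h...
  at::Tensor draws = at::binomial(count, prob);
  // ...and the equivalent direct call into the Operator.h entry point above.
  at::Tensor same_op = at::_ops::binomial::call(count, prob, c10::nullopt);
  return 0;
}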
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_cpu_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor bitwise_or(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_or_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_cpu_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & cauchy_(at::Tensor & self, double median=0, double sigma=1, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cummin.h
ADDED
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/cummin_ops.h>
+
+namespace at {
+
+
+// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> cummin(const at::Tensor & self, int64_t dim) {
+    return at::_ops::cummin::call(self, dim);
+}
+
+// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+inline ::std::tuple<at::Tensor &,at::Tensor &> cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim) {
+    return at::_ops::cummin_out::call(self, dim, values, indices);
+}
+// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+inline ::std::tuple<at::Tensor &,at::Tensor &> cummin_outf(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::cummin_out::call(self, dim, values, indices);
+}
+
+// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> cummin(const at::Tensor & self, at::Dimname dim) {
+    return at::_ops::cummin_dimname::call(self, dim);
+}
+
+// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+inline ::std::tuple<at::Tensor &,at::Tensor &> cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) {
+    return at::_ops::cummin_dimname_out::call(self, dim, values, indices);
+}
+// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+inline ::std::tuple<at::Tensor &,at::Tensor &> cummin_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::cummin_dimname_out::call(self, dim, values, indices);
+}
+
+}
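A short usage sketch for the functional overload declared above (not part of the diff; values illustrative): cummin returns the running minimum along dim together with the index at which each running minimum was first attained.

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor t = at::tensor({3.0, 1.0, 2.0});
  auto [values, indices] = at::cummin(t, /*dim=*/0);
  // values  -> [3., 1., 1.]
  // indices -> [0, 1, 1]
  return 0;
}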
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_bag_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API embedding_bag {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional<at::Tensor> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::embedding_bag")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset);
+};
+
+struct TORCH_API embedding_bag_padding_idx {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional<at::Tensor> &, bool, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::embedding_bag")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "padding_idx")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx);
+};
+
+}} // namespace at::_ops
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_native.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/erfc_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_erfc_out : public at::meta::structured_erfc {
+  void impl(const at::Tensor & self, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_meta.h
ADDED
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_expm1 : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft2_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fft_fft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_fft2_symint_out(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_cuda_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
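
These at::cuda:: symbols are backend-specific entry points; ordinary code goes through the dispatcher, which selects them when the inputs live on a CUDA device. A sketch under that assumption (requires a CUDA-enabled libtorch build and a visible GPU):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor a = at::arange(10, at::kFloat).cuda();  // move to GPU
      at::Tensor b = at::full_like(a, 3.0);
      at::Tensor q = at::floor_divide(a, b);             // dispatcher routes to the CUDA kernel above
      return 0;
    }
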
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API hardswish_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardswish_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardswish_backward(Tensor grad_output, Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self);
+};
+
+struct TORCH_API hardswish_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardswish_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_ops.h
ADDED
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API hardswish_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardswish")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+struct TORCH_API hardswish {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardswish")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardswish(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API hardswish_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardswish_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardswish_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+}} // namespace at::_ops
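
The three structs above correspond one-to-one to the out, functional, and in-place variants of the op. A minimal sketch of the matching public calls, assuming a program linked against libtorch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::randn({8});
      at::Tensor y = at::hardswish(x);    // aten::hardswish
      at::Tensor out = at::empty_like(x);
      at::hardswish_out(out, x);          // aten::hardswish.out
      at::hardswish_(x);                  // aten::hardswish_ (mutates x)
      return 0;
    }
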
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_index_add : public at::impl::MetaBase {
+
+  template <bool DIM = false>
+  struct TORCH_API precompute_out {
+
+    precompute_out<true> set_dim(int64_t value) {
+      static_assert(DIM == false, "dim already set");
+      precompute_out<true> ret;
+      ret.dim = value;
+      return ret;
+    }
+
+    int64_t dim;
+  };
+  using meta_return_ty = precompute_out<true>;
+  meta_return_ty meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha);
+};
+
+} // namespace meta
+} // namespace at
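
The precompute_out pattern lets meta() canonicalize dim once and hand the result to the kernel, with the template flag enforcing at compile time that dim is set exactly once. A sketch of the public op this backs, assuming a libtorch build (values are illustrative):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor self = at::zeros({5, 3});
      at::Tensor index = at::arange(0, 5, 2, at::kLong);  // {0, 2, 4}
      at::Tensor source = at::ones({3, 3});
      at::Tensor r = at::index_add(self, /*dim=*/0, index, source);  // rows 0, 2, 4 get source added
      return 0;
    }
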
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_meta_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true);
+TORCH_API at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true);
+TORCH_API at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out);
+TORCH_API at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true);
+
+} // namespace meta
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/kron_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor kron(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & kron_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+} // namespace native
+} // namespace at
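
at::kron computes the Kronecker product: for inputs of shape (m, n) and (p, q), the result has shape (m*p, n*q). A minimal sketch, assuming a libtorch build:

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor a = at::eye(2);
      at::Tensor b = at::ones({2, 2});
      at::Tensor k = at::kron(a, b);   // block structure of a scaled by entries of b
      std::cout << k.sizes() << "\n";  // prints [4, 4]
      return 0;
    }
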
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_ops.h
ADDED
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API less_equal_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_equal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+struct TORCH_API less_equal_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_equal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_equal.Scalar(Tensor self, Scalar other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API less_equal_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_equal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API less_equal_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_equal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_equal.Tensor(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API less_equal__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_equal_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API less_equal__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_equal_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
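
These _ops structs are the unboxed call sites behind the public wrappers; both levels reach the same dispatcher entry. A sketch, assuming a libtorch build:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor a = at::arange(5, at::kFloat);
      at::Tensor m1 = at::less_equal(a, 2.0);                       // public wrapper
      at::Tensor m2 = at::_ops::less_equal_Scalar::call(a, 2.0);    // same op, one layer down
      return 0;
    }
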
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_norm_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_norm(const at::Tensor & self, const c10::optional<at::Scalar> & ord=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & linalg_norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & ord=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & linalg_norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor linalg_norm(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & linalg_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & linalg_norm_outf(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
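
Note the two overload families: one takes a numeric ord (optional Scalar), the other a string ord such as "fro" or "nuc". A minimal sketch of both, assuming a libtorch build:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor v = at::randn({5});
      at::Tensor l2 = at::linalg_norm(v);          // default ord: 2-norm
      at::Tensor m = at::randn({3, 3});
      at::Tensor fro = at::linalg_norm(m, "fro");  // string-ord overload: Frobenius norm
      return 0;
    }
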
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vector_norm_native.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/linalg_vector_norm_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_vector_norm_out : public at::meta::structured_linalg_vector_norm {
+  void impl(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linspace_meta_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps);
+TORCH_API at::Tensor & linspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_cuda_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor logaddexp(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & logaddexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & logaddexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mT_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor mT(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/matrix_exp_backward_ops.h>
+
+namespace at {
+
+
+// aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
+inline at::Tensor matrix_exp_backward(const at::Tensor & self, const at::Tensor & grad) {
+    return at::_ops::matrix_exp_backward::call(self, grad);
+}
+
+}
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor matrix_exp(const at::Tensor & self);
+} // namespace native
+} // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/median_cpu_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor median(const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> median_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+
+} // namespace cpu
+} // namespace at
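
The global reduction returns a single tensor, while the dim variant returns values and indices. A sketch of the public calls (C++17, libtorch; values illustrative):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::randn({4, 6});
      at::Tensor global_med = at::median(x);              // median over all elements
      auto [values, indices] = at::median(x, /*dim=*/1);  // per-row median and its index
      return 0;
    }
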
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/mul.h
ADDED
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/mul_ops.h>
+
+namespace at {
+
+
+// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor mul(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::mul_Tensor::call(self, other);
+}
+
+// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::mul_out::call(self, other, out);
+}
+// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::mul_out::call(self, other, out);
+}
+
+// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor mul(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::mul_Scalar::call(self, other);
+}
+
+// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::mul_Scalar_out::call(self, other, out);
+}
+// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mul_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::mul_Scalar_out::call(self, other, out);
+}
+
+}
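
As the wrappers above show, mul_out and mul_outf differ only in argument order (out-first vs schema order) and forward to the same _ops entry. A minimal sketch, assuming a libtorch build:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor a = at::randn({3});
      at::Tensor b = at::randn({3});
      at::Tensor out = at::empty_like(a);
      at::mul_out(out, a, b);    // out-first convention
      at::mul_outf(a, b, out);   // schema-order convention; same underlying op
      return 0;
    }
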
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor nan_to_num(const at::Tensor & self, c10::optional<double> nan=c10::nullopt, c10::optional<double> posinf=c10::nullopt, c10::optional<double> neginf=c10::nullopt);
+TORCH_API at::Tensor & nan_to_num_(at::Tensor & self, c10::optional<double> nan=c10::nullopt, c10::optional<double> posinf=c10::nullopt, c10::optional<double> neginf=c10::nullopt);
+
+} // namespace compositeexplicitautograd
+} // namespace at
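
The three optional parameters give the replacement values for NaN, +inf, and -inf respectively; unset ones fall back to defaults. A sketch, assuming a libtorch build (inputs constructed to contain NaN and -inf):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::log(at::arange(-1, 2, at::kFloat));  // {nan, -inf, 0}
      at::Tensor y = at::nan_to_num(x, /*nan=*/0.0, /*posinf=*/1e30, /*neginf=*/-1e30);
      return 0;
    }
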