Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_mode_native.h +21 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss.h +53 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_set_plan_cache_max_size.h +30 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper_cuda_dispatch.h +23 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_forward_only_cpu_dispatch.h +23 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp.h +63 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_ops.h +50 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt_ops.h +50 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sub_ops.h +116 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tanh_cuda_dispatch.h +24 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_compositeexplicitautograd_dispatch.h +25 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_logcumsumexp_cuda_dispatch.h +25 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward_ops.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_compositeexplicitautograd_dispatch.h +26 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_backward_ops.h +28 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward_cuda_dispatch.h +23 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_ops.h +28 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_transform_bias_rescale_qkv_cpu_dispatch.h +23 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put.h +30 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/align_tensors_ops.h +28 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/all_compositeexplicitautogradnonfunctional_dispatch.h +25 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_elemt.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max_cuda_dispatch.h +30 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose2d_ops.h +28 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/diagflat.h +30 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_dense_backward_compositeexplicitautograd_dispatch.h +26 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm_native.h +24 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/erfinv_cuda_dispatch.h +26 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_cpu_dispatch.h +26 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_ops.h +83 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/ger_ops.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h +23 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_cpu_dispatch.h +26 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/inverse.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_ops.h +28 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to.h +26 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/isin_native.h +29 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cpu_dispatch.h +23 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ops.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_native.h +23 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/movedim_compositeimplicitautograd_dispatch.h +24 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nansum_ops.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/neg_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/negative_native.h +23 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/output_nr_native.h +21 -0
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_mode_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _convolution_mode_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+} // namespace native
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss.h
ADDED
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_ctc_loss_ops.h>
+
+namespace at {
+
+
+// aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false) {
+    return at::_ops::_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
+}
+
+// aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, bool zero_infinity=false) {
+    return at::_ops::_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
+}
+
+// aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false) {
+    return at::_ops::_ctc_loss_out::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
+}
+// aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_outf(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::_ctc_loss_out::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
+}
+
+// aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, bool zero_infinity=false) {
+    return at::_ops::_ctc_loss_Tensor_out::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
+}
+// aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_outf(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::_ctc_loss_Tensor_out::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
+}
+
+}
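Illustrative usage (not part of the diff): Function.h-style headers such as _ctc_loss.h declare the inline at:: entry points that forward to the dispatcher. A minimal C++ sketch, assuming a working libtorch build; every shape and value below is hypothetical:

#include <ATen/ATen.h>
#include <vector>

int main() {
  // Hypothetical sizes: T=50 time steps, N=4 batch, C=20 classes.
  at::Tensor log_probs = at::randn({50, 4, 20}).log_softmax(2);
  // Target class indices in [1, C); the blank label defaults to 0.
  at::Tensor targets = at::randint(1, 20, {4, 30}, at::kLong);
  std::vector<int64_t> input_lengths(4, 50);
  std::vector<int64_t> target_lengths(4, 30);
  // Returns (per-sample negative log likelihood, log_alpha work buffer).
  auto result = at::_ctc_loss(log_probs, targets, input_lengths, target_lengths);
  at::Tensor loss = std::get<0>(result);
  return 0;
}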
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_set_plan_cache_max_size.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cufft_set_plan_cache_max_size_ops.h>
+
+namespace at {
+
+
+// aten::_cufft_set_plan_cache_max_size(DeviceIndex device_index, int max_size) -> ()
+inline void _cufft_set_plan_cache_max_size(at::DeviceIndex device_index, int64_t max_size) {
+    return at::_ops::_cufft_set_plan_cache_max_size::call(device_index, max_size);
+}
+
+}
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_cummin_helper_cuda_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API void _cummin_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim);
+
+} // namespace cuda
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_forward_only_cpu_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1);
+
+} // namespace cpu
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp.h
ADDED
@@ -0,0 +1,63 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_lerp_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
+    return at::_ops::_foreach_lerp_List::call(self, tensors1, weights);
+}
+
+// aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
+inline void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
+    return at::_ops::_foreach_lerp__List::call(self, tensors1, weights);
+}
+
+// aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
+    return at::_ops::_foreach_lerp_Scalar::call(self, tensors1, weight);
+}
+
+// aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
+inline void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
+    return at::_ops::_foreach_lerp__Scalar::call(self, tensors1, weight);
+}
+
+// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
+inline void _foreach_lerp_out(at::TensorList out, at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
+    return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out);
+}
+// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
+inline void _foreach_lerp_outf(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
+    return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out);
+}
+
+// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
+inline void _foreach_lerp_out(at::TensorList out, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
+    return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out);
+}
+// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
+inline void _foreach_lerp_outf(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
+    return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out);
+}
+
+}
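Illustrative usage (not part of the diff): the _foreach_* family applies one fused kernel across a whole list of tensors instead of launching one op per tensor. A minimal sketch, assuming a working libtorch build; the list contents are hypothetical:

#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<at::Tensor> starts = {at::zeros({3}), at::zeros({5})};
  std::vector<at::Tensor> ends   = {at::ones({3}),  at::ones({5})};
  // Scalar-weight overload: mixed[i] = starts[i] + 0.25 * (ends[i] - starts[i]).
  std::vector<at::Tensor> mixed = at::_foreach_lerp(starts, ends, 0.25);
  // In-place variant writes the result back into `starts`.
  at::_foreach_lerp_(starts, ends, 0.25);
  return 0;
}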
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_ops.h
ADDED
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_sinh {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sinh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sinh(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_sinh_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sinh_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sinh_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_sinh_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sinh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
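Illustrative usage (not part of the diff): Operator.h-style headers like the one above declare one struct per schema overload; the struct's static call() is the raw dispatcher entry point that the friendlier at:: wrappers forward to. A minimal sketch, assuming a working libtorch build:

#include <ATen/ATen.h>
#include <ATen/ops/_foreach_sinh_ops.h>
#include <vector>

int main() {
  std::vector<at::Tensor> xs = {at::randn({3}), at::randn({5})};
  // Equivalent to at::_foreach_sinh(xs): goes through the dispatcher.
  std::vector<at::Tensor> ys = at::_ops::_foreach_sinh::call(xs);
  return 0;
}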
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt_ops.h
ADDED
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_sqrt {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sqrt")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sqrt(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_sqrt_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sqrt_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sqrt_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_sqrt_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sqrt")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sub_ops.h
ADDED
@@ -0,0 +1,116 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_sub_Scalar {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, const at::Scalar & scalar);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_sub__Scalar {
+  using schema = void (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_sub_List {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+};
+
+struct TORCH_API _foreach_sub__List {
+  using schema = void (at::TensorList, at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()")
+  static void call(at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+};
+
+struct TORCH_API _foreach_sub_ScalarList {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_sub__ScalarList {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_sub_Scalar_out {
+  using schema = void (at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+};
+
+struct TORCH_API _foreach_sub_List_out {
+  using schema = void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out);
+};
+
+struct TORCH_API _foreach_sub_ScalarList_out {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+};
+
+}} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tanh_cuda_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_tanh(at::TensorList self);
+TORCH_API void _foreach_tanh_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
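Illustrative usage (not part of the diff): *_cuda_dispatch.h headers declare backend-specific entry points in at::cuda:: that bypass the dispatcher, so every argument must already live on a CUDA device. A minimal sketch, assuming a CUDA-enabled libtorch build:

#include <ATen/ops/_foreach_tanh_cuda_dispatch.h>
#include <ATen/ATen.h>
#include <vector>

void tanh_all_on_gpu(std::vector<at::Tensor>& xs) {
  // In-place foreach variant; each xs[i] is assumed to be a CUDA tensor.
  at::cuda::_foreach_tanh_(xs);
}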
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _index_put_impl(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false);
+TORCH_API at::Tensor & _index_put_impl_out(at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false);
+TORCH_API at::Tensor & _index_put_impl_outf(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_logcumsumexp_cuda_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _logcumsumexp(const at::Tensor & self, int64_t dim);
+TORCH_API at::Tensor & _logcumsumexp_out(at::Tensor & out, const at::Tensor & self, int64_t dim);
+TORCH_API at::Tensor & _logcumsumexp_outf(const at::Tensor & self, int64_t dim, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _masked_softmax_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_masked_softmax_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim);
+};
+
+struct TORCH_API _masked_softmax_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional<int64_t>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_masked_softmax_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _mps_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups);
+TORCH_API at::Tensor & _mps_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out);
+TORCH_API at::Tensor & _mps_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups);
+TORCH_API at::Tensor & _mps_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_backward_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _pack_padded_sequence_backward {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pack_padded_sequence_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first);
+};
+
+}} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_backward_cuda_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight);
+
+} // namespace cuda
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _test_autograd_multiple_dispatch_view {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_autograd_multiple_dispatch_view")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_transform_bias_rescale_qkv_cpu_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads);
+
+} // namespace cpu
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_unsafe_index_put_ops.h>
+
+namespace at {
+
+
+// aten::_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
+inline at::Tensor _unsafe_index_put(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) {
+    return at::_ops::_unsafe_index_put::call(self, indices, values, accumulate);
+}
+
+}
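Illustrative usage (not part of the diff): _unsafe_index_put is the bounds-check-free variant of index_put, intended for callers that already know the indices are valid. A minimal 1-D sketch, assuming a working libtorch build; all values are hypothetical:

#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::zeros({4});
  at::Tensor idx = at::arange(0, 4, 2);   // indices [0, 2], int64 dtype
  at::Tensor values = at::ones({2});
  // One optional index tensor per dimension being indexed.
  c10::List<c10::optional<at::Tensor>> indices;
  indices.push_back(idx);
  at::Tensor out = at::_unsafe_index_put(self, indices, values, /*accumulate=*/false);
  return 0;
}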
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor acosh(const at::Tensor & self);
+TORCH_API at::Tensor & acosh_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/addbmm_ops.h>
+
+namespace at {
+
+
+// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
+    return at::_ops::addbmm_out::call(self, batch1, batch2, beta, alpha, out);
+}
+// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
+    return at::_ops::addbmm_out::call(self, batch1, batch2, beta, alpha, out);
+}
+
+// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+inline at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
+    return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha);
+}
+
+}
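Illustrative usage (not part of the diff): addbmm is a batched matrix multiply reduced over the batch dimension, result = beta * self + alpha * sum_i batch1[i] @ batch2[i]. A minimal sketch with hypothetical shapes, assuming a working libtorch build:

#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::zeros({3, 5});        // (n, p)
  at::Tensor batch1 = at::randn({10, 3, 4});  // (b, n, m)
  at::Tensor batch2 = at::randn({10, 4, 5});  // (b, m, p)
  at::Tensor result = at::addbmm(self, batch1, batch2, /*beta=*/1, /*alpha=*/1);

  // Out-variant writes into a preallocated tensor; defaults beta=1, alpha=1.
  at::Tensor out = at::empty({3, 5});
  at::addbmm_out(out, self, batch1, batch2);
  return 0;
}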
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/align_tensors_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API align_tensors {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::align_tensors")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "align_tensors(Tensor[] tensors) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList tensors);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
+};
+
+}} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/all_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor all(const at::Tensor & self, int64_t dim, bool keepdim=false);
+TORCH_API at::Tensor all(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false);
+TORCH_API at::Tensor all(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_elemt.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/batch_norm_elemt_ops.h>
+
+namespace at {
+
+
+// aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
+inline at::Tensor batch_norm_elemt(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
+    return at::_ops::batch_norm_elemt::call(input, weight, bias, mean, invstd, eps);
+}
+
+// aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & batch_norm_elemt_out(at::Tensor & out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
+    return at::_ops::batch_norm_elemt_out::call(input, weight, bias, mean, invstd, eps, out);
+}
+// aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & batch_norm_elemt_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out) {
+    return at::_ops::batch_norm_elemt_out::call(input, weight, bias, mean, invstd, eps, out);
+}
+
+}
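batch_norm_elemt applies only the elementwise normalization step, given precomputed per-channel statistics; it is the building block SyncBatchNorm uses after all-reducing mean and invstd. A minimal sketch, assuming a CUDA-enabled build (the op's kernels are on the CUDA side); normalize is a hypothetical name.

#include <ATen/ATen.h>

at::Tensor normalize(const at::Tensor & x) {  // x: (N, C) CUDA tensor
  at::Tensor mean = x.mean({0});
  at::Tensor invstd = at::rsqrt((x - mean).pow(2).mean({0}) + 1e-5);
  // weight and bias are Tensor? in the schema; {} passes c10::nullopt.
  return at::batch_norm_elemt(x, {}, {}, mean, invstd, /*eps=*/1e-5);
}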
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max_cuda_dispatch.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor clamp_max(const at::Tensor & self, const at::Scalar & max);
+TORCH_API at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & max);
+TORCH_API at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Scalar & max, at::Tensor & out);
+TORCH_API at::Tensor & clamp_max_(at::Tensor & self, const at::Scalar & max);
+TORCH_API at::Tensor clamp_max(const at::Tensor & self, const at::Tensor & max);
+TORCH_API at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & max);
+TORCH_API at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Tensor & max, at::Tensor & out);
+TORCH_API at::Tensor & clamp_max_(at::Tensor & self, const at::Tensor & max);
+
+} // namespace cuda
+} // namespace at
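Per-backend dispatch headers like this one declare backend-pinned entry points (at::cuda::, at::cpu::, ...) that skip the dispatcher; ordinary code calls the dispatched at:: form instead. A sketch, assuming a CUDA build with a visible GPU; clamp_demo is a hypothetical name.

#include <ATen/ATen.h>

void clamp_demo() {
  at::Tensor t = at::arange(6, at::kFloat).cuda();
  at::Tensor capped = at::clamp_max(t, 3.0);  // Scalar overload, dispatched
  at::Tensor out = at::empty_like(t);
  at::clamp_max_out(out, t, 3.0);             // out= variant writes into `out`
}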
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose2d_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API conv_transpose2d_input {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, c10::SymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conv_transpose2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation);
+};
+
+}} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/diagflat.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/diagflat_ops.h>
+
+namespace at {
+
+
+// aten::diagflat(Tensor self, int offset=0) -> Tensor
+inline at::Tensor diagflat(const at::Tensor & self, int64_t offset=0) {
+    return at::_ops::diagflat::call(self, offset);
+}
+
+}
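diagflat flattens its input and lays it along the offset-th diagonal of a fresh 2-D tensor. A brief illustrative sketch (diagflat_demo is a hypothetical name):

#include <ATen/ATen.h>

void diagflat_demo() {
  at::Tensor v = at::arange(1, 4);        // [1, 2, 3]
  at::Tensor m0 = at::diagflat(v);        // 3x3, v on the main diagonal
  at::Tensor m1 = at::diagflat(v, 1);     // 4x4, v on the first superdiagonal
}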
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_dense_backward_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & embedding_dense_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
+TORCH_API at::Tensor & embedding_dense_backward_outf(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, at::Tensor & out);
+TORCH_API at::Tensor & embedding_dense_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq);
+TORCH_API at::Tensor & embedding_dense_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm_native.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor embedding_renorm(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type);
+TORCH_API at::Tensor & embedding_renorm_out(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out);
+TORCH_API at::Tensor & embedding_renorm_cpu_(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type);
+TORCH_API at::Tensor & embedding_renorm_cuda_(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type);
+} // namespace native
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/erfinv_cuda_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor erfinv(const at::Tensor & self);
+TORCH_API at::Tensor & erfinv_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & erfinv_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & erfinv_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_cpu_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor expm1(const at::Tensor & self);
+TORCH_API at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & expm1_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_ops.h>
+
+namespace at {
+
+
+// aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
+inline ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
+    return at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self, scale, zero_point, quant_min, quant_max);
+}
+
+// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
+    return at::_ops::fake_quantize_per_tensor_affine_cachemask_out::call(self, scale, zero_point, quant_min, quant_max, out0, out1);
+}
+// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_outf(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::fake_quantize_per_tensor_affine_cachemask_out::call(self, scale, zero_point, quant_min, quant_max, out0, out1);
+}
+
+}
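The cachemask variant returns the fake-quantized tensor together with the boolean mask of elements that fell inside [quant_min, quant_max]; the backward pass reuses the mask instead of recomputing it. A hedged sketch (fq_demo is a hypothetical name):

#include <ATen/ATen.h>
#include <tuple>

void fq_demo() {
  at::Tensor x = at::randn({4});
  // Structured bindings unpack the (output, mask) tuple from the schema above.
  auto [output, mask] = at::fake_quantize_per_tensor_affine_cachemask(
      x, /*scale=*/0.1, /*zero_point=*/0, /*quant_min=*/-128, /*quant_max=*/127);
}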
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_ops.h
ADDED
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API floor_divide {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API floor_divide__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API floor_divide_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API floor_divide_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide.Scalar(Tensor self, Scalar other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API floor_divide__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API floor_divide_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
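The six structs above cover the functional, in-place (trailing underscore), and out= forms, each for Tensor and Scalar divisors. A small sketch of the corresponding public API (floor_divide_demo is a hypothetical name):

#include <ATen/ATen.h>

void floor_divide_demo() {
  at::Tensor a = at::arange(1, 7);                     // [1..6]
  at::Tensor q = at::floor_divide(a, 4);               // Scalar overload
  at::Tensor out = at::empty_like(a);
  at::floor_divide_out(out, a, at::full_like(a, 4));   // Tensor overload, out=
  a.floor_divide_(2);                                  // in-place, "aten::floor_divide_"
}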
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/ger_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API ger {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ger")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ger(Tensor self, Tensor vec2) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & vec2);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2);
+};
+
+struct TORCH_API ger_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ger")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out);
+};
+
+}} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+
+} // namespace cpu
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_cpu_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true);
+TORCH_API at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true);
+TORCH_API at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out);
+TORCH_API at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true);
+
+} // namespace cpu
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/inverse.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/inverse_ops.h>
+
+namespace at {
+
+
+// aten::inverse(Tensor self) -> Tensor
+inline at::Tensor inverse(const at::Tensor & self) {
+    return at::_ops::inverse::call(self);
+}
+
+// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & inverse_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::inverse_out::call(self, out);
+}
+// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & inverse_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::inverse_out::call(self, out);
+}
+
+}
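A minimal sketch of the three generated entry points (functional, _out, _outf); at::inverse computes the matrix inverse and is documented as an alias of linalg.inv. inverse_demo is a hypothetical name.

#include <ATen/ATen.h>

void inverse_demo() {
  at::Tensor A = at::eye(3) * 2.0;
  at::Tensor Ainv = at::inverse(A);   // diagonal becomes 0.5
  at::Tensor out = at::empty_like(A);
  at::inverse_out(out, A);            // writes into out and returns it
}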
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API is_distributed {
+  using schema = bool (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::is_distributed")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "is_distributed(Tensor self) -> bool")
+  static bool call(const at::Tensor & self);
+  static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/is_set_to_ops.h>
+
+namespace at {
+
+
+
+}
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/isin_native.h
ADDED
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/isin_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_isin_Tensor_Tensor_out : public at::meta::structured_isin_Tensor_Tensor {
+  void impl(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, const at::Tensor & out);
+};
+struct TORCH_API structured_isin_Tensor_Scalar_out : public at::meta::structured_isin_Tensor_Scalar {
+  void impl(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, const at::Tensor & out);
+};
+struct TORCH_API structured_isin_Scalar_Tensor_out : public at::meta::structured_isin_Scalar_Tensor {
+  void impl(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/isnan_cpu_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor isnan(const at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/lgamma_ops.h>
+
+namespace at {
+
+
+// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::lgamma_out::call(self, out);
+}
+// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::lgamma_out::call(self, out);
+}
+
+// aten::lgamma(Tensor self) -> Tensor
+inline at::Tensor lgamma(const at::Tensor & self) {
+    return at::_ops::lgamma::call(self);
+}
+
+}
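Note the generated naming convention visible above: _out takes the out tensor first, _outf takes it last, and both route to the same aten::lgamma.out schema. A brief sketch (lgamma_demo is a hypothetical name):

#include <ATen/ATen.h>

void lgamma_demo() {
  at::Tensor x = at::arange(1, 6).to(at::kFloat);  // [1..5]
  at::Tensor y = at::lgamma(x);   // log|Gamma(x)|, so lgamma(n + 1) = log(n!)
  at::Tensor out = at::empty_like(x);
  at::lgamma_out(out, x);         // same result, written into a preallocated out
}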
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_det.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/linalg_det_ops.h>
+
+namespace at {
+
+
+// aten::linalg_det(Tensor A) -> Tensor
+inline at::Tensor linalg_det(const at::Tensor & A) {
+    return at::_ops::linalg_det::call(A);
+}
+
+// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_det_out(at::Tensor & out, const at::Tensor & A) {
+    return at::_ops::linalg_det_out::call(A, out);
+}
+// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_det_outf(const at::Tensor & A, at::Tensor & out) {
+    return at::_ops::linalg_det_out::call(A, out);
+}
+
+}
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_lu_factor {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_lu_factor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & A, bool pivot);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot);
+};
+
+struct TORCH_API linalg_lu_factor_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_lu_factor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots);
+};
+
+}} // namespace at::_ops
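The schema's pivot flag is keyword-only in Python but an ordinary trailing argument in C++. A hedged sketch that unpacks the (LU, pivots) pair with structured bindings (lu_demo is a hypothetical name):

#include <ATen/ATen.h>
#include <tuple>

void lu_demo() {
  at::Tensor A = at::randn({3, 3});
  // Partial pivoting (the default); pivots encodes the row interchanges.
  auto [LU, pivots] = at::linalg_lu_factor(A, /*pivot=*/true);
}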
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_native.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor logical_or(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & logical_or_(at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & logical_or_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+} // namespace native
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/movedim_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor movedim(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination);
+TORCH_API at::Tensor movedim(const at::Tensor & self, int64_t source, int64_t destination);
+
+} // namespace compositeimplicitautograd
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nansum_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API nansum {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nansum")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+};
+
+struct TORCH_API nansum_out {
+  using schema = at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, c10::optional<at::ScalarType>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nansum")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+};
+
+}} // namespace at::_ops
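nansum sums as if every NaN were zero; dim is optional (int[1]? in the schema), so omitting it reduces over all elements. A sketch (nansum_demo is a hypothetical name):

#include <ATen/ATen.h>
#include <cmath>

void nansum_demo() {
  at::Tensor x = at::tensor({1.0f, NAN, 3.0f});
  at::Tensor s = at::nansum(x);   // 4.0: the NaN contributes nothing
}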
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/neg_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor neg(const at::Tensor & self);
+TORCH_API at::Tensor & neg_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/negative_native.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor negative(const at::Tensor & self);
+TORCH_API at::Tensor & negative_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & negative_(at::Tensor & self);
+} // namespace native
+} // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/output_nr_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t output_nr(const at::Tensor & self);
+} // namespace native
+} // namespace at