Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- ckpts/universal/global_step20/zero/25.attention.query_key_value.weight/fp32.pt +3 -0
- ckpts/universal/global_step20/zero/5.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
- ckpts/universal/global_step20/zero/7.post_attention_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/7.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step20/zero/7.post_attention_layernorm.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_aminmax_compositeexplicitautograd_dispatch.h +26 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double_ops.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_chunk_cat_compositeexplicitautograd_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_mode.h +47 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummax_helper_native.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_abs_ops.h +50 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lgamma_cuda_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_native.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sub_cpu_dispatch.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_gather_sparse_backward_native.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_lazy_clone_ops.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_cuda_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_lengths.h +30 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_values_copy.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_left_aligned_native.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_backward_cuda_dispatch.h +23 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_remove_batch_dim_ops.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_linear_native.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_cuda_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_ctc_loss_native.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_cpu_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_with_counts_compositeexplicitautograd_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_cpu_dispatch.h +26 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_native.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_ops.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_to_compositeimplicitautograd_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_copy.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce_native.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/conj_native.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_backward.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_is_acceptable_ops.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_meta_dispatch.h +26 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/exponential_meta_dispatch.h +23 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_compositeexplicitautograd_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft_native.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_ops.h +83 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/fix_compositeimplicitautograd_dispatch.h +26 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/float_power_compositeimplicitautograd_dispatch.h +33 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_meta.h +27 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_jvp_compositeexplicitautograd_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_meta_dispatch.h +23 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_native.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_compositeimplicitautograd_dispatch.h +23 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_ops.h +83 -0
ckpts/universal/global_step20/zero/25.attention.query_key_value.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39122033bfe84138317950076d478df6e20c809e8f342684738712e5d052153e
+size 50332749
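Each of the checkpoint entries in this commit is a Git LFS pointer file, not the tensor data itself: the repository stores only the pointer spec version, the SHA-256 object id, and the byte size of the object held in LFS storage. A minimal sketch of reading one such pointer (the fallback filename is hypothetical):

```cpp
#include <fstream>
#include <iostream>
#include <string>

int main(int argc, char** argv) {
  // An LFS pointer is exactly three "key value" lines, as in the hunk above.
  std::ifstream in(argc > 1 ? argv[1] : "fp32.pt");
  std::string version, oid, size;
  std::getline(in, version);  // version https://git-lfs.github.com/spec/v1
  std::getline(in, oid);      // oid sha256:<64 hex digits>
  std::getline(in, size);     // size <byte count of the real object>
  std::cout << oid.substr(4) << " -> " << size.substr(5) << " bytes\n";
}
```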
ckpts/universal/global_step20/zero/5.mlp.dense_h_to_4h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b041c55ed8ff1d9c4cda5d5495a5bd1d223f00ca7e9f2d59d814e95252b8888
+size 33555533
ckpts/universal/global_step20/zero/7.post_attention_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbe65dce78f2e0dc0b6d389cc1480dc8bb4f85c6384845b2b5af7ec885864b76
+size 9372
ckpts/universal/global_step20/zero/7.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a91f022014fbee9b2e463ad6e76a58dbe987ad839d4ee386e5e17cd1badcb84e
+size 9387
ckpts/universal/global_step20/zero/7.post_attention_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb0d57cd46600d80cc4a638a82eb35022552e9f35e6b313cfc5eb47368582d16
+size 9293
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_aminmax_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_outf(const at::Tensor & self, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, int64_t dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
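The generated dispatch header above exposes only the out-variants of the internal `_aminmax` op for the composite-explicit-autograd key; user code normally reaches this functionality through the public `at::aminmax` wrapper. A minimal usage sketch, assuming a recent libtorch (shapes are illustrative):

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor t = at::randn({3, 4});
  auto [mn, mx] = at::aminmax(t);                 // min/max over all elements
  auto [rmin, rmax] = at::aminmax(t, /*dim=*/1);  // per-row min/max
  std::cout << mn.item<float>() << " " << mx.item<float>() << "\n";
  std::cout << rmin.sizes() << " " << rmax.sizes() << "\n";  // both [3]
}
```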
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cast_Double {
+  using schema = at::Tensor (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cast_Double")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cast_Double(Tensor self, bool non_blocking=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, bool non_blocking);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_chunk_cat_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _chunk_cat(at::TensorList tensors, int64_t dim, int64_t num_chunks);
+TORCH_API at::Tensor & _chunk_cat_out(at::Tensor & out, at::TensorList tensors, int64_t dim, int64_t num_chunks);
+TORCH_API at::Tensor & _chunk_cat_outf(at::TensorList tensors, int64_t dim, int64_t num_chunks, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_mode.h
ADDED
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_convolution_mode_ops.h>
+
+namespace at {
+
+
+// aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor
+inline at::Tensor _convolution_mode(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::_convolution_mode::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), padding, c10::fromIntArrayRefSlow(dilation), groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _convolution_mode(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::_convolution_mode::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), padding, c10::fromIntArrayRefSlow(dilation), groups);
+  }
+}
+
+// aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor
+inline at::Tensor _convolution_mode_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::_convolution_mode::call(input, weight, bias, stride, padding, dilation, groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _convolution_mode(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::_convolution_mode::call(input, weight, bias, stride, padding, dilation, groups);
+  }
+}
+
+}
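`_convolution_mode` is the internal op behind string-valued padding ("same"/"valid") in convolutions; the header declares both an `IntArrayRef` wrapper and a SymInt variant so callers can pass concrete or symbolic sizes. A minimal sketch calling the wrapper declared above, assuming a recent libtorch (shapes are illustrative):

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({1, 3, 8, 8});  // NCHW input
  at::Tensor w = at::randn({4, 3, 3, 3});  // 4 output channels, 3x3 kernel
  // "same" padding keeps the 8x8 spatial size.
  at::Tensor y = at::_convolution_mode(x, w, /*bias=*/{}, /*stride=*/{1, 1},
                                       /*padding=*/"same", /*dilation=*/{1, 1},
                                       /*groups=*/1);
  // y.sizes() == [1, 4, 8, 8]
}
```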
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummax_helper_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void cummax_helper_cpu(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim);
+TORCH_API void cummax_helper_cuda(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_fft_r2c_ops.h>
+
+namespace at {
+
+
+// aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
+inline at::Tensor _fft_r2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
+    return at::_ops::_fft_r2c::call(self, dim, normalization, onesided);
+}
+
+// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _fft_r2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
+    return at::_ops::_fft_r2c_out::call(self, dim, normalization, onesided, out);
+}
+// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _fft_r2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) {
+    return at::_ops::_fft_r2c_out::call(self, dim, normalization, onesided, out);
+}
+
+}
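`_fft_r2c` is the internal real-to-complex FFT primitive that the public `torch.fft.rfft` family lowers to. A minimal sketch calling the wrapper declared above; reading `normalization=0` as "no scaling" is an assumption about the internal enum:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({8});  // real input, one FFT dimension
  // onesided=true keeps only the n/2+1 non-redundant frequency bins.
  at::Tensor X = at::_fft_r2c(x, /*dim=*/{0}, /*normalization=*/0,
                              /*onesided=*/true);
  // X.sizes() == [5], complex dtype.
}
```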
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_abs_ops.h
ADDED
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_abs {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_abs")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_abs(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_abs_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_abs_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_abs_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_abs_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_abs")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
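The three structs above are the unboxed entry points for the functional, in-place, and out overloads of `aten::_foreach_abs`. A minimal sketch of the matching public calls, assuming a recent libtorch:

```cpp
#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<at::Tensor> ts = {at::randn({2}), at::randn({3, 3})};
  auto out = at::_foreach_abs(ts);  // functional: returns new tensors
  at::_foreach_abs_(ts);            // in-place: mutates every tensor in ts
  (void)out;
}
```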
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lgamma_cuda_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_lgamma(at::TensorList self);
+TORCH_API void _foreach_lgamma_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_native.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_sinh_out(at::TensorList self, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_sinh_slow(at::TensorList self);
+TORCH_API void foreach_tensor_sinh_slow_(at::TensorList self);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_sinh_cuda(at::TensorList self);
+TORCH_API void foreach_tensor_sinh_cuda_(at::TensorList self);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sub_cpu_dispatch.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_sub(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_sub_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> _foreach_sub(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1);
+TORCH_API void _foreach_sub_(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1);
+TORCH_API ::std::vector<at::Tensor> _foreach_sub(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_sub_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+
+} // namespace cpu
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_gather_sparse_backward_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _gather_sparse_backward(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_lazy_clone_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _lazy_clone {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_lazy_clone")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_lazy_clone(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_cuda_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float);
+TORCH_API at::Tensor & _log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float);
+TORCH_API at::Tensor & _log_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_lengths.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_get_lengths_ops.h>
+
+namespace at {
+
+
+// aten::_nested_get_lengths(Tensor self) -> Tensor
+inline at::Tensor _nested_get_lengths(const at::Tensor & self) {
+    return at::_ops::_nested_get_lengths::call(self);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_values_copy.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_get_values_copy_ops.h>
+
+namespace at {
+
+
+// aten::_nested_get_values_copy(Tensor self) -> Tensor
+inline at::Tensor _nested_get_values_copy(const at::Tensor & self) {
+    return at::_ops::_nested_get_values_copy::call(self);
+}
+
+// aten::_nested_get_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nested_get_values_copy_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::_nested_get_values_copy_out::call(self, out);
+}
+// aten::_nested_get_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nested_get_values_copy_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::_nested_get_values_copy_out::call(self, out);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_left_aligned_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API bool NestedTensor_nested_tensor_from_mask_left_aligned(const at::Tensor & t, const at::Tensor & mask);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_backward_cuda_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _pdist_backward(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_remove_batch_dim_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _remove_batch_dim {
+  using schema = at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_remove_batch_dim")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_linear_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _sparse_semi_structured_linear(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const c10::optional<at::Tensor> & bias={}, c10::optional<c10::string_view> activation=c10::nullopt, c10::optional<at::ScalarType> out_dtype=c10::nullopt);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_cuda_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _to_sparse(const at::Tensor & self, int64_t sparse_dim);
+TORCH_API at::Tensor _to_sparse(const at::Tensor & self, c10::optional<at::Layout> layout=c10::nullopt, at::OptionalIntArrayRef blocksize=c10::nullopt, c10::optional<int64_t> dense_dim=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_ctc_loss_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API bool _use_cudnn_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank);
+TORCH_API bool _use_cudnn_ctc_loss_tensor(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_cpu_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices);
+
+} // namespace cpu
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_with_counts_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_outf(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_cpu_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & bernoulli_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & bernoulli_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor & bernoulli_(at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & bernoulli_(at::Tensor & self, double p=0.5, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_native.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor binary_cross_entropy_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & binary_cross_entropy_backward_out_cpu(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input);
+TORCH_API at::Tensor binary_cross_entropy_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & binary_cross_entropy_backward_out_cuda(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API binary_cross_entropy {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binary_cross_entropy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
+};
+
+struct TORCH_API binary_cross_entropy_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binary_cross_entropy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out);
+};
+
+}} // namespace at::_ops
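These are the operator records for `aten::binary_cross_entropy` and its out overload; the schema strings show the Python-level defaults (`weight=None`, `reduction=Mean`). A minimal sketch of the functional form, assuming a recent libtorch:

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor p = at::sigmoid(at::randn({4}));         // probabilities in (0, 1)
  at::Tensor y = at::randint(0, 2, {4}, at::kFloat);  // 0/1 targets
  at::Tensor loss = at::binary_cross_entropy(p, y);   // weight=None, reduction=Mean
  std::cout << loss.item<float>() << "\n";
}
```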
venv/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_to_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor broadcast_to(const at::Tensor & self, at::IntArrayRef size);
+TORCH_API at::Tensor broadcast_to_symint(const at::Tensor & self, c10::SymIntArrayRef size);
+
+} // namespace compositeimplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_copy.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/ccol_indices_copy_ops.h>
+
+namespace at {
+
+
+// aten::ccol_indices_copy(Tensor self) -> Tensor
+inline at::Tensor ccol_indices_copy(const at::Tensor & self) {
+    return at::_ops::ccol_indices_copy::call(self);
+}
+
+// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & ccol_indices_copy_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::ccol_indices_copy_out::call(self, out);
+}
+// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & ccol_indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::ccol_indices_copy_out::call(self, out);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor coalesce(const at::Tensor & self);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/conj_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor conj(const at::Tensor & self);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_backward.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/cudnn_grid_sampler_backward_ops.h>
+
+namespace at {
+
+
+// aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
+inline ::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
+    return at::_ops::cudnn_grid_sampler_backward::call(self, grid, grad_output);
+}
+
+// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
+    return at::_ops::cudnn_grid_sampler_backward_out::call(self, grid, grad_output, out0, out1);
+}
+// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_outf(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::cudnn_grid_sampler_backward_out::call(self, grid, grad_output, out0, out1);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_is_acceptable_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API cudnn_is_acceptable {
+  using schema = bool (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_is_acceptable")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_is_acceptable(Tensor self) -> bool")
+  static bool call(const at::Tensor & self);
+  static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_meta_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor cumsum(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & cumsum_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor & cumsum_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+
+} // namespace meta
+} // namespace at
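Note the generated naming convention visible in these declarations: the `_out` form takes the output tensor first, while `_outf` keeps the schema's argument order with `out` last. A small sketch of both (hypothetical `example` function):

    #include <ATen/ATen.h>

    void example() {
      at::Tensor x = at::arange(6).reshape({2, 3});
      at::Tensor out = at::empty_like(x);
      at::cumsum_out(out, x, /*dim=*/1);                 // out-first variant
      at::cumsum_outf(x, /*dim=*/1, c10::nullopt, out);  // schema-order variant
    }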
venv/lib/python3.10/site-packages/torch/include/ATen/ops/exponential_meta_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & exponential_(at::Tensor & self, double lambd=1, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace meta
+} // namespace at
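exponential_ is an in-place sampler, hence only the mutating form is declared. A sketch (hypothetical `example` function; `lambd` is the rate of the exponential distribution):

    #include <ATen/ATen.h>

    void example() {
      at::Tensor t = at::empty({4});
      t.exponential_(/*lambd=*/2.0);  // fills t in place with Exp(2) samples
    }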
venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fft_ihfft_symint(const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_ihfft_symint_out(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+} // namespace native
+} // namespace at
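These are the kernel-side declarations; user code normally reaches them through the public at::fft_ihfft wrapper, sketched here (hypothetical `example` function; the input is treated as a real signal and the result is complex, of length n/2 + 1):

    #include <ATen/ATen.h>

    void example() {
      at::Tensor signal = at::randn({8});
      at::Tensor spec = at::fft_ihfft(signal);  // complex output with 5 entries
    }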
venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_ops.h
ADDED
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fill_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill.Scalar(Tensor self, Scalar value) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & value);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value);
+};
+
+struct TORCH_API fill_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill.Tensor(Tensor self, Tensor value) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & value);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value);
+};
+
+struct TORCH_API fill__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & value);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & value);
+};
+
+struct TORCH_API fill__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & value);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & value);
+};
+
+struct TORCH_API fill_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & value, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value, at::Tensor & out);
+};
+
+struct TORCH_API fill_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & value, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value, at::Tensor & out);
+};
+
+}} // namespace at::_ops
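The six schemas above pair the functional fill (returns a fresh tensor) with the in-place fill_ and the explicit out variants, each in Scalar and Tensor flavors; the Tensor-valued `value` must be 0-dimensional. A sketch (hypothetical `example` function):

    #include <ATen/ATen.h>

    void example() {
      at::Tensor x = at::zeros({2, 2});
      at::Tensor y = at::fill(x, 3.0);   // functional: x is untouched
      x.fill_(1.0);                      // in-place Scalar overload
      x.fill_(at::scalar_tensor(5.0));   // in-place Tensor overload (0-dim value)
    }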
venv/lib/python3.10/site-packages/torch/include/ATen/ops/fix_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fix(const at::Tensor & self);
+TORCH_API at::Tensor & fix_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & fix_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & fix_(at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
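fix is the NumPy-compatible alias of trunc (round toward zero), which is why it is registered as a compositeimplicitautograd kernel. A sketch (hypothetical `example` function):

    #include <ATen/ATen.h>

    void example() {
      at::Tensor t = at::randn({4}) * 3;
      at::Tensor r = at::fix(t);  // elementwise truncation toward zero, same as at::trunc
    }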
venv/lib/python3.10/site-packages/torch/include/ATen/ops/float_power_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,33 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor float_power(const at::Tensor & self, const at::Tensor & exponent);
+TORCH_API at::Tensor & float_power_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent);
+TORCH_API at::Tensor & float_power_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out);
+TORCH_API at::Tensor & float_power_(at::Tensor & self, const at::Tensor & exponent);
+TORCH_API at::Tensor float_power(const at::Scalar & self, const at::Tensor & exponent);
+TORCH_API at::Tensor & float_power_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent);
+TORCH_API at::Tensor & float_power_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out);
+TORCH_API at::Tensor float_power(const at::Tensor & self, const at::Scalar & exponent);
+TORCH_API at::Tensor & float_power_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent);
+TORCH_API at::Tensor & float_power_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out);
+TORCH_API at::Tensor & float_power_(at::Tensor & self, const at::Scalar & exponent);
+
+} // namespace compositeimplicitautograd
+} // namespace at
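float_power always computes in double (or complex-double) precision regardless of the input dtypes, which is what the Scalar/Tensor overload matrix above supports. A sketch (hypothetical `example` function):

    #include <ATen/ATen.h>

    void example() {
      at::Tensor base = at::arange(1, 5);          // int64 values {1, 2, 3, 4}
      at::Tensor r1 = at::float_power(base, 2.0);  // Tensor ^ Scalar -> kDouble result
      at::Tensor r2 = at::float_power(2.0, base);  // Scalar ^ Tensor -> kDouble result
    }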
venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_meta.h
ADDED
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_gelu_backward : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate);
+};
+
+} // namespace meta
+} // namespace at
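The struct declares only the shape/dtype `meta` step of the structured kernel; end users reach it through the public at::gelu_backward wrapper, sketched here (hypothetical `example` function; the `approximate` string selects between the exact and tanh formulations):

    #include <ATen/ATen.h>

    void example() {
      at::Tensor x = at::randn({4});
      at::Tensor grad_out = at::ones({4});
      at::Tensor grad_in = at::gelu_backward(grad_out, x, /*approximate=*/"none");
    }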
venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_jvp_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & glu_jvp_out(at::Tensor & out, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim);
+TORCH_API at::Tensor & glu_jvp_outf(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_meta_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & hardswish_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_native.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor im2col_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & im2col_out_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out);
+TORCH_API at::Tensor im2col_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & im2col_out_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out);
+} // namespace native
+} // namespace at
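im2col unfolds sliding local blocks: for an (N, C, H, W) input and a kH×kW kernel it returns (N, C·kH·kW, L), where L is the number of block positions. A sketch through the public at::im2col wrapper over the cpu/cuda kernels declared above (hypothetical `example` function):

    #include <ATen/ATen.h>

    void example() {
      at::Tensor x = at::randn({1, 3, 8, 8});
      // kernel 3x3, dilation 1, padding 1, stride 1 -> 8*8 = 64 block positions
      at::Tensor cols = at::im2col(x, {3, 3}, {1, 1}, {1, 1}, {1, 1});
      // cols.sizes() == {1, 27, 64}
    }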
venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor imag(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
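imag returns a real-valued view of the imaginary component of a complex tensor (and errors on real inputs). A sketch (hypothetical `example` function):

    #include <ATen/ATen.h>

    void example() {
      at::Tensor z = at::randn({3}, at::kComplexFloat);
      at::Tensor im = at::imag(z);  // real view sharing z's storage
      im.zero_();                   // writes through to z's imaginary parts
    }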
venv/lib/python3.10/site-packages/torch/include/ATen/ops/isin_ops.h
ADDED
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API isin_Tensor_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out);
+};
+
+struct TORCH_API isin_Tensor_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert);
+};
+
+struct TORCH_API isin_Tensor_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, bool, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out);
+};
+
+struct TORCH_API isin_Tensor_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert);
+};
+
+struct TORCH_API isin_Scalar_Tensor_out {
+  using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, bool, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out);
+};
+
+struct TORCH_API isin_Scalar_Tensor {
+  using schema = at::Tensor (const at::Scalar &, const at::Tensor &, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor")
+  static at::Tensor call(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert);
+};
+
+}} // namespace at::_ops
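The six schemas above cover every Tensor/Scalar combination of `elements` and `test_elements`, plus their out variants. A sketch of the boolean-mask semantics with the keyword defaults spelled out (hypothetical `example` function):

    #include <ATen/ATen.h>

    void example() {
      at::Tensor elements = at::arange(5);         // {0, 1, 2, 3, 4}
      at::Tensor pool = at::arange(2, 8);          // {2, 3, 4, 5, 6, 7}
      at::Tensor mask = at::isin(elements, pool);  // {false, false, true, true, true}
      at::Tensor inv = at::isin(elements, pool,
                                /*assume_unique=*/false,
                                /*invert=*/true);  // complement of mask
    }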