Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff.
- ckpts/universal/global_step20/zero/1.word_embeddings.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/13.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_add_relu_compositeexplicitautograd_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_search_native.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_cuda_dispatch.h +23 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized.h +113 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized_cpu_dispatch.h +26 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_backward_cuda_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_asin_compositeexplicitautograd_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf_ops.h +50 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_frac_compositeexplicitautograd_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_native.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention.h +30 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_gru_cell_backward_native.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_native.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d.h +113 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_values_ops.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int8pack_mm.h +30 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/any_cpu_dispatch.h +31 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_cpu_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/cat_cuda_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices.h +26 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_native.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_add_relu_ops.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_copy_native.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_permuted_native.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cuda_dispatch.h +23 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_compositeexplicitautograd_dispatch.h +26 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_ops.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file.h +43 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_meta.h +27 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cuda_dispatch.h +23 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_compositeimplicitautograd_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_native.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_ops.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_native.h +23 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_multi_dot_native.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_tensorinv_native.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_ops.h +50 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_ops.h +50 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_input_compositeexplicitautograd_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_cuda_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_copy.h +91 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_channel_shuffle_native.h +22 -0
ckpts/universal/global_step20/zero/1.word_embeddings.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67e20200eeebb8128387f0497f997e57547048b1c059f1cf591167ef66fd0bee
+size 415237404
ckpts/universal/global_step20/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ba11e03757ce3b88c131db1eaaa66c0c1cbce23b8f082583cc5517251199dd9
+size 33555612
ckpts/universal/global_step20/zero/13.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d5e035113b0195b712f707a90e51f9727886cc5dcbdf74de85b400cb1d79986
+size 33555533
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_add_relu_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _add_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & _add_relu_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_search_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t _cslt_sparse_mm_search(const at::Tensor & compressed_A, const at::Tensor & dense_B, const c10::optional<at::Tensor> & bias={}, const c10::optional<at::Tensor> & alpha={}, c10::optional<at::ScalarType> out_dtype=c10::nullopt, bool transpose_result=false);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_cuda_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _embedding_bag_per_sample_weights_backward(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx=-1);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized.h
ADDED
@@ -0,0 +1,113 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_empty_per_channel_affine_quantized_ops.h>
+
+namespace at {
+
+
+// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+return at::_ops::_empty_per_channel_affine_quantized::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+return at::_ops::_empty_per_channel_affine_quantized::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+}
+
+// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+return at::_ops::_empty_per_channel_affine_quantized::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+return at::_ops::_empty_per_channel_affine_quantized::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
+}
+}
+
+// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+at::Tensor _empty_per_channel_affine_quantized(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+}
+
+// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+at::Tensor _empty_per_channel_affine_quantized(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
+}
+}
+
+// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_per_channel_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+return at::_ops::_empty_per_channel_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, memory_format, out);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+at::Tensor & _empty_per_channel_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+return at::_ops::_empty_per_channel_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, memory_format, out);
+}
+}
+
+// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_per_channel_affine_quantized_outf(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+return at::_ops::_empty_per_channel_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, memory_format, out);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+at::Tensor & _empty_per_channel_affine_quantized_outf(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+return at::_ops::_empty_per_channel_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scales, zero_points, axis, memory_format, out);
+}
+}
+
+// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_per_channel_affine_quantized_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+return at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales, zero_points, axis, memory_format, out);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+at::Tensor & _empty_per_channel_affine_quantized_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+return at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales, zero_points, axis, memory_format, out);
+}
+}
+
+// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_per_channel_affine_quantized_symint_outf(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+return at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales, zero_points, axis, memory_format, out);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+at::Tensor & _empty_per_channel_affine_quantized_outf(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+return at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales, zero_points, axis, memory_format, out);
+}
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized_cpu_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous);
+TORCH_API at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format);
+TORCH_API at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous);
+TORCH_API at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format);
+
+} // namespace cpu
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_backward_cuda_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional<double> scale=c10::nullopt);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward_symint(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional<double> scale=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_asin_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_asin_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_asin_outf(at::TensorList self, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf_ops.h
ADDED
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_erf {
+using schema = ::std::vector<at::Tensor> (at::TensorList);
+using ptr_schema = schema*;
+// See Note [static constexpr char* members for windows NVCC]
+STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erf")
+STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erf(Tensor[] self) -> Tensor[]")
+static ::std::vector<at::Tensor> call(at::TensorList self);
+static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_erf_ {
+using schema = void (at::TensorList);
+using ptr_schema = schema*;
+// See Note [static constexpr char* members for windows NVCC]
+STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erf_")
+STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erf_(Tensor(a!)[] self) -> ()")
+static void call(at::TensorList self);
+static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_erf_out {
+using schema = void (at::TensorList, at::TensorList);
+using ptr_schema = schema*;
+// See Note [static constexpr char* members for windows NVCC]
+STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erf")
+STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+static void call(at::TensorList self, at::TensorList out);
+static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_frac_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_frac_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_frac_outf(at::TensorList self, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _mps_convolution_out_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_scaled_dot_product_cudnn_attention_ops.h>
+
+namespace at {
+
+
+// aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p=0.0, bool is_causal=false, bool return_debug_mask=false, c10::optional<double> scale=c10::nullopt) {
+return at::_ops::_scaled_dot_product_cudnn_attention::call(query, key, value, dropout_p, is_causal, return_debug_mask, scale);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_gru_cell_backward_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_out(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell_cuda(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={});
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d.h
ADDED
@@ -0,0 +1,113 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_upsample_nearest_exact1d_ops.h>
+
+namespace at {
+
+
+// aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+inline at::Tensor _upsample_nearest_exact1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+at::Tensor _upsample_nearest_exact1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
+}
+}
+
+// aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+inline at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size, scale_factors);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+at::Tensor _upsample_nearest_exact1d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size, scale_factors);
+}
+}
+
+// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+return at::_ops::_upsample_nearest_exact1d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales, out);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+return at::_ops::_upsample_nearest_exact1d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales, out);
+}
+}
+
+// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
+return at::_ops::_upsample_nearest_exact1d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales, out);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
+return at::_ops::_upsample_nearest_exact1d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales, out);
+}
+}
+
+// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _upsample_nearest_exact1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+return at::_ops::_upsample_nearest_exact1d_out::call(self, output_size, scales, out);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+return at::_ops::_upsample_nearest_exact1d_out::call(self, output_size, scales, out);
+}
+}
+
+// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _upsample_nearest_exact1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
+return at::_ops::_upsample_nearest_exact1d_out::call(self, output_size, scales, out);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
+return at::_ops::_upsample_nearest_exact1d_out::call(self, output_size, scales, out);
+}
+}
+
+// aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
+inline at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+return at::_ops::_upsample_nearest_exact1d::call(self, c10::fromIntArrayRefSlow(output_size), scales);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+return at::_ops::_upsample_nearest_exact1d::call(self, c10::fromIntArrayRefSlow(output_size), scales);
+}
+}
+
+// aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
+inline at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+return at::_ops::_upsample_nearest_exact1d::call(self, output_size, scales);
+}
+namespace symint {
+template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+return at::_ops::_upsample_nearest_exact1d::call(self, output_size, scales);
+}
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _upsample_nearest_exact2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_values_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _values {
+using schema = at::Tensor (const at::Tensor &);
+using ptr_schema = schema*;
+// See Note [static constexpr char* members for windows NVCC]
+STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_values")
+STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_values(Tensor(a) self) -> Tensor(a)")
+static at::Tensor call(const at::Tensor & self);
+static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int8pack_mm.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_weight_int8pack_mm_ops.h>
+
+namespace at {
+
+
+// aten::_weight_int8pack_mm(Tensor self, Tensor mat2, Tensor scales) -> Tensor
+inline at::Tensor _weight_int8pack_mm(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scales) {
+return at::_ops::_weight_int8pack_mm::call(self, mat2, scales);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/any_cpu_dispatch.h
ADDED
@@ -0,0 +1,31 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor any(const at::Tensor & self, int64_t dim, bool keepdim=false);
+TORCH_API at::Tensor & any_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false);
+TORCH_API at::Tensor & any_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out);
+TORCH_API at::Tensor any(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false);
+TORCH_API at::Tensor & any_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false);
+TORCH_API at::Tensor & any_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out);
+TORCH_API at::Tensor any(const at::Tensor & self);
+TORCH_API at::Tensor & any_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & any_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_cpu_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt);
+TORCH_API at::Tensor & avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt);
+TORCH_API at::Tensor & avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/cat_cuda_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim=0);
+TORCH_API at::Tensor & cat_out(at::Tensor & out, const at::ITensorListRef & tensors, int64_t dim=0);
+TORCH_API at::Tensor & cat_outf(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/ccol_indices_ops.h>
+
+namespace at {
+
+
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & conv_depthwise3d_out_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out);
+TORCH_API at::Tensor conv_depthwise3d_cuda(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_add_relu_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API cudnn_convolution_add_relu {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Scalar> &, const c10::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_convolution_add_relu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups);
+};
+
+struct TORCH_API cudnn_convolution_add_relu_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Scalar> &, const c10::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_convolution_add_relu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/diag_embed_ops.h>
+
+namespace at {
+
+
+// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
+inline at::Tensor diag_embed(const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) {
+    return at::_ops::diag_embed::call(self, offset, dim1, dim2);
+}
+
+// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diag_embed_out(at::Tensor & out, const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) {
+    return at::_ops::diag_embed_out::call(self, offset, dim1, dim2, out);
+}
+// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & diag_embed_outf(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+    return at::_ops::diag_embed_out::call(self, offset, dim1, dim2, out);
+}
+
+}
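
Aside: diag_embed.h above exposes the at::diag_embed convenience wrappers (functional and out-variant). A minimal usage sketch, illustrative only and assuming a translation unit linked against this libtorch build:

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor v = at::arange(3, at::kFloat);        // values [0, 1, 2]
      at::Tensor m = at::diag_embed(v);                 // 3x3 tensor with v on the main diagonal
      at::Tensor out = at::empty({3, 3}, at::kFloat);
      at::diag_embed_out(out, v);                       // out-variant writes into a preallocated tensor
      std::cout << m << "\n" << out << std::endl;
      return 0;
    }
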
venv/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_copy_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & diagonal_copy_out(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+TORCH_API at::Tensor diagonal_copy(const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_permuted_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor empty_permuted_symint(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & empty_permuted_out_symint(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cuda_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor floor_divide(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & floor_divide_(at::Tensor & self, const at::Scalar & other);
+
+} // namespace compositeexplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fractional_max_pool3d_output {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fractional_max_pool3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "output")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices);
+};
+
+struct TORCH_API fractional_max_pool3d {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fractional_max_pool3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/from_file.h
ADDED
@@ -0,0 +1,43 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/from_file_ops.h>
+
+namespace at {
+
+
+// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared=c10::nullopt, c10::optional<int64_t> size=0, at::TensorOptions options={}) {
+    return at::_ops::from_file::call(filename, shared, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::from_file::call(filename, shared, size, dtype, layout, device, pin_memory);
+}
+
+// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & from_file_out(at::Tensor & out, c10::string_view filename, c10::optional<bool> shared=c10::nullopt, c10::optional<int64_t> size=0) {
+    return at::_ops::from_file_out::call(filename, shared, size, out);
+}
+// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & from_file_outf(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, at::Tensor & out) {
+    return at::_ops::from_file_out::call(filename, shared, size, out);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_meta.h
ADDED
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_gcd : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cuda_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_meta_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor hstack(at::TensorList tensors);
+TORCH_API at::Tensor & hstack_out(at::Tensor & out, at::TensorList tensors);
+TORCH_API at::Tensor & hstack_outf(at::TensorList tensors, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/huber_loss_ops.h>
+
+namespace at {
+
+
+// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) {
+    return at::_ops::huber_loss_out::call(self, target, reduction, delta, out);
+}
+// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) {
+    return at::_ops::huber_loss_out::call(self, target, reduction, delta, out);
+}
+
+// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
+inline at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) {
+    return at::_ops::huber_loss::call(self, target, reduction, delta);
+}
+
+}
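
Aside: the aten::huber_loss schema comments above double as usage documentation (reduction defaults to Mean, delta to 1.0). A short, illustrative call sketch under the same libtorch-linking assumption as before:

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor pred   = at::randn({4, 3});
      at::Tensor target = at::randn({4, 3});
      // Mean-reduced Huber loss with the default delta of 1.0.
      at::Tensor loss = at::huber_loss(pred, target, at::Reduction::Mean, /*delta=*/1.0);
      std::cout << loss.item<float>() << std::endl;
      return 0;
    }
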
venv/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor isfinite(const at::Tensor & self);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_inv_ex {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_inv_ex")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & A, bool check_errors);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors);
+};
+
+struct TORCH_API linalg_inv_ex_inverse {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_inv_ex")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "inverse")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_native.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/linalg_lu_solve_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_lu_solve_out : public at::meta::structured_linalg_lu_solve {
+  void impl(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_multi_dot_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_multi_dot(at::TensorList tensors);
+TORCH_API at::Tensor & linalg_multi_dot_out(at::TensorList tensors, at::Tensor & out);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_tensorinv_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor linalg_tensorinv(const at::Tensor & self, int64_t ind=2);
+TORCH_API at::Tensor & linalg_tensorinv_out(const at::Tensor & self, int64_t ind, at::Tensor & out);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_ops.h
ADDED
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API log {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API log_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API log_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_ops.h
ADDED
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API logical_and {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logical_and")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logical_and(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API logical_and_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logical_and_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API logical_and_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logical_and")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_input_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & mkldnn_linear_backward_input_out(at::Tensor & out, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight);
+TORCH_API at::Tensor & mkldnn_linear_backward_input_outf(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/mkldnn_max_pool3d_ops.h>
+
+namespace at {
+
+
+// aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
+inline at::Tensor mkldnn_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+    return at::_ops::mkldnn_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
+}
+
+// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mkldnn_max_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+    return at::_ops::mkldnn_max_pool3d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
+}
+// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mkldnn_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
+    return at::_ops::mkldnn_max_pool3d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_cuda_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & mse_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean);
+TORCH_API at::Tensor & mse_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_copy.h
ADDED
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/narrow_copy_ops.h>
+
+namespace at {
+
+
+// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
+inline at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
+    return at::_ops::narrow_copy::call(self, dim, start, length);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
+    return at::_ops::narrow_copy::call(self, dim, start, length);
+  }
+}
+
+// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
+inline at::Tensor narrow_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
+    return at::_ops::narrow_copy::call(self, dim, start, length);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
+    return at::_ops::narrow_copy::call(self, dim, start, length);
+  }
+}
+
+// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & narrow_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
+    return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & narrow_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
+    return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
+  }
+}
+
+// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & narrow_copy_outf(const at::Tensor & self, int64_t dim, int64_t start, int64_t length, at::Tensor & out) {
+    return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & narrow_copy_outf(const at::Tensor & self, int64_t dim, int64_t start, int64_t length, at::Tensor & out) {
+    return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
+  }
+}
+
+// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & narrow_copy_symint_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
+    return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & narrow_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
+    return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
+  }
+}
+
+// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & narrow_copy_symint_outf(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {
+    return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & narrow_copy_outf(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {
+    return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
+  }
+}
+
+}
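
Aside: unlike at::narrow, which returns a view, narrow_copy materializes the slice into a new tensor (hence the out and SymInt variants above). An illustrative sketch, again assuming a program built against this libtorch:

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor t = at::arange(12, at::kFloat).reshape({3, 4});
      // Copy rows 1..2 (dim 0, start 1, length 2) into freshly allocated storage.
      at::Tensor slice = at::narrow_copy(t, /*dim=*/0, /*start=*/1, /*length=*/2);
      std::cout << slice << std::endl;
      return 0;
    }
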
venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_channel_shuffle_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor math_channel_shuffle(const at::Tensor & self, int64_t groups);
+TORCH_API at::Tensor channel_shuffle_cpu(const at::Tensor & self, int64_t groups);
+} // namespace native
+} // namespace at