Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes.
- ckpts/universal/global_step20/zero/20.attention.dense.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step20/zero/20.attention.dense.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ArrayRef.h +2 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Backtrace.h +2 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions.h +29 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions_inl.h +576 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CPUGeneratorImpl.h +49 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions_inl.h +614 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CachedTensorUtils.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CollapseDims.h +94 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions.h +29 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h +29 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions.h +29 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions_inl.h +500 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions.h +29 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Context.h +560 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Device.h +2 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Dispatch.h +808 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Dispatch_v2.h +186 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ExpandBase.h +30 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/FuncTorchTLS.h +46 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/FunctionalStorageImpl.h +126 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Functions.h +1427 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Generator.h +2 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/InferSize.h +87 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/LegacyBatchedFallback.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/LegacyBatchedTensorImpl.h +160 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapMode.h +26 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapTransforms.h +183 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h +31 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions_inl.h +324 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h +215 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/NativeFunctions.h +1317 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/NativeMetaFunctions.h +1303 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/NumericUtils.h +203 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/PTThreadPool.h +17 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/PadNd.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Parallel-inl.h +93 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h +52 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/RedispatchFunctions.h +0 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/SavedTensorHooks.h +52 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/Scalar.h +3 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ScalarOps.h +53 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/SmallVector.h +2 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorImpl.h +186 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/SparseTensorImpl.h +400 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/StorageUtils.h +49 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/TensorIndexing.h +735 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/TensorIterator.h +1002 -0
ckpts/universal/global_step20/zero/20.attention.dense.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9b7ff1c25aac1d083a74140789d1a2ed03035b1123f2a862d638bea8a8e8c57
+size 16778411
ckpts/universal/global_step20/zero/20.attention.dense.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:168a9feae7b126f0db6a8babd4adec260d368139c62403f8a129e491958bc2a3
+size 16778317
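
Both checkpoint tensors above are stored as Git LFS pointer files rather than raw binaries: each pointer records the LFS spec version, the SHA-256 of the real blob, and its size in bytes. As a minimal sketch of how such a pointer can be read (a hypothetical helper, not part of this repo):

#include <iostream>
#include <map>
#include <sstream>
#include <string>

// Hypothetical parser: a Git LFS pointer is a short text file of "key value" lines.
std::map<std::string, std::string> parse_lfs_pointer(const std::string& text) {
  std::map<std::string, std::string> fields;
  std::istringstream in(text);
  std::string line;
  while (std::getline(in, line)) {
    auto sp = line.find(' ');
    if (sp != std::string::npos)
      fields[line.substr(0, sp)] = line.substr(sp + 1);
  }
  return fields;
}

int main() {
  auto f = parse_lfs_pointer(
      "version https://git-lfs.github.com/spec/v1\n"
      "oid sha256:b9b7ff1c25aac1d083a74140789d1a2ed03035b1123f2a862d638bea8a8e8c57\n"
      "size 16778411\n");
  std::cout << f["oid"] << " (" << f["size"] << " bytes)\n";  // blob hash and size
}
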
venv/lib/python3.10/site-packages/torch/include/ATen/ArrayRef.h
ADDED
@@ -0,0 +1,2 @@
+#pragma once
+#include <c10/util/ArrayRef.h>
venv/lib/python3.10/site-packages/torch/include/ATen/Backtrace.h
ADDED
@@ -0,0 +1,2 @@
+#pragma once
+#include <ATen/core/Backtrace.h>
venv/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions.h
ADDED
@@ -0,0 +1,29 @@
+#include <ATen/core/TensorBody.h>
+
+// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+// Code introduced to avoid cyclic dependency in static dispatch is no longer
+// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+// to Operators.cpp for supporting multiple backends with multiple kernels.
+//
+// Note [Avoiding Include Cycles In Static Dispatch]
+// In order to avoid #include cycles in the static dispatch build, we've carefully split out
+// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+//
+// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+//   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+//   directly inlined into TensorBody.h.
+// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+//   which include functions that have defaultable optional<Tensor> arguments.
+//   That requires knowing the full Tensor class definition.
+//
+// We break the cycle by doing the following:
+// - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+// - CPUFunctions_inl.h includes everything else
+// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+//   and then it includes CPUFunctions_inl.h.
+// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+// - This also means that in the static dispatch build, CPUFunctions.h only needs to
+//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+#include <ATen/CPUFunctions_inl.h>
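
The note in CPUFunctions.h boils down to a reusable pattern for breaking header cycles: a thin umbrella header that only completes the heavyweight type, plus an _inl.h header holding everything that needs that complete type. A minimal sketch with hypothetical file names (Foo*, not PyTorch's actual headers):

// FooFunctions.h -- the "dummy" umbrella: complete the type, then pull in the rest.
#pragma once
#include "Foo.h"                 // full definition of the heavyweight Foo class
#include "FooFunctions_inl.h"    // safe now: Foo is already a complete type

// FooFunctions_inl.h -- declarations that need a complete Foo, e.g. defaulted args.
#pragma once
void process(const Foo& f, Foo fallback = Foo());

// client.cpp -- every consumer includes only the umbrella and never sees a cycle.
#include "FooFunctions.h"
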
venv/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions_inl.h
ADDED
@@ -0,0 +1,576 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+#error This change adds a dependency on all pytorch operators, meaning the \
+file will need to be re-compiled every time an operator is changed or added. \
+Consider including a specific operator from \
+<ATen/ops/{my_operator}_cpu_dispatch.h>. \
+See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+#endif
+
+#include <ATen/ops/_adaptive_avg_pool2d_cpu_dispatch.h>
+#include <ATen/ops/_adaptive_avg_pool2d_backward_cpu_dispatch.h>
+#include <ATen/ops/_adaptive_avg_pool3d_cpu_dispatch.h>
+#include <ATen/ops/_adaptive_avg_pool3d_backward_cpu_dispatch.h>
+#include <ATen/ops/_add_relu_cpu_dispatch.h>
+#include <ATen/ops/_addmm_activation_cpu_dispatch.h>
+#include <ATen/ops/_aminmax_cpu_dispatch.h>
+#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_cpu_dispatch.h>
+#include <ATen/ops/_amp_update_scale_cpu_dispatch.h>
+#include <ATen/ops/_assert_async_cpu_dispatch.h>
+#include <ATen/ops/_cdist_backward_cpu_dispatch.h>
+#include <ATen/ops/_cdist_forward_cpu_dispatch.h>
+#include <ATen/ops/_cholesky_solve_helper_cpu_dispatch.h>
+#include <ATen/ops/_compute_linear_combination_cpu_dispatch.h>
+#include <ATen/ops/_convert_indices_from_coo_to_csr_cpu_dispatch.h>
+#include <ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h>
+#include <ATen/ops/_convert_weight_to_int4pack_cpu_dispatch.h>
+#include <ATen/ops/_ctc_loss_cpu_dispatch.h>
+#include <ATen/ops/_ctc_loss_backward_cpu_dispatch.h>
+#include <ATen/ops/_cummax_helper_cpu_dispatch.h>
+#include <ATen/ops/_cummin_helper_cpu_dispatch.h>
+#include <ATen/ops/_dirichlet_grad_cpu_dispatch.h>
+#include <ATen/ops/_efficientzerotensor_cpu_dispatch.h>
+#include <ATen/ops/_embedding_bag_cpu_dispatch.h>
+#include <ATen/ops/_embedding_bag_dense_backward_cpu_dispatch.h>
+#include <ATen/ops/_embedding_bag_forward_only_cpu_dispatch.h>
+#include <ATen/ops/_embedding_bag_per_sample_weights_backward_cpu_dispatch.h>
+#include <ATen/ops/_empty_affine_quantized_cpu_dispatch.h>
+#include <ATen/ops/_empty_per_channel_affine_quantized_cpu_dispatch.h>
+#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_cpu_dispatch.h>
+#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_cpu_dispatch.h>
+#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_cpu_dispatch.h>
+#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cpu_dispatch.h>
+#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cpu_dispatch.h>
+#include <ATen/ops/_fft_c2c_cpu_dispatch.h>
+#include <ATen/ops/_fft_c2r_cpu_dispatch.h>
+#include <ATen/ops/_fft_r2c_cpu_dispatch.h>
+#include <ATen/ops/_foobar_cpu_dispatch.h>
+#include <ATen/ops/_foreach_abs_cpu_dispatch.h>
+#include <ATen/ops/_foreach_acos_cpu_dispatch.h>
+#include <ATen/ops/_foreach_add_cpu_dispatch.h>
+#include <ATen/ops/_foreach_addcdiv_cpu_dispatch.h>
+#include <ATen/ops/_foreach_addcmul_cpu_dispatch.h>
+#include <ATen/ops/_foreach_asin_cpu_dispatch.h>
+#include <ATen/ops/_foreach_atan_cpu_dispatch.h>
+#include <ATen/ops/_foreach_ceil_cpu_dispatch.h>
+#include <ATen/ops/_foreach_clamp_max_cpu_dispatch.h>
+#include <ATen/ops/_foreach_clamp_min_cpu_dispatch.h>
+#include <ATen/ops/_foreach_copy_cpu_dispatch.h>
+#include <ATen/ops/_foreach_cos_cpu_dispatch.h>
+#include <ATen/ops/_foreach_cosh_cpu_dispatch.h>
+#include <ATen/ops/_foreach_div_cpu_dispatch.h>
+#include <ATen/ops/_foreach_erf_cpu_dispatch.h>
+#include <ATen/ops/_foreach_erfc_cpu_dispatch.h>
+#include <ATen/ops/_foreach_exp_cpu_dispatch.h>
+#include <ATen/ops/_foreach_expm1_cpu_dispatch.h>
+#include <ATen/ops/_foreach_floor_cpu_dispatch.h>
+#include <ATen/ops/_foreach_frac_cpu_dispatch.h>
+#include <ATen/ops/_foreach_lerp_cpu_dispatch.h>
+#include <ATen/ops/_foreach_lgamma_cpu_dispatch.h>
+#include <ATen/ops/_foreach_log_cpu_dispatch.h>
+#include <ATen/ops/_foreach_log10_cpu_dispatch.h>
+#include <ATen/ops/_foreach_log1p_cpu_dispatch.h>
+#include <ATen/ops/_foreach_log2_cpu_dispatch.h>
+#include <ATen/ops/_foreach_maximum_cpu_dispatch.h>
+#include <ATen/ops/_foreach_minimum_cpu_dispatch.h>
+#include <ATen/ops/_foreach_mul_cpu_dispatch.h>
+#include <ATen/ops/_foreach_neg_cpu_dispatch.h>
+#include <ATen/ops/_foreach_norm_cpu_dispatch.h>
+#include <ATen/ops/_foreach_pow_cpu_dispatch.h>
+#include <ATen/ops/_foreach_reciprocal_cpu_dispatch.h>
+#include <ATen/ops/_foreach_round_cpu_dispatch.h>
+#include <ATen/ops/_foreach_sigmoid_cpu_dispatch.h>
+#include <ATen/ops/_foreach_sign_cpu_dispatch.h>
+#include <ATen/ops/_foreach_sin_cpu_dispatch.h>
+#include <ATen/ops/_foreach_sinh_cpu_dispatch.h>
+#include <ATen/ops/_foreach_sqrt_cpu_dispatch.h>
+#include <ATen/ops/_foreach_sub_cpu_dispatch.h>
+#include <ATen/ops/_foreach_tan_cpu_dispatch.h>
+#include <ATen/ops/_foreach_tanh_cpu_dispatch.h>
+#include <ATen/ops/_foreach_trunc_cpu_dispatch.h>
+#include <ATen/ops/_foreach_zero_cpu_dispatch.h>
+#include <ATen/ops/_functional_assert_async_cpu_dispatch.h>
+#include <ATen/ops/_fused_moving_avg_obs_fq_helper_cpu_dispatch.h>
+#include <ATen/ops/_fused_sdp_choice_cpu_dispatch.h>
+#include <ATen/ops/_histogramdd_bin_edges_cpu_dispatch.h>
+#include <ATen/ops/_histogramdd_from_bin_cts_cpu_dispatch.h>
+#include <ATen/ops/_histogramdd_from_bin_tensors_cpu_dispatch.h>
+#include <ATen/ops/_index_put_impl_cpu_dispatch.h>
+#include <ATen/ops/_linalg_det_cpu_dispatch.h>
+#include <ATen/ops/_linalg_eigh_cpu_dispatch.h>
+#include <ATen/ops/_linalg_eigvals_cpu_dispatch.h>
+#include <ATen/ops/_linalg_slogdet_cpu_dispatch.h>
+#include <ATen/ops/_linalg_solve_ex_cpu_dispatch.h>
+#include <ATen/ops/_linalg_svd_cpu_dispatch.h>
+#include <ATen/ops/_local_scalar_dense_cpu_dispatch.h>
+#include <ATen/ops/_log_softmax_cpu_dispatch.h>
+#include <ATen/ops/_log_softmax_backward_data_cpu_dispatch.h>
+#include <ATen/ops/_logcumsumexp_cpu_dispatch.h>
+#include <ATen/ops/_make_dep_token_cpu_dispatch.h>
+#include <ATen/ops/_make_per_channel_quantized_tensor_cpu_dispatch.h>
+#include <ATen/ops/_make_per_tensor_quantized_tensor_cpu_dispatch.h>
+#include <ATen/ops/_masked_softmax_cpu_dispatch.h>
+#include <ATen/ops/_masked_softmax_backward_cpu_dispatch.h>
+#include <ATen/ops/_native_batch_norm_legit_cpu_dispatch.h>
+#include <ATen/ops/_native_multi_head_attention_cpu_dispatch.h>
+#include <ATen/ops/_nested_from_padded_cpu_dispatch.h>
+#include <ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h>
+#include <ATen/ops/_nested_tensor_from_mask_left_aligned_cpu_dispatch.h>
+#include <ATen/ops/_nested_view_from_buffer_cpu_dispatch.h>
+#include <ATen/ops/_pdist_backward_cpu_dispatch.h>
+#include <ATen/ops/_pdist_forward_cpu_dispatch.h>
+#include <ATen/ops/_prelu_kernel_cpu_dispatch.h>
+#include <ATen/ops/_prelu_kernel_backward_cpu_dispatch.h>
+#include <ATen/ops/_reshape_alias_cpu_dispatch.h>
+#include <ATen/ops/_sample_dirichlet_cpu_dispatch.h>
+#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_cpu_dispatch.h>
+#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_cpu_dispatch.h>
+#include <ATen/ops/_segment_reduce_backward_cpu_dispatch.h>
+#include <ATen/ops/_slow_conv2d_backward_cpu_dispatch.h>
+#include <ATen/ops/_slow_conv2d_forward_cpu_dispatch.h>
+#include <ATen/ops/_softmax_cpu_dispatch.h>
+#include <ATen/ops/_softmax_backward_data_cpu_dispatch.h>
+#include <ATen/ops/_spdiags_cpu_dispatch.h>
+#include <ATen/ops/_stack_cpu_dispatch.h>
+#include <ATen/ops/_standard_gamma_cpu_dispatch.h>
+#include <ATen/ops/_standard_gamma_grad_cpu_dispatch.h>
+#include <ATen/ops/_test_functorch_fallback_cpu_dispatch.h>
+#include <ATen/ops/_test_optional_filled_intlist_cpu_dispatch.h>
+#include <ATen/ops/_test_optional_floatlist_cpu_dispatch.h>
+#include <ATen/ops/_test_optional_intlist_cpu_dispatch.h>
+#include <ATen/ops/_to_sparse_cpu_dispatch.h>
+#include <ATen/ops/_to_sparse_bsc_cpu_dispatch.h>
+#include <ATen/ops/_to_sparse_bsr_cpu_dispatch.h>
+#include <ATen/ops/_to_sparse_csc_cpu_dispatch.h>
+#include <ATen/ops/_to_sparse_csr_cpu_dispatch.h>
+#include <ATen/ops/_transform_bias_rescale_qkv_cpu_dispatch.h>
+#include <ATen/ops/_transformer_encoder_layer_fwd_cpu_dispatch.h>
+#include <ATen/ops/_unique_cpu_dispatch.h>
+#include <ATen/ops/_unique2_cpu_dispatch.h>
+#include <ATen/ops/_upsample_bicubic2d_aa_cpu_dispatch.h>
+#include <ATen/ops/_upsample_bicubic2d_aa_backward_cpu_dispatch.h>
+#include <ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h>
+#include <ATen/ops/_upsample_bilinear2d_aa_backward_cpu_dispatch.h>
+#include <ATen/ops/_upsample_nearest_exact1d_cpu_dispatch.h>
+#include <ATen/ops/_upsample_nearest_exact1d_backward_cpu_dispatch.h>
+#include <ATen/ops/_upsample_nearest_exact2d_cpu_dispatch.h>
+#include <ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h>
+#include <ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h>
+#include <ATen/ops/_upsample_nearest_exact3d_backward_cpu_dispatch.h>
+#include <ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h>
+#include <ATen/ops/_weight_int4pack_mm_cpu_dispatch.h>
+#include <ATen/ops/_weight_int8pack_mm_cpu_dispatch.h>
+#include <ATen/ops/_weight_norm_interface_cpu_dispatch.h>
+#include <ATen/ops/_weight_norm_interface_backward_cpu_dispatch.h>
+#include <ATen/ops/abs_cpu_dispatch.h>
+#include <ATen/ops/acos_cpu_dispatch.h>
+#include <ATen/ops/acosh_cpu_dispatch.h>
+#include <ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h>
+#include <ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h>
+#include <ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h>
+#include <ATen/ops/adaptive_max_pool2d_cpu_dispatch.h>
+#include <ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h>
+#include <ATen/ops/adaptive_max_pool3d_cpu_dispatch.h>
+#include <ATen/ops/adaptive_max_pool3d_backward_cpu_dispatch.h>
+#include <ATen/ops/add_cpu_dispatch.h>
+#include <ATen/ops/addbmm_cpu_dispatch.h>
+#include <ATen/ops/addcdiv_cpu_dispatch.h>
+#include <ATen/ops/addcmul_cpu_dispatch.h>
+#include <ATen/ops/addmm_cpu_dispatch.h>
+#include <ATen/ops/addmv_cpu_dispatch.h>
+#include <ATen/ops/addr_cpu_dispatch.h>
+#include <ATen/ops/all_cpu_dispatch.h>
+#include <ATen/ops/amax_cpu_dispatch.h>
+#include <ATen/ops/amin_cpu_dispatch.h>
+#include <ATen/ops/aminmax_cpu_dispatch.h>
+#include <ATen/ops/angle_cpu_dispatch.h>
+#include <ATen/ops/any_cpu_dispatch.h>
+#include <ATen/ops/arange_cpu_dispatch.h>
+#include <ATen/ops/argmax_cpu_dispatch.h>
+#include <ATen/ops/argmin_cpu_dispatch.h>
+#include <ATen/ops/argsort_cpu_dispatch.h>
+#include <ATen/ops/as_strided_cpu_dispatch.h>
+#include <ATen/ops/asin_cpu_dispatch.h>
+#include <ATen/ops/asinh_cpu_dispatch.h>
+#include <ATen/ops/atan_cpu_dispatch.h>
+#include <ATen/ops/atan2_cpu_dispatch.h>
+#include <ATen/ops/atanh_cpu_dispatch.h>
+#include <ATen/ops/avg_pool2d_cpu_dispatch.h>
+#include <ATen/ops/avg_pool2d_backward_cpu_dispatch.h>
+#include <ATen/ops/avg_pool3d_cpu_dispatch.h>
+#include <ATen/ops/avg_pool3d_backward_cpu_dispatch.h>
+#include <ATen/ops/baddbmm_cpu_dispatch.h>
+#include <ATen/ops/batch_norm_update_stats_cpu_dispatch.h>
+#include <ATen/ops/bernoulli_cpu_dispatch.h>
+#include <ATen/ops/binary_cross_entropy_cpu_dispatch.h>
+#include <ATen/ops/binary_cross_entropy_backward_cpu_dispatch.h>
+#include <ATen/ops/bincount_cpu_dispatch.h>
+#include <ATen/ops/binomial_cpu_dispatch.h>
+#include <ATen/ops/bitwise_and_cpu_dispatch.h>
+#include <ATen/ops/bitwise_left_shift_cpu_dispatch.h>
+#include <ATen/ops/bitwise_not_cpu_dispatch.h>
+#include <ATen/ops/bitwise_or_cpu_dispatch.h>
+#include <ATen/ops/bitwise_right_shift_cpu_dispatch.h>
+#include <ATen/ops/bitwise_xor_cpu_dispatch.h>
+#include <ATen/ops/bmm_cpu_dispatch.h>
+#include <ATen/ops/bucketize_cpu_dispatch.h>
+#include <ATen/ops/cat_cpu_dispatch.h>
+#include <ATen/ops/cauchy_cpu_dispatch.h>
+#include <ATen/ops/ceil_cpu_dispatch.h>
+#include <ATen/ops/channel_shuffle_cpu_dispatch.h>
+#include <ATen/ops/cholesky_cpu_dispatch.h>
+#include <ATen/ops/cholesky_inverse_cpu_dispatch.h>
+#include <ATen/ops/clamp_cpu_dispatch.h>
+#include <ATen/ops/clamp_max_cpu_dispatch.h>
+#include <ATen/ops/clamp_min_cpu_dispatch.h>
+#include <ATen/ops/col2im_cpu_dispatch.h>
+#include <ATen/ops/complex_cpu_dispatch.h>
+#include <ATen/ops/conj_physical_cpu_dispatch.h>
+#include <ATen/ops/copysign_cpu_dispatch.h>
+#include <ATen/ops/cos_cpu_dispatch.h>
+#include <ATen/ops/cosh_cpu_dispatch.h>
+#include <ATen/ops/count_nonzero_cpu_dispatch.h>
+#include <ATen/ops/cumprod_cpu_dispatch.h>
+#include <ATen/ops/cumsum_cpu_dispatch.h>
+#include <ATen/ops/dense_dim_cpu_dispatch.h>
+#include <ATen/ops/dequantize_cpu_dispatch.h>
+#include <ATen/ops/digamma_cpu_dispatch.h>
+#include <ATen/ops/div_cpu_dispatch.h>
+#include <ATen/ops/dot_cpu_dispatch.h>
+#include <ATen/ops/elu_cpu_dispatch.h>
+#include <ATen/ops/elu_backward_cpu_dispatch.h>
+#include <ATen/ops/embedding_dense_backward_cpu_dispatch.h>
+#include <ATen/ops/embedding_renorm_cpu_dispatch.h>
+#include <ATen/ops/empty_cpu_dispatch.h>
+#include <ATen/ops/empty_strided_cpu_dispatch.h>
+#include <ATen/ops/eq_cpu_dispatch.h>
+#include <ATen/ops/equal_cpu_dispatch.h>
+#include <ATen/ops/erf_cpu_dispatch.h>
+#include <ATen/ops/erfc_cpu_dispatch.h>
+#include <ATen/ops/erfinv_cpu_dispatch.h>
+#include <ATen/ops/exp_cpu_dispatch.h>
+#include <ATen/ops/exp2_cpu_dispatch.h>
+#include <ATen/ops/expm1_cpu_dispatch.h>
+#include <ATen/ops/exponential_cpu_dispatch.h>
+#include <ATen/ops/eye_cpu_dispatch.h>
+#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_cpu_dispatch.h>
+#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h>
+#include <ATen/ops/fill_cpu_dispatch.h>
+#include <ATen/ops/flip_cpu_dispatch.h>
+#include <ATen/ops/floor_cpu_dispatch.h>
+#include <ATen/ops/floor_divide_cpu_dispatch.h>
+#include <ATen/ops/fmax_cpu_dispatch.h>
+#include <ATen/ops/fmin_cpu_dispatch.h>
+#include <ATen/ops/fmod_cpu_dispatch.h>
+#include <ATen/ops/frac_cpu_dispatch.h>
+#include <ATen/ops/fractional_max_pool2d_cpu_dispatch.h>
+#include <ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h>
+#include <ATen/ops/fractional_max_pool3d_cpu_dispatch.h>
+#include <ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h>
+#include <ATen/ops/frexp_cpu_dispatch.h>
+#include <ATen/ops/from_file_cpu_dispatch.h>
+#include <ATen/ops/gather_cpu_dispatch.h>
+#include <ATen/ops/gcd_cpu_dispatch.h>
+#include <ATen/ops/ge_cpu_dispatch.h>
+#include <ATen/ops/gelu_cpu_dispatch.h>
+#include <ATen/ops/gelu_backward_cpu_dispatch.h>
+#include <ATen/ops/geometric_cpu_dispatch.h>
+#include <ATen/ops/geqrf_cpu_dispatch.h>
+#include <ATen/ops/glu_cpu_dispatch.h>
+#include <ATen/ops/glu_backward_cpu_dispatch.h>
+#include <ATen/ops/glu_backward_jvp_cpu_dispatch.h>
+#include <ATen/ops/glu_jvp_cpu_dispatch.h>
+#include <ATen/ops/grid_sampler_2d_cpu_dispatch.h>
+#include <ATen/ops/grid_sampler_2d_backward_cpu_dispatch.h>
+#include <ATen/ops/grid_sampler_3d_cpu_dispatch.h>
+#include <ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h>
+#include <ATen/ops/gt_cpu_dispatch.h>
+#include <ATen/ops/hardshrink_cpu_dispatch.h>
+#include <ATen/ops/hardshrink_backward_cpu_dispatch.h>
+#include <ATen/ops/hardsigmoid_cpu_dispatch.h>
+#include <ATen/ops/hardsigmoid_backward_cpu_dispatch.h>
+#include <ATen/ops/hardswish_cpu_dispatch.h>
+#include <ATen/ops/hardswish_backward_cpu_dispatch.h>
+#include <ATen/ops/hardtanh_cpu_dispatch.h>
+#include <ATen/ops/hardtanh_backward_cpu_dispatch.h>
+#include <ATen/ops/heaviside_cpu_dispatch.h>
+#include <ATen/ops/histc_cpu_dispatch.h>
+#include <ATen/ops/histogram_cpu_dispatch.h>
+#include <ATen/ops/huber_loss_cpu_dispatch.h>
+#include <ATen/ops/huber_loss_backward_cpu_dispatch.h>
+#include <ATen/ops/hypot_cpu_dispatch.h>
+#include <ATen/ops/i0_cpu_dispatch.h>
+#include <ATen/ops/igamma_cpu_dispatch.h>
+#include <ATen/ops/igammac_cpu_dispatch.h>
+#include <ATen/ops/im2col_cpu_dispatch.h>
+#include <ATen/ops/index_cpu_dispatch.h>
+#include <ATen/ops/index_add_cpu_dispatch.h>
+#include <ATen/ops/index_copy_cpu_dispatch.h>
+#include <ATen/ops/index_fill_cpu_dispatch.h>
+#include <ATen/ops/index_reduce_cpu_dispatch.h>
+#include <ATen/ops/index_select_cpu_dispatch.h>
+#include <ATen/ops/is_set_to_cpu_dispatch.h>
+#include <ATen/ops/isin_cpu_dispatch.h>
+#include <ATen/ops/isnan_cpu_dispatch.h>
+#include <ATen/ops/isneginf_cpu_dispatch.h>
+#include <ATen/ops/isposinf_cpu_dispatch.h>
+#include <ATen/ops/kthvalue_cpu_dispatch.h>
+#include <ATen/ops/lcm_cpu_dispatch.h>
+#include <ATen/ops/le_cpu_dispatch.h>
+#include <ATen/ops/leaky_relu_cpu_dispatch.h>
+#include <ATen/ops/leaky_relu_backward_cpu_dispatch.h>
+#include <ATen/ops/lerp_cpu_dispatch.h>
+#include <ATen/ops/lgamma_cpu_dispatch.h>
+#include <ATen/ops/linalg_cholesky_ex_cpu_dispatch.h>
+#include <ATen/ops/linalg_cross_cpu_dispatch.h>
+#include <ATen/ops/linalg_eig_cpu_dispatch.h>
+#include <ATen/ops/linalg_eigvals_cpu_dispatch.h>
+#include <ATen/ops/linalg_householder_product_cpu_dispatch.h>
+#include <ATen/ops/linalg_inv_ex_cpu_dispatch.h>
+#include <ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h>
+#include <ATen/ops/linalg_ldl_solve_cpu_dispatch.h>
+#include <ATen/ops/linalg_lstsq_cpu_dispatch.h>
+#include <ATen/ops/linalg_lu_cpu_dispatch.h>
+#include <ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h>
+#include <ATen/ops/linalg_lu_solve_cpu_dispatch.h>
+#include <ATen/ops/linalg_matrix_exp_cpu_dispatch.h>
+#include <ATen/ops/linalg_qr_cpu_dispatch.h>
+#include <ATen/ops/linalg_solve_triangular_cpu_dispatch.h>
+#include <ATen/ops/linalg_vector_norm_cpu_dispatch.h>
+#include <ATen/ops/linspace_cpu_dispatch.h>
+#include <ATen/ops/log_cpu_dispatch.h>
+#include <ATen/ops/log10_cpu_dispatch.h>
+#include <ATen/ops/log1p_cpu_dispatch.h>
+#include <ATen/ops/log2_cpu_dispatch.h>
+#include <ATen/ops/log_normal_cpu_dispatch.h>
+#include <ATen/ops/log_sigmoid_backward_cpu_dispatch.h>
+#include <ATen/ops/log_sigmoid_forward_cpu_dispatch.h>
+#include <ATen/ops/logaddexp_cpu_dispatch.h>
+#include <ATen/ops/logaddexp2_cpu_dispatch.h>
+#include <ATen/ops/logical_and_cpu_dispatch.h>
+#include <ATen/ops/logical_not_cpu_dispatch.h>
+#include <ATen/ops/logical_or_cpu_dispatch.h>
+#include <ATen/ops/logical_xor_cpu_dispatch.h>
+#include <ATen/ops/logit_cpu_dispatch.h>
+#include <ATen/ops/logit_backward_cpu_dispatch.h>
+#include <ATen/ops/logspace_cpu_dispatch.h>
+#include <ATen/ops/lshift_cpu_dispatch.h>
+#include <ATen/ops/lt_cpu_dispatch.h>
+#include <ATen/ops/lu_unpack_cpu_dispatch.h>
+#include <ATen/ops/masked_fill_cpu_dispatch.h>
+#include <ATen/ops/masked_scatter_cpu_dispatch.h>
+#include <ATen/ops/masked_select_cpu_dispatch.h>
+#include <ATen/ops/max_cpu_dispatch.h>
+#include <ATen/ops/max_pool2d_with_indices_cpu_dispatch.h>
+#include <ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h>
+#include <ATen/ops/max_pool3d_with_indices_cpu_dispatch.h>
+#include <ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h>
+#include <ATen/ops/max_unpool2d_cpu_dispatch.h>
+#include <ATen/ops/max_unpool3d_cpu_dispatch.h>
+#include <ATen/ops/maximum_cpu_dispatch.h>
+#include <ATen/ops/mean_cpu_dispatch.h>
+#include <ATen/ops/median_cpu_dispatch.h>
+#include <ATen/ops/min_cpu_dispatch.h>
+#include <ATen/ops/minimum_cpu_dispatch.h>
+#include <ATen/ops/mish_cpu_dispatch.h>
+#include <ATen/ops/mish_backward_cpu_dispatch.h>
+#include <ATen/ops/mkldnn_rnn_layer_cpu_dispatch.h>
+#include <ATen/ops/mkldnn_rnn_layer_backward_cpu_dispatch.h>
+#include <ATen/ops/mm_cpu_dispatch.h>
+#include <ATen/ops/mode_cpu_dispatch.h>
+#include <ATen/ops/mse_loss_cpu_dispatch.h>
+#include <ATen/ops/mse_loss_backward_cpu_dispatch.h>
+#include <ATen/ops/mul_cpu_dispatch.h>
+#include <ATen/ops/multi_margin_loss_cpu_dispatch.h>
+#include <ATen/ops/multi_margin_loss_backward_cpu_dispatch.h>
+#include <ATen/ops/multilabel_margin_loss_backward_cpu_dispatch.h>
+#include <ATen/ops/multilabel_margin_loss_forward_cpu_dispatch.h>
+#include <ATen/ops/multinomial_cpu_dispatch.h>
+#include <ATen/ops/mvlgamma_cpu_dispatch.h>
+#include <ATen/ops/nan_to_num_cpu_dispatch.h>
+#include <ATen/ops/nanmedian_cpu_dispatch.h>
+#include <ATen/ops/nansum_cpu_dispatch.h>
+#include <ATen/ops/narrow_copy_cpu_dispatch.h>
+#include <ATen/ops/native_batch_norm_cpu_dispatch.h>
+#include <ATen/ops/native_batch_norm_backward_cpu_dispatch.h>
+#include <ATen/ops/native_channel_shuffle_cpu_dispatch.h>
+#include <ATen/ops/native_dropout_cpu_dispatch.h>
+#include <ATen/ops/native_dropout_backward_cpu_dispatch.h>
+#include <ATen/ops/native_group_norm_cpu_dispatch.h>
+#include <ATen/ops/native_group_norm_backward_cpu_dispatch.h>
+#include <ATen/ops/native_layer_norm_cpu_dispatch.h>
+#include <ATen/ops/native_layer_norm_backward_cpu_dispatch.h>
+#include <ATen/ops/ne_cpu_dispatch.h>
+#include <ATen/ops/neg_cpu_dispatch.h>
+#include <ATen/ops/nextafter_cpu_dispatch.h>
+#include <ATen/ops/nll_loss2d_backward_cpu_dispatch.h>
+#include <ATen/ops/nll_loss2d_forward_cpu_dispatch.h>
+#include <ATen/ops/nll_loss_backward_cpu_dispatch.h>
+#include <ATen/ops/nll_loss_forward_cpu_dispatch.h>
+#include <ATen/ops/nonzero_cpu_dispatch.h>
+#include <ATen/ops/nonzero_static_cpu_dispatch.h>
+#include <ATen/ops/norm_cpu_dispatch.h>
+#include <ATen/ops/normal_cpu_dispatch.h>
+#include <ATen/ops/ormqr_cpu_dispatch.h>
+#include <ATen/ops/pixel_shuffle_cpu_dispatch.h>
+#include <ATen/ops/pixel_unshuffle_cpu_dispatch.h>
+#include <ATen/ops/poisson_cpu_dispatch.h>
+#include <ATen/ops/polar_cpu_dispatch.h>
+#include <ATen/ops/polygamma_cpu_dispatch.h>
+#include <ATen/ops/pow_cpu_dispatch.h>
+#include <ATen/ops/prod_cpu_dispatch.h>
+#include <ATen/ops/put_cpu_dispatch.h>
+#include <ATen/ops/quantize_per_channel_cpu_dispatch.h>
+#include <ATen/ops/quantize_per_tensor_cpu_dispatch.h>
+#include <ATen/ops/quantize_per_tensor_dynamic_cpu_dispatch.h>
+#include <ATen/ops/random_cpu_dispatch.h>
+#include <ATen/ops/randperm_cpu_dispatch.h>
+#include <ATen/ops/range_cpu_dispatch.h>
+#include <ATen/ops/reciprocal_cpu_dispatch.h>
+#include <ATen/ops/reflection_pad1d_cpu_dispatch.h>
+#include <ATen/ops/reflection_pad1d_backward_cpu_dispatch.h>
+#include <ATen/ops/reflection_pad2d_cpu_dispatch.h>
+#include <ATen/ops/reflection_pad2d_backward_cpu_dispatch.h>
+#include <ATen/ops/reflection_pad3d_cpu_dispatch.h>
+#include <ATen/ops/reflection_pad3d_backward_cpu_dispatch.h>
+#include <ATen/ops/relu_cpu_dispatch.h>
+#include <ATen/ops/remainder_cpu_dispatch.h>
+#include <ATen/ops/renorm_cpu_dispatch.h>
+#include <ATen/ops/repeat_interleave_cpu_dispatch.h>
+#include <ATen/ops/replication_pad1d_cpu_dispatch.h>
+#include <ATen/ops/replication_pad1d_backward_cpu_dispatch.h>
+#include <ATen/ops/replication_pad2d_cpu_dispatch.h>
+#include <ATen/ops/replication_pad2d_backward_cpu_dispatch.h>
+#include <ATen/ops/replication_pad3d_cpu_dispatch.h>
+#include <ATen/ops/replication_pad3d_backward_cpu_dispatch.h>
+#include <ATen/ops/resize_cpu_dispatch.h>
+#include <ATen/ops/roll_cpu_dispatch.h>
+#include <ATen/ops/round_cpu_dispatch.h>
+#include <ATen/ops/rrelu_with_noise_cpu_dispatch.h>
+#include <ATen/ops/rshift_cpu_dispatch.h>
+#include <ATen/ops/rsqrt_cpu_dispatch.h>
+#include <ATen/ops/rsub_cpu_dispatch.h>
+#include <ATen/ops/scatter_cpu_dispatch.h>
+#include <ATen/ops/scatter_add_cpu_dispatch.h>
+#include <ATen/ops/scatter_reduce_cpu_dispatch.h>
+#include <ATen/ops/searchsorted_cpu_dispatch.h>
+#include <ATen/ops/segment_reduce_cpu_dispatch.h>
+#include <ATen/ops/set_cpu_dispatch.h>
+#include <ATen/ops/sgn_cpu_dispatch.h>
+#include <ATen/ops/sigmoid_cpu_dispatch.h>
+#include <ATen/ops/sigmoid_backward_cpu_dispatch.h>
+#include <ATen/ops/sign_cpu_dispatch.h>
+#include <ATen/ops/signbit_cpu_dispatch.h>
+#include <ATen/ops/silu_cpu_dispatch.h>
+#include <ATen/ops/silu_backward_cpu_dispatch.h>
+#include <ATen/ops/sin_cpu_dispatch.h>
+#include <ATen/ops/sinc_cpu_dispatch.h>
+#include <ATen/ops/sinh_cpu_dispatch.h>
+#include <ATen/ops/slow_conv3d_forward_cpu_dispatch.h>
+#include <ATen/ops/slow_conv_dilated2d_cpu_dispatch.h>
+#include <ATen/ops/slow_conv_dilated3d_cpu_dispatch.h>
+#include <ATen/ops/slow_conv_transpose2d_cpu_dispatch.h>
+#include <ATen/ops/slow_conv_transpose3d_cpu_dispatch.h>
+#include <ATen/ops/smooth_l1_loss_cpu_dispatch.h>
+#include <ATen/ops/smooth_l1_loss_backward_cpu_dispatch.h>
+#include <ATen/ops/softplus_cpu_dispatch.h>
+#include <ATen/ops/softplus_backward_cpu_dispatch.h>
+#include <ATen/ops/softshrink_cpu_dispatch.h>
+#include <ATen/ops/softshrink_backward_cpu_dispatch.h>
+#include <ATen/ops/sort_cpu_dispatch.h>
+#include <ATen/ops/sparse_dim_cpu_dispatch.h>
+#include <ATen/ops/special_airy_ai_cpu_dispatch.h>
+#include <ATen/ops/special_bessel_j0_cpu_dispatch.h>
+#include <ATen/ops/special_bessel_j1_cpu_dispatch.h>
+#include <ATen/ops/special_bessel_y0_cpu_dispatch.h>
+#include <ATen/ops/special_bessel_y1_cpu_dispatch.h>
+#include <ATen/ops/special_chebyshev_polynomial_t_cpu_dispatch.h>
+#include <ATen/ops/special_chebyshev_polynomial_u_cpu_dispatch.h>
+#include <ATen/ops/special_chebyshev_polynomial_v_cpu_dispatch.h>
+#include <ATen/ops/special_chebyshev_polynomial_w_cpu_dispatch.h>
+#include <ATen/ops/special_entr_cpu_dispatch.h>
+#include <ATen/ops/special_erfcx_cpu_dispatch.h>
+#include <ATen/ops/special_hermite_polynomial_h_cpu_dispatch.h>
+#include <ATen/ops/special_hermite_polynomial_he_cpu_dispatch.h>
+#include <ATen/ops/special_i0e_cpu_dispatch.h>
+#include <ATen/ops/special_i1_cpu_dispatch.h>
+#include <ATen/ops/special_i1e_cpu_dispatch.h>
+#include <ATen/ops/special_laguerre_polynomial_l_cpu_dispatch.h>
+#include <ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h>
+#include <ATen/ops/special_log_ndtr_cpu_dispatch.h>
+#include <ATen/ops/special_modified_bessel_i0_cpu_dispatch.h>
+#include <ATen/ops/special_modified_bessel_i1_cpu_dispatch.h>
+#include <ATen/ops/special_modified_bessel_k0_cpu_dispatch.h>
+#include <ATen/ops/special_modified_bessel_k1_cpu_dispatch.h>
+#include <ATen/ops/special_ndtri_cpu_dispatch.h>
+#include <ATen/ops/special_scaled_modified_bessel_k0_cpu_dispatch.h>
+#include <ATen/ops/special_scaled_modified_bessel_k1_cpu_dispatch.h>
+#include <ATen/ops/special_shifted_chebyshev_polynomial_t_cpu_dispatch.h>
+#include <ATen/ops/special_shifted_chebyshev_polynomial_u_cpu_dispatch.h>
+#include <ATen/ops/special_shifted_chebyshev_polynomial_v_cpu_dispatch.h>
+#include <ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h>
+#include <ATen/ops/special_spherical_bessel_j0_cpu_dispatch.h>
+#include <ATen/ops/special_xlog1py_cpu_dispatch.h>
+#include <ATen/ops/special_zeta_cpu_dispatch.h>
+#include <ATen/ops/sqrt_cpu_dispatch.h>
+#include <ATen/ops/sspaddmm_cpu_dispatch.h>
+#include <ATen/ops/std_cpu_dispatch.h>
+#include <ATen/ops/std_mean_cpu_dispatch.h>
+#include <ATen/ops/sub_cpu_dispatch.h>
+#include <ATen/ops/sum_cpu_dispatch.h>
+#include <ATen/ops/take_cpu_dispatch.h>
+#include <ATen/ops/tan_cpu_dispatch.h>
+#include <ATen/ops/tanh_cpu_dispatch.h>
+#include <ATen/ops/tanh_backward_cpu_dispatch.h>
+#include <ATen/ops/threshold_cpu_dispatch.h>
+#include <ATen/ops/threshold_backward_cpu_dispatch.h>
+#include <ATen/ops/to_mkldnn_cpu_dispatch.h>
+#include <ATen/ops/topk_cpu_dispatch.h>
+#include <ATen/ops/trace_cpu_dispatch.h>
+#include <ATen/ops/triangular_solve_cpu_dispatch.h>
+#include <ATen/ops/tril_cpu_dispatch.h>
+#include <ATen/ops/tril_indices_cpu_dispatch.h>
+#include <ATen/ops/triu_cpu_dispatch.h>
+#include <ATen/ops/triu_indices_cpu_dispatch.h>
+#include <ATen/ops/trunc_cpu_dispatch.h>
+#include <ATen/ops/unfold_cpu_dispatch.h>
+#include <ATen/ops/unfold_backward_cpu_dispatch.h>
+#include <ATen/ops/uniform_cpu_dispatch.h>
+#include <ATen/ops/unique_consecutive_cpu_dispatch.h>
+#include <ATen/ops/unique_dim_cpu_dispatch.h>
+#include <ATen/ops/unique_dim_consecutive_cpu_dispatch.h>
+#include <ATen/ops/upsample_bicubic2d_cpu_dispatch.h>
+#include <ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h>
+#include <ATen/ops/upsample_bilinear2d_cpu_dispatch.h>
+#include <ATen/ops/upsample_bilinear2d_backward_cpu_dispatch.h>
+#include <ATen/ops/upsample_linear1d_cpu_dispatch.h>
+#include <ATen/ops/upsample_linear1d_backward_cpu_dispatch.h>
+#include <ATen/ops/upsample_nearest1d_cpu_dispatch.h>
+#include <ATen/ops/upsample_nearest1d_backward_cpu_dispatch.h>
+#include <ATen/ops/upsample_nearest2d_cpu_dispatch.h>
+#include <ATen/ops/upsample_nearest2d_backward_cpu_dispatch.h>
+#include <ATen/ops/upsample_nearest3d_cpu_dispatch.h>
+#include <ATen/ops/upsample_nearest3d_backward_cpu_dispatch.h>
+#include <ATen/ops/upsample_trilinear3d_cpu_dispatch.h>
+#include <ATen/ops/upsample_trilinear3d_backward_cpu_dispatch.h>
+#include <ATen/ops/var_cpu_dispatch.h>
+#include <ATen/ops/var_mean_cpu_dispatch.h>
+#include <ATen/ops/vdot_cpu_dispatch.h>
+#include <ATen/ops/view_cpu_dispatch.h>
+#include <ATen/ops/view_as_complex_cpu_dispatch.h>
+#include <ATen/ops/view_as_real_cpu_dispatch.h>
+#include <ATen/ops/where_cpu_dispatch.h>
+#include <ATen/ops/xlogy_cpu_dispatch.h>
+#include <ATen/ops/zero_cpu_dispatch.h>
+
+
+
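
The #error guard near the top of CPUFunctions_inl.h enforces the per-operator-headers discipline from NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]: a translation unit that defines TORCH_ASSERT_ONLY_METHOD_OPERATORS must not include this catch-all header, or it would recompile whenever any operator changes. A sketch of what such a TU does instead (using add as an arbitrary example operator):

// Opt in to per-operator headers, then include only the operators actually used.
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/ops/add_cpu_dispatch.h>  // CPU fastpath declarations for add only

at::Tensor add_on_cpu(const at::Tensor& a, const at::Tensor& b) {
  return at::cpu::add(a, b);  // dispatch-key-specific entry point from the ops header
}
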
venv/lib/python3.10/site-packages/torch/include/ATen/CPUGeneratorImpl.h
ADDED
@@ -0,0 +1,49 @@
+#pragma once
+
+#include <ATen/core/Generator.h>
+#include <ATen/core/MT19937RNGEngine.h>
+#include <c10/core/GeneratorImpl.h>
+#include <c10/util/Optional.h>
+
+namespace at {
+
+struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
+  // Constructors
+  CPUGeneratorImpl(uint64_t seed_in = default_rng_seed_val);
+  ~CPUGeneratorImpl() override = default;
+
+  // CPUGeneratorImpl methods
+  std::shared_ptr<CPUGeneratorImpl> clone() const;
+  void set_current_seed(uint64_t seed) override;
+  void set_offset(uint64_t offset) override;
+  uint64_t get_offset() const override;
+  uint64_t current_seed() const override;
+  uint64_t seed() override;
+  void set_state(const c10::TensorImpl& new_state) override;
+  c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
+  static c10::DeviceType device_type();
+  uint32_t random();
+  uint64_t random64();
+  c10::optional<float> next_float_normal_sample();
+  c10::optional<double> next_double_normal_sample();
+  void set_next_float_normal_sample(c10::optional<float> randn);
+  void set_next_double_normal_sample(c10::optional<double> randn);
+  at::mt19937 engine();
+  void set_engine(at::mt19937 engine);
+
+ private:
+  CPUGeneratorImpl* clone_impl() const override;
+  at::mt19937 engine_;
+  c10::optional<float> next_float_normal_sample_;
+  c10::optional<double> next_double_normal_sample_;
+};
+
+namespace detail {
+
+TORCH_API const Generator& getDefaultCPUGenerator();
+TORCH_API Generator
+createCPUGenerator(uint64_t seed_val = default_rng_seed_val);
+
+} // namespace detail
+
+} // namespace at
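
CPUGeneratorImpl wraps a Mersenne Twister engine (at::mt19937) behind the common c10::GeneratorImpl interface, with an optional cached normal sample for each precision. A minimal usage sketch against the detail API declared above (assumes a libtorch build; error handling omitted):

#include <ATen/CPUGeneratorImpl.h>
#include <iostream>

int main() {
  // Fresh CPU generator with a fixed seed; draw raw 32- and 64-bit values.
  at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
  auto* impl = gen.get<at::CPUGeneratorImpl>();
  std::cout << impl->random() << " " << impl->random64() << "\n";

  // The default generator is shared process-wide; copies share the same impl,
  // so reseeding through a copy affects all ops that draw from it.
  at::Generator def = at::detail::getDefaultCPUGenerator();
  def.set_current_seed(42);
}
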
venv/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions_inl.h
ADDED
@@ -0,0 +1,614 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+#error This change adds a dependency on all pytorch operators, meaning the \
+file will need to be re-compiled every time an operator is changed or added. \
+Consider including a specific operator from \
+<ATen/ops/{my_operator}_cuda_dispatch.h>. \
+See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+#endif
+
+#include <ATen/ops/_adaptive_avg_pool2d_cuda_dispatch.h>
+#include <ATen/ops/_adaptive_avg_pool2d_backward_cuda_dispatch.h>
+#include <ATen/ops/_adaptive_avg_pool3d_cuda_dispatch.h>
+#include <ATen/ops/_adaptive_avg_pool3d_backward_cuda_dispatch.h>
+#include <ATen/ops/_addmm_activation_cuda_dispatch.h>
+#include <ATen/ops/_aminmax_cuda_dispatch.h>
+#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_cuda_dispatch.h>
+#include <ATen/ops/_amp_update_scale_cuda_dispatch.h>
+#include <ATen/ops/_assert_async_cuda_dispatch.h>
+#include <ATen/ops/_cdist_backward_cuda_dispatch.h>
+#include <ATen/ops/_cdist_forward_cuda_dispatch.h>
+#include <ATen/ops/_cholesky_solve_helper_cuda_dispatch.h>
+#include <ATen/ops/_compute_linear_combination_cuda_dispatch.h>
+#include <ATen/ops/_conv_depthwise2d_cuda_dispatch.h>
+#include <ATen/ops/_convert_indices_from_coo_to_csr_cuda_dispatch.h>
+#include <ATen/ops/_convert_indices_from_csr_to_coo_cuda_dispatch.h>
+#include <ATen/ops/_convert_weight_to_int4pack_cuda_dispatch.h>
+#include <ATen/ops/_cslt_compress_cuda_dispatch.h>
+#include <ATen/ops/_cslt_sparse_mm_cuda_dispatch.h>
+#include <ATen/ops/_cslt_sparse_mm_search_cuda_dispatch.h>
+#include <ATen/ops/_ctc_loss_cuda_dispatch.h>
+#include <ATen/ops/_ctc_loss_backward_cuda_dispatch.h>
+#include <ATen/ops/_cudnn_ctc_loss_cuda_dispatch.h>
+#include <ATen/ops/_cudnn_init_dropout_state_cuda_dispatch.h>
+#include <ATen/ops/_cudnn_rnn_cuda_dispatch.h>
+#include <ATen/ops/_cudnn_rnn_backward_cuda_dispatch.h>
+#include <ATen/ops/_cudnn_rnn_flatten_weight_cuda_dispatch.h>
+#include <ATen/ops/_cummax_helper_cuda_dispatch.h>
+#include <ATen/ops/_cummin_helper_cuda_dispatch.h>
+#include <ATen/ops/_dirichlet_grad_cuda_dispatch.h>
+#include <ATen/ops/_efficient_attention_backward_cuda_dispatch.h>
+#include <ATen/ops/_efficient_attention_forward_cuda_dispatch.h>
+#include <ATen/ops/_efficientzerotensor_cuda_dispatch.h>
+#include <ATen/ops/_embedding_bag_cuda_dispatch.h>
+#include <ATen/ops/_embedding_bag_dense_backward_cuda_dispatch.h>
+#include <ATen/ops/_embedding_bag_forward_only_cuda_dispatch.h>
+#include <ATen/ops/_embedding_bag_per_sample_weights_backward_cuda_dispatch.h>
+#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_cuda_dispatch.h>
+#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_cuda_dispatch.h>
+#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_cuda_dispatch.h>
+#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cuda_dispatch.h>
+#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cuda_dispatch.h>
+#include <ATen/ops/_fft_c2c_cuda_dispatch.h>
+#include <ATen/ops/_fft_c2r_cuda_dispatch.h>
+#include <ATen/ops/_fft_r2c_cuda_dispatch.h>
+#include <ATen/ops/_fill_mem_eff_dropout_mask_cuda_dispatch.h>
+#include <ATen/ops/_flash_attention_backward_cuda_dispatch.h>
+#include <ATen/ops/_flash_attention_forward_cuda_dispatch.h>
+#include <ATen/ops/_foreach_abs_cuda_dispatch.h>
+#include <ATen/ops/_foreach_acos_cuda_dispatch.h>
+#include <ATen/ops/_foreach_add_cuda_dispatch.h>
+#include <ATen/ops/_foreach_addcdiv_cuda_dispatch.h>
+#include <ATen/ops/_foreach_addcmul_cuda_dispatch.h>
+#include <ATen/ops/_foreach_asin_cuda_dispatch.h>
+#include <ATen/ops/_foreach_atan_cuda_dispatch.h>
+#include <ATen/ops/_foreach_ceil_cuda_dispatch.h>
+#include <ATen/ops/_foreach_clamp_max_cuda_dispatch.h>
+#include <ATen/ops/_foreach_clamp_min_cuda_dispatch.h>
+#include <ATen/ops/_foreach_copy_cuda_dispatch.h>
+#include <ATen/ops/_foreach_cos_cuda_dispatch.h>
+#include <ATen/ops/_foreach_cosh_cuda_dispatch.h>
+#include <ATen/ops/_foreach_div_cuda_dispatch.h>
+#include <ATen/ops/_foreach_erf_cuda_dispatch.h>
+#include <ATen/ops/_foreach_erfc_cuda_dispatch.h>
+#include <ATen/ops/_foreach_exp_cuda_dispatch.h>
+#include <ATen/ops/_foreach_expm1_cuda_dispatch.h>
+#include <ATen/ops/_foreach_floor_cuda_dispatch.h>
+#include <ATen/ops/_foreach_frac_cuda_dispatch.h>
+#include <ATen/ops/_foreach_lerp_cuda_dispatch.h>
+#include <ATen/ops/_foreach_lgamma_cuda_dispatch.h>
+#include <ATen/ops/_foreach_log_cuda_dispatch.h>
+#include <ATen/ops/_foreach_log10_cuda_dispatch.h>
+#include <ATen/ops/_foreach_log1p_cuda_dispatch.h>
+#include <ATen/ops/_foreach_log2_cuda_dispatch.h>
+#include <ATen/ops/_foreach_maximum_cuda_dispatch.h>
+#include <ATen/ops/_foreach_minimum_cuda_dispatch.h>
+#include <ATen/ops/_foreach_mul_cuda_dispatch.h>
+#include <ATen/ops/_foreach_neg_cuda_dispatch.h>
+#include <ATen/ops/_foreach_norm_cuda_dispatch.h>
+#include <ATen/ops/_foreach_pow_cuda_dispatch.h>
+#include <ATen/ops/_foreach_reciprocal_cuda_dispatch.h>
+#include <ATen/ops/_foreach_round_cuda_dispatch.h>
+#include <ATen/ops/_foreach_sigmoid_cuda_dispatch.h>
+#include <ATen/ops/_foreach_sign_cuda_dispatch.h>
+#include <ATen/ops/_foreach_sin_cuda_dispatch.h>
+#include <ATen/ops/_foreach_sinh_cuda_dispatch.h>
+#include <ATen/ops/_foreach_sqrt_cuda_dispatch.h>
+#include <ATen/ops/_foreach_sub_cuda_dispatch.h>
+#include <ATen/ops/_foreach_tan_cuda_dispatch.h>
+#include <ATen/ops/_foreach_tanh_cuda_dispatch.h>
+#include <ATen/ops/_foreach_trunc_cuda_dispatch.h>
+#include <ATen/ops/_foreach_zero_cuda_dispatch.h>
+#include <ATen/ops/_fused_adam_cuda_dispatch.h>
+#include <ATen/ops/_fused_adamw_cuda_dispatch.h>
+#include <ATen/ops/_fused_dropout_cuda_dispatch.h>
+#include <ATen/ops/_fused_moving_avg_obs_fq_helper_cuda_dispatch.h>
+#include <ATen/ops/_fused_sdp_choice_cuda_dispatch.h>
+#include <ATen/ops/_fused_sgd_cuda_dispatch.h>
+#include <ATen/ops/_index_put_impl_cuda_dispatch.h>
+#include <ATen/ops/_int_mm_cuda_dispatch.h>
+#include <ATen/ops/_linalg_det_cuda_dispatch.h>
+#include <ATen/ops/_linalg_eigh_cuda_dispatch.h>
+#include <ATen/ops/_linalg_eigvals_cuda_dispatch.h>
+#include <ATen/ops/_linalg_slogdet_cuda_dispatch.h>
+#include <ATen/ops/_linalg_solve_ex_cuda_dispatch.h>
+#include <ATen/ops/_linalg_svd_cuda_dispatch.h>
+#include <ATen/ops/_local_scalar_dense_cuda_dispatch.h>
+#include <ATen/ops/_log_softmax_cuda_dispatch.h>
+#include <ATen/ops/_log_softmax_backward_data_cuda_dispatch.h>
+#include <ATen/ops/_logcumsumexp_cuda_dispatch.h>
+#include <ATen/ops/_make_per_channel_quantized_tensor_cuda_dispatch.h>
+#include <ATen/ops/_make_per_tensor_quantized_tensor_cuda_dispatch.h>
+#include <ATen/ops/_masked_scale_cuda_dispatch.h>
+#include <ATen/ops/_masked_softmax_cuda_dispatch.h>
+#include <ATen/ops/_masked_softmax_backward_cuda_dispatch.h>
+#include <ATen/ops/_mixed_dtypes_linear_cuda_dispatch.h>
+#include <ATen/ops/_native_batch_norm_legit_cuda_dispatch.h>
+#include <ATen/ops/_native_multi_head_attention_cuda_dispatch.h>
+#include <ATen/ops/_nested_from_padded_cuda_dispatch.h>
+#include <ATen/ops/_nested_tensor_from_mask_cuda_dispatch.h>
+#include <ATen/ops/_nested_tensor_from_mask_left_aligned_cuda_dispatch.h>
+#include <ATen/ops/_nested_view_from_buffer_cuda_dispatch.h>
+#include <ATen/ops/_pdist_backward_cuda_dispatch.h>
+#include <ATen/ops/_pdist_forward_cuda_dispatch.h>
+#include <ATen/ops/_pin_memory_cuda_dispatch.h>
+#include <ATen/ops/_prelu_kernel_cuda_dispatch.h>
+#include <ATen/ops/_prelu_kernel_backward_cuda_dispatch.h>
+#include <ATen/ops/_reshape_alias_cuda_dispatch.h>
+#include <ATen/ops/_sample_dirichlet_cuda_dispatch.h>
+#include <ATen/ops/_scaled_dot_product_cudnn_attention_cuda_dispatch.h>
+#include <ATen/ops/_scaled_dot_product_efficient_attention_cuda_dispatch.h>
+#include <ATen/ops/_scaled_dot_product_efficient_attention_backward_cuda_dispatch.h>
+#include <ATen/ops/_scaled_dot_product_flash_attention_cuda_dispatch.h>
+#include <ATen/ops/_scaled_dot_product_flash_attention_backward_cuda_dispatch.h>
+#include <ATen/ops/_scaled_mm_cuda_dispatch.h>
+#include <ATen/ops/_segment_reduce_backward_cuda_dispatch.h>
+#include <ATen/ops/_slow_conv2d_backward_cuda_dispatch.h>
+#include <ATen/ops/_slow_conv2d_forward_cuda_dispatch.h>
+#include <ATen/ops/_softmax_cuda_dispatch.h>
+#include <ATen/ops/_softmax_backward_data_cuda_dispatch.h>
+#include <ATen/ops/_sparse_semi_structured_linear_cuda_dispatch.h>
+#include <ATen/ops/_standard_gamma_cuda_dispatch.h>
+#include <ATen/ops/_standard_gamma_grad_cuda_dispatch.h>
+#include <ATen/ops/_thnn_fused_gru_cell_cuda_dispatch.h>
+#include <ATen/ops/_thnn_fused_gru_cell_backward_cuda_dispatch.h>
+#include <ATen/ops/_thnn_fused_lstm_cell_cuda_dispatch.h>
+#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_cuda_dispatch.h>
+#include <ATen/ops/_to_sparse_cuda_dispatch.h>
+#include <ATen/ops/_to_sparse_bsc_cuda_dispatch.h>
+#include <ATen/ops/_to_sparse_bsr_cuda_dispatch.h>
+#include <ATen/ops/_to_sparse_csc_cuda_dispatch.h>
+#include <ATen/ops/_to_sparse_csr_cuda_dispatch.h>
+#include <ATen/ops/_to_sparse_semi_structured_cuda_dispatch.h>
+#include <ATen/ops/_transform_bias_rescale_qkv_cuda_dispatch.h>
+#include <ATen/ops/_transformer_encoder_layer_fwd_cuda_dispatch.h>
+#include <ATen/ops/_triton_multi_head_attention_cuda_dispatch.h>
+#include <ATen/ops/_triton_scaled_dot_attention_cuda_dispatch.h>
+#include <ATen/ops/_unique_cuda_dispatch.h>
+#include <ATen/ops/_unique2_cuda_dispatch.h>
+#include <ATen/ops/_upsample_bicubic2d_aa_cuda_dispatch.h>
+#include <ATen/ops/_upsample_bicubic2d_aa_backward_cuda_dispatch.h>
|
180 |
+
#include <ATen/ops/_upsample_bilinear2d_aa_cuda_dispatch.h>
|
181 |
+
#include <ATen/ops/_upsample_bilinear2d_aa_backward_cuda_dispatch.h>
|
182 |
+
#include <ATen/ops/_upsample_nearest_exact1d_cuda_dispatch.h>
|
183 |
+
#include <ATen/ops/_upsample_nearest_exact1d_backward_cuda_dispatch.h>
|
184 |
+
#include <ATen/ops/_upsample_nearest_exact2d_cuda_dispatch.h>
|
185 |
+
#include <ATen/ops/_upsample_nearest_exact2d_backward_cuda_dispatch.h>
|
186 |
+
#include <ATen/ops/_upsample_nearest_exact3d_cuda_dispatch.h>
|
187 |
+
#include <ATen/ops/_upsample_nearest_exact3d_backward_cuda_dispatch.h>
|
188 |
+
#include <ATen/ops/_use_cudnn_ctc_loss_cuda_dispatch.h>
|
189 |
+
#include <ATen/ops/_validate_compressed_sparse_indices_cuda_dispatch.h>
|
190 |
+
#include <ATen/ops/_weight_int4pack_mm_cuda_dispatch.h>
|
191 |
+
#include <ATen/ops/_weight_norm_interface_cuda_dispatch.h>
|
192 |
+
#include <ATen/ops/_weight_norm_interface_backward_cuda_dispatch.h>
|
193 |
+
#include <ATen/ops/abs_cuda_dispatch.h>
|
194 |
+
#include <ATen/ops/acos_cuda_dispatch.h>
|
195 |
+
#include <ATen/ops/acosh_cuda_dispatch.h>
|
196 |
+
#include <ATen/ops/adaptive_avg_pool2d_cuda_dispatch.h>
|
197 |
+
#include <ATen/ops/adaptive_avg_pool3d_cuda_dispatch.h>
|
198 |
+
#include <ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h>
|
199 |
+
#include <ATen/ops/adaptive_max_pool2d_cuda_dispatch.h>
|
200 |
+
#include <ATen/ops/adaptive_max_pool2d_backward_cuda_dispatch.h>
|
201 |
+
#include <ATen/ops/adaptive_max_pool3d_cuda_dispatch.h>
|
202 |
+
#include <ATen/ops/adaptive_max_pool3d_backward_cuda_dispatch.h>
|
203 |
+
#include <ATen/ops/add_cuda_dispatch.h>
|
204 |
+
#include <ATen/ops/addbmm_cuda_dispatch.h>
|
205 |
+
#include <ATen/ops/addcdiv_cuda_dispatch.h>
|
206 |
+
#include <ATen/ops/addcmul_cuda_dispatch.h>
|
207 |
+
#include <ATen/ops/addmm_cuda_dispatch.h>
|
208 |
+
#include <ATen/ops/addmv_cuda_dispatch.h>
|
209 |
+
#include <ATen/ops/addr_cuda_dispatch.h>
|
210 |
+
#include <ATen/ops/all_cuda_dispatch.h>
|
211 |
+
#include <ATen/ops/amax_cuda_dispatch.h>
|
212 |
+
#include <ATen/ops/amin_cuda_dispatch.h>
|
213 |
+
#include <ATen/ops/aminmax_cuda_dispatch.h>
|
214 |
+
#include <ATen/ops/angle_cuda_dispatch.h>
|
215 |
+
#include <ATen/ops/any_cuda_dispatch.h>
|
216 |
+
#include <ATen/ops/arange_cuda_dispatch.h>
|
217 |
+
#include <ATen/ops/argmax_cuda_dispatch.h>
|
218 |
+
#include <ATen/ops/argmin_cuda_dispatch.h>
|
219 |
+
#include <ATen/ops/argsort_cuda_dispatch.h>
|
220 |
+
#include <ATen/ops/as_strided_cuda_dispatch.h>
|
221 |
+
#include <ATen/ops/asin_cuda_dispatch.h>
|
222 |
+
#include <ATen/ops/asinh_cuda_dispatch.h>
|
223 |
+
#include <ATen/ops/atan_cuda_dispatch.h>
|
224 |
+
#include <ATen/ops/atan2_cuda_dispatch.h>
|
225 |
+
#include <ATen/ops/atanh_cuda_dispatch.h>
|
226 |
+
#include <ATen/ops/avg_pool2d_cuda_dispatch.h>
|
227 |
+
#include <ATen/ops/avg_pool2d_backward_cuda_dispatch.h>
|
228 |
+
#include <ATen/ops/avg_pool3d_cuda_dispatch.h>
|
229 |
+
#include <ATen/ops/avg_pool3d_backward_cuda_dispatch.h>
|
230 |
+
#include <ATen/ops/baddbmm_cuda_dispatch.h>
|
231 |
+
#include <ATen/ops/batch_norm_backward_elemt_cuda_dispatch.h>
|
232 |
+
#include <ATen/ops/batch_norm_backward_reduce_cuda_dispatch.h>
|
233 |
+
#include <ATen/ops/batch_norm_elemt_cuda_dispatch.h>
|
234 |
+
#include <ATen/ops/batch_norm_gather_stats_cuda_dispatch.h>
|
235 |
+
#include <ATen/ops/batch_norm_gather_stats_with_counts_cuda_dispatch.h>
|
236 |
+
#include <ATen/ops/batch_norm_stats_cuda_dispatch.h>
|
237 |
+
#include <ATen/ops/batch_norm_update_stats_cuda_dispatch.h>
|
238 |
+
#include <ATen/ops/bernoulli_cuda_dispatch.h>
|
239 |
+
#include <ATen/ops/binary_cross_entropy_cuda_dispatch.h>
|
240 |
+
#include <ATen/ops/binary_cross_entropy_backward_cuda_dispatch.h>
|
241 |
+
#include <ATen/ops/bincount_cuda_dispatch.h>
|
242 |
+
#include <ATen/ops/binomial_cuda_dispatch.h>
|
243 |
+
#include <ATen/ops/bitwise_and_cuda_dispatch.h>
|
244 |
+
#include <ATen/ops/bitwise_left_shift_cuda_dispatch.h>
|
245 |
+
#include <ATen/ops/bitwise_not_cuda_dispatch.h>
|
246 |
+
#include <ATen/ops/bitwise_or_cuda_dispatch.h>
|
247 |
+
#include <ATen/ops/bitwise_right_shift_cuda_dispatch.h>
|
248 |
+
#include <ATen/ops/bitwise_xor_cuda_dispatch.h>
|
249 |
+
#include <ATen/ops/bmm_cuda_dispatch.h>
|
250 |
+
#include <ATen/ops/bucketize_cuda_dispatch.h>
|
251 |
+
#include <ATen/ops/cat_cuda_dispatch.h>
|
252 |
+
#include <ATen/ops/cauchy_cuda_dispatch.h>
|
253 |
+
#include <ATen/ops/ceil_cuda_dispatch.h>
|
254 |
+
#include <ATen/ops/channel_shuffle_cuda_dispatch.h>
|
255 |
+
#include <ATen/ops/cholesky_cuda_dispatch.h>
|
256 |
+
#include <ATen/ops/cholesky_inverse_cuda_dispatch.h>
|
257 |
+
#include <ATen/ops/clamp_cuda_dispatch.h>
|
258 |
+
#include <ATen/ops/clamp_max_cuda_dispatch.h>
|
259 |
+
#include <ATen/ops/clamp_min_cuda_dispatch.h>
|
260 |
+
#include <ATen/ops/col2im_cuda_dispatch.h>
|
261 |
+
#include <ATen/ops/complex_cuda_dispatch.h>
|
262 |
+
#include <ATen/ops/conj_physical_cuda_dispatch.h>
|
263 |
+
#include <ATen/ops/conv_depthwise3d_cuda_dispatch.h>
|
264 |
+
#include <ATen/ops/convolution_backward_cuda_dispatch.h>
|
265 |
+
#include <ATen/ops/copysign_cuda_dispatch.h>
|
266 |
+
#include <ATen/ops/cos_cuda_dispatch.h>
|
267 |
+
#include <ATen/ops/cosh_cuda_dispatch.h>
|
268 |
+
#include <ATen/ops/count_nonzero_cuda_dispatch.h>
|
269 |
+
#include <ATen/ops/cudnn_affine_grid_generator_cuda_dispatch.h>
|
270 |
+
#include <ATen/ops/cudnn_affine_grid_generator_backward_cuda_dispatch.h>
|
271 |
+
#include <ATen/ops/cudnn_batch_norm_cuda_dispatch.h>
|
272 |
+
#include <ATen/ops/cudnn_batch_norm_backward_cuda_dispatch.h>
|
273 |
+
#include <ATen/ops/cudnn_convolution_cuda_dispatch.h>
|
274 |
+
#include <ATen/ops/cudnn_convolution_add_relu_cuda_dispatch.h>
|
275 |
+
#include <ATen/ops/cudnn_convolution_relu_cuda_dispatch.h>
|
276 |
+
#include <ATen/ops/cudnn_convolution_transpose_cuda_dispatch.h>
|
277 |
+
#include <ATen/ops/cudnn_grid_sampler_cuda_dispatch.h>
|
278 |
+
#include <ATen/ops/cudnn_grid_sampler_backward_cuda_dispatch.h>
|
279 |
+
#include <ATen/ops/cumprod_cuda_dispatch.h>
|
280 |
+
#include <ATen/ops/cumsum_cuda_dispatch.h>
|
281 |
+
#include <ATen/ops/dense_dim_cuda_dispatch.h>
|
282 |
+
#include <ATen/ops/dequantize_cuda_dispatch.h>
|
283 |
+
#include <ATen/ops/digamma_cuda_dispatch.h>
|
284 |
+
#include <ATen/ops/div_cuda_dispatch.h>
|
285 |
+
#include <ATen/ops/dot_cuda_dispatch.h>
|
286 |
+
#include <ATen/ops/elu_cuda_dispatch.h>
|
287 |
+
#include <ATen/ops/elu_backward_cuda_dispatch.h>
|
288 |
+
#include <ATen/ops/embedding_dense_backward_cuda_dispatch.h>
|
289 |
+
#include <ATen/ops/embedding_renorm_cuda_dispatch.h>
|
290 |
+
#include <ATen/ops/empty_cuda_dispatch.h>
|
291 |
+
#include <ATen/ops/empty_strided_cuda_dispatch.h>
|
292 |
+
#include <ATen/ops/eq_cuda_dispatch.h>
|
293 |
+
#include <ATen/ops/equal_cuda_dispatch.h>
|
294 |
+
#include <ATen/ops/erf_cuda_dispatch.h>
|
295 |
+
#include <ATen/ops/erfc_cuda_dispatch.h>
|
296 |
+
#include <ATen/ops/erfinv_cuda_dispatch.h>
|
297 |
+
#include <ATen/ops/exp_cuda_dispatch.h>
|
298 |
+
#include <ATen/ops/exp2_cuda_dispatch.h>
|
299 |
+
#include <ATen/ops/expm1_cuda_dispatch.h>
|
300 |
+
#include <ATen/ops/exponential_cuda_dispatch.h>
|
301 |
+
#include <ATen/ops/eye_cuda_dispatch.h>
|
302 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_cuda_dispatch.h>
|
303 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_cuda_dispatch.h>
|
304 |
+
#include <ATen/ops/fill_cuda_dispatch.h>
|
305 |
+
#include <ATen/ops/flip_cuda_dispatch.h>
|
306 |
+
#include <ATen/ops/floor_cuda_dispatch.h>
|
307 |
+
#include <ATen/ops/floor_divide_cuda_dispatch.h>
|
308 |
+
#include <ATen/ops/fmax_cuda_dispatch.h>
|
309 |
+
#include <ATen/ops/fmin_cuda_dispatch.h>
|
310 |
+
#include <ATen/ops/fmod_cuda_dispatch.h>
|
311 |
+
#include <ATen/ops/frac_cuda_dispatch.h>
|
312 |
+
#include <ATen/ops/fractional_max_pool2d_cuda_dispatch.h>
|
313 |
+
#include <ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h>
|
314 |
+
#include <ATen/ops/fractional_max_pool3d_cuda_dispatch.h>
|
315 |
+
#include <ATen/ops/fractional_max_pool3d_backward_cuda_dispatch.h>
|
316 |
+
#include <ATen/ops/frexp_cuda_dispatch.h>
|
317 |
+
#include <ATen/ops/gather_cuda_dispatch.h>
|
318 |
+
#include <ATen/ops/gcd_cuda_dispatch.h>
|
319 |
+
#include <ATen/ops/ge_cuda_dispatch.h>
|
320 |
+
#include <ATen/ops/gelu_cuda_dispatch.h>
|
321 |
+
#include <ATen/ops/gelu_backward_cuda_dispatch.h>
|
322 |
+
#include <ATen/ops/geometric_cuda_dispatch.h>
|
323 |
+
#include <ATen/ops/geqrf_cuda_dispatch.h>
|
324 |
+
#include <ATen/ops/glu_cuda_dispatch.h>
|
325 |
+
#include <ATen/ops/glu_backward_cuda_dispatch.h>
|
326 |
+
#include <ATen/ops/glu_backward_jvp_cuda_dispatch.h>
|
327 |
+
#include <ATen/ops/glu_jvp_cuda_dispatch.h>
|
328 |
+
#include <ATen/ops/grid_sampler_2d_cuda_dispatch.h>
|
329 |
+
#include <ATen/ops/grid_sampler_2d_backward_cuda_dispatch.h>
|
330 |
+
#include <ATen/ops/grid_sampler_3d_cuda_dispatch.h>
|
331 |
+
#include <ATen/ops/grid_sampler_3d_backward_cuda_dispatch.h>
|
332 |
+
#include <ATen/ops/gt_cuda_dispatch.h>
|
333 |
+
#include <ATen/ops/hardshrink_cuda_dispatch.h>
|
334 |
+
#include <ATen/ops/hardshrink_backward_cuda_dispatch.h>
|
335 |
+
#include <ATen/ops/hardsigmoid_cuda_dispatch.h>
|
336 |
+
#include <ATen/ops/hardsigmoid_backward_cuda_dispatch.h>
|
337 |
+
#include <ATen/ops/hardswish_cuda_dispatch.h>
|
338 |
+
#include <ATen/ops/hardswish_backward_cuda_dispatch.h>
|
339 |
+
#include <ATen/ops/hardtanh_cuda_dispatch.h>
|
340 |
+
#include <ATen/ops/hardtanh_backward_cuda_dispatch.h>
|
341 |
+
#include <ATen/ops/heaviside_cuda_dispatch.h>
|
342 |
+
#include <ATen/ops/histc_cuda_dispatch.h>
|
343 |
+
#include <ATen/ops/huber_loss_cuda_dispatch.h>
|
344 |
+
#include <ATen/ops/huber_loss_backward_cuda_dispatch.h>
|
345 |
+
#include <ATen/ops/hypot_cuda_dispatch.h>
|
346 |
+
#include <ATen/ops/i0_cuda_dispatch.h>
|
347 |
+
#include <ATen/ops/igamma_cuda_dispatch.h>
|
348 |
+
#include <ATen/ops/igammac_cuda_dispatch.h>
|
349 |
+
#include <ATen/ops/im2col_cuda_dispatch.h>
|
350 |
+
#include <ATen/ops/index_cuda_dispatch.h>
|
351 |
+
#include <ATen/ops/index_add_cuda_dispatch.h>
|
352 |
+
#include <ATen/ops/index_copy_cuda_dispatch.h>
|
353 |
+
#include <ATen/ops/index_fill_cuda_dispatch.h>
|
354 |
+
#include <ATen/ops/index_reduce_cuda_dispatch.h>
|
355 |
+
#include <ATen/ops/index_select_cuda_dispatch.h>
|
356 |
+
#include <ATen/ops/is_pinned_cuda_dispatch.h>
|
357 |
+
#include <ATen/ops/is_set_to_cuda_dispatch.h>
|
358 |
+
#include <ATen/ops/isin_cuda_dispatch.h>
|
359 |
+
#include <ATen/ops/isnan_cuda_dispatch.h>
|
360 |
+
#include <ATen/ops/isneginf_cuda_dispatch.h>
|
361 |
+
#include <ATen/ops/isposinf_cuda_dispatch.h>
|
362 |
+
#include <ATen/ops/kthvalue_cuda_dispatch.h>
|
363 |
+
#include <ATen/ops/lcm_cuda_dispatch.h>
|
364 |
+
#include <ATen/ops/le_cuda_dispatch.h>
|
365 |
+
#include <ATen/ops/leaky_relu_cuda_dispatch.h>
|
366 |
+
#include <ATen/ops/leaky_relu_backward_cuda_dispatch.h>
|
367 |
+
#include <ATen/ops/lerp_cuda_dispatch.h>
|
368 |
+
#include <ATen/ops/lgamma_cuda_dispatch.h>
|
369 |
+
#include <ATen/ops/linalg_cholesky_ex_cuda_dispatch.h>
|
370 |
+
#include <ATen/ops/linalg_cross_cuda_dispatch.h>
|
371 |
+
#include <ATen/ops/linalg_eig_cuda_dispatch.h>
|
372 |
+
#include <ATen/ops/linalg_eigvals_cuda_dispatch.h>
|
373 |
+
#include <ATen/ops/linalg_householder_product_cuda_dispatch.h>
|
374 |
+
#include <ATen/ops/linalg_inv_ex_cuda_dispatch.h>
|
375 |
+
#include <ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h>
|
376 |
+
#include <ATen/ops/linalg_ldl_solve_cuda_dispatch.h>
|
377 |
+
#include <ATen/ops/linalg_lstsq_cuda_dispatch.h>
|
378 |
+
#include <ATen/ops/linalg_lu_cuda_dispatch.h>
|
379 |
+
#include <ATen/ops/linalg_lu_factor_ex_cuda_dispatch.h>
|
380 |
+
#include <ATen/ops/linalg_lu_solve_cuda_dispatch.h>
|
381 |
+
#include <ATen/ops/linalg_matrix_exp_cuda_dispatch.h>
|
382 |
+
#include <ATen/ops/linalg_qr_cuda_dispatch.h>
|
383 |
+
#include <ATen/ops/linalg_solve_triangular_cuda_dispatch.h>
|
384 |
+
#include <ATen/ops/linalg_vector_norm_cuda_dispatch.h>
|
385 |
+
#include <ATen/ops/linspace_cuda_dispatch.h>
|
386 |
+
#include <ATen/ops/log_cuda_dispatch.h>
|
387 |
+
#include <ATen/ops/log10_cuda_dispatch.h>
|
388 |
+
#include <ATen/ops/log1p_cuda_dispatch.h>
|
389 |
+
#include <ATen/ops/log2_cuda_dispatch.h>
|
390 |
+
#include <ATen/ops/log_normal_cuda_dispatch.h>
|
391 |
+
#include <ATen/ops/log_sigmoid_backward_cuda_dispatch.h>
|
392 |
+
#include <ATen/ops/log_sigmoid_forward_cuda_dispatch.h>
|
393 |
+
#include <ATen/ops/logaddexp_cuda_dispatch.h>
|
394 |
+
#include <ATen/ops/logaddexp2_cuda_dispatch.h>
|
395 |
+
#include <ATen/ops/logical_and_cuda_dispatch.h>
|
396 |
+
#include <ATen/ops/logical_not_cuda_dispatch.h>
|
397 |
+
#include <ATen/ops/logical_or_cuda_dispatch.h>
|
398 |
+
#include <ATen/ops/logical_xor_cuda_dispatch.h>
|
399 |
+
#include <ATen/ops/logit_cuda_dispatch.h>
|
400 |
+
#include <ATen/ops/logit_backward_cuda_dispatch.h>
|
401 |
+
#include <ATen/ops/logspace_cuda_dispatch.h>
|
402 |
+
#include <ATen/ops/lshift_cuda_dispatch.h>
|
403 |
+
#include <ATen/ops/lt_cuda_dispatch.h>
|
404 |
+
#include <ATen/ops/lu_unpack_cuda_dispatch.h>
|
405 |
+
#include <ATen/ops/masked_fill_cuda_dispatch.h>
|
406 |
+
#include <ATen/ops/masked_scatter_cuda_dispatch.h>
|
407 |
+
#include <ATen/ops/masked_select_cuda_dispatch.h>
|
408 |
+
#include <ATen/ops/max_cuda_dispatch.h>
|
409 |
+
#include <ATen/ops/max_pool2d_with_indices_cuda_dispatch.h>
|
410 |
+
#include <ATen/ops/max_pool2d_with_indices_backward_cuda_dispatch.h>
|
411 |
+
#include <ATen/ops/max_pool3d_with_indices_cuda_dispatch.h>
|
412 |
+
#include <ATen/ops/max_pool3d_with_indices_backward_cuda_dispatch.h>
|
413 |
+
#include <ATen/ops/max_unpool2d_cuda_dispatch.h>
|
414 |
+
#include <ATen/ops/max_unpool3d_cuda_dispatch.h>
|
415 |
+
#include <ATen/ops/maximum_cuda_dispatch.h>
|
416 |
+
#include <ATen/ops/mean_cuda_dispatch.h>
|
417 |
+
#include <ATen/ops/median_cuda_dispatch.h>
|
418 |
+
#include <ATen/ops/min_cuda_dispatch.h>
|
419 |
+
#include <ATen/ops/minimum_cuda_dispatch.h>
|
420 |
+
#include <ATen/ops/miopen_batch_norm_cuda_dispatch.h>
|
421 |
+
#include <ATen/ops/miopen_batch_norm_backward_cuda_dispatch.h>
|
422 |
+
#include <ATen/ops/miopen_convolution_cuda_dispatch.h>
|
423 |
+
#include <ATen/ops/miopen_convolution_add_relu_cuda_dispatch.h>
|
424 |
+
#include <ATen/ops/miopen_convolution_relu_cuda_dispatch.h>
|
425 |
+
#include <ATen/ops/miopen_convolution_transpose_cuda_dispatch.h>
|
426 |
+
#include <ATen/ops/miopen_depthwise_convolution_cuda_dispatch.h>
|
427 |
+
#include <ATen/ops/miopen_rnn_cuda_dispatch.h>
|
428 |
+
#include <ATen/ops/miopen_rnn_backward_cuda_dispatch.h>
|
429 |
+
#include <ATen/ops/mish_cuda_dispatch.h>
|
430 |
+
#include <ATen/ops/mish_backward_cuda_dispatch.h>
|
431 |
+
#include <ATen/ops/mm_cuda_dispatch.h>
|
432 |
+
#include <ATen/ops/mode_cuda_dispatch.h>
|
433 |
+
#include <ATen/ops/mse_loss_cuda_dispatch.h>
|
434 |
+
#include <ATen/ops/mse_loss_backward_cuda_dispatch.h>
|
435 |
+
#include <ATen/ops/mul_cuda_dispatch.h>
|
436 |
+
#include <ATen/ops/multi_margin_loss_cuda_dispatch.h>
|
437 |
+
#include <ATen/ops/multi_margin_loss_backward_cuda_dispatch.h>
|
438 |
+
#include <ATen/ops/multilabel_margin_loss_backward_cuda_dispatch.h>
|
439 |
+
#include <ATen/ops/multilabel_margin_loss_forward_cuda_dispatch.h>
|
440 |
+
#include <ATen/ops/multinomial_cuda_dispatch.h>
|
441 |
+
#include <ATen/ops/mvlgamma_cuda_dispatch.h>
|
442 |
+
#include <ATen/ops/nan_to_num_cuda_dispatch.h>
|
443 |
+
#include <ATen/ops/nanmedian_cuda_dispatch.h>
|
444 |
+
#include <ATen/ops/nansum_cuda_dispatch.h>
|
445 |
+
#include <ATen/ops/native_batch_norm_cuda_dispatch.h>
|
446 |
+
#include <ATen/ops/native_batch_norm_backward_cuda_dispatch.h>
|
447 |
+
#include <ATen/ops/native_dropout_cuda_dispatch.h>
|
448 |
+
#include <ATen/ops/native_dropout_backward_cuda_dispatch.h>
|
449 |
+
#include <ATen/ops/native_group_norm_cuda_dispatch.h>
|
450 |
+
#include <ATen/ops/native_group_norm_backward_cuda_dispatch.h>
|
451 |
+
#include <ATen/ops/native_layer_norm_cuda_dispatch.h>
|
452 |
+
#include <ATen/ops/native_layer_norm_backward_cuda_dispatch.h>
|
453 |
+
#include <ATen/ops/ne_cuda_dispatch.h>
|
454 |
+
#include <ATen/ops/neg_cuda_dispatch.h>
|
455 |
+
#include <ATen/ops/nextafter_cuda_dispatch.h>
|
456 |
+
#include <ATen/ops/nll_loss2d_backward_cuda_dispatch.h>
|
457 |
+
#include <ATen/ops/nll_loss2d_forward_cuda_dispatch.h>
|
458 |
+
#include <ATen/ops/nll_loss_backward_cuda_dispatch.h>
|
459 |
+
#include <ATen/ops/nll_loss_forward_cuda_dispatch.h>
|
460 |
+
#include <ATen/ops/nonzero_cuda_dispatch.h>
|
461 |
+
#include <ATen/ops/norm_cuda_dispatch.h>
|
462 |
+
#include <ATen/ops/normal_cuda_dispatch.h>
|
463 |
+
#include <ATen/ops/ormqr_cuda_dispatch.h>
|
464 |
+
#include <ATen/ops/poisson_cuda_dispatch.h>
|
465 |
+
#include <ATen/ops/polar_cuda_dispatch.h>
|
466 |
+
#include <ATen/ops/polygamma_cuda_dispatch.h>
|
467 |
+
#include <ATen/ops/pow_cuda_dispatch.h>
|
468 |
+
#include <ATen/ops/prod_cuda_dispatch.h>
|
469 |
+
#include <ATen/ops/put_cuda_dispatch.h>
|
470 |
+
#include <ATen/ops/quantize_per_channel_cuda_dispatch.h>
|
471 |
+
#include <ATen/ops/quantize_per_tensor_cuda_dispatch.h>
|
472 |
+
#include <ATen/ops/quantize_per_tensor_dynamic_cuda_dispatch.h>
|
473 |
+
#include <ATen/ops/random_cuda_dispatch.h>
|
474 |
+
#include <ATen/ops/randperm_cuda_dispatch.h>
|
475 |
+
#include <ATen/ops/range_cuda_dispatch.h>
|
476 |
+
#include <ATen/ops/reciprocal_cuda_dispatch.h>
|
477 |
+
#include <ATen/ops/record_stream_cuda_dispatch.h>
|
478 |
+
#include <ATen/ops/reflection_pad1d_cuda_dispatch.h>
|
479 |
+
#include <ATen/ops/reflection_pad1d_backward_cuda_dispatch.h>
|
480 |
+
#include <ATen/ops/reflection_pad2d_cuda_dispatch.h>
|
481 |
+
#include <ATen/ops/reflection_pad2d_backward_cuda_dispatch.h>
|
482 |
+
#include <ATen/ops/reflection_pad3d_cuda_dispatch.h>
|
483 |
+
#include <ATen/ops/reflection_pad3d_backward_cuda_dispatch.h>
|
484 |
+
#include <ATen/ops/relu_cuda_dispatch.h>
|
485 |
+
#include <ATen/ops/remainder_cuda_dispatch.h>
|
486 |
+
#include <ATen/ops/renorm_cuda_dispatch.h>
|
487 |
+
#include <ATen/ops/repeat_interleave_cuda_dispatch.h>
|
488 |
+
#include <ATen/ops/replication_pad1d_cuda_dispatch.h>
|
489 |
+
#include <ATen/ops/replication_pad1d_backward_cuda_dispatch.h>
|
490 |
+
#include <ATen/ops/replication_pad2d_cuda_dispatch.h>
|
491 |
+
#include <ATen/ops/replication_pad2d_backward_cuda_dispatch.h>
|
492 |
+
#include <ATen/ops/replication_pad3d_cuda_dispatch.h>
|
493 |
+
#include <ATen/ops/replication_pad3d_backward_cuda_dispatch.h>
|
494 |
+
#include <ATen/ops/resize_cuda_dispatch.h>
|
495 |
+
#include <ATen/ops/roll_cuda_dispatch.h>
|
496 |
+
#include <ATen/ops/round_cuda_dispatch.h>
|
497 |
+
#include <ATen/ops/rrelu_with_noise_cuda_dispatch.h>
|
498 |
+
#include <ATen/ops/rshift_cuda_dispatch.h>
|
499 |
+
#include <ATen/ops/rsqrt_cuda_dispatch.h>
|
500 |
+
#include <ATen/ops/rsub_cuda_dispatch.h>
|
501 |
+
#include <ATen/ops/scatter_cuda_dispatch.h>
|
502 |
+
#include <ATen/ops/scatter_add_cuda_dispatch.h>
|
503 |
+
#include <ATen/ops/scatter_reduce_cuda_dispatch.h>
|
504 |
+
#include <ATen/ops/searchsorted_cuda_dispatch.h>
|
505 |
+
#include <ATen/ops/segment_reduce_cuda_dispatch.h>
|
506 |
+
#include <ATen/ops/set_cuda_dispatch.h>
|
507 |
+
#include <ATen/ops/sgn_cuda_dispatch.h>
|
508 |
+
#include <ATen/ops/sigmoid_cuda_dispatch.h>
|
509 |
+
#include <ATen/ops/sigmoid_backward_cuda_dispatch.h>
|
510 |
+
#include <ATen/ops/sign_cuda_dispatch.h>
|
511 |
+
#include <ATen/ops/signbit_cuda_dispatch.h>
|
512 |
+
#include <ATen/ops/silu_cuda_dispatch.h>
|
513 |
+
#include <ATen/ops/silu_backward_cuda_dispatch.h>
|
514 |
+
#include <ATen/ops/sin_cuda_dispatch.h>
|
515 |
+
#include <ATen/ops/sinc_cuda_dispatch.h>
|
516 |
+
#include <ATen/ops/sinh_cuda_dispatch.h>
|
517 |
+
#include <ATen/ops/slow_conv_dilated2d_cuda_dispatch.h>
|
518 |
+
#include <ATen/ops/slow_conv_dilated3d_cuda_dispatch.h>
|
519 |
+
#include <ATen/ops/slow_conv_transpose2d_cuda_dispatch.h>
|
520 |
+
#include <ATen/ops/slow_conv_transpose3d_cuda_dispatch.h>
|
521 |
+
#include <ATen/ops/smooth_l1_loss_cuda_dispatch.h>
|
522 |
+
#include <ATen/ops/smooth_l1_loss_backward_cuda_dispatch.h>
|
523 |
+
#include <ATen/ops/softplus_cuda_dispatch.h>
|
524 |
+
#include <ATen/ops/softplus_backward_cuda_dispatch.h>
|
525 |
+
#include <ATen/ops/softshrink_cuda_dispatch.h>
|
526 |
+
#include <ATen/ops/softshrink_backward_cuda_dispatch.h>
|
527 |
+
#include <ATen/ops/sort_cuda_dispatch.h>
|
528 |
+
#include <ATen/ops/sparse_dim_cuda_dispatch.h>
|
529 |
+
#include <ATen/ops/special_airy_ai_cuda_dispatch.h>
|
530 |
+
#include <ATen/ops/special_bessel_j0_cuda_dispatch.h>
|
531 |
+
#include <ATen/ops/special_bessel_j1_cuda_dispatch.h>
|
532 |
+
#include <ATen/ops/special_bessel_y0_cuda_dispatch.h>
|
533 |
+
#include <ATen/ops/special_bessel_y1_cuda_dispatch.h>
|
534 |
+
#include <ATen/ops/special_chebyshev_polynomial_t_cuda_dispatch.h>
|
535 |
+
#include <ATen/ops/special_chebyshev_polynomial_u_cuda_dispatch.h>
|
536 |
+
#include <ATen/ops/special_chebyshev_polynomial_v_cuda_dispatch.h>
|
537 |
+
#include <ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h>
|
538 |
+
#include <ATen/ops/special_entr_cuda_dispatch.h>
|
539 |
+
#include <ATen/ops/special_erfcx_cuda_dispatch.h>
|
540 |
+
#include <ATen/ops/special_hermite_polynomial_h_cuda_dispatch.h>
|
541 |
+
#include <ATen/ops/special_hermite_polynomial_he_cuda_dispatch.h>
|
542 |
+
#include <ATen/ops/special_i0e_cuda_dispatch.h>
|
543 |
+
#include <ATen/ops/special_i1_cuda_dispatch.h>
|
544 |
+
#include <ATen/ops/special_i1e_cuda_dispatch.h>
|
545 |
+
#include <ATen/ops/special_laguerre_polynomial_l_cuda_dispatch.h>
|
546 |
+
#include <ATen/ops/special_legendre_polynomial_p_cuda_dispatch.h>
|
547 |
+
#include <ATen/ops/special_log_ndtr_cuda_dispatch.h>
|
548 |
+
#include <ATen/ops/special_modified_bessel_i0_cuda_dispatch.h>
|
549 |
+
#include <ATen/ops/special_modified_bessel_i1_cuda_dispatch.h>
|
550 |
+
#include <ATen/ops/special_modified_bessel_k0_cuda_dispatch.h>
|
551 |
+
#include <ATen/ops/special_modified_bessel_k1_cuda_dispatch.h>
|
552 |
+
#include <ATen/ops/special_ndtri_cuda_dispatch.h>
|
553 |
+
#include <ATen/ops/special_scaled_modified_bessel_k0_cuda_dispatch.h>
|
554 |
+
#include <ATen/ops/special_scaled_modified_bessel_k1_cuda_dispatch.h>
|
555 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_cuda_dispatch.h>
|
556 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_cuda_dispatch.h>
|
557 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_cuda_dispatch.h>
|
558 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_cuda_dispatch.h>
|
559 |
+
#include <ATen/ops/special_spherical_bessel_j0_cuda_dispatch.h>
|
560 |
+
#include <ATen/ops/special_xlog1py_cuda_dispatch.h>
|
561 |
+
#include <ATen/ops/special_zeta_cuda_dispatch.h>
|
562 |
+
#include <ATen/ops/split_with_sizes_copy_cuda_dispatch.h>
|
563 |
+
#include <ATen/ops/sqrt_cuda_dispatch.h>
|
564 |
+
#include <ATen/ops/sspaddmm_cuda_dispatch.h>
|
565 |
+
#include <ATen/ops/std_cuda_dispatch.h>
|
566 |
+
#include <ATen/ops/std_mean_cuda_dispatch.h>
|
567 |
+
#include <ATen/ops/sub_cuda_dispatch.h>
|
568 |
+
#include <ATen/ops/sum_cuda_dispatch.h>
|
569 |
+
#include <ATen/ops/take_cuda_dispatch.h>
|
570 |
+
#include <ATen/ops/tan_cuda_dispatch.h>
|
571 |
+
#include <ATen/ops/tanh_cuda_dispatch.h>
|
572 |
+
#include <ATen/ops/tanh_backward_cuda_dispatch.h>
|
573 |
+
#include <ATen/ops/threshold_cuda_dispatch.h>
|
574 |
+
#include <ATen/ops/threshold_backward_cuda_dispatch.h>
|
575 |
+
#include <ATen/ops/topk_cuda_dispatch.h>
|
576 |
+
#include <ATen/ops/trace_cuda_dispatch.h>
|
577 |
+
#include <ATen/ops/triangular_solve_cuda_dispatch.h>
|
578 |
+
#include <ATen/ops/tril_cuda_dispatch.h>
|
579 |
+
#include <ATen/ops/tril_indices_cuda_dispatch.h>
|
580 |
+
#include <ATen/ops/triu_cuda_dispatch.h>
|
581 |
+
#include <ATen/ops/triu_indices_cuda_dispatch.h>
|
582 |
+
#include <ATen/ops/trunc_cuda_dispatch.h>
|
583 |
+
#include <ATen/ops/unfold_cuda_dispatch.h>
|
584 |
+
#include <ATen/ops/unfold_backward_cuda_dispatch.h>
|
585 |
+
#include <ATen/ops/uniform_cuda_dispatch.h>
|
586 |
+
#include <ATen/ops/unique_consecutive_cuda_dispatch.h>
|
587 |
+
#include <ATen/ops/unique_dim_cuda_dispatch.h>
|
588 |
+
#include <ATen/ops/unique_dim_consecutive_cuda_dispatch.h>
|
589 |
+
#include <ATen/ops/upsample_bicubic2d_cuda_dispatch.h>
|
590 |
+
#include <ATen/ops/upsample_bicubic2d_backward_cuda_dispatch.h>
|
591 |
+
#include <ATen/ops/upsample_bilinear2d_cuda_dispatch.h>
|
592 |
+
#include <ATen/ops/upsample_bilinear2d_backward_cuda_dispatch.h>
|
593 |
+
#include <ATen/ops/upsample_linear1d_cuda_dispatch.h>
|
594 |
+
#include <ATen/ops/upsample_linear1d_backward_cuda_dispatch.h>
|
595 |
+
#include <ATen/ops/upsample_nearest1d_cuda_dispatch.h>
|
596 |
+
#include <ATen/ops/upsample_nearest1d_backward_cuda_dispatch.h>
|
597 |
+
#include <ATen/ops/upsample_nearest2d_cuda_dispatch.h>
|
598 |
+
#include <ATen/ops/upsample_nearest2d_backward_cuda_dispatch.h>
|
599 |
+
#include <ATen/ops/upsample_nearest3d_cuda_dispatch.h>
|
600 |
+
#include <ATen/ops/upsample_nearest3d_backward_cuda_dispatch.h>
|
601 |
+
#include <ATen/ops/upsample_trilinear3d_cuda_dispatch.h>
|
602 |
+
#include <ATen/ops/upsample_trilinear3d_backward_cuda_dispatch.h>
|
603 |
+
#include <ATen/ops/var_cuda_dispatch.h>
|
604 |
+
#include <ATen/ops/var_mean_cuda_dispatch.h>
|
605 |
+
#include <ATen/ops/vdot_cuda_dispatch.h>
|
606 |
+
#include <ATen/ops/view_cuda_dispatch.h>
|
607 |
+
#include <ATen/ops/view_as_complex_cuda_dispatch.h>
|
608 |
+
#include <ATen/ops/view_as_real_cuda_dispatch.h>
|
609 |
+
#include <ATen/ops/where_cuda_dispatch.h>
|
610 |
+
#include <ATen/ops/xlogy_cuda_dispatch.h>
|
611 |
+
#include <ATen/ops/zero_cuda_dispatch.h>
|
612 |
+
|
613 |
+
|
614 |
+
|
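Each `*_cuda_dispatch.h` header above declares the CUDA fastpath for one operator in the `at::cuda` namespace. A minimal sketch (not part of the diff) of calling one directly, assuming a CUDA-enabled libtorch build with an available device; the values are illustrative only:

// Minimal sketch: the per-operator header exposes at::cuda::add, which
// calls the registered CUDA kernel directly, skipping the dispatcher.
#include <ATen/ATen.h>
#include <ATen/ops/add_cuda_dispatch.h>

int main() {
  at::Tensor a = at::ones({4}, at::kCUDA);
  at::Tensor b = at::ones({4}, at::kCUDA);
  // Both inputs must already live on the CUDA device; no device checks
  // or autograd bookkeeping happen on this path.
  at::Tensor c = at::cuda::add(a, b, /*alpha=*/1);
  return c.sum().item<float>() == 8.0f ? 0 : 1;
}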
venv/lib/python3.10/site-packages/torch/include/ATen/CachedTensorUtils.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+
+#include <ATen/ATen.h>
+
+namespace at::caching {
+
+// Some systems (just cudagraphs currently) will persist a static tensor output
+// whose TensorImpl does not change across iterations. For these tensors caching
+// dtype conversions is invalid. Additionally, there will be an extra reference
+// count to these cached tensors that would prevent buffer inplacing and other
+// checks on tensor uniqueness. If we are not using these systems the enabled
+// flag will be false and we will avoid the hash lookup.
+
+TORCH_API bool is_cached_tensor(const at::Tensor& t);
+TORCH_API void add_cached_tensor(const at::Tensor& t);
+TORCH_API void remove_cached_tensor(const at::Tensor& t);
+TORCH_API void set_cached_tensors_enabled(bool enable);
+
+// For gradient buffer stealing we will adjust the use count of tensors
+// which are persisted by cudagraphs, just as we need to adjust reference
+// count of tensors with hooks.
+TORCH_API size_t adjusted_use_count(const at::Tensor& t);
+
+} // namespace at::caching
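The declarations above are the entire public surface of the cached-tensor tracking. A minimal usage sketch (not part of the diff), assuming a built libtorch; the exact adjusted count depends on how many cached references exist:

#include <ATen/ATen.h>
#include <ATen/CachedTensorUtils.h>
#include <iostream>

int main() {
  at::caching::set_cached_tensors_enabled(true);  // opt in; off by default
  at::Tensor t = at::ones({2, 2});
  at::caching::add_cached_tensor(t);              // mark t as persisted
  std::cout << at::caching::is_cached_tensor(t) << "\n";  // 1 while tracked
  // Use count with the cudagraph-persisted reference discounted, so
  // uniqueness checks such as gradient buffer stealing still behave:
  std::cout << at::caching::adjusted_use_count(t) << "\n";
  at::caching::remove_cached_tensor(t);
  return 0;
}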
venv/lib/python3.10/site-packages/torch/include/ATen/CollapseDims.h
ADDED
@@ -0,0 +1,94 @@
+#include <c10/util/Exception.h>
+#include <utility>
+
+namespace at {
+
+/*
+[collapse dims] Updates sizes, and strides to reflect a "collapse" of
+the info, possibly excluding the optional excludeDim. A "collapsed" version
+of the info is the fewest dims that order the tensor's elements in the same
+way as the original info. If excludeDim is specified, the collapse is the
+fewest dims that order the tensor's elements as the original and preserve the
+excluded dimension, unless the tensor collapses to a point.
+
+This function returns a pair of values.
+
+1) The (new) index of the preserved dimension if excludeDim is
+specified. 0 if the tensor is collapsed to a point. -1
+otherwise.
+
+2) The new number of dimensions.
+*/
+template <typename T>
+inline std::pair<int64_t, int64_t> collapse_dims(
+    T* sizes,
+    T* strides,
+    int64_t dims,
+    const int excludeDim = -1) {
+  TORCH_CHECK(
+      excludeDim >= -1 && excludeDim < dims,
+      "expected excluded dim between -1 and dims - 1");
+
+  int64_t stopDim = (excludeDim == -1) ? dims : excludeDim;
+  int64_t newIndex = -1;
+  int64_t oldIndex = 0;
+  int64_t remappedExcludedDim = -1;
+
+  while (oldIndex < dims) {
+    // Finds a dimension to collapse into
+    for (; oldIndex < stopDim; ++oldIndex) {
+      if (sizes[oldIndex] == 1) {
+        continue;
+      }
+
+      ++newIndex;
+      sizes[newIndex] = sizes[oldIndex];
+      strides[newIndex] = strides[oldIndex];
+      ++oldIndex;
+      break;
+    }
+
+    // Collapses dims
+    for (; oldIndex < stopDim; ++oldIndex) {
+      if (sizes[oldIndex] == 1) {
+        continue;
+      }
+
+      if (strides[newIndex] == sizes[oldIndex] * strides[oldIndex]) {
+        sizes[newIndex] *= sizes[oldIndex];
+        strides[newIndex] = strides[oldIndex];
+      } else {
+        ++newIndex;
+        sizes[newIndex] = sizes[oldIndex];
+        strides[newIndex] = strides[oldIndex];
+      }
+    }
+
+    // Handles excludeDim being set (oldIndex == excludeDim)
+    if (oldIndex != dims) {
+      // Preserves excluded dimension
+      ++newIndex;
+      sizes[newIndex] = sizes[oldIndex];
+      strides[newIndex] = strides[oldIndex];
+      remappedExcludedDim = newIndex;
+
+      // Restarts iteration after excludeDim
+      ++oldIndex;
+      stopDim = dims;
+    }
+  }
+
+  // Handles special case of all dims size 1
+  if (newIndex == -1 || (newIndex == 0 && sizes[0] == 1)) {
+    dims = 1;
+    sizes[0] = 1;
+    strides[0] = 1;
+
+    return std::pair<int64_t, int64_t>(0, 1);
+  }
+
+  dims = newIndex + 1;
+  return std::pair<int64_t, int64_t>(remappedExcludedDim, dims);
+}
+
+} // namespace at
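A worked example (not part of the diff): for a contiguous row-major 2x3x4 tensor, every adjacent pair of dims satisfies strides[i] == sizes[i+1] * strides[i+1], so all three dims fold into one.

#include <ATen/CollapseDims.h>
#include <cstdint>
#include <iostream>

int main() {
  int64_t sizes[3] = {2, 3, 4};
  int64_t strides[3] = {12, 4, 1};  // contiguous row-major layout
  auto result = at::collapse_dims(sizes, strides, /*dims=*/3);
  // No excludeDim was given, so result.first == -1; the three dims
  // collapse to result.second == 1, with sizes[0] == 24, strides[0] == 1.
  std::cout << result.first << " " << result.second << " "
            << sizes[0] << " " << strides[0] << "\n";
  return 0;
}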
venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions.h
ADDED
@@ -0,0 +1,29 @@
+#include <ATen/core/TensorBody.h>
+
+// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+// Code introduced to avoid cyclic dependency in static dispatch is no longer
+// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+// to Operators.cpp for supporting multiple backends with multiple kernels.
+//
+// Note [Avoiding Include Cycles In Static Dispatch]
+// In order to avoid #include cycles in the static dispatch build, we've carefully split out
+// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+//
+// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+//   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+//   directly inlined into TensorBody.h.
+// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+//   which include functions that have defaultable optional<Tensor> arguments.
+//   That requires knowing the full Tensor class definition.
+//
+// We break the cycle by doing the following:
+// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+// - CPUFunctions_inl.h includes everything else
+// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+//   and then it includes CPUFunctions_inl.h.
+// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+// - This also means that in the static dispatch build, CPUFunctions.h only needs to
+//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+#include <ATen/CompositeExplicitAutogradFunctions_inl.h>
venv/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h
ADDED
@@ -0,0 +1,29 @@
+#include <ATen/core/TensorBody.h>
+
+// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+// Code introduced to avoid cyclic dependency in static dispatch is no longer
+// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+// to Operators.cpp for supporting multiple backends with multiple kernels.
+//
+// Note [Avoiding Include Cycles In Static Dispatch]
+// In order to avoid #include cycles in the static dispatch build, we've carefully split out
+// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+//
+// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+//   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+//   directly inlined into TensorBody.h.
+// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+//   which include functions that have defaultable optional<Tensor> arguments.
+//   That requires knowing the full Tensor class definition.
+//
+// We break the cycle by doing the following:
+// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+// - CPUFunctions_inl.h includes everything else
+// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+//   and then it includes CPUFunctions_inl.h.
+// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+// - This also means that in the static dispatch build, CPUFunctions.h only needs to
+//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+#include <ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h>
venv/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions.h
ADDED
@@ -0,0 +1,29 @@
+#include <ATen/core/TensorBody.h>
+
+// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+// Code introduced to avoid cyclic dependency in static dispatch is no longer
+// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+// to Operators.cpp for supporting multiple backends with multiple kernels.
+//
+// Note [Avoiding Include Cycles In Static Dispatch]
+// In order to avoid #include cycles in the static dispatch build, we've carefully split out
+// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+//
+// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+//   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+//   directly inlined into TensorBody.h.
+// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+//   which include functions that have defaultable optional<Tensor> arguments.
+//   That requires knowing the full Tensor class definition.
+//
+// We break the cycle by doing the following:
+// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+// - CPUFunctions_inl.h includes everything else
+// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+//   and then it includes CPUFunctions_inl.h.
+// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+// - This also means that in the static dispatch build, CPUFunctions.h only needs to
+//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+#include <ATen/CompositeImplicitAutogradFunctions_inl.h>
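The `_inl.h` listed next refuses umbrella inclusion from translation units that opt into per-operator headers (see its #error guard). A minimal sketch (not part of the diff) of the include style that guard pushes you toward:

// Define the opt-in macro, then include only the operator headers this
// translation unit actually needs instead of an umbrella header.
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/ops/sum.h>  // just this one operator

at::Tensor total(const at::Tensor& t) {
  // This file recompiles only when the sum operator changes,
  // not whenever any operator is changed or added.
  return at::sum(t);
}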
venv/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions_inl.h
ADDED
@@ -0,0 +1,500 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+#error This change adds a dependency on all pytorch operators, meaning the \
+  file will need to be re-compiled every time an operator is changed or added. \
+  Consider including a specific operator from \
+  <ATen/ops/{my_operator}_compositeimplicitautograd_dispatch.h>. \
+  See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+#endif
+
+#include <ATen/ops/_add_batch_dim_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_assert_tensor_metadata_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_autocast_to_full_precision_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_autocast_to_reduced_precision_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_batch_norm_impl_index_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_batch_norm_impl_index_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_cast_Byte_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_cast_Char_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_cast_Double_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_cast_Half_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_cast_Int_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_cast_Long_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_cast_Short_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_choose_qparams_per_tensor_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_convolution_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_convolution_double_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_convolution_mode_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_cufft_clear_plan_cache_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_cufft_get_plan_cache_max_size_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_cufft_get_plan_cache_size_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_cufft_set_plan_cache_max_size_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_debug_has_internal_overlap_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_dim_arange_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_embedding_bag_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_embedding_bag_sparse_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_gather_sparse_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_has_compatible_shallow_copy_type_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_is_zerotensor_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_lu_with_info_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_nnpack_available_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_pack_padded_sequence_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_pad_circular_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_pad_enum_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_pad_packed_sequence_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_propagate_xla_data_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_remove_batch_dim_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_reshape_from_tensor_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_rowwise_prune_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_saturate_weight_to_fp16_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_shape_as_tensor_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sobol_engine_draw_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sobol_engine_ff_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sobol_engine_initialize_state_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sobol_engine_scramble_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_bsc_tensor_unsafe_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_bsr_tensor_unsafe_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_compressed_tensor_unsafe_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_coo_tensor_unsafe_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_csc_tensor_unsafe_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_csr_tensor_unsafe_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_log_softmax_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_mm_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_softmax_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_sparse_sum_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_test_ambiguous_defaults_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_test_autograd_multiple_dispatch_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_test_check_tensor_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_test_serialization_subcmul_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_test_string_default_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_thnn_differentiable_gru_cell_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_thnn_fused_lstm_cell_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_to_cpu_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_unpack_dual_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_upsample_bicubic2d_aa_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_upsample_bilinear2d_aa_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_upsample_nearest_exact1d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_upsample_nearest_exact2d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_upsample_nearest_exact3d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_use_cudnn_rnn_flatten_weight_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_validate_sparse_bsc_tensor_args_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_validate_sparse_bsr_tensor_args_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_validate_sparse_compressed_tensor_args_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_validate_sparse_coo_tensor_args_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_validate_sparse_csc_tensor_args_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_validate_sparse_csr_tensor_args_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_version_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_weight_norm_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/_weight_norm_differentiable_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/absolute_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/adaptive_avg_pool1d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/adaptive_avg_pool2d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/adaptive_avg_pool3d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/adaptive_max_pool1d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/adjoint_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/affine_grid_generator_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/align_as_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/align_tensors_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/align_to_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/all_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/alpha_dropout_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/and_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/any_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/arccos_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/arccosh_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/arcsin_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/arcsinh_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/arctan_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/arctan2_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/arctanh_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/argsort_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/argwhere_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/atleast_1d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/atleast_2d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/atleast_3d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/avg_pool1d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/batch_norm_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/bilinear_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/broadcast_tensors_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/broadcast_to_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/can_cast_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cartesian_prod_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cat_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cdist_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/chain_matmul_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/chalf_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/chunk_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/clip_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/coalesce_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/column_stack_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/combinations_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/concat_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/concatenate_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/conj_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/conj_physical_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/contiguous_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/conv1d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/conv2d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/conv3d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/conv_tbc_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/conv_transpose2d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/conv_transpose3d_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/corrcoef_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cosine_embedding_loss_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cosine_similarity_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cov_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cross_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cross_entropy_loss_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/ctc_loss_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cudnn_is_acceptable_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cummax_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cummaxmin_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cummin_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cumprod_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cumprod_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cumsum_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/cumulative_trapezoid_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/data_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/det_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/diag_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/diagflat_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/diagonal_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/diff_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/divide_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/dropout_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/dsplit_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/dstack_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/einsum_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/embedding_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/embedding_bag_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/embedding_sparse_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/empty_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/expand_as_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/fake_quantize_per_channel_affine_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/fake_quantize_per_tensor_affine_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/fbgemm_linear_fp16_weight_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/fbgemm_linear_int8_weight_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/fbgemm_linear_quantize_weight_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/fbgemm_pack_quantized_matrix_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h>
+#include <ATen/ops/feature_dropout_compositeimplicitautograd_dispatch.h>
201 |
+
#include <ATen/ops/fft_fft_compositeimplicitautograd_dispatch.h>
|
202 |
+
#include <ATen/ops/fft_fft2_compositeimplicitautograd_dispatch.h>
|
203 |
+
#include <ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h>
|
204 |
+
#include <ATen/ops/fft_fftshift_compositeimplicitautograd_dispatch.h>
|
205 |
+
#include <ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h>
|
206 |
+
#include <ATen/ops/fft_hfft2_compositeimplicitautograd_dispatch.h>
|
207 |
+
#include <ATen/ops/fft_hfftn_compositeimplicitautograd_dispatch.h>
|
208 |
+
#include <ATen/ops/fft_ifft_compositeimplicitautograd_dispatch.h>
|
209 |
+
#include <ATen/ops/fft_ifft2_compositeimplicitautograd_dispatch.h>
|
210 |
+
#include <ATen/ops/fft_ifftn_compositeimplicitautograd_dispatch.h>
|
211 |
+
#include <ATen/ops/fft_ifftshift_compositeimplicitautograd_dispatch.h>
|
212 |
+
#include <ATen/ops/fft_ihfft_compositeimplicitautograd_dispatch.h>
|
213 |
+
#include <ATen/ops/fft_ihfft2_compositeimplicitautograd_dispatch.h>
|
214 |
+
#include <ATen/ops/fft_ihfftn_compositeimplicitautograd_dispatch.h>
|
215 |
+
#include <ATen/ops/fft_irfft_compositeimplicitautograd_dispatch.h>
|
216 |
+
#include <ATen/ops/fft_irfft2_compositeimplicitautograd_dispatch.h>
|
217 |
+
#include <ATen/ops/fft_irfftn_compositeimplicitautograd_dispatch.h>
|
218 |
+
#include <ATen/ops/fft_rfft_compositeimplicitautograd_dispatch.h>
|
219 |
+
#include <ATen/ops/fft_rfft2_compositeimplicitautograd_dispatch.h>
|
220 |
+
#include <ATen/ops/fft_rfftn_compositeimplicitautograd_dispatch.h>
|
221 |
+
#include <ATen/ops/fill_diagonal_compositeimplicitautograd_dispatch.h>
|
222 |
+
#include <ATen/ops/fix_compositeimplicitautograd_dispatch.h>
|
223 |
+
#include <ATen/ops/flatten_compositeimplicitautograd_dispatch.h>
|
224 |
+
#include <ATen/ops/flatten_dense_tensors_compositeimplicitautograd_dispatch.h>
|
225 |
+
#include <ATen/ops/fliplr_compositeimplicitautograd_dispatch.h>
|
226 |
+
#include <ATen/ops/flipud_compositeimplicitautograd_dispatch.h>
|
227 |
+
#include <ATen/ops/float_power_compositeimplicitautograd_dispatch.h>
|
228 |
+
#include <ATen/ops/frobenius_norm_compositeimplicitautograd_dispatch.h>
|
229 |
+
#include <ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h>
|
230 |
+
#include <ATen/ops/gather_compositeimplicitautograd_dispatch.h>
|
231 |
+
#include <ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h>
|
232 |
+
#include <ATen/ops/ger_compositeimplicitautograd_dispatch.h>
|
233 |
+
#include <ATen/ops/gradient_compositeimplicitautograd_dispatch.h>
|
234 |
+
#include <ATen/ops/greater_compositeimplicitautograd_dispatch.h>
|
235 |
+
#include <ATen/ops/greater_equal_compositeimplicitautograd_dispatch.h>
|
236 |
+
#include <ATen/ops/grid_sampler_compositeimplicitautograd_dispatch.h>
|
237 |
+
#include <ATen/ops/group_norm_compositeimplicitautograd_dispatch.h>
|
238 |
+
#include <ATen/ops/gru_compositeimplicitautograd_dispatch.h>
|
239 |
+
#include <ATen/ops/gru_cell_compositeimplicitautograd_dispatch.h>
|
240 |
+
#include <ATen/ops/hinge_embedding_loss_compositeimplicitautograd_dispatch.h>
|
241 |
+
#include <ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h>
|
242 |
+
#include <ATen/ops/hsplit_compositeimplicitautograd_dispatch.h>
|
243 |
+
#include <ATen/ops/hstack_compositeimplicitautograd_dispatch.h>
|
244 |
+
#include <ATen/ops/imag_compositeimplicitautograd_dispatch.h>
|
245 |
+
#include <ATen/ops/index_add_compositeimplicitautograd_dispatch.h>
|
246 |
+
#include <ATen/ops/index_copy_compositeimplicitautograd_dispatch.h>
|
247 |
+
#include <ATen/ops/index_fill_compositeimplicitautograd_dispatch.h>
|
248 |
+
#include <ATen/ops/index_select_compositeimplicitautograd_dispatch.h>
|
249 |
+
#include <ATen/ops/index_select_backward_compositeimplicitautograd_dispatch.h>
|
250 |
+
#include <ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h>
|
251 |
+
#include <ATen/ops/inner_compositeimplicitautograd_dispatch.h>
|
252 |
+
#include <ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h>
|
253 |
+
#include <ATen/ops/inverse_compositeimplicitautograd_dispatch.h>
|
254 |
+
#include <ATen/ops/is_complex_compositeimplicitautograd_dispatch.h>
|
255 |
+
#include <ATen/ops/is_conj_compositeimplicitautograd_dispatch.h>
|
256 |
+
#include <ATen/ops/is_distributed_compositeimplicitautograd_dispatch.h>
|
257 |
+
#include <ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h>
|
258 |
+
#include <ATen/ops/is_inference_compositeimplicitautograd_dispatch.h>
|
259 |
+
#include <ATen/ops/is_leaf_compositeimplicitautograd_dispatch.h>
|
260 |
+
#include <ATen/ops/is_neg_compositeimplicitautograd_dispatch.h>
|
261 |
+
#include <ATen/ops/is_nonzero_compositeimplicitautograd_dispatch.h>
|
262 |
+
#include <ATen/ops/is_signed_compositeimplicitautograd_dispatch.h>
|
263 |
+
#include <ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h>
|
264 |
+
#include <ATen/ops/isclose_compositeimplicitautograd_dispatch.h>
|
265 |
+
#include <ATen/ops/isfinite_compositeimplicitautograd_dispatch.h>
|
266 |
+
#include <ATen/ops/isreal_compositeimplicitautograd_dispatch.h>
|
267 |
+
#include <ATen/ops/istft_compositeimplicitautograd_dispatch.h>
|
268 |
+
#include <ATen/ops/item_compositeimplicitautograd_dispatch.h>
|
269 |
+
#include <ATen/ops/kl_div_compositeimplicitautograd_dispatch.h>
|
270 |
+
#include <ATen/ops/kron_compositeimplicitautograd_dispatch.h>
|
271 |
+
#include <ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h>
|
272 |
+
#include <ATen/ops/l1_loss_compositeimplicitautograd_dispatch.h>
|
273 |
+
#include <ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h>
|
274 |
+
#include <ATen/ops/ldexp_compositeimplicitautograd_dispatch.h>
|
275 |
+
#include <ATen/ops/less_compositeimplicitautograd_dispatch.h>
|
276 |
+
#include <ATen/ops/less_equal_compositeimplicitautograd_dispatch.h>
|
277 |
+
#include <ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h>
|
278 |
+
#include <ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h>
|
279 |
+
#include <ATen/ops/linalg_det_compositeimplicitautograd_dispatch.h>
|
280 |
+
#include <ATen/ops/linalg_diagonal_compositeimplicitautograd_dispatch.h>
|
281 |
+
#include <ATen/ops/linalg_eigh_compositeimplicitautograd_dispatch.h>
|
282 |
+
#include <ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h>
|
283 |
+
#include <ATen/ops/linalg_eigvalsh_compositeimplicitautograd_dispatch.h>
|
284 |
+
#include <ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h>
|
285 |
+
#include <ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h>
|
286 |
+
#include <ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h>
|
287 |
+
#include <ATen/ops/linalg_matmul_compositeimplicitautograd_dispatch.h>
|
288 |
+
#include <ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h>
|
289 |
+
#include <ATen/ops/linalg_matrix_power_compositeimplicitautograd_dispatch.h>
|
290 |
+
#include <ATen/ops/linalg_matrix_rank_compositeimplicitautograd_dispatch.h>
|
291 |
+
#include <ATen/ops/linalg_multi_dot_compositeimplicitautograd_dispatch.h>
|
292 |
+
#include <ATen/ops/linalg_norm_compositeimplicitautograd_dispatch.h>
|
293 |
+
#include <ATen/ops/linalg_pinv_compositeimplicitautograd_dispatch.h>
|
294 |
+
#include <ATen/ops/linalg_slogdet_compositeimplicitautograd_dispatch.h>
|
295 |
+
#include <ATen/ops/linalg_solve_compositeimplicitautograd_dispatch.h>
|
296 |
+
#include <ATen/ops/linalg_solve_ex_compositeimplicitautograd_dispatch.h>
|
297 |
+
#include <ATen/ops/linalg_svd_compositeimplicitautograd_dispatch.h>
|
298 |
+
#include <ATen/ops/linalg_svdvals_compositeimplicitautograd_dispatch.h>
|
299 |
+
#include <ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h>
|
300 |
+
#include <ATen/ops/linalg_tensorsolve_compositeimplicitautograd_dispatch.h>
|
301 |
+
#include <ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h>
|
302 |
+
#include <ATen/ops/linalg_vecdot_compositeimplicitautograd_dispatch.h>
|
303 |
+
#include <ATen/ops/linear_compositeimplicitautograd_dispatch.h>
|
304 |
+
#include <ATen/ops/log_sigmoid_compositeimplicitautograd_dispatch.h>
|
305 |
+
#include <ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h>
|
306 |
+
#include <ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h>
|
307 |
+
#include <ATen/ops/logdet_compositeimplicitautograd_dispatch.h>
|
308 |
+
#include <ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h>
|
309 |
+
#include <ATen/ops/lstm_compositeimplicitautograd_dispatch.h>
|
310 |
+
#include <ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h>
|
311 |
+
#include <ATen/ops/lu_solve_compositeimplicitautograd_dispatch.h>
|
312 |
+
#include <ATen/ops/mH_compositeimplicitautograd_dispatch.h>
|
313 |
+
#include <ATen/ops/mT_compositeimplicitautograd_dispatch.h>
|
314 |
+
#include <ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h>
|
315 |
+
#include <ATen/ops/masked_select_backward_compositeimplicitautograd_dispatch.h>
|
316 |
+
#include <ATen/ops/matmul_compositeimplicitautograd_dispatch.h>
|
317 |
+
#include <ATen/ops/matrix_H_compositeimplicitautograd_dispatch.h>
|
318 |
+
#include <ATen/ops/matrix_exp_compositeimplicitautograd_dispatch.h>
|
319 |
+
#include <ATen/ops/matrix_exp_backward_compositeimplicitautograd_dispatch.h>
|
320 |
+
#include <ATen/ops/matrix_power_compositeimplicitautograd_dispatch.h>
|
321 |
+
#include <ATen/ops/max_compositeimplicitautograd_dispatch.h>
|
322 |
+
#include <ATen/ops/max_pool1d_compositeimplicitautograd_dispatch.h>
|
323 |
+
#include <ATen/ops/max_pool1d_with_indices_compositeimplicitautograd_dispatch.h>
|
324 |
+
#include <ATen/ops/max_pool2d_compositeimplicitautograd_dispatch.h>
|
325 |
+
#include <ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h>
|
326 |
+
#include <ATen/ops/mean_compositeimplicitautograd_dispatch.h>
|
327 |
+
#include <ATen/ops/median_compositeimplicitautograd_dispatch.h>
|
328 |
+
#include <ATen/ops/meshgrid_compositeimplicitautograd_dispatch.h>
|
329 |
+
#include <ATen/ops/min_compositeimplicitautograd_dispatch.h>
|
330 |
+
#include <ATen/ops/mish_backward_compositeimplicitautograd_dispatch.h>
|
331 |
+
#include <ATen/ops/mode_compositeimplicitautograd_dispatch.h>
|
332 |
+
#include <ATen/ops/moveaxis_compositeimplicitautograd_dispatch.h>
|
333 |
+
#include <ATen/ops/movedim_compositeimplicitautograd_dispatch.h>
|
334 |
+
#include <ATen/ops/msort_compositeimplicitautograd_dispatch.h>
|
335 |
+
#include <ATen/ops/multilabel_margin_loss_compositeimplicitautograd_dispatch.h>
|
336 |
+
#include <ATen/ops/multiply_compositeimplicitautograd_dispatch.h>
|
337 |
+
#include <ATen/ops/nanmean_compositeimplicitautograd_dispatch.h>
|
338 |
+
#include <ATen/ops/nanmedian_compositeimplicitautograd_dispatch.h>
|
339 |
+
#include <ATen/ops/nanquantile_compositeimplicitautograd_dispatch.h>
|
340 |
+
#include <ATen/ops/narrow_compositeimplicitautograd_dispatch.h>
|
341 |
+
#include <ATen/ops/native_channel_shuffle_compositeimplicitautograd_dispatch.h>
|
342 |
+
#include <ATen/ops/negative_compositeimplicitautograd_dispatch.h>
|
343 |
+
#include <ATen/ops/nested_to_padded_tensor_compositeimplicitautograd_dispatch.h>
|
344 |
+
#include <ATen/ops/nll_loss_compositeimplicitautograd_dispatch.h>
|
345 |
+
#include <ATen/ops/nll_loss2d_compositeimplicitautograd_dispatch.h>
|
346 |
+
#include <ATen/ops/nll_loss_nd_compositeimplicitautograd_dispatch.h>
|
347 |
+
#include <ATen/ops/nonzero_numpy_compositeimplicitautograd_dispatch.h>
|
348 |
+
#include <ATen/ops/norm_compositeimplicitautograd_dispatch.h>
|
349 |
+
#include <ATen/ops/norm_except_dim_compositeimplicitautograd_dispatch.h>
|
350 |
+
#include <ATen/ops/not_equal_compositeimplicitautograd_dispatch.h>
|
351 |
+
#include <ATen/ops/nuclear_norm_compositeimplicitautograd_dispatch.h>
|
352 |
+
#include <ATen/ops/numpy_T_compositeimplicitautograd_dispatch.h>
|
353 |
+
#include <ATen/ops/one_hot_compositeimplicitautograd_dispatch.h>
|
354 |
+
#include <ATen/ops/or_compositeimplicitautograd_dispatch.h>
|
355 |
+
#include <ATen/ops/orgqr_compositeimplicitautograd_dispatch.h>
|
356 |
+
#include <ATen/ops/outer_compositeimplicitautograd_dispatch.h>
|
357 |
+
#include <ATen/ops/output_nr_compositeimplicitautograd_dispatch.h>
|
358 |
+
#include <ATen/ops/pad_compositeimplicitautograd_dispatch.h>
|
359 |
+
#include <ATen/ops/pad_sequence_compositeimplicitautograd_dispatch.h>
|
360 |
+
#include <ATen/ops/pairwise_distance_compositeimplicitautograd_dispatch.h>
|
361 |
+
#include <ATen/ops/pdist_compositeimplicitautograd_dispatch.h>
|
362 |
+
#include <ATen/ops/pin_memory_compositeimplicitautograd_dispatch.h>
|
363 |
+
#include <ATen/ops/pinverse_compositeimplicitautograd_dispatch.h>
|
364 |
+
#include <ATen/ops/poisson_nll_loss_compositeimplicitautograd_dispatch.h>
|
365 |
+
#include <ATen/ops/positive_compositeimplicitautograd_dispatch.h>
|
366 |
+
#include <ATen/ops/prelu_compositeimplicitautograd_dispatch.h>
|
367 |
+
#include <ATen/ops/prod_compositeimplicitautograd_dispatch.h>
|
368 |
+
#include <ATen/ops/promote_types_compositeimplicitautograd_dispatch.h>
|
369 |
+
#include <ATen/ops/qr_compositeimplicitautograd_dispatch.h>
|
370 |
+
#include <ATen/ops/quantile_compositeimplicitautograd_dispatch.h>
|
371 |
+
#include <ATen/ops/quantized_gru_cell_compositeimplicitautograd_dispatch.h>
|
372 |
+
#include <ATen/ops/quantized_lstm_cell_compositeimplicitautograd_dispatch.h>
|
373 |
+
#include <ATen/ops/quantized_rnn_relu_cell_compositeimplicitautograd_dispatch.h>
|
374 |
+
#include <ATen/ops/quantized_rnn_tanh_cell_compositeimplicitautograd_dispatch.h>
|
375 |
+
#include <ATen/ops/rand_compositeimplicitautograd_dispatch.h>
|
376 |
+
#include <ATen/ops/randn_compositeimplicitautograd_dispatch.h>
|
377 |
+
#include <ATen/ops/ravel_compositeimplicitautograd_dispatch.h>
|
378 |
+
#include <ATen/ops/real_compositeimplicitautograd_dispatch.h>
|
379 |
+
#include <ATen/ops/refine_names_compositeimplicitautograd_dispatch.h>
|
380 |
+
#include <ATen/ops/relu6_compositeimplicitautograd_dispatch.h>
|
381 |
+
#include <ATen/ops/rename_compositeimplicitautograd_dispatch.h>
|
382 |
+
#include <ATen/ops/repeat_interleave_compositeimplicitautograd_dispatch.h>
|
383 |
+
#include <ATen/ops/requires_grad_compositeimplicitautograd_dispatch.h>
|
384 |
+
#include <ATen/ops/reshape_compositeimplicitautograd_dispatch.h>
|
385 |
+
#include <ATen/ops/reshape_as_compositeimplicitautograd_dispatch.h>
|
386 |
+
#include <ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h>
|
387 |
+
#include <ATen/ops/resolve_neg_compositeimplicitautograd_dispatch.h>
|
388 |
+
#include <ATen/ops/result_type_compositeimplicitautograd_dispatch.h>
|
389 |
+
#include <ATen/ops/retain_grad_compositeimplicitautograd_dispatch.h>
|
390 |
+
#include <ATen/ops/retains_grad_compositeimplicitautograd_dispatch.h>
|
391 |
+
#include <ATen/ops/rnn_relu_compositeimplicitautograd_dispatch.h>
|
392 |
+
#include <ATen/ops/rnn_relu_cell_compositeimplicitautograd_dispatch.h>
|
393 |
+
#include <ATen/ops/rnn_tanh_compositeimplicitautograd_dispatch.h>
|
394 |
+
#include <ATen/ops/rnn_tanh_cell_compositeimplicitautograd_dispatch.h>
|
395 |
+
#include <ATen/ops/row_stack_compositeimplicitautograd_dispatch.h>
|
396 |
+
#include <ATen/ops/rrelu_compositeimplicitautograd_dispatch.h>
|
397 |
+
#include <ATen/ops/scaled_dot_product_attention_compositeimplicitautograd_dispatch.h>
|
398 |
+
#include <ATen/ops/scatter_compositeimplicitautograd_dispatch.h>
|
399 |
+
#include <ATen/ops/scatter_add_compositeimplicitautograd_dispatch.h>
|
400 |
+
#include <ATen/ops/select_compositeimplicitautograd_dispatch.h>
|
401 |
+
#include <ATen/ops/selu_compositeimplicitautograd_dispatch.h>
|
402 |
+
#include <ATen/ops/set_compositeimplicitautograd_dispatch.h>
|
403 |
+
#include <ATen/ops/set_data_compositeimplicitautograd_dispatch.h>
|
404 |
+
#include <ATen/ops/silu_backward_compositeimplicitautograd_dispatch.h>
|
405 |
+
#include <ATen/ops/size_compositeimplicitautograd_dispatch.h>
|
406 |
+
#include <ATen/ops/slogdet_compositeimplicitautograd_dispatch.h>
|
407 |
+
#include <ATen/ops/slow_conv3d_compositeimplicitautograd_dispatch.h>
|
408 |
+
#include <ATen/ops/smm_compositeimplicitautograd_dispatch.h>
|
409 |
+
#include <ATen/ops/softmax_compositeimplicitautograd_dispatch.h>
|
410 |
+
#include <ATen/ops/sort_compositeimplicitautograd_dispatch.h>
|
411 |
+
#include <ATen/ops/sparse_bsc_tensor_compositeimplicitautograd_dispatch.h>
|
412 |
+
#include <ATen/ops/sparse_bsr_tensor_compositeimplicitautograd_dispatch.h>
|
413 |
+
#include <ATen/ops/sparse_coo_tensor_compositeimplicitautograd_dispatch.h>
|
414 |
+
#include <ATen/ops/sparse_csc_tensor_compositeimplicitautograd_dispatch.h>
|
415 |
+
#include <ATen/ops/sparse_csr_tensor_compositeimplicitautograd_dispatch.h>
|
416 |
+
#include <ATen/ops/special_digamma_compositeimplicitautograd_dispatch.h>
|
417 |
+
#include <ATen/ops/special_erf_compositeimplicitautograd_dispatch.h>
|
418 |
+
#include <ATen/ops/special_erfc_compositeimplicitautograd_dispatch.h>
|
419 |
+
#include <ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h>
|
420 |
+
#include <ATen/ops/special_exp2_compositeimplicitautograd_dispatch.h>
|
421 |
+
#include <ATen/ops/special_expit_compositeimplicitautograd_dispatch.h>
|
422 |
+
#include <ATen/ops/special_expm1_compositeimplicitautograd_dispatch.h>
|
423 |
+
#include <ATen/ops/special_gammainc_compositeimplicitautograd_dispatch.h>
|
424 |
+
#include <ATen/ops/special_gammaincc_compositeimplicitautograd_dispatch.h>
|
425 |
+
#include <ATen/ops/special_gammaln_compositeimplicitautograd_dispatch.h>
|
426 |
+
#include <ATen/ops/special_i0_compositeimplicitautograd_dispatch.h>
|
427 |
+
#include <ATen/ops/special_log1p_compositeimplicitautograd_dispatch.h>
|
428 |
+
#include <ATen/ops/special_log_softmax_compositeimplicitautograd_dispatch.h>
|
429 |
+
#include <ATen/ops/special_logit_compositeimplicitautograd_dispatch.h>
|
430 |
+
#include <ATen/ops/special_logsumexp_compositeimplicitautograd_dispatch.h>
|
431 |
+
#include <ATen/ops/special_multigammaln_compositeimplicitautograd_dispatch.h>
|
432 |
+
#include <ATen/ops/special_ndtr_compositeimplicitautograd_dispatch.h>
|
433 |
+
#include <ATen/ops/special_polygamma_compositeimplicitautograd_dispatch.h>
|
434 |
+
#include <ATen/ops/special_psi_compositeimplicitautograd_dispatch.h>
|
435 |
+
#include <ATen/ops/special_round_compositeimplicitautograd_dispatch.h>
|
436 |
+
#include <ATen/ops/special_sinc_compositeimplicitautograd_dispatch.h>
|
437 |
+
#include <ATen/ops/special_softmax_compositeimplicitautograd_dispatch.h>
|
438 |
+
#include <ATen/ops/special_xlogy_compositeimplicitautograd_dispatch.h>
|
439 |
+
#include <ATen/ops/split_compositeimplicitautograd_dispatch.h>
|
440 |
+
#include <ATen/ops/square_compositeimplicitautograd_dispatch.h>
|
441 |
+
#include <ATen/ops/squeeze_compositeimplicitautograd_dispatch.h>
|
442 |
+
#include <ATen/ops/sspaddmm_compositeimplicitautograd_dispatch.h>
|
443 |
+
#include <ATen/ops/std_compositeimplicitautograd_dispatch.h>
|
444 |
+
#include <ATen/ops/std_mean_compositeimplicitautograd_dispatch.h>
|
445 |
+
#include <ATen/ops/stft_compositeimplicitautograd_dispatch.h>
|
446 |
+
#include <ATen/ops/stride_compositeimplicitautograd_dispatch.h>
|
447 |
+
#include <ATen/ops/subtract_compositeimplicitautograd_dispatch.h>
|
448 |
+
#include <ATen/ops/sum_compositeimplicitautograd_dispatch.h>
|
449 |
+
#include <ATen/ops/sum_to_size_compositeimplicitautograd_dispatch.h>
|
450 |
+
#include <ATen/ops/svd_compositeimplicitautograd_dispatch.h>
|
451 |
+
#include <ATen/ops/swapaxes_compositeimplicitautograd_dispatch.h>
|
452 |
+
#include <ATen/ops/swapdims_compositeimplicitautograd_dispatch.h>
|
453 |
+
#include <ATen/ops/sym_numel_compositeimplicitautograd_dispatch.h>
|
454 |
+
#include <ATen/ops/sym_size_compositeimplicitautograd_dispatch.h>
|
455 |
+
#include <ATen/ops/sym_storage_offset_compositeimplicitautograd_dispatch.h>
|
456 |
+
#include <ATen/ops/sym_stride_compositeimplicitautograd_dispatch.h>
|
457 |
+
#include <ATen/ops/take_along_dim_compositeimplicitautograd_dispatch.h>
|
458 |
+
#include <ATen/ops/tensor_split_compositeimplicitautograd_dispatch.h>
|
459 |
+
#include <ATen/ops/tensordot_compositeimplicitautograd_dispatch.h>
|
460 |
+
#include <ATen/ops/thnn_conv2d_compositeimplicitautograd_dispatch.h>
|
461 |
+
#include <ATen/ops/tile_compositeimplicitautograd_dispatch.h>
|
462 |
+
#include <ATen/ops/to_compositeimplicitautograd_dispatch.h>
|
463 |
+
#include <ATen/ops/to_dense_compositeimplicitautograd_dispatch.h>
|
464 |
+
#include <ATen/ops/to_dense_backward_compositeimplicitautograd_dispatch.h>
|
465 |
+
#include <ATen/ops/to_mkldnn_backward_compositeimplicitautograd_dispatch.h>
|
466 |
+
#include <ATen/ops/to_sparse_compositeimplicitautograd_dispatch.h>
|
467 |
+
#include <ATen/ops/to_sparse_bsc_compositeimplicitautograd_dispatch.h>
|
468 |
+
#include <ATen/ops/to_sparse_bsr_compositeimplicitautograd_dispatch.h>
|
469 |
+
#include <ATen/ops/to_sparse_csc_compositeimplicitautograd_dispatch.h>
|
470 |
+
#include <ATen/ops/to_sparse_csr_compositeimplicitautograd_dispatch.h>
|
471 |
+
#include <ATen/ops/trace_backward_compositeimplicitautograd_dispatch.h>
|
472 |
+
#include <ATen/ops/transpose_compositeimplicitautograd_dispatch.h>
|
473 |
+
#include <ATen/ops/trapezoid_compositeimplicitautograd_dispatch.h>
|
474 |
+
#include <ATen/ops/trapz_compositeimplicitautograd_dispatch.h>
|
475 |
+
#include <ATen/ops/triplet_margin_loss_compositeimplicitautograd_dispatch.h>
|
476 |
+
#include <ATen/ops/true_divide_compositeimplicitautograd_dispatch.h>
|
477 |
+
#include <ATen/ops/type_as_compositeimplicitautograd_dispatch.h>
|
478 |
+
#include <ATen/ops/unbind_compositeimplicitautograd_dispatch.h>
|
479 |
+
#include <ATen/ops/unflatten_compositeimplicitautograd_dispatch.h>
|
480 |
+
#include <ATen/ops/unflatten_dense_tensors_compositeimplicitautograd_dispatch.h>
|
481 |
+
#include <ATen/ops/unsafe_chunk_compositeimplicitautograd_dispatch.h>
|
482 |
+
#include <ATen/ops/upsample_bicubic2d_compositeimplicitautograd_dispatch.h>
|
483 |
+
#include <ATen/ops/upsample_bilinear2d_compositeimplicitautograd_dispatch.h>
|
484 |
+
#include <ATen/ops/upsample_linear1d_compositeimplicitautograd_dispatch.h>
|
485 |
+
#include <ATen/ops/upsample_nearest1d_compositeimplicitautograd_dispatch.h>
|
486 |
+
#include <ATen/ops/upsample_nearest2d_compositeimplicitautograd_dispatch.h>
|
487 |
+
#include <ATen/ops/upsample_nearest3d_compositeimplicitautograd_dispatch.h>
|
488 |
+
#include <ATen/ops/upsample_trilinear3d_compositeimplicitautograd_dispatch.h>
|
489 |
+
#include <ATen/ops/value_selecting_reduction_backward_compositeimplicitautograd_dispatch.h>
|
490 |
+
#include <ATen/ops/vander_compositeimplicitautograd_dispatch.h>
|
491 |
+
#include <ATen/ops/var_compositeimplicitautograd_dispatch.h>
|
492 |
+
#include <ATen/ops/var_mean_compositeimplicitautograd_dispatch.h>
|
493 |
+
#include <ATen/ops/view_as_compositeimplicitautograd_dispatch.h>
|
494 |
+
#include <ATen/ops/vsplit_compositeimplicitautograd_dispatch.h>
|
495 |
+
#include <ATen/ops/vstack_compositeimplicitautograd_dispatch.h>
|
496 |
+
#include <ATen/ops/where_compositeimplicitautograd_dispatch.h>
|
497 |
+
#include <ATen/ops/xor_compositeimplicitautograd_dispatch.h>
|
498 |
+
|
499 |
+
|
500 |
+
|
venv/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions.h
ADDED
@@ -0,0 +1,29 @@
+#include <ATen/core/TensorBody.h>
+
+// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+// Code introduced to avoid cyclic dependency in static dispatch is no longer
+// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+// to Operators.cpp for supporting multiple backends with multiple kernels.
+//
+// Note [Avoiding Include Cycles In Static Dispatch]
+// In order to avoid #include cycles in the static dispatch build, we've carefully split out
+// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+//
+// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+//   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+//   directly inlined into TensorBody.h.
+// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+//   which include functions that have defaultable optional<Tensor> arguments.
+//   That requires knowing the full Tensor class definition.
+//
+// We break the cycle by doing the following:
+// - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h
+// - CPUFunctions_inl.h includes everything else
+// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+//   and then it includes CPUFunctions_inl.h.
+// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+// - This also means that in the static dispatch build, CPUFunctions.h only needs to
+//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+#include <ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h>
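
Aside: the Note [Avoiding Include Cycles In Static Dispatch] reproduced above is the core of how these generated headers stay cycle-free, and the same split works outside of ATen. A minimal sketch, assuming hypothetical file names (Widget.h, WidgetAPI.h, WidgetAPI_inl.h are invented for this illustration and are not ATen files):

    // Widget.h -- defines the class, then pulls in the inline API.
    #pragma once
    struct Widget {
      int x = 0;
    };
    // Safe to include here: Widget is a complete type at this point.
    #include "WidgetAPI_inl.h"

    // WidgetAPI.h -- thin wrapper, playing the role of CPUFunctions.h.
    #pragma once
    #include "Widget.h"  // transitively brings in WidgetAPI_inl.h

    // WidgetAPI_inl.h -- API that needs the full Widget definition,
    // e.g. because of defaultable by-value arguments.
    #pragma once
    inline int frob(Widget w = Widget{}) {
      return w.x + 1;
    }

Consumers include only WidgetAPI.h; the one file allowed to include the _inl header is the one that has just finished defining the class, so a TensorBody.h -> CPUFunctions.h -> TensorBody.h style cycle cannot form.
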
venv/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+#error This change adds a dependency on all pytorch operators, meaning the     \
+  file will need to be re-compiled every time an operator is changed or added. \
+  Consider including a specific operator from                                  \
+  <ATen/ops/{my_operator}_compositeimplicitautogradnestedtensor_dispatch.h>.   \
+  See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+#endif
+
+#include <ATen/ops/randn_like_compositeimplicitautogradnestedtensor_dispatch.h>
+#include <ATen/ops/reshape_compositeimplicitautogradnestedtensor_dispatch.h>
+#include <ATen/ops/reshape_as_compositeimplicitautogradnestedtensor_dispatch.h>
+#include <ATen/ops/zeros_like_compositeimplicitautogradnestedtensor_dispatch.h>
+
+
+
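
The #error block in this header is worth a gloss: it fires only when both AT_PER_OPERATOR_HEADERS and TORCH_ASSERT_ONLY_METHOD_OPERATORS are defined, i.e. when a translation unit has promised to depend on individual operators rather than on the whole operator universe. A hedged sketch of a conforming consumer, following the include pattern the error message itself names (my_kernel.cpp is a hypothetical file; reshape is one of the four operators this header aggregates):

    // my_kernel.cpp -- opt in to narrow per-operator dependencies.
    #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
    #include <ATen/core/Tensor.h>
    // Depend on exactly one operator's dispatch header instead of the
    // aggregate CompositeImplicitAutogradNestedTensorFunctions_inl.h:
    #include <ATen/ops/reshape_compositeimplicitautogradnestedtensor_dispatch.h>

The payoff is incremental-build hygiene: the file is recompiled when reshape changes, not when any of the hundreds of other generated operator headers change.
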
venv/lib/python3.10/site-packages/torch/include/ATen/Context.h
ADDED
@@ -0,0 +1,560 @@
+#pragma once
+
+#include <ATen/CPUGeneratorImpl.h>
+#include <ATen/DeviceAccelerator.h>
+#include <ATen/LinalgBackend.h>
+#include <ATen/core/ATenGeneral.h>
+#include <ATen/core/DeprecatedTypeProperties.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/LegacyTypeDispatch.h>
+#include <ATen/detail/AcceleratorHooksInterface.h>
+#include <ATen/detail/CUDAHooksInterface.h>
+#include <ATen/detail/HIPHooksInterface.h>
+#include <ATen/detail/IPUHooksInterface.h>
+#include <ATen/detail/MPSHooksInterface.h>
+#include <ATen/detail/MTIAHooksInterface.h>
+#include <ATen/detail/ORTHooksInterface.h>
+#include <ATen/detail/PrivateUse1HooksInterface.h>
+#include <ATen/detail/XPUHooksInterface.h>
+#include <c10/core/QEngine.h>
+#include <c10/core/impl/DeviceGuardImplInterface.h>
+#include <c10/util/CallOnce.h>
+#include <c10/util/Exception.h>
+#include <c10/util/env.h>
+#include <c10/util/irange.h>
+
+#include <cstdint>
+#include <mutex>
+
+namespace at {
+
+class Tensor;
+
+enum class TORCH_API Float32MatmulPrecision { HIGHEST, HIGH, MEDIUM };
+
+class TORCH_API Context {
+ public:
+  Context();
+
+  const Generator& defaultGenerator(Device device) {
+    c10::DeviceType device_type = device.type();
+    initCUDAIfNeeded(device_type);
+    initHIPIfNeeded(device_type);
+    if (device_type == at::kCPU) {
+      return at::detail::getDefaultCPUGenerator();
+    } else if (device_type == at::kCUDA) {
+      return at::detail::getCUDAHooks().getDefaultCUDAGenerator(device.index());
+    } else if (device_type == at::kMPS) {
+      return at::detail::getMPSHooks().getDefaultMPSGenerator();
+    } else if (device_type == at::kXPU) {
+      return at::detail::getXPUHooks().getDefaultXPUGenerator(device.index());
+    } else if (device_type == at::kIPU) {
+      return at::detail::getIPUHooks().getDefaultIPUGenerator(device.index());
+    } else if (device_type == at::kPrivateUse1) {
+      return at::GetPrivateUse1HooksInterface()->getDefaultGenerator(
+          device.index());
+    } else {
+      AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
+    }
+  }
+  const AcceleratorHooksInterface& getAcceleratorHooksInterface(
+      c10::optional<c10::DeviceType> opt_device_type = c10::nullopt) {
+    c10::DeviceType device_type = opt_device_type.has_value()
+        ? opt_device_type.value()
+        : at::getAccelerator(true).value();
+    if (device_type == at::kCUDA) {
+      return at::detail::getCUDAHooks();
+    } else if (device_type == at::kMPS) {
+      return at::detail::getMPSHooks();
+    } else if (device_type == at::kPrivateUse1) {
+      return at::detail::getPrivateUse1Hooks();
+    } else {
+      AT_ERROR(
+          c10::DeviceTypeName(device_type), " device type not an accelerator.");
+    }
+  }
+  Device getDeviceFromPtr(void* data, c10::DeviceType device_type) {
+    initCUDAIfNeeded(device_type);
+    initHIPIfNeeded(device_type);
+    initXPUIfNeeded(device_type);
+    if (device_type == at::kCPU) {
+      return c10::DeviceType::CPU;
+    } else if (device_type == at::kCUDA) {
+      return at::detail::getCUDAHooks().getDeviceFromPtr(data);
+    } else if (device_type == at::kXPU) {
+      return at::detail::getXPUHooks().getDeviceFromPtr(data);
+    } else if (device_type == at::kPrivateUse1) {
+      return at::GetPrivateUse1HooksInterface()->getDeviceFromPtr(data);
+    } else {
+      AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
+    }
+  }
+  static bool isPinnedPtr(const void* data) {
+    return detail::getCUDAHooks().isPinnedPtr(data);
+  }
+  static bool hasOpenMP();
+  static bool hasMKL();
+  static bool hasLAPACK();
+  static bool hasMKLDNN();
+  static bool hasMAGMA() {
+    return detail::getCUDAHooks().hasMAGMA();
+  }
+  static bool hasCUDA() {
+    return detail::getCUDAHooks().hasCUDA();
+  }
+  static bool hasMTIA() {
+    return detail::getMTIAHooks().hasMTIA();
+  }
+  static bool hasCUDART() {
+    return detail::getCUDAHooks().hasCUDART();
+  }
+  static long versionCUDART() {
+    return detail::getCUDAHooks().versionCUDART();
+  }
+  static bool hasCuDNN() {
+    return detail::getCUDAHooks().hasCuDNN();
+  }
+  static long versionCuDNN() {
+    return detail::getCUDAHooks().versionCuDNN();
+  }
+  static bool hasCuSOLVER() {
+    return detail::getCUDAHooks().hasCuSOLVER();
+  }
+  static bool hasHIP() {
+    return detail::getHIPHooks().hasHIP();
+  }
+  static bool hasMPS() {
+    return detail::getMPSHooks().hasMPS();
+  }
+  static bool hasIPU() {
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::IPU);
+  }
+  static bool hasXLA() {
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::XLA);
+  }
+  static bool hasXPU() {
+    return detail::getXPUHooks().hasXPU();
+  }
+  static bool hasLazy() {
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::Lazy);
+  }
+  static bool hasORT() {
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::ORT);
+  }
+  // defined in header so that getNonVariableType has ability to inline
+  // call_once check. getNonVariableType is called fairly frequently
+  void lazyInitCUDA() {
+    c10::call_once(thc_init, [&] { detail::getCUDAHooks().initCUDA(); });
+  }
+  void lazyInitHIP() {
+    c10::call_once(thh_init, [&] { detail::getHIPHooks().initHIP(); });
+  }
+  void lazyInitXPU() {
+    c10::call_once(thx_init, [&] { detail::getXPUHooks().initXPU(); });
+  }
+  void lazyInitPrivateUse1() {
+    c10::call_once(thp_init, [&] {
+      if (isPrivateUse1HooksRegistered()) {
+        at::GetPrivateUse1HooksInterface()->initPrivateUse1();
+      }
+    });
+  }
+  static const at::cuda::NVRTC& getNVRTC() {
+    return detail::getCUDAHooks().nvrtc();
+  }
+
+  static bool setFlushDenormal(bool on);
+
+  // NB: This method is *purely* whether or not a user requested
+  // that CuDNN was enabled, it doesn't actually say anything about
+  // whether or not CuDNN is actually usable. Use cudnn_is_acceptable
+  // to test this instead
+  bool userEnabledCuDNN() const;
+  void setUserEnabledCuDNN(bool e);
+  bool userEnabledMkldnn() const;
+  void setUserEnabledMkldnn(bool e);
+  bool benchmarkCuDNN() const;
+  void setBenchmarkCuDNN(bool);
+  int benchmarkLimitCuDNN() const;
+  void setBenchmarkLimitCuDNN(int);
+  bool deterministicCuDNN() const;
+  void setDeterministicCuDNN(bool);
+  bool userEnabledNNPACK() const;
+  void setUserEnabledNNPACK(bool e);
+
+  // Note [Disabling Fused SDP Kernels]
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // Flash and Memory Efficient SDP kernels are enabled by default.
+  // However, they can be disabled by setting the
+  // at::globalContext().setUserEnabledFlashSDP(false) flag.
+  // This is useful for debugging purposes. For example, if you want to
+  // compare the performance of the flash SDP kernels with the unfused
+  // kernel, you can disable the flash SDP kernels. By disabling
+  // the math SDP kernel, you can force your code to use flash kernels.
+  // The math SDP kernel can be disabled by setting the
+  // at::globalContext().setUserEnabledMathSDP(false) flag.
+  void setSDPUseFlash(bool);
+  bool userEnabledFlashSDP() const;
+
+  void setSDPUseMemEfficient(bool);
+  bool userEnabledMemEfficientSDP() const;
+
+  void setSDPUseMath(bool);
+  bool userEnabledMathSDP() const;
+
+  void setSDPUseCuDNN(bool);
+  bool userEnabledCuDNNSDP() const;
+
+  at::LinalgBackend linalgPreferredBackend() const;
+  void setLinalgPreferredBackend(at::LinalgBackend);
+
+  // Note [Enabling Deterministic Operations]
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // Operations in PyTorch that normally act nondeterministically, but have an
+  // alternate deterministic implementation, should satisfy the following
+  // requirements:
+  //
+  // * Include this comment: "See Note [Enabling Deterministic Operations]"
+  //
+  // * Check the value of `at::globalContext().deterministicAlgorithms()` to
+  //   toggle between nondeterministic and deterministic implementations.
+  //
+  // * Have an entry in the list of PyTorch operations that toggle between
+  //   nondeterministic and deterministic implementations, in the docstring of
+  //   `use_deterministic_algorithms()` in torch/__init__.py
+  //
+  // `example_func()` below shows an example of toggling between
+  // nondeterministic and deterministic implementations:
+  //
+  //    void example_func() {
+  //      // See Note [Enabling Deterministic Operations]
+  //      if (at::globalContext().deterministicAlgorithms()) {
+  //        example_func_deterministic();
+  //      } else {
+  //        example_func_nondeterministic();
+  //      }
+  //    }
+
+  bool deterministicAlgorithms() const;
+  bool deterministicAlgorithmsWarnOnly() const;
+  void setDeterministicAlgorithms(bool, bool);
+  bool deterministicFillUninitializedMemory() const;
+  void setDeterministicFillUninitializedMemory(bool);
+
+  // Note [Writing Nondeterministic Operations]
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // Operations in PyTorch that act nondeterministically and do not have an
+  // alternate deterministic implementation should satisfy the following
+  // requirements:
+  //
+  // * Include this comment: "See Note [Writing Nondeterministic Operations]"
+  //
+  // * Include a comment explaining why the operation is nondeterministic.
+  //
+  // * Throw an error when `Context::deterministicAlgorithms()` is true. Most
+  //   of the time, this should be accomplished by calling
+  //   `at::globalContext().alertNotDeterministic()`. However, if the
+  //   nondeterministic behavior is caused by the CuBLAS workspace
+  //   configuration in CUDA >= 10.2,
+  //   `at::globalContext().alertCuBLASConfigNotDeterministic()` should be
+  //   called instead (in this case, a comment explaining why the operation is
+  //   nondeterministic is not necessary). See below for details on these
+  //   methods.
+  //
+  // * Have an entry in the list of nondeterministic PyTorch operations in the
+  //   docstring of `use_deterministic_algorithms()` in torch/__init__.py
+  //
+  // * Have a test function in `test/test_torch.py` whose name begins with
+  //   `test_nondeterministic_alert_`. Alternatively, if CuBLAS workspace
+  //   configuration is the reason for nondeterminism, the operation should be
+  //   included in the `test_cublas_config_nondeterministic_alert` test. Any new
+  //   tests should ideally follow a pattern similar to the existing ones.
+  //
+  // `example_func()` below shows an example of the comments and error-throwing
+  // code for a nondeterministic operation:
+  //
+  //    void example_func() {
+  //      // See Note [Writing Nondeterministic Operations]
+  //      // Nondeterministic because <reason>
+  //      at::globalContext().alertNotDeterministic("example_func");
+  //      ...
+  //    }
+
+  // Throws an error if `Context::deterministicAlgorithms()` is true
+  static void alertNotDeterministic(c10::string_view const& caller);
+
+  // Throws an error if `Context::deterministicAlgorithms()` is true, CUDA
+  // >= 10.2, and CUBLAS_WORKSPACE_CONFIG is not set to either ":16:8" or
+  // ":4096:8". For more details:
+  // https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility
+  void alertCuBLASConfigNotDeterministic() const;
+
+  void setFloat32MatmulPrecision(const std::string& s);
+  bool allowTF32CuDNN() const;
+  void setAllowTF32CuDNN(bool);
+  bool allowTF32CuBLAS() const;
+  void setAllowTF32CuBLAS(bool);
+  Float32MatmulPrecision float32MatmulPrecision() const;
+  void setFloat32MatmulPrecision(Float32MatmulPrecision p);
+  bool allowFP16ReductionCuBLAS() const;
+  void setAllowFP16ReductionCuBLAS(bool);
+  bool allowBF16ReductionCuBLAS() const;
+  void setAllowBF16ReductionCuBLAS(bool);
+  at::QEngine qEngine() const;
+  void setQEngine(at::QEngine e);
+  static const std::vector<at::QEngine>& supportedQEngines();
+  static bool isXNNPACKAvailable();
+  void setCheckSparseTensorInvariants(bool e);
+  bool checkSparseTensorInvariants() const;
+  // This method is used to release the original weight after pre-packing.
+  // It should be called once before loading/running the model.
+  // NB: By default it is set to true for mobile builds.
+  void setReleaseWeightsWhenPrepacking(bool e);
+  bool releaseWeightsWhenPrepacking() const;
+
+  void setDisplayVmapFallbackWarnings(bool enabled);
+  bool areVmapFallbackWarningsEnabled() const;
+
+  void setDefaultMobileCPUAllocator();
+  void unsetDefaultMobileCPUAllocator();
+  bool allowFP16ReductionCPU() const;
+  void setAllowFP16ReductionCPU(bool);
+
+ private:
+  void initCUDAIfNeeded(c10::DeviceType p) {
+    if (p == c10::DeviceType::CUDA) {
+      lazyInitCUDA();
+    }
+  }
+  void initHIPIfNeeded(c10::DeviceType p) {
+    if (p == c10::DeviceType::HIP) {
+      lazyInitHIP();
+    }
+  }
+  void initXPUIfNeeded(c10::DeviceType p) {
+    if (p == c10::DeviceType::XPU) {
+      lazyInitXPU();
+    }
+  }
+  static bool checkCuBLASConfigDeterministic();
+  c10::once_flag thc_init;
+  c10::once_flag thh_init;
+  c10::once_flag thx_init;
+  c10::once_flag thp_init;
+  bool enabled_cudnn = true;
+  bool deterministic_cudnn = false;
+  bool _deterministic_algorithms = false;
+  bool _deterministic_algorithms_warn_only = false;
+  bool _deterministic_fill_uninitialized_memory = true;
+  bool enabled_flashSDP = true;
+  bool enabled_mem_efficientSDP = true;
+  bool enabled_mathSDP = true;
+  bool enabled_cudnnSDP = false;
+#ifdef USE_ROCM
+  bool benchmark_cudnn = true;
+#else
+  bool benchmark_cudnn = false;
+#endif
+  Float32MatmulPrecision float32_matmul_precision =
+      c10::utils::check_env("TORCH_ALLOW_TF32_CUBLAS_OVERRIDE") == true
+      ? at::Float32MatmulPrecision::HIGH
+      : at::Float32MatmulPrecision::HIGHEST;
+  int benchmark_limit_cudnn = 10;
+  bool allow_tf32_cudnn = true;
+  bool allow_fp16_reduction_cublas = true;
+  bool allow_bf16_reduction_cublas = true;
+  bool enabled_mkldnn = true;
+  bool enabled_nnpack = true;
+  at::LinalgBackend linalg_preferred_backend =
+      c10::utils::check_env("TORCH_LINALG_PREFER_CUSOLVER") == true
+      ? at::LinalgBackend::Cusolver
+      : at::LinalgBackend::Default;
+#ifdef C10_MOBILE
+  bool release_original_weights = true;
+#else
+  bool release_original_weights = false;
+#endif
+  bool display_vmap_fallback_warnings_ = false;
+  c10::optional<at::QEngine> quantized_engine = c10::nullopt;
+  bool enable_sparse_tensor_invariant_checks = false;
+  bool allow_fp16_reduction_cpu = false;
+
+  Allocator* prev_allocator_ptr_{nullptr};
+};
+
+TORCH_API Context& globalContext();
+
+static inline void init() {
+  globalContext();
+}
+
+TORCH_API Allocator* getCPUAllocator();
+
+static inline DeprecatedTypeProperties& getDeprecatedTypeProperties(
+    Backend p,
+    ScalarType s) {
+  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+      p, s);
+}
+
+static inline DeprecatedTypeProperties& CPU(ScalarType s) {
+  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+      Backend::CPU, s);
+}
+
+static inline DeprecatedTypeProperties& CUDA(ScalarType s) {
+  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+      Backend::CUDA, s);
+}
+
+static inline DeprecatedTypeProperties& HIP(ScalarType s) {
+  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+      Backend::HIP, s);
+}
+
+static inline DeprecatedTypeProperties& MPS(ScalarType s) {
+  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+      Backend::MPS, s);
+}
+
+static inline bool hasCUDA() {
+  return globalContext().hasCUDA();
+}
+
+static inline bool hasMTIA() {
+  return globalContext().hasMTIA();
+}
+
+static inline bool hasHIP() {
+  return globalContext().hasHIP();
+}
+
+static inline bool hasIPU() {
+  return globalContext().hasIPU();
+}
+
+static inline bool hasXLA() {
+  return globalContext().hasXLA();
+}
+
+static inline bool hasMPS() {
+  return globalContext().hasMPS();
+}
+
+static inline bool hasORT() {
+  return globalContext().hasORT();
+}
+
+static inline bool hasXPU() {
+  return globalContext().hasXPU();
+}
+
+// Despite its name, this function returns the number of *CUDA* GPUs.
+static inline size_t getNumGPUs() {
+  // WARNING: DO NOT ADD LOGIC TO HANDLE OTHER DEVICE TYPES TO THIS
+  // FUNCTION. If you are interested in interrogating the number of
+  // devices for a specific device type, add that function to the
+  // relevant library (e.g., similar to at::cuda::device_count())
+  if (hasCUDA() && hasHIP()) {
+    throw std::runtime_error(
+        "Enabling both CUDA and HIP in ATen is not supported, as HIP masquerades "
+        "to be CUDA (e.g., when you say CUDA, on a HIP build of ATen, this actually "
+        "means HIP. Rebuild PyTorch with one or the other disabled.");
+  } else if (hasCUDA()) {
+    return detail::getCUDAHooks().getNumGPUs();
+  } else if (hasHIP()) {
+    return detail::getHIPHooks().getNumGPUs();
+  } else {
+    return 0;
+  }
+}
+
+static inline bool hasOpenMP() {
+  return globalContext().hasOpenMP();
+}
+
+static inline bool hasMKL() {
+  return globalContext().hasMKL();
+}
+
+static inline bool hasLAPACK() {
+  return globalContext().hasLAPACK();
+}
+
+static inline bool hasMAGMA() {
+  return globalContext().hasMAGMA();
+}
+
+static inline bool hasMKLDNN() {
+  return globalContext().hasMKLDNN();
+}
+
+static inline void manual_seed(uint64_t seed) {
+  auto gen = globalContext().defaultGenerator(c10::DeviceType::CPU);
+  {
+    // See Note [Acquire lock when using random generators]
+    std::lock_guard<std::mutex> lock(gen.mutex());
+    gen.set_current_seed(seed);
+  }
+  // NB: Sometimes we build with CUDA, but we don't have any GPUs
+  // available. In that case, we must not seed CUDA; it will fail!
+  const auto cuda_num_gpus = detail::getCUDAHooks().getNumGPUs();
+  if (hasCUDA() && cuda_num_gpus > 0) {
+    for (const auto i : c10::irange(cuda_num_gpus)) {
+      auto cuda_gen = globalContext().defaultGenerator(
+          Device(at::kCUDA, static_cast<c10::DeviceIndex>(i)));
+      {
+        // See Note [Acquire lock when using random generators]
+        std::lock_guard<std::mutex> lock(cuda_gen.mutex());
+        cuda_gen.set_current_seed(seed);
+      }
+    }
+  }
+
+  const auto xpu_num_gpus = detail::getXPUHooks().getNumGPUs();
+  if (hasXPU() && xpu_num_gpus) {
+    for (const auto i : c10::irange(xpu_num_gpus)) {
+      auto xpu_gen = globalContext().defaultGenerator(
+          Device(at::kXPU, static_cast<c10::DeviceIndex>(i)));
+      {
+        // See Note [Acquire lock when using random generators]
+        std::lock_guard<std::mutex> lock(xpu_gen.mutex());
+        xpu_gen.set_current_seed(seed);
+      }
+    }
+  }
+
+  if (hasMPS()) {
+    auto mps_gen = globalContext().defaultGenerator(c10::DeviceType::MPS);
+    // See Note [Acquire lock when using random generators]
+    std::lock_guard<std::mutex> lock(mps_gen.mutex());
+    mps_gen.set_current_seed(seed);
+  }
+}
+
+// When the global flag `allow_tf32` is set to true, cuBLAS handles are
+// automatically configured to use math mode CUBLAS_TF32_TENSOR_OP_MATH.
+// For some operators, such as addmv, TF32 offers no performance improvement
+// but causes precision loss. To help this case, this class implements
+// a RAII guard that can be used to quickly disable TF32 within its scope.
+//
+// Usage:
+//    NoTF32Guard disable_tf32;
+struct TORCH_API NoTF32Guard {
+  NoTF32Guard();
+  ~NoTF32Guard();
+  static bool should_disable_tf32();
+
+ private:
+  bool changed = false;
+};
+
+struct TORCH_API ROCmBackwardPassGuard {
+  ROCmBackwardPassGuard();
+  ~ROCmBackwardPassGuard();
+  static bool is_backward_pass();
+};
+
+} // namespace at
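
Context.h is almost entirely declarations; to make the control surface concrete, here is a short usage sketch driving the global context from C++. Every call below is declared in the header above (at::manual_seed, setDeterministicAlgorithms, setSDPUseFlash, NoTF32Guard); the main() scaffolding is illustrative only:

    #include <ATen/Context.h>

    int main() {
      // Seed the default generator of every available device in one call.
      at::manual_seed(42);

      // Request deterministic kernels; the second argument selects
      // warn-only mode instead of hard errors.
      at::globalContext().setDeterministicAlgorithms(true, /*warn_only=*/false);

      // See Note [Disabling Fused SDP Kernels]: force non-flash SDP paths.
      at::globalContext().setSDPUseFlash(false);

      {
        // RAII guard from this header: TF32 is disabled within this scope.
        at::NoTF32Guard disable_tf32;
        // ... precision-sensitive matmuls here ...
      }
      return 0;
    }
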
venv/lib/python3.10/site-packages/torch/include/ATen/Device.h
ADDED
@@ -0,0 +1,2 @@
+#pragma once
+#include <c10/core/Device.h>
venv/lib/python3.10/site-packages/torch/include/ATen/Dispatch.h
ADDED
@@ -0,0 +1,808 @@
#pragma once

#include <ATen/core/DeprecatedTypeProperties.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/Half.h>
#include <c10/util/Metaprogramming.h>
#include <c10/util/complex.h>
#include <c10/util/string_view.h>

#ifdef __CUDACC__
#include <cuda.h> // For CUDA_VERSION
#endif

#ifdef TEMPLATE_SELECTIVE_BUILD
#include <ATen/selected_mobile_ops.h>
#else
namespace at {
/**
 * The method should_include_kernel_dtype() returns true/false
 * based on whether the switching code for a specific dtype should be
 * included based on build time constants generated from tracing model
 * execution. This method will be implemented via code-generation and
 * included in this file when code-gen is ready.
 */
inline constexpr bool should_include_kernel_dtype(
    const char* /*kernel_tag_str*/,
    at::ScalarType /*scalar_type*/
) {
  return true;
}
} // namespace at
#endif

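// Illustrative sketch (not part of the shipped header): a code-generated
// replacement for the stub above could plausibly look like this, returning
// true only for (kernel tag, dtype) pairs observed while tracing a model.
// The selected dtype set here is hypothetical.
namespace at {
inline constexpr bool should_include_kernel_dtype_example(
    const char* /*kernel_tag_str*/,
    at::ScalarType scalar_type) {
  // Pretend tracing only ever saw float and long for this kernel tag, so
  // the switch cases for every other dtype can be compiled out.
  return scalar_type == at::ScalarType::Float ||
      scalar_type == at::ScalarType::Long;
}
} // namespace at
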
/**
 * In the Facebook internal build (using BUCK), this macro is enabled by
 * passing in -c pt.enable_record_kernel_dtype=1 when building the tracer
 * binary.
 */
#if defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE
namespace at {
namespace detail {
TORCH_API void record_kernel_function_dtype(std::string name);
}
} // namespace at

#define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type) \
  at::detail::record_kernel_function_dtype( \
      std::string(NAME) + "$" + toString(enum_type));
#else
#define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type)
#endif

#define AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type) \
  do { \
    if constexpr (!at::should_include_kernel_dtype( \
                      at_dispatch_name, enum_type)) { \
      AT_ERROR( \
          "dtype '", \
          toString(enum_type), \
          "' not selected for kernel tag ", \
          at_dispatch_name); \
    } \
  } while (0)

#define AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, HINT, ...) \
  case enum_type: { \
    AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
    using HINT C10_UNUSED = c10::impl::ScalarTypeToCPPTypeT<enum_type>; \
    return __VA_ARGS__(); \
  }

#define AT_DISPATCH_CASE(enum_type, ...) \
  AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)

#define AT_DISPATCH_CASE_QINT(enum_type, scalar_type, ...) \
  case enum_type: { \
    AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
    using scalar_t = scalar_type; \
    using underlying_t C10_UNUSED = typename scalar_t::underlying; \
    const auto& SCALAR_TYPE C10_UNUSED = enum_type; \
    const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); \
    return __VA_ARGS__(); \
  }

#define AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
    enum_type, scalar_type, bitwidth, qmin, qmax, ...) \
  case enum_type: { \
    AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
    using scalar_t = scalar_type; \
    using underlying_t C10_UNUSED = typename scalar_t::underlying; \
    const auto& SCALAR_TYPE C10_UNUSED = enum_type; \
    const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); \
    C10_UNUSED int bit_width = bitwidth; \
    C10_UNUSED int64_t quant_min = qmin; \
    C10_UNUSED int64_t quant_max = qmax; \
    return __VA_ARGS__(); \
  }

namespace detail {

inline at::ScalarType scalar_type(at::ScalarType s) {
  return s;
}

C10_DEPRECATED_MESSAGE(
    "passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, "
    "pass an at::ScalarType instead")
inline at::ScalarType scalar_type(const at::DeprecatedTypeProperties& t) {
  return t.scalarType();
}

C10_DEPRECATED_MESSAGE(
    "AT_DISPATCH_ALL_TYPES_AND_HALF is deprecated, "
    "use AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, ...) instead")
inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF() {}

C10_DEPRECATED_MESSAGE(
    "AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX is deprecated, "
    "use AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(at::ScalarType::Half, ...) "
    "instead")
inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {}

} // namespace detail

// The AT_DISPATCH_* family of macros provides the ability to
// conveniently generate specializations of a kernel over all of the
// dtypes we care about in PyTorch. We call it "dispatch" because
// we are "dispatching" to the correct, dtype-specific kernel.
//
// A standard usage looks like:
//
//      AT_DISPATCH_ALL_TYPES(self.scalar_type(), "op_name", [&] {
//          // Your code here, with 'scalar_t' now defined to
//          // be the dtype in question
//      });
//
// There are many variations of this macro, so it's important to
// understand exactly /which/ dtypes you want to get instantiated, as
// well as what the "default" set is.
//
// The default set of dtypes that are instantiated (e.g., by
// AT_DISPATCH_ALL_TYPES) are floating point types (float, double),
// and integral types (int32_t, int64_t, int16_t, int8_t, uint8_t),
// but NOT booleans (bool), half-precision floats (Half) or
// complex numbers (c10::complex<float>, c10::complex<double>).
// This "cut" is somewhat historical (the default types are the
// ones that TH historically supported), but it also reflects the
// fact that the non-default types are "poorly" behaved (booleans
// are NOT integers mod 2, half precision operations ~essentially
// don't exist on CPU, complex numbers are an experimental application).
//
// Here are the questions you should generally ask to decide which
// dispatch you want:
//
// 1. Is this an integral or floating point specific operation?
//    (If so, you'll want one of the FLOATING or INTEGRAL macros.)
//
// 2. Should half be supported? (If you're on CPU, the answer is almost
//    definitely no. If you do want support, use one of the AND_HALF
//    macros)
//
// Much rarer situations:
//
// 3. Should bool be supported? (You often have to write your kernel
//    differently if arithmetic operations are involved.) If so,
//    use AT_DISPATCH_ALL_TYPES_AND along with ScalarType::Bool
//
// 4. Should complex be supported? The answer is almost always no,
//    unless you are working on "generic" code that should work on
//    all dtypes.
//
// Parameters:
// -----------
//
// 1. The NAME argument is a "tag" that is used to trace and then
//    conditionally compile fragments of the case statements such
//    that the kernel functions are specialized only for the dtypes
//    that are needed. The NAME parameter *must* be a build time
//    const char* (can't be std::string, etc...)
//
//    Please ensure that the NAME is unique for every implementation
//    or you run the risk of over-including code for the kernel
//    functions. There is no risk of missing out on any code, so
//    it's mostly a risk of a Type-2 error, and not a Type-1 error.
//
// Switch-like syntax:
// -------------------
// There is also a switch-case like syntax which is useful if a kernel
// needs to be specialized for particular scalar types
//
//      AT_DISPATCH_SWITCH(self.scalar_type(), "op_name",
//          AT_DISPATCH_CASE_INTEGRAL_TYPES([&] {
//            op_integral<scalar_t>(iter);
//          })
//          AT_DISPATCH_CASE_FLOATING_TYPES([&] {
//            op_floating<scalar_t>(iter);
//          })
//          AT_DISPATCH_CASE(kBool, [&] {
//            op_bool(iter);
//          })
//      );
//
// For each AT_DISPATCH_FOO macro, there is a corresponding
// AT_DISPATCH_CASE_FOO macro which can be used inside of an
// AT_DISPATCH_SWITCH block.

// NB: the the_type variable is not used, but we have kept it for
// backwards compatibility. It's probably not used by anyone though;
// but we're just being safe (and it doesn't hurt.) Note we must
// use it to shut up warnings about unused store.

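// Illustrative sketch (not part of the shipped header): a toy CPU kernel
// instantiated for float and double via the lambda style described above.
// The function name and the tag "my_axpy" are invented for this example.
//
//   #include <ATen/ATen.h>
//   #include <ATen/Dispatch.h>
//
//   void my_axpy(at::Tensor& y, const at::Tensor& x, double alpha) {
//     AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "my_axpy", [&] {
//       // Inside the lambda, scalar_t is float or double as appropriate.
//       auto a = static_cast<scalar_t>(alpha);
//       scalar_t* y_ptr = y.data_ptr<scalar_t>();
//       const scalar_t* x_ptr = x.data_ptr<scalar_t>();
//       for (int64_t i = 0; i < x.numel(); i++) {
//         y_ptr[i] += a * x_ptr[i];
//       }
//     });
//   }
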
#define AT_DISPATCH_SWITCH(TYPE, NAME, ...) \
  [&] { \
    const auto& the_type = TYPE; \
    constexpr const char* at_dispatch_name = NAME; \
    /* don't use TYPE again in case it is an expensive or side-effect op */ \
    at::ScalarType _st = ::detail::scalar_type(the_type); \
    RECORD_KERNEL_FUNCTION_DTYPE(at_dispatch_name, _st); \
    switch (_st) { \
      __VA_ARGS__ \
      default: \
        AT_ERROR( \
            '"', \
            at_dispatch_name, \
            "\" not implemented for '", \
            toString(_st), \
            "'"); \
    } \
  }()

#define AT_DISPATCH_CASE_FLOATING_TYPES(...) \
  AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__)

#define AT_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))

#define AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(...) \
  AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__)

#define AT_DISPATCH_FLOATING_TYPES_AND_HALF(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(__VA_ARGS__))

#define AT_DISPATCH_CASE_REDUCED_FLOATING_TYPES(...) \
  AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__)

#define AT_DISPATCH_REDUCED_FLOATING_TYPES(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, NAME, AT_DISPATCH_CASE_REDUCED_FLOATING_TYPES(__VA_ARGS__))

#define AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, ...) \
  AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)

#define AT_DISPATCH_FLOATING_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, __VA_ARGS__))

#define AT_DISPATCH_CASE_FLOATING_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) \
  AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)

#define AT_DISPATCH_FLOATING_TYPES_AND2( \
    SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_FLOATING_TYPES_AND2( \
          SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))

#define AT_DISPATCH_CASE_FLOATING_TYPES_AND3( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
  AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)

#define AT_DISPATCH_FLOATING_TYPES_AND3( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_FLOATING_TYPES_AND3( \
          SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))

#define AT_DISPATCH_CASE_FLOATING_TYPES_AND4( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \
  AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__)

#define AT_DISPATCH_FLOATING_TYPES_AND4( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_FLOATING_TYPES_AND4( \
          SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__))

#define AT_DISPATCH_CASE_COMPLEX_TYPES(...) \
  AT_DISPATCH_CASE(at::ScalarType::ComplexDouble, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::ComplexFloat, __VA_ARGS__)

#define AT_DISPATCH_COMPLEX_TYPES(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__))

#define AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, ...) \
  AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)

#define AT_DISPATCH_COMPLEX_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, __VA_ARGS__))

#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(...) \
  AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__)

#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, NAME, AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__))

#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1(SCALARTYPE, ...) \
  AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)

#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1( \
    SCALARTYPE, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1( \
          SCALARTYPE, __VA_ARGS__))

#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( \
    SCALARTYPE1, SCALARTYPE2, ...) \
  AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)

#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( \
    SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( \
          SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))

#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
  AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)

#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( \
          SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))

#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND4( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \
  AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__)

#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND4( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND4( \
          SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__))

#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND5( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, ...) \
  AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__)

#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND5( \
    SCALARTYPE1, \
    SCALARTYPE2, \
    SCALARTYPE3, \
    SCALARTYPE4, \
    SCALARTYPE5, \
    TYPE, \
    NAME, \
    ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND5( \
          SCALARTYPE1, \
          SCALARTYPE2, \
          SCALARTYPE3, \
          SCALARTYPE4, \
          SCALARTYPE5, \
          __VA_ARGS__))

#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND6( \
    SCALARTYPE1, \
    SCALARTYPE2, \
    SCALARTYPE3, \
    SCALARTYPE4, \
    SCALARTYPE5, \
    SCALARTYPE6, \
    ...) \
  AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__)

#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND6( \
    SCALARTYPE1, \
    SCALARTYPE2, \
    SCALARTYPE3, \
    SCALARTYPE4, \
    SCALARTYPE5, \
    SCALARTYPE6, \
    TYPE, \
    NAME, \
    ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND6( \
          SCALARTYPE1, \
          SCALARTYPE2, \
          SCALARTYPE3, \
          SCALARTYPE4, \
          SCALARTYPE5, \
          SCALARTYPE6, \
          __VA_ARGS__))

#define AT_DISPATCH_CASE_INTEGRAL_TYPES(...) \
  AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__)

#define AT_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__))

#define AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, ...) \
  AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)

#define AT_DISPATCH_INTEGRAL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, __VA_ARGS__))

#define AT_DISPATCH_CASE_ALL_TYPES(...) \
  AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__))

#define AT_DISPATCH_CASE_QINT_TYPES(...) \
  AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) \
  AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__) \
  AT_DISPATCH_CASE_QINT(at::kQInt32, at::qint32, __VA_ARGS__)

#define AT_DISPATCH_QINT_TYPES(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__))

#define AT_DISPATCH_CASE_QINT_TYPES_AND(SCALARTYPE, ...) \
  AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)

#define AT_DISPATCH_QINT_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES_AND(SCALARTYPE, __VA_ARGS__))

#define AT_DISPATCH_CASE_QINT_BYTE_TYPES(...) \
  AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) \
  AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__)

#define AT_DISPATCH_QINT_BYTE_TYPES(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_BYTE_TYPES(__VA_ARGS__))

#define AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(...) \
  AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
      at::kQInt8, at::qint8, CHAR_BIT, SCHAR_MIN, SCHAR_MAX, __VA_ARGS__) \
  AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
      at::kQUInt8, at::quint8, CHAR_BIT, 0, UCHAR_MAX, __VA_ARGS__) \
  AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
      at::kQInt32, \
      at::qint32, \
      CHAR_BIT * sizeof(int), \
      INT_MIN, \
      INT_MAX, \
      __VA_ARGS__) \
  AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
      at::kQUInt4x2, at::quint4x2, 4, 0, 15, __VA_ARGS__) \
  AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
      at::kQUInt2x4, at::quint2x4, 2, 0, 3, __VA_ARGS__)

#define AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, NAME, AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(__VA_ARGS__))

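// Illustrative sketch (not part of the shipped header): dispatching over
// quantized dtypes, including the sub-byte ones, where each case also binds
// bit_width, quant_min and quant_max before invoking the payload. The
// function name and the tag "inspect_qtensor" are invented for this example.
//
//   #include <ATen/ATen.h>
//   #include <ATen/Dispatch.h>
//   #include <iostream>
//
//   void inspect_qtensor_dtype(at::ScalarType t) {
//     AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(t, "inspect_qtensor", [&] {
//       // scalar_t is e.g. at::quint4x2; underlying_t is its storage type.
//       std::cout << "bits=" << bit_width
//                 << " range=[" << quant_min << ", " << quant_max << "]\n";
//     });
//   }
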
#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(...) \
  AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__))

#define AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, ...) \
  AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, __VA_ARGS__))

#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, ...) \
  AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, __VA_ARGS__))

#define AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) \
  AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))

#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( \
    SCALARTYPE1, SCALARTYPE2, ...) \
  AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( \
    SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( \
          SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))

#define AT_DISPATCH_CASE_ALL_TYPES_AND3( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
  AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES_AND3( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_ALL_TYPES_AND3( \
          SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))

#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
  AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( \
          SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))

#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \
  AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \
          SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__))

#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND5( \
    SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, ...) \
  AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND5( \
    SCALARTYPE1, \
    SCALARTYPE2, \
    SCALARTYPE3, \
    SCALARTYPE4, \
    SCALARTYPE5, \
    TYPE, \
    NAME, \
    ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND5( \
          SCALARTYPE1, \
          SCALARTYPE2, \
          SCALARTYPE3, \
          SCALARTYPE4, \
          SCALARTYPE5, \
          __VA_ARGS__))

#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND6( \
    SCALARTYPE1, \
    SCALARTYPE2, \
    SCALARTYPE3, \
    SCALARTYPE4, \
    SCALARTYPE5, \
    SCALARTYPE6, \
    ...) \
  AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND6( \
    SCALARTYPE1, \
    SCALARTYPE2, \
    SCALARTYPE3, \
    SCALARTYPE4, \
    SCALARTYPE5, \
    SCALARTYPE6, \
    TYPE, \
    NAME, \
    ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND6( \
          SCALARTYPE1, \
          SCALARTYPE2, \
          SCALARTYPE3, \
          SCALARTYPE4, \
          SCALARTYPE5, \
          SCALARTYPE6, \
          __VA_ARGS__))

#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND7( \
    SCALARTYPE1, \
    SCALARTYPE2, \
    SCALARTYPE3, \
    SCALARTYPE4, \
    SCALARTYPE5, \
    SCALARTYPE6, \
    SCALARTYPE7, \
    ...) \
  AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE7, __VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND7( \
    SCALARTYPE1, \
    SCALARTYPE2, \
    SCALARTYPE3, \
    SCALARTYPE4, \
    SCALARTYPE5, \
    SCALARTYPE6, \
    SCALARTYPE7, \
    TYPE, \
    NAME, \
    ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND7( \
          SCALARTYPE1, \
          SCALARTYPE2, \
          SCALARTYPE3, \
          SCALARTYPE4, \
          SCALARTYPE5, \
          SCALARTYPE6, \
          SCALARTYPE7, \
          __VA_ARGS__))

#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND8( \
    SCALARTYPE1, \
    SCALARTYPE2, \
    SCALARTYPE3, \
    SCALARTYPE4, \
    SCALARTYPE5, \
    SCALARTYPE6, \
    SCALARTYPE7, \
    SCALARTYPE8, \
    ...) \
  AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE7, __VA_ARGS__) \
  AT_DISPATCH_CASE(SCALARTYPE8, __VA_ARGS__)

#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND8( \
    SCALARTYPE1, \
    SCALARTYPE2, \
    SCALARTYPE3, \
    SCALARTYPE4, \
    SCALARTYPE5, \
    SCALARTYPE6, \
    SCALARTYPE7, \
    SCALARTYPE8, \
    TYPE, \
    NAME, \
    ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND8( \
          SCALARTYPE1, \
          SCALARTYPE2, \
          SCALARTYPE3, \
          SCALARTYPE4, \
          SCALARTYPE5, \
          SCALARTYPE6, \
          SCALARTYPE7, \
          SCALARTYPE8, \
          __VA_ARGS__))

#define AT_DISPATCH_CASE_BIT_TYPES(...) \
  AT_DISPATCH_CASE(at::ScalarType::Bits1x8, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::Bits2x4, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::Bits4x2, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::Bits8, __VA_ARGS__) \
  AT_DISPATCH_CASE(at::ScalarType::Bits16, __VA_ARGS__)

#define AT_DISPATCH_BIT_TYPES(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_BIT_TYPES(__VA_ARGS__))

#define AT_DISPATCH_INDEX_TYPES(TYPE, NAME, ...) \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_PRIVATE_CASE_TYPE_USING_HINT( \
          at::ScalarType::Int, index_t, __VA_ARGS__) \
      AT_PRIVATE_CASE_TYPE_USING_HINT( \
          at::ScalarType::Long, index_t, __VA_ARGS__))

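// Illustrative sketch (not part of the shipped header): index dispatch binds
// the hint name index_t (int32_t or int64_t) instead of scalar_t. The
// function name and the tag "gather_rows" are invented for this example.
//
//   #include <ATen/ATen.h>
//   #include <ATen/Dispatch.h>
//
//   int64_t first_index(const at::Tensor& idx) {
//     int64_t out = -1;
//     AT_DISPATCH_INDEX_TYPES(idx.scalar_type(), "gather_rows", [&] {
//       out = static_cast<int64_t>(idx.data_ptr<index_t>()[0]);
//     });
//     return out;
//   }
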
// ----------------------------------------------------------------------------
// DEPRECATED MACROS, DON'T USE THESE
// ----------------------------------------------------------------------------

#define AT_DISPATCH_ALL_TYPES_AND_HALF(TYPE, NAME, ...) \
  detail::deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF(); \
  AT_DISPATCH_SWITCH( \
      TYPE, \
      NAME, \
      AT_DISPATCH_CASE_ALL_TYPES_AND(at::ScalarType::Half, __VA_ARGS__))
venv/lib/python3.10/site-packages/torch/include/ATen/Dispatch_v2.h
ADDED
@@ -0,0 +1,186 @@
#include <ATen/Dispatch.h>

// This is a new implementation of the AT_DISPATCH macro family from
// ATen/Dispatch.h
//
// The intended usage is:
//
//   ScalarType scalar_type;
//
//   AT_DISPATCH_V2(
//     scalar_type,
//     "debug string",
//     AT_WRAP([&] {
//       ... code to specialize with scalar_t ...
//     }),
//     kHalf,
//     AT_EXPAND(AT_ALL_TYPES),
//     ... as many types arguments as needed ...
//   )
//
// For example, given an old style:
//
//   AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
//     kComplexHalf,
//     kHalf,
//     self.scalar_type(),
//     "_local_scalar_dense_cpu",
//     [&] {
//       scalar_t value = *self.data_ptr<scalar_t>();
//       r = Scalar(value);
//     }
//   )
//
// You now write:
//
//   AT_DISPATCH_V2(
//     self.scalar_type(),
//     "_local_scalar_dense_cpu",
//     AT_WRAP([&] {
//       scalar_t value = *self.data_ptr<scalar_t>();
//       r = Scalar(value);
//     }),
//     AT_EXPAND(AT_ALL_TYPES),
//     AT_EXPAND(AT_COMPLEX_TYPES),
//     kComplexHalf,
//     kHalf,
//   )
//
// Notably, it sports the following improvements:
//
//   - It is not necessary to specify the arity (e.g.,
//     AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND{2,3,4,...})
//     when using the macro
//
//   - It is not necessary to specify each dtype individually; if
//     there is a set of related dtypes and you want to dispatch
//     over all of them, you can simply say, e.g., AT_EXPAND(AT_INTEGRAL_TYPES)
//     in your argument list.
//
// However, you must remember to wrap the payload body in AT_WRAP, or commas
// inside your lambda will be improperly handled. Furthermore, if you add more
// entries to ScalarType than can be supported by this macro, it will fail
// with an obscure error (due to attempting to concatenate AT_AP with
// something that is not a number).
//
// The implementation strategy is to use the count arguments trick
// (e.g., as described in https://stackoverflow.com/a/2124385/23845)
// to discover how many dtypes have been passed, and then dispatch to a
// hand-written macro for each arity that applies as many DISPATCH_CASE as
// necessary. The hand-written macros can be regenerated for other arities
// with the script below.
//
// There is some delicacy in the implementation in controlling when
// macro expansion occurs, mediated with AT_EXPAND and AT_GUARD. I mostly
// relied on GPT4 to help me get it right.

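// Illustrative sketch (not part of the shipped header): a minimal version of
// the argument-counting trick referenced above, for up to 4 arguments. The
// MY_* names are invented for this example. Passing N arguments shifts the
// numeral N into the slot that MY_NUM_ARGS_AUX returns.
//
//   #define MY_NUM_ARGS(...) MY_NUM_ARGS_AUX(__VA_ARGS__, 4, 3, 2, 1, 0)
//   #define MY_NUM_ARGS_AUX(_1, _2, _3, _4, N, ...) N
//
//   static_assert(MY_NUM_ARGS(x) == 1, "");
//   static_assert(MY_NUM_ARGS(x, y, z) == 3, "");
//
// The real implementation additionally routes everything through AT_EXPAND
// so the expansion behaves consistently across preprocessors.
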
// Public API macros

// See documentation above
#define AT_DISPATCH_V2(TYPE, NAME, BODY, ...) \
  AT_DISPATCH_SWITCH(TYPE, NAME, AT_AP_VAR(AT_WRAP(BODY), TYPE, __VA_ARGS__))

// This macro lets you pass an arbitrary expression that may contain internal
// commas to another macro without having the commas causing the expression
// to be interpreted as being multiple arguments
#define AT_WRAP(...) __VA_ARGS__

#define AT_FLOAT8_TYPES \
  c10::kFloat8_e5m2, c10::kFloat8_e5m2fnuz, c10::kFloat8_e4m3fn, \
      c10::kFloat8_e4m3fnuz

#define AT_INTEGRAL_TYPES \
  c10::kByte, c10::kChar, c10::kInt, c10::kLong, c10::kShort
#define AT_FLOATING_TYPES c10::kDouble, c10::kFloat
#define AT_BAREBONES_UNSIGNED_TYPES c10::kUInt16, c10::kUInt32, c10::kUInt64
#define AT_INTEGRAL_TYPES_V2 \
  AT_EXPAND(AT_INTEGRAL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES)
#define AT_COMPLEX_TYPES c10::kComplexDouble, c10::kComplexFloat
#define AT_QINT_TYPES c10::kQInt8, c10::kQUInt8, c10::kQInt32
// NB: not *actually* all types
#define AT_ALL_TYPES AT_EXPAND(AT_INTEGRAL_TYPES), AT_EXPAND(AT_FLOATING_TYPES)
#define AT_ALL_TYPES_AND_COMPLEX \
  AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_COMPLEX_TYPES)

// Helper macros

#define AT_AP_VAR(N, T, ...) \
  AT_EXPAND(AT_CONCAT(AT_AP, AT_NUM_ARGS(__VA_ARGS__))(AT_WRAP(N), __VA_ARGS__))
#define AT_CONCAT(a, b) AT_CONCAT_AUX(a, b)
#define AT_CONCAT_AUX(a, b) a##b
#define AT_EXPAND(X) X

// Ensure we never have too many scalar types for the expansion here to
// support. To bump this, you must regenerate the macros below.
static_assert(static_cast<int>(c10::ScalarType::NumOptions) < 45);

// Python code to regenerate the generated code below:
#if 0

num_args = 45

nums = ', '.join(str(i) for i in reversed(range(num_args+1)))
args = ', '.join(f'_{i}' for i in range(1, num_args+1))

print(f'#define AT_NUM_ARGS(...) AT_EXPAND(AT_NUM_ARGS_AUX(__VA_ARGS__, {nums}))')
print(f'#define AT_NUM_ARGS_AUX({args}, N, ...) N')

for i in range(1, num_args+1):
    args = ', '.join(f'_{i}' for i in range(1, i+1))
    cases = ' '.join([f'AT_DISPATCH_CASE(_{j}, N)' for j in range(1, i+1)])
    print(f'#define AT_AP{i}(N, {args}) {cases}')

#endif

// Begin generated code
// clang-format off

#define AT_NUM_ARGS(...) AT_EXPAND(AT_NUM_ARGS_AUX(__VA_ARGS__, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0))
#define AT_NUM_ARGS_AUX(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, N, ...) N
#define AT_AP1(N, _1) AT_DISPATCH_CASE(_1, N)
#define AT_AP2(N, _1, _2) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N)
#define AT_AP3(N, _1, _2, _3) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N)
#define AT_AP4(N, _1, _2, _3, _4) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N)
#define AT_AP5(N, _1, _2, _3, _4, _5) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N)
#define AT_AP6(N, _1, _2, _3, _4, _5, _6) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N)
#define AT_AP7(N, _1, _2, _3, _4, _5, _6, _7) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N)
#define AT_AP8(N, _1, _2, _3, _4, _5, _6, _7, _8) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N)
#define AT_AP9(N, _1, _2, _3, _4, _5, _6, _7, _8, _9) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N)
#define AT_AP10(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N)
#define AT_AP11(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N)
#define AT_AP12(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N)
#define AT_AP13(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N)
#define AT_AP14(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N)
#define AT_AP15(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N)
#define AT_AP16(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N)
#define AT_AP17(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N)
#define AT_AP18(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N)
#define AT_AP19(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N)
#define AT_AP20(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N)
#define AT_AP21(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N)
#define AT_AP22(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N)
#define AT_AP23(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N)
#define AT_AP24(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N)
#define AT_AP25(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N)
#define AT_AP26(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N)
#define AT_AP27(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N)
#define AT_AP28(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N)
#define AT_AP29(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N)
#define AT_AP30(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N)
#define AT_AP31(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N)
#define AT_AP32(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N)
#define AT_AP33(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N)
#define AT_AP34(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N)
#define AT_AP35(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N)
#define AT_AP36(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N)
#define AT_AP37(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N)
#define AT_AP38(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N)
#define AT_AP39(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N)
#define AT_AP40(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N) AT_DISPATCH_CASE(_40, N)
#define AT_AP41(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N) AT_DISPATCH_CASE(_40, N) AT_DISPATCH_CASE(_41, N)
#define AT_AP42(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N) AT_DISPATCH_CASE(_40, N) AT_DISPATCH_CASE(_41, N) AT_DISPATCH_CASE(_42, N)
#define AT_AP43(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N) AT_DISPATCH_CASE(_40, N) AT_DISPATCH_CASE(_41, N) AT_DISPATCH_CASE(_42, N) AT_DISPATCH_CASE(_43, N)
#define AT_AP44(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N) AT_DISPATCH_CASE(_40, N) AT_DISPATCH_CASE(_41, N) AT_DISPATCH_CASE(_42, N) AT_DISPATCH_CASE(_43, N) AT_DISPATCH_CASE(_44, N)
#define AT_AP45(N, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) AT_DISPATCH_CASE(_4, N) AT_DISPATCH_CASE(_5, N) AT_DISPATCH_CASE(_6, N) AT_DISPATCH_CASE(_7, N) AT_DISPATCH_CASE(_8, N) AT_DISPATCH_CASE(_9, N) AT_DISPATCH_CASE(_10, N) AT_DISPATCH_CASE(_11, N) AT_DISPATCH_CASE(_12, N) AT_DISPATCH_CASE(_13, N) AT_DISPATCH_CASE(_14, N) AT_DISPATCH_CASE(_15, N) AT_DISPATCH_CASE(_16, N) AT_DISPATCH_CASE(_17, N) AT_DISPATCH_CASE(_18, N) AT_DISPATCH_CASE(_19, N) AT_DISPATCH_CASE(_20, N) AT_DISPATCH_CASE(_21, N) AT_DISPATCH_CASE(_22, N) AT_DISPATCH_CASE(_23, N) AT_DISPATCH_CASE(_24, N) AT_DISPATCH_CASE(_25, N) AT_DISPATCH_CASE(_26, N) AT_DISPATCH_CASE(_27, N) AT_DISPATCH_CASE(_28, N) AT_DISPATCH_CASE(_29, N) AT_DISPATCH_CASE(_30, N) AT_DISPATCH_CASE(_31, N) AT_DISPATCH_CASE(_32, N) AT_DISPATCH_CASE(_33, N) AT_DISPATCH_CASE(_34, N) AT_DISPATCH_CASE(_35, N) AT_DISPATCH_CASE(_36, N) AT_DISPATCH_CASE(_37, N) AT_DISPATCH_CASE(_38, N) AT_DISPATCH_CASE(_39, N) AT_DISPATCH_CASE(_40, N) AT_DISPATCH_CASE(_41, N) AT_DISPATCH_CASE(_42, N) AT_DISPATCH_CASE(_43, N) AT_DISPATCH_CASE(_44, N) AT_DISPATCH_CASE(_45, N)
// End generated code
// clang-format on
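
For context on how these generated case lists are consumed: each AT_APn helper fans one body N out across its type arguments, emitting one AT_DISPATCH_CASE per dtype. A minimal usage sketch through the variadic AT_DISPATCH_V2 entry point declared earlier in this header (the fill_impl kernel below is a hypothetical example, not part of the file):

#include <ATen/Dispatch_v2.h>
#include <ATen/core/Tensor.h>

// Hypothetical kernel: fill `self` with `value`, specialized per dtype.
// The macro expands into one AT_DISPATCH_CASE per listed scalar type,
// each instantiating the lambda with `scalar_t` bound to that type.
void fill_impl(at::Tensor& self, double value) {
  AT_DISPATCH_V2(self.scalar_type(), "fill_impl", AT_WRAP([&] {
    auto* data = self.data_ptr<scalar_t>();
    for (int64_t i = 0; i < self.numel(); ++i) {
      data[i] = static_cast<scalar_t>(value);
    }
  }), at::kHalf, at::kFloat, at::kDouble);
}
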
venv/lib/python3.10/site-packages/torch/include/ATen/ExpandBase.h
ADDED
@@ -0,0 +1,30 @@
#include <ATen/core/TensorBase.h>

// Broadcasting utilities for working with TensorBase
namespace at {
namespace internal {
TORCH_API TensorBase expand_slow_path(const TensorBase& self, IntArrayRef size);
} // namespace internal

inline c10::MaybeOwned<TensorBase> expand_size(
    const TensorBase& self,
    IntArrayRef size) {
  if (size.equals(self.sizes())) {
    return c10::MaybeOwned<TensorBase>::borrowed(self);
  }
  return c10::MaybeOwned<TensorBase>::owned(
      at::internal::expand_slow_path(self, size));
}
c10::MaybeOwned<TensorBase> expand_size(TensorBase&& self, IntArrayRef size) =
    delete;

inline c10::MaybeOwned<TensorBase> expand_inplace(
    const TensorBase& tensor,
    const TensorBase& to_expand) {
  return expand_size(to_expand, tensor.sizes());
}
c10::MaybeOwned<TensorBase> expand_inplace(
    const TensorBase& tensor,
    TensorBase&& to_expand) = delete;

} // namespace at
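
As a usage sketch (the add_kernel_ function below is hypothetical): an in-place binary kernel can use expand_inplace to broadcast its second operand up to the shape of the tensor it mutates, and the MaybeOwned return means the common already-matching case borrows rather than allocates:

#include <ATen/ExpandBase.h>
#include <c10/util/Exception.h>

// Hypothetical in-place kernel: broadcast `other` to `self`'s shape.
// expand_inplace returns a borrowed TensorBase when the sizes already
// match, so no new tensor is created in the non-broadcasting case.
void add_kernel_(const at::TensorBase& self, const at::TensorBase& other) {
  c10::MaybeOwned<at::TensorBase> other_expanded =
      at::expand_inplace(self, other);
  // `other_expanded` dereferences like a pointer; its sizes now equal
  // self.sizes(), whether it borrows `other` or owns a new view.
  TORCH_INTERNAL_ASSERT(other_expanded->sizes().equals(self.sizes()));
  // ... elementwise loop over self and *other_expanded ...
}
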
venv/lib/python3.10/site-packages/torch/include/ATen/FuncTorchTLS.h
ADDED
@@ -0,0 +1,46 @@
#pragma once

#include <c10/macros/Macros.h>
#include <memory>

namespace at::functorch {

// NOTE [functorch TLS in pytorch/pytorch]
//
// functorch lives out-of-tree. However, it has some TLS that needs to be
// propagated. The solution for that is we store a pointer to the TLS
// inside pytorch/pytorch and extend FuncTorchTLSBase inside functorch to
// include whatever functorch needs.
//
// We need to store a pointer due to the indirection:
// inside functorch, we will create a subclass of FuncTorchTLSBase called
// FuncTorchTLSImpl that actually contains metadata, like the DynamicLayerStack.
// FuncTorchTLSBase doesn't have any metadata because it hasn't been defined
// yet.
//
// Here in pytorch/pytorch, we will pass around FuncTorchTLSBase*, but inside
// functorch, we will assign a FuncTorchTLSImpl* to the FuncTorchTLSBase*.
// We can't directly pass around FuncTorchTLSBase (without a pointer) because
// FuncTorchTLSImpl does not fit inside a FuncTorchTLSBase by virtue of having
// more elements.
struct TORCH_API FuncTorchTLSBase {
  virtual ~FuncTorchTLSBase() = default;
  virtual std::unique_ptr<FuncTorchTLSBase> deepcopy() const = 0;

  virtual int64_t checkSupportsSingleLevelAutogradFunction() const = 0;
  virtual void checkSupportsCppAutogradFunction() const = 0;
  virtual void checkSupportsInplaceRequiresGrad() const = 0;
  virtual void checkSupportsRetainGrad() const = 0;
};

// returns deepcopy of the functorch tls
TORCH_API std::unique_ptr<FuncTorchTLSBase> getCopyOfFuncTorchTLS();

// sets the functorch tls. always does a deep copy.
TORCH_API void setFuncTorchTLS(
    const std::shared_ptr<const FuncTorchTLSBase>& state);

// get a mutable reference to the functorch tls
TORCH_API std::unique_ptr<FuncTorchTLSBase>& functorchTLSAccessor();

} // namespace at::functorch
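
To make the indirection concrete, here is a minimal sketch of the subclass functorch is expected to define out-of-tree; the MyFuncTorchTLS name and its member are hypothetical stand-ins for functorch's real FuncTorchTLSImpl:

#include <ATen/FuncTorchTLS.h>
#include <memory>
#include <vector>

namespace at::functorch {

// Hypothetical stand-in for functorch's FuncTorchTLSImpl. pytorch/pytorch
// only ever sees this object through a FuncTorchTLSBase pointer.
struct MyFuncTorchTLS : FuncTorchTLSBase {
  std::vector<int64_t> dynamic_layer_stack_;  // placeholder metadata

  std::unique_ptr<FuncTorchTLSBase> deepcopy() const override {
    return std::make_unique<MyFuncTorchTLS>(*this);
  }
  int64_t checkSupportsSingleLevelAutogradFunction() const override {
    return 0;  // placeholder: report "supported"
  }
  void checkSupportsCppAutogradFunction() const override {}
  void checkSupportsInplaceRequiresGrad() const override {}
  void checkSupportsRetainGrad() const override {}
};

} // namespace at::functorch

Core code then round-trips this state through getCopyOfFuncTorchTLS() and setFuncTorchTLS() without ever naming the concrete type.
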
venv/lib/python3.10/site-packages/torch/include/ATen/FunctionalStorageImpl.h
ADDED
@@ -0,0 +1,126 @@
#pragma once

#include <ATen/Tensor.h>

namespace at::functionalization {

// See Note [Functionalization Pass In Core]

// ViewMeta is a class used by the functionalization pass to navigate between
// a base tensor and a view tensor.
// For example, if I call `b = a.view1(...)`
// the functionalization pass will generate and store a ViewMeta on b that looks
// like:
//
// ViewMeta(
//   [<captures>](const Tensor& base, int64_t mutated_view_idx) {
//     return base.view1(...);
//   },
//   [<captures>](const at::Tensor& base, const at::Tensor& mutated_view,
//       int64_t mutated_view_idx) -> at::Tensor {
//     return at::functionalization::impl::view1_inverse(base, mutated_view,
//         ...);
//   }
//
// The forward_fn lambda describes how to replay view1 on a tensor.
//
// The reverse_fn lambda describes how, given a tensor that is already a view,
// how to get the corresponding base tensor. See Note [Functionalization Pass:
// View Inverses] for details.
struct ViewMeta {
  ViewMeta(
      std::function<Tensor(const Tensor&, int64_t)> forward,
      std::function<Tensor(const Tensor&, const Tensor&, int64_t)> reverse,
      bool is_multi_output = false,
      int64_t out_idx = 0)
      : forward_fn(std::move(forward)),
        reverse_fn(std::move(reverse)),
        out_index(out_idx),
        is_multi_output(is_multi_output) {}

  std::function<Tensor(const Tensor&, int64_t)> forward_fn;
  std::function<Tensor(const Tensor&, const Tensor&, int64_t)> reverse_fn;
  // See Note [out_idx in ViewMeta]
  int64_t out_index;

  // Tells us if this is a multi-output view
  bool is_multi_output;

  // Returns a copy of the current ViewMeta, if out_idx matches the current
  // out_index. Otherwise, returns a new ViewMeta with the same forward/reverse
  // functions, but a new out index.
  ViewMeta to_out_idx(int64_t out_idx);
};

// FunctionalStorageImpl is a subclass of StorageImpl used by the
// functionalization pass. It has no underlying data (similar to meta storage).
// It also knows how to reflect mutations to tensors in the absence of a valid
// data pointer.
//
// A storage represents the state shared by (potentially multiple) views of the
// same tensor. For example, in the following code:
//
// b = a.view1(...)
// c = b.view2(...)
// b.add_(1)
// --> storage.add_update(b, {view1_meta})
//
// The call to add_(1) will result in a call to alias.add_update(b,
// {view1_meta}), queueing up the mutation from b onto the alias. Later, suppose
// c is used in an expression (e.g. you try to print c, or pass it to an
// operator). Doing so will involve "syncing" c. First we apply any pending
// updates to the alias, and then we regenerate c by replaying its views off of
// the updated alias. E.g:
//
// print(str(c))
// --> c.sync_()
// --> alias.apply_updates() // after this, the alias will be updated to
//     reflect the mutation to b
struct TORCH_API FunctionalStorageImpl : public c10::StorageImpl {
 public:
  struct Update {
    // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
    const at::Tensor new_val;
    // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
    const std::vector<ViewMeta> view_metas;
  };

  explicit FunctionalStorageImpl(const Tensor& value);

  void add_update(
      const Tensor& updated_val,
      const std::vector<ViewMeta>& view_metas);
  bool apply_updates();
  const Tensor& base() {
    return base_;
  }
  size_t generation() const {
    return generation_;
  }
  void freeze() {
    frozen_ = true;
  }

  ~FunctionalStorageImpl() override = default;

 private:
  // NB: base_ should always point to a tensor BELOW the current
  // functionalization layer. This is mainly to avoid reference cycles. e.g.
  // given `b = a.view(...)`, both a.storage_ and b.storage_ are a
  // FunctionalStorageImpl containing an Alias, which contains a Tensor
  // `base_`. In this case (where a and b are FunctionalTensorWrapper's), base_
  // should point not to a, but to a's unwrapped value, a.value_. See Note
  // [Functionalization: Alias Removal] for a diagram that shows this
  // visually.
  at::Tensor base_;
  std::vector<Update> updates_;
  // generation_ gets incremented every time a mutation is queued onto the
  // alias. It is used to determine if a given tensor is "up to date", or if it
  // needs to be regenerated from the alias.
  size_t generation_ = 0;
  // If frozen, no more mutations are allowed on this storage. Once frozen, a
  // storage cannot be unfrozen.
  bool frozen_ = false;
};

} // namespace at::functionalization
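
As an illustration of the ViewMeta shape (a hypothetical example, not part of the header: the real entries are generated by the functionalization pass), a select along dim 0 could carry a forward lambda that replays the view and a reverse lambda that scatters a mutated view back into its base:

#include <ATen/ATen.h>
#include <ATen/FunctionalStorageImpl.h>

// Hypothetical ViewMeta for `b = a.select(0, idx)`: forward replays the
// view off a (possibly updated) base; reverse writes a mutated view back.
at::functionalization::ViewMeta make_select_meta(int64_t idx) {
  return at::functionalization::ViewMeta(
      [idx](const at::Tensor& base, int64_t /*mutated_view_idx*/) {
        return base.select(0, idx);
      },
      [idx](const at::Tensor& base, const at::Tensor& mutated_view,
            int64_t /*mutated_view_idx*/) -> at::Tensor {
        return at::select_scatter(base, mutated_view, /*dim=*/0, idx);
      });
}
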
venv/lib/python3.10/site-packages/torch/include/ATen/Functions.h
ADDED
@@ -0,0 +1,1427 @@
#pragma once

// @generated by torchgen/gen.py from Functions.h

#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml, \
  meaning the file will need to be re-compiled every time an operator \
  is changed or added. Consider if your change would be better placed in \
  another file, or if a more specific header might achieve the same goal. \
  See NOTE: [Tensor vs. TensorBase]
#endif

#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the \
  file will need to be re-compiled every time an operator is changed or added. \
  Consider including a specific operator from <ATen/ops/{my_operator}.h> and \
  see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif

// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
//
// In ATen, certain generated header files include the definitions of
// every single operator in PyTorch. Unfortunately this means every
// time an operator signature is updated or changed in
// native_functions.yaml, you (and every other PyTorch developer) need
// to recompile every source file that includes any of these headers.
//
// To break up these header dependencies, and to improve incremental
// build times for all PyTorch developers, these headers are split
// into per-operator headers in the `ATen/ops` folder. This limits
// incremental builds to only changes to methods of `Tensor`, or files
// that use the specific operator being changed. With `at::sum` as an
// example, you should include
//
//   <ATen/ops/sum.h>               // instead of ATen/Functions.h
//   <ATen/ops/sum_native.h>        // instead of ATen/NativeFunctions.h
//   <ATen/ops/sum_ops.h>           // instead of ATen/Operators.h
//   <ATen/ops/sum_cpu_dispatch.h>  // instead of ATen/CPUFunctions.h
//
// However, even if you're careful to use this in your own code,
// `Functions.h` might be included indirectly through another header
// without you realising. To avoid this, you can add
//
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//
// to the top of your source file. This way any time the non-specific
// headers are included, the compiler will error out.
//
// Also, be aware that `ops` are not available in all build
// configurations (namely fb-internal) so you must guard these
// includes with `#ifdef AT_PER_OPERATOR_HEADERS`. e.g.
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/sum.h>
//   #endif

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <c10/core/SymInt.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/util/OptionalArrayRef.h>

#include <ATen/ops/from_blob.h>
#include <ATen/ops/tensor.h>

#include <ATen/ops/_adaptive_avg_pool2d.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward.h>
#include <ATen/ops/_adaptive_avg_pool3d.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward.h>
#include <ATen/ops/_add_batch_dim.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/_addmm_activation.h>
#include <ATen/ops/_aminmax.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale.h>
#include <ATen/ops/_amp_update_scale.h>
#include <ATen/ops/_assert_async.h>
#include <ATen/ops/_assert_scalar.h>
#include <ATen/ops/_assert_tensor_metadata.h>
#include <ATen/ops/_autocast_to_full_precision.h>
#include <ATen/ops/_autocast_to_reduced_precision.h>
#include <ATen/ops/_backward.h>
#include <ATen/ops/_batch_norm_impl_index.h>
#include <ATen/ops/_batch_norm_impl_index_backward.h>
#include <ATen/ops/_cast_Byte.h>
#include <ATen/ops/_cast_Char.h>
#include <ATen/ops/_cast_Double.h>
#include <ATen/ops/_cast_Float.h>
#include <ATen/ops/_cast_Half.h>
#include <ATen/ops/_cast_Int.h>
#include <ATen/ops/_cast_Long.h>
#include <ATen/ops/_cast_Short.h>
#include <ATen/ops/_cdist_backward.h>
#include <ATen/ops/_cdist_forward.h>
#include <ATen/ops/_cholesky_solve_helper.h>
#include <ATen/ops/_choose_qparams_per_tensor.h>
#include <ATen/ops/_chunk_cat.h>
#include <ATen/ops/_coalesce.h>
#include <ATen/ops/_coalesced.h>
#include <ATen/ops/_compute_linear_combination.h>
#include <ATen/ops/_conj.h>
#include <ATen/ops/_conj_copy.h>
#include <ATen/ops/_conj_physical.h>
#include <ATen/ops/_conv_depthwise2d.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo.h>
#include <ATen/ops/_convert_weight_to_int4pack.h>
#include <ATen/ops/_convolution.h>
#include <ATen/ops/_convolution_double_backward.h>
#include <ATen/ops/_convolution_mode.h>
#include <ATen/ops/_copy_from.h>
#include <ATen/ops/_copy_from_and_resize.h>
#include <ATen/ops/_cslt_compress.h>
#include <ATen/ops/_cslt_sparse_mm.h>
#include <ATen/ops/_cslt_sparse_mm_search.h>
#include <ATen/ops/_ctc_loss.h>
#include <ATen/ops/_ctc_loss_backward.h>
#include <ATen/ops/_cudnn_ctc_loss.h>
#include <ATen/ops/_cudnn_init_dropout_state.h>
#include <ATen/ops/_cudnn_rnn.h>
#include <ATen/ops/_cudnn_rnn_backward.h>
#include <ATen/ops/_cudnn_rnn_flatten_weight.h>
#include <ATen/ops/_cufft_clear_plan_cache.h>
#include <ATen/ops/_cufft_get_plan_cache_max_size.h>
#include <ATen/ops/_cufft_get_plan_cache_size.h>
#include <ATen/ops/_cufft_set_plan_cache_max_size.h>
#include <ATen/ops/_cummax_helper.h>
#include <ATen/ops/_cummin_helper.h>
#include <ATen/ops/_debug_has_internal_overlap.h>
#include <ATen/ops/_dimI.h>
#include <ATen/ops/_dimV.h>
#include <ATen/ops/_dim_arange.h>
#include <ATen/ops/_dirichlet_grad.h>
#include <ATen/ops/_efficient_attention_backward.h>
#include <ATen/ops/_efficient_attention_forward.h>
#include <ATen/ops/_efficientzerotensor.h>
#include <ATen/ops/_embedding_bag.h>
#include <ATen/ops/_embedding_bag_backward.h>
#include <ATen/ops/_embedding_bag_dense_backward.h>
#include <ATen/ops/_embedding_bag_forward_only.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward.h>
#include <ATen/ops/_embedding_bag_sparse_backward.h>
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_empty_per_channel_affine_quantized.h>
#include <ATen/ops/_euclidean_dist.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h>
#include <ATen/ops/_fft_c2c.h>
#include <ATen/ops/_fft_c2r.h>
#include <ATen/ops/_fft_r2c.h>
#include <ATen/ops/_fill_mem_eff_dropout_mask.h>
#include <ATen/ops/_flash_attention_backward.h>
#include <ATen/ops/_flash_attention_forward.h>
#include <ATen/ops/_foobar.h>
#include <ATen/ops/_foreach_abs.h>
#include <ATen/ops/_foreach_acos.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_asin.h>
#include <ATen/ops/_foreach_atan.h>
#include <ATen/ops/_foreach_ceil.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_copy.h>
#include <ATen/ops/_foreach_cos.h>
#include <ATen/ops/_foreach_cosh.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_erf.h>
#include <ATen/ops/_foreach_erfc.h>
#include <ATen/ops/_foreach_exp.h>
#include <ATen/ops/_foreach_expm1.h>
#include <ATen/ops/_foreach_floor.h>
#include <ATen/ops/_foreach_frac.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lgamma.h>
#include <ATen/ops/_foreach_log.h>
#include <ATen/ops/_foreach_log10.h>
#include <ATen/ops/_foreach_log1p.h>
#include <ATen/ops/_foreach_log2.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_neg.h>
#include <ATen/ops/_foreach_norm.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_reciprocal.h>
#include <ATen/ops/_foreach_round.h>
#include <ATen/ops/_foreach_sigmoid.h>
#include <ATen/ops/_foreach_sign.h>
#include <ATen/ops/_foreach_sin.h>
#include <ATen/ops/_foreach_sinh.h>
#include <ATen/ops/_foreach_sqrt.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_tan.h>
#include <ATen/ops/_foreach_tanh.h>
#include <ATen/ops/_foreach_trunc.h>
#include <ATen/ops/_foreach_zero.h>
#include <ATen/ops/_functional_assert_async.h>
#include <ATen/ops/_functional_assert_scalar.h>
#include <ATen/ops/_functional_sym_constrain_range.h>
#include <ATen/ops/_functional_sym_constrain_range_for_size.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_adamw.h>
#include <ATen/ops/_fused_dropout.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper.h>
#include <ATen/ops/_fused_sdp_choice.h>
#include <ATen/ops/_fused_sgd.h>
#include <ATen/ops/_fw_primal.h>
#include <ATen/ops/_fw_primal_copy.h>
#include <ATen/ops/_gather_sparse_backward.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward.h>
#include <ATen/ops/_has_compatible_shallow_copy_type.h>
#include <ATen/ops/_has_same_storage_numel.h>
#include <ATen/ops/_histogramdd_bin_edges.h>
#include <ATen/ops/_histogramdd_from_bin_cts.h>
#include <ATen/ops/_histogramdd_from_bin_tensors.h>
#include <ATen/ops/_index_put_impl.h>
#include <ATen/ops/_indices.h>
#include <ATen/ops/_indices_copy.h>
#include <ATen/ops/_int_mm.h>
#include <ATen/ops/_is_all_true.h>
#include <ATen/ops/_is_any_true.h>
#include <ATen/ops/_is_zerotensor.h>
#include <ATen/ops/_lazy_clone.h>
#include <ATen/ops/_linalg_check_errors.h>
#include <ATen/ops/_linalg_det.h>
#include <ATen/ops/_linalg_eigh.h>
#include <ATen/ops/_linalg_eigvals.h>
#include <ATen/ops/_linalg_slogdet.h>
#include <ATen/ops/_linalg_solve_ex.h>
#include <ATen/ops/_linalg_svd.h>
#include <ATen/ops/_local_scalar_dense.h>
#include <ATen/ops/_log_softmax.h>
#include <ATen/ops/_log_softmax_backward_data.h>
#include <ATen/ops/_logcumsumexp.h>
#include <ATen/ops/_lstm_mps.h>
#include <ATen/ops/_lu_with_info.h>
#include <ATen/ops/_make_dep_token.h>
#include <ATen/ops/_make_dual.h>
#include <ATen/ops/_make_dual_copy.h>
#include <ATen/ops/_make_per_channel_quantized_tensor.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor.h>
#include <ATen/ops/_masked_scale.h>
#include <ATen/ops/_masked_softmax.h>
#include <ATen/ops/_masked_softmax_backward.h>
#include <ATen/ops/_mixed_dtypes_linear.h>
#include <ATen/ops/_mkldnn_reshape.h>
#include <ATen/ops/_mkldnn_transpose.h>
#include <ATen/ops/_mps_convolution.h>
#include <ATen/ops/_mps_convolution_transpose.h>
#include <ATen/ops/_native_batch_norm_legit.h>
#include <ATen/ops/_native_batch_norm_legit_no_training.h>
#include <ATen/ops/_native_multi_head_attention.h>
#include <ATen/ops/_neg_view.h>
#include <ATen/ops/_neg_view_copy.h>
#include <ATen/ops/_nested_from_padded.h>
#include <ATen/ops/_nested_from_padded_and_nested_example.h>
#include <ATen/ops/_nested_get_jagged_dummy.h>
#include <ATen/ops/_nested_get_lengths.h>
#include <ATen/ops/_nested_get_offsets.h>
#include <ATen/ops/_nested_get_ragged_idx.h>
#include <ATen/ops/_nested_get_values.h>
#include <ATen/ops/_nested_get_values_copy.h>
#include <ATen/ops/_nested_select_backward.h>
#include <ATen/ops/_nested_sum_backward.h>
#include <ATen/ops/_nested_tensor_from_mask.h>
#include <ATen/ops/_nested_tensor_from_mask_left_aligned.h>
#include <ATen/ops/_nested_tensor_from_tensor_list.h>
#include <ATen/ops/_nested_tensor_size.h>
#include <ATen/ops/_nested_tensor_softmax_with_shape.h>
#include <ATen/ops/_nested_tensor_storage_offsets.h>
#include <ATen/ops/_nested_tensor_strides.h>
#include <ATen/ops/_nested_view_from_buffer.h>
#include <ATen/ops/_nested_view_from_buffer_copy.h>
#include <ATen/ops/_nested_view_from_jagged.h>
#include <ATen/ops/_nested_view_from_jagged_copy.h>
#include <ATen/ops/_new_zeros_with_same_feature_meta.h>
#include <ATen/ops/_nnpack_available.h>
#include <ATen/ops/_nnpack_spatial_convolution.h>
#include <ATen/ops/_nnz.h>
#include <ATen/ops/_pack_padded_sequence.h>
#include <ATen/ops/_pack_padded_sequence_backward.h>
#include <ATen/ops/_pad_circular.h>
#include <ATen/ops/_pad_enum.h>
#include <ATen/ops/_pad_packed_sequence.h>
#include <ATen/ops/_pdist_backward.h>
#include <ATen/ops/_pdist_forward.h>
#include <ATen/ops/_pin_memory.h>
#include <ATen/ops/_prelu_kernel.h>
#include <ATen/ops/_prelu_kernel_backward.h>
#include <ATen/ops/_print.h>
#include <ATen/ops/_propagate_xla_data.h>
#include <ATen/ops/_remove_batch_dim.h>
#include <ATen/ops/_reshape_alias.h>
#include <ATen/ops/_reshape_alias_copy.h>
#include <ATen/ops/_reshape_copy.h>
#include <ATen/ops/_reshape_from_tensor.h>
#include <ATen/ops/_resize_output.h>
#include <ATen/ops/_rowwise_prune.h>
#include <ATen/ops/_sample_dirichlet.h>
#include <ATen/ops/_saturate_weight_to_fp16.h>
#include <ATen/ops/_scaled_dot_product_attention_math.h>
#include <ATen/ops/_scaled_dot_product_cudnn_attention.h>
#include <ATen/ops/_scaled_dot_product_efficient_attention.h>
#include <ATen/ops/_scaled_dot_product_efficient_attention_backward.h>
#include <ATen/ops/_scaled_dot_product_flash_attention.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_backward.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward.h>
#include <ATen/ops/_scaled_mm.h>
#include <ATen/ops/_segment_reduce_backward.h>
#include <ATen/ops/_shape_as_tensor.h>
#include <ATen/ops/_slow_conv2d_backward.h>
#include <ATen/ops/_slow_conv2d_forward.h>
#include <ATen/ops/_sobol_engine_draw.h>
#include <ATen/ops/_sobol_engine_ff.h>
#include <ATen/ops/_sobol_engine_initialize_state.h>
#include <ATen/ops/_sobol_engine_scramble.h>
#include <ATen/ops/_softmax.h>
#include <ATen/ops/_softmax_backward_data.h>
#include <ATen/ops/_sparse_addmm.h>
#include <ATen/ops/_sparse_broadcast_to.h>
#include <ATen/ops/_sparse_broadcast_to_copy.h>
#include <ATen/ops/_sparse_bsc_tensor_unsafe.h>
#include <ATen/ops/_sparse_bsr_tensor_unsafe.h>
#include <ATen/ops/_sparse_compressed_tensor_unsafe.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/_sparse_csc_tensor_unsafe.h>
#include <ATen/ops/_sparse_csr_prod.h>
#include <ATen/ops/_sparse_csr_sum.h>
#include <ATen/ops/_sparse_csr_tensor_unsafe.h>
#include <ATen/ops/_sparse_log_softmax.h>
#include <ATen/ops/_sparse_log_softmax_backward_data.h>
#include <ATen/ops/_sparse_mask_projection.h>
#include <ATen/ops/_sparse_mm.h>
#include <ATen/ops/_sparse_mm_reduce_impl.h>
#include <ATen/ops/_sparse_mm_reduce_impl_backward.h>
#include <ATen/ops/_sparse_semi_structured_linear.h>
#include <ATen/ops/_sparse_softmax.h>
#include <ATen/ops/_sparse_softmax_backward_data.h>
#include <ATen/ops/_sparse_sparse_matmul.h>
#include <ATen/ops/_sparse_sum.h>
#include <ATen/ops/_sparse_sum_backward.h>
#include <ATen/ops/_spdiags.h>
#include <ATen/ops/_stack.h>
#include <ATen/ops/_standard_gamma.h>
#include <ATen/ops/_standard_gamma_grad.h>
#include <ATen/ops/_test_ambiguous_defaults.h>
#include <ATen/ops/_test_autograd_multiple_dispatch.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy.h>
#include <ATen/ops/_test_check_tensor.h>
#include <ATen/ops/_test_functorch_fallback.h>
#include <ATen/ops/_test_optional_filled_intlist.h>
#include <ATen/ops/_test_optional_floatlist.h>
#include <ATen/ops/_test_optional_intlist.h>
#include <ATen/ops/_test_parallel_materialize.h>
#include <ATen/ops/_test_serialization_subcmul.h>
#include <ATen/ops/_test_string_default.h>
#include <ATen/ops/_test_warn_in_autograd.h>
#include <ATen/ops/_thnn_differentiable_gru_cell_backward.h>
#include <ATen/ops/_thnn_differentiable_lstm_cell_backward.h>
#include <ATen/ops/_thnn_fused_gru_cell.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward.h>
#include <ATen/ops/_thnn_fused_lstm_cell.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl.h>
#include <ATen/ops/_to_copy.h>
#include <ATen/ops/_to_cpu.h>
#include <ATen/ops/_to_dense.h>
#include <ATen/ops/_to_sparse.h>
#include <ATen/ops/_to_sparse_bsc.h>
#include <ATen/ops/_to_sparse_bsr.h>
#include <ATen/ops/_to_sparse_csc.h>
#include <ATen/ops/_to_sparse_csr.h>
#include <ATen/ops/_to_sparse_semi_structured.h>
#include <ATen/ops/_transform_bias_rescale_qkv.h>
#include <ATen/ops/_transformer_encoder_layer_fwd.h>
#include <ATen/ops/_trilinear.h>
#include <ATen/ops/_triton_multi_head_attention.h>
#include <ATen/ops/_triton_scaled_dot_attention.h>
#include <ATen/ops/_unique.h>
#include <ATen/ops/_unique2.h>
#include <ATen/ops/_unpack_dual.h>
#include <ATen/ops/_unsafe_index.h>
#include <ATen/ops/_unsafe_index_put.h>
#include <ATen/ops/_unsafe_view.h>
#include <ATen/ops/_upsample_bicubic2d_aa.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward.h>
#include <ATen/ops/_upsample_bilinear2d_aa.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward.h>
#include <ATen/ops/_upsample_nearest_exact1d.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward.h>
#include <ATen/ops/_upsample_nearest_exact2d.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward.h>
#include <ATen/ops/_upsample_nearest_exact3d.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward.h>
#include <ATen/ops/_use_cudnn_ctc_loss.h>
#include <ATen/ops/_use_cudnn_rnn_flatten_weight.h>
#include <ATen/ops/_validate_compressed_sparse_indices.h>
#include <ATen/ops/_validate_sparse_bsc_tensor_args.h>
#include <ATen/ops/_validate_sparse_bsr_tensor_args.h>
#include <ATen/ops/_validate_sparse_compressed_tensor_args.h>
#include <ATen/ops/_validate_sparse_coo_tensor_args.h>
#include <ATen/ops/_validate_sparse_csc_tensor_args.h>
#include <ATen/ops/_validate_sparse_csr_tensor_args.h>
#include <ATen/ops/_values.h>
#include <ATen/ops/_values_copy.h>
#include <ATen/ops/_version.h>
#include <ATen/ops/_weight_int4pack_mm.h>
#include <ATen/ops/_weight_int8pack_mm.h>
#include <ATen/ops/_weight_norm.h>
#include <ATen/ops/_weight_norm_differentiable_backward.h>
#include <ATen/ops/_weight_norm_interface.h>
#include <ATen/ops/_weight_norm_interface_backward.h>
#include <ATen/ops/abs.h>
#include <ATen/ops/absolute.h>
#include <ATen/ops/acos.h>
#include <ATen/ops/acosh.h>
#include <ATen/ops/adaptive_avg_pool1d.h>
#include <ATen/ops/adaptive_avg_pool2d.h>
#include <ATen/ops/adaptive_avg_pool3d.h>
#include <ATen/ops/adaptive_avg_pool3d_backward.h>
#include <ATen/ops/adaptive_max_pool1d.h>
#include <ATen/ops/adaptive_max_pool2d.h>
#include <ATen/ops/adaptive_max_pool2d_backward.h>
#include <ATen/ops/adaptive_max_pool3d.h>
#include <ATen/ops/adaptive_max_pool3d_backward.h>
#include <ATen/ops/add.h>
#include <ATen/ops/addbmm.h>
#include <ATen/ops/addcdiv.h>
#include <ATen/ops/addcmul.h>
#include <ATen/ops/addmm.h>
#include <ATen/ops/addmv.h>
#include <ATen/ops/addr.h>
#include <ATen/ops/adjoint.h>
#include <ATen/ops/affine_grid_generator.h>
#include <ATen/ops/affine_grid_generator_backward.h>
#include <ATen/ops/alias.h>
#include <ATen/ops/alias_copy.h>
#include <ATen/ops/align_as.h>
#include <ATen/ops/align_tensors.h>
#include <ATen/ops/align_to.h>
#include <ATen/ops/all.h>
#include <ATen/ops/allclose.h>
#include <ATen/ops/alpha_dropout.h>
#include <ATen/ops/amax.h>
#include <ATen/ops/amin.h>
#include <ATen/ops/aminmax.h>
#include <ATen/ops/and.h>
#include <ATen/ops/angle.h>
#include <ATen/ops/any.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/arccos.h>
#include <ATen/ops/arccosh.h>
#include <ATen/ops/arcsin.h>
#include <ATen/ops/arcsinh.h>
#include <ATen/ops/arctan.h>
#include <ATen/ops/arctan2.h>
#include <ATen/ops/arctanh.h>
#include <ATen/ops/argmax.h>
#include <ATen/ops/argmin.h>
#include <ATen/ops/argsort.h>
#include <ATen/ops/argwhere.h>
#include <ATen/ops/as_strided.h>
#include <ATen/ops/as_strided_copy.h>
#include <ATen/ops/as_strided_scatter.h>
#include <ATen/ops/asin.h>
#include <ATen/ops/asinh.h>
#include <ATen/ops/atan.h>
#include <ATen/ops/atan2.h>
#include <ATen/ops/atanh.h>
#include <ATen/ops/atleast_1d.h>
#include <ATen/ops/atleast_2d.h>
#include <ATen/ops/atleast_3d.h>
#include <ATen/ops/avg_pool1d.h>
#include <ATen/ops/avg_pool2d.h>
#include <ATen/ops/avg_pool2d_backward.h>
#include <ATen/ops/avg_pool3d.h>
#include <ATen/ops/avg_pool3d_backward.h>
#include <ATen/ops/baddbmm.h>
#include <ATen/ops/bartlett_window.h>
#include <ATen/ops/batch_norm.h>
#include <ATen/ops/batch_norm_backward_elemt.h>
#include <ATen/ops/batch_norm_backward_reduce.h>
#include <ATen/ops/batch_norm_elemt.h>
#include <ATen/ops/batch_norm_gather_stats.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts.h>
#include <ATen/ops/batch_norm_stats.h>
#include <ATen/ops/batch_norm_update_stats.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bilinear.h>
#include <ATen/ops/binary_cross_entropy.h>
#include <ATen/ops/binary_cross_entropy_backward.h>
#include <ATen/ops/binary_cross_entropy_with_logits.h>
#include <ATen/ops/bincount.h>
#include <ATen/ops/binomial.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_not.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_right_shift.h>
|
520 |
+
#include <ATen/ops/bitwise_xor.h>
|
521 |
+
#include <ATen/ops/blackman_window.h>
|
522 |
+
#include <ATen/ops/block_diag.h>
|
523 |
+
#include <ATen/ops/bmm.h>
|
524 |
+
#include <ATen/ops/broadcast_tensors.h>
|
525 |
+
#include <ATen/ops/broadcast_to.h>
|
526 |
+
#include <ATen/ops/bucketize.h>
|
527 |
+
#include <ATen/ops/can_cast.h>
|
528 |
+
#include <ATen/ops/cartesian_prod.h>
|
529 |
+
#include <ATen/ops/cat.h>
|
530 |
+
#include <ATen/ops/cauchy.h>
|
531 |
+
#include <ATen/ops/ccol_indices.h>
|
532 |
+
#include <ATen/ops/ccol_indices_copy.h>
|
533 |
+
#include <ATen/ops/cdist.h>
|
534 |
+
#include <ATen/ops/ceil.h>
|
535 |
+
#include <ATen/ops/celu.h>
|
536 |
+
#include <ATen/ops/chain_matmul.h>
|
537 |
+
#include <ATen/ops/chalf.h>
|
538 |
+
#include <ATen/ops/channel_shuffle.h>
|
539 |
+
#include <ATen/ops/cholesky.h>
|
540 |
+
#include <ATen/ops/cholesky_inverse.h>
|
541 |
+
#include <ATen/ops/cholesky_solve.h>
|
542 |
+
#include <ATen/ops/choose_qparams_optimized.h>
|
543 |
+
#include <ATen/ops/chunk.h>
|
544 |
+
#include <ATen/ops/clamp.h>
|
545 |
+
#include <ATen/ops/clamp_max.h>
|
546 |
+
#include <ATen/ops/clamp_min.h>
|
547 |
+
#include <ATen/ops/clip.h>
|
548 |
+
#include <ATen/ops/clone.h>
|
549 |
+
#include <ATen/ops/coalesce.h>
|
550 |
+
#include <ATen/ops/col2im.h>
|
551 |
+
#include <ATen/ops/col_indices.h>
|
552 |
+
#include <ATen/ops/col_indices_copy.h>
|
553 |
+
#include <ATen/ops/column_stack.h>
|
554 |
+
#include <ATen/ops/combinations.h>
|
555 |
+
#include <ATen/ops/complex.h>
|
556 |
+
#include <ATen/ops/concat.h>
|
557 |
+
#include <ATen/ops/concatenate.h>
|
558 |
+
#include <ATen/ops/conj.h>
|
559 |
+
#include <ATen/ops/conj_physical.h>
|
560 |
+
#include <ATen/ops/constant_pad_nd.h>
|
561 |
+
#include <ATen/ops/contiguous.h>
|
562 |
+
#include <ATen/ops/conv1d.h>
|
563 |
+
#include <ATen/ops/conv2d.h>
|
564 |
+
#include <ATen/ops/conv3d.h>
|
565 |
+
#include <ATen/ops/conv_depthwise3d.h>
|
566 |
+
#include <ATen/ops/conv_tbc.h>
|
567 |
+
#include <ATen/ops/conv_tbc_backward.h>
|
568 |
+
#include <ATen/ops/conv_transpose1d.h>
|
569 |
+
#include <ATen/ops/conv_transpose2d.h>
|
570 |
+
#include <ATen/ops/conv_transpose3d.h>
|
571 |
+
#include <ATen/ops/convolution.h>
|
572 |
+
#include <ATen/ops/convolution_backward.h>
|
573 |
+
#include <ATen/ops/convolution_backward_overrideable.h>
|
574 |
+
#include <ATen/ops/convolution_overrideable.h>
|
575 |
+
#include <ATen/ops/copy.h>
|
576 |
+
#include <ATen/ops/copy_sparse_to_sparse.h>
|
577 |
+
#include <ATen/ops/copysign.h>
|
578 |
+
#include <ATen/ops/corrcoef.h>
|
579 |
+
#include <ATen/ops/cos.h>
|
580 |
+
#include <ATen/ops/cosh.h>
|
581 |
+
#include <ATen/ops/cosine_embedding_loss.h>
|
582 |
+
#include <ATen/ops/cosine_similarity.h>
|
583 |
+
#include <ATen/ops/count_nonzero.h>
|
584 |
+
#include <ATen/ops/cov.h>
|
585 |
+
#include <ATen/ops/cross.h>
|
586 |
+
#include <ATen/ops/cross_entropy_loss.h>
|
587 |
+
#include <ATen/ops/crow_indices.h>
|
588 |
+
#include <ATen/ops/crow_indices_copy.h>
|
589 |
+
#include <ATen/ops/ctc_loss.h>
|
590 |
+
#include <ATen/ops/cudnn_affine_grid_generator.h>
|
591 |
+
#include <ATen/ops/cudnn_affine_grid_generator_backward.h>
|
592 |
+
#include <ATen/ops/cudnn_batch_norm.h>
|
593 |
+
#include <ATen/ops/cudnn_batch_norm_backward.h>
|
594 |
+
#include <ATen/ops/cudnn_convolution.h>
|
595 |
+
#include <ATen/ops/cudnn_convolution_add_relu.h>
|
596 |
+
#include <ATen/ops/cudnn_convolution_relu.h>
|
597 |
+
#include <ATen/ops/cudnn_convolution_transpose.h>
|
598 |
+
#include <ATen/ops/cudnn_grid_sampler.h>
|
599 |
+
#include <ATen/ops/cudnn_grid_sampler_backward.h>
|
600 |
+
#include <ATen/ops/cudnn_is_acceptable.h>
|
601 |
+
#include <ATen/ops/cummax.h>
|
602 |
+
#include <ATen/ops/cummaxmin_backward.h>
|
603 |
+
#include <ATen/ops/cummin.h>
|
604 |
+
#include <ATen/ops/cumprod.h>
|
605 |
+
#include <ATen/ops/cumprod_backward.h>
|
606 |
+
#include <ATen/ops/cumsum.h>
|
607 |
+
#include <ATen/ops/cumulative_trapezoid.h>
|
608 |
+
#include <ATen/ops/data.h>
|
609 |
+
#include <ATen/ops/deg2rad.h>
|
610 |
+
#include <ATen/ops/dense_dim.h>
|
611 |
+
#include <ATen/ops/dequantize.h>
|
612 |
+
#include <ATen/ops/det.h>
|
613 |
+
#include <ATen/ops/detach.h>
|
614 |
+
#include <ATen/ops/detach_copy.h>
|
615 |
+
#include <ATen/ops/diag.h>
|
616 |
+
#include <ATen/ops/diag_embed.h>
|
617 |
+
#include <ATen/ops/diagflat.h>
|
618 |
+
#include <ATen/ops/diagonal.h>
|
619 |
+
#include <ATen/ops/diagonal_backward.h>
|
620 |
+
#include <ATen/ops/diagonal_copy.h>
|
621 |
+
#include <ATen/ops/diagonal_scatter.h>
|
622 |
+
#include <ATen/ops/diff.h>
|
623 |
+
#include <ATen/ops/digamma.h>
|
624 |
+
#include <ATen/ops/dist.h>
|
625 |
+
#include <ATen/ops/div.h>
|
626 |
+
#include <ATen/ops/divide.h>
|
627 |
+
#include <ATen/ops/dot.h>
|
628 |
+
#include <ATen/ops/dropout.h>
|
629 |
+
#include <ATen/ops/dsplit.h>
|
630 |
+
#include <ATen/ops/dstack.h>
|
631 |
+
#include <ATen/ops/einsum.h>
|
632 |
+
#include <ATen/ops/elu.h>
|
633 |
+
#include <ATen/ops/elu_backward.h>
|
634 |
+
#include <ATen/ops/embedding.h>
|
635 |
+
#include <ATen/ops/embedding_backward.h>
|
636 |
+
#include <ATen/ops/embedding_bag.h>
|
637 |
+
#include <ATen/ops/embedding_dense_backward.h>
|
638 |
+
#include <ATen/ops/embedding_renorm.h>
|
639 |
+
#include <ATen/ops/embedding_sparse_backward.h>
|
640 |
+
#include <ATen/ops/empty.h>
|
641 |
+
#include <ATen/ops/empty_like.h>
|
642 |
+
#include <ATen/ops/empty_permuted.h>
|
643 |
+
#include <ATen/ops/empty_quantized.h>
|
644 |
+
#include <ATen/ops/empty_strided.h>
|
645 |
+
#include <ATen/ops/eq.h>
|
646 |
+
#include <ATen/ops/equal.h>
|
647 |
+
#include <ATen/ops/erf.h>
|
648 |
+
#include <ATen/ops/erfc.h>
|
649 |
+
#include <ATen/ops/erfinv.h>
|
650 |
+
#include <ATen/ops/exp.h>
|
651 |
+
#include <ATen/ops/exp2.h>
|
652 |
+
#include <ATen/ops/expand.h>
|
653 |
+
#include <ATen/ops/expand_as.h>
|
654 |
+
#include <ATen/ops/expand_copy.h>
|
655 |
+
#include <ATen/ops/expm1.h>
|
656 |
+
#include <ATen/ops/exponential.h>
|
657 |
+
#include <ATen/ops/eye.h>
|
658 |
+
#include <ATen/ops/fake_quantize_per_channel_affine.h>
|
659 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask.h>
|
660 |
+
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward.h>
|
661 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine.h>
|
662 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask.h>
|
663 |
+
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward.h>
|
664 |
+
#include <ATen/ops/fbgemm_linear_fp16_weight.h>
|
665 |
+
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation.h>
|
666 |
+
#include <ATen/ops/fbgemm_linear_int8_weight.h>
|
667 |
+
#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h>
|
668 |
+
#include <ATen/ops/fbgemm_linear_quantize_weight.h>
|
669 |
+
#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16.h>
|
670 |
+
#include <ATen/ops/fbgemm_pack_quantized_matrix.h>
|
671 |
+
#include <ATen/ops/feature_alpha_dropout.h>
|
672 |
+
#include <ATen/ops/feature_dropout.h>
|
673 |
+
#include <ATen/ops/fft_fft.h>
|
674 |
+
#include <ATen/ops/fft_fft2.h>
|
675 |
+
#include <ATen/ops/fft_fftfreq.h>
|
676 |
+
#include <ATen/ops/fft_fftn.h>
|
677 |
+
#include <ATen/ops/fft_fftshift.h>
|
678 |
+
#include <ATen/ops/fft_hfft.h>
|
679 |
+
#include <ATen/ops/fft_hfft2.h>
|
680 |
+
#include <ATen/ops/fft_hfftn.h>
|
681 |
+
#include <ATen/ops/fft_ifft.h>
|
682 |
+
#include <ATen/ops/fft_ifft2.h>
|
683 |
+
#include <ATen/ops/fft_ifftn.h>
|
684 |
+
#include <ATen/ops/fft_ifftshift.h>
|
685 |
+
#include <ATen/ops/fft_ihfft.h>
|
686 |
+
#include <ATen/ops/fft_ihfft2.h>
|
687 |
+
#include <ATen/ops/fft_ihfftn.h>
|
688 |
+
#include <ATen/ops/fft_irfft.h>
|
689 |
+
#include <ATen/ops/fft_irfft2.h>
|
690 |
+
#include <ATen/ops/fft_irfftn.h>
|
691 |
+
#include <ATen/ops/fft_rfft.h>
|
692 |
+
#include <ATen/ops/fft_rfft2.h>
|
693 |
+
#include <ATen/ops/fft_rfftfreq.h>
|
694 |
+
#include <ATen/ops/fft_rfftn.h>
|
695 |
+
#include <ATen/ops/fill.h>
|
696 |
+
#include <ATen/ops/fill_diagonal.h>
|
697 |
+
#include <ATen/ops/fix.h>
|
698 |
+
#include <ATen/ops/flatten.h>
|
699 |
+
#include <ATen/ops/flatten_dense_tensors.h>
|
700 |
+
#include <ATen/ops/flip.h>
|
701 |
+
#include <ATen/ops/fliplr.h>
|
702 |
+
#include <ATen/ops/flipud.h>
|
703 |
+
#include <ATen/ops/float_power.h>
|
704 |
+
#include <ATen/ops/floor.h>
|
705 |
+
#include <ATen/ops/floor_divide.h>
|
706 |
+
#include <ATen/ops/fmax.h>
|
707 |
+
#include <ATen/ops/fmin.h>
|
708 |
+
#include <ATen/ops/fmod.h>
|
709 |
+
#include <ATen/ops/frac.h>
|
710 |
+
#include <ATen/ops/fractional_max_pool2d.h>
|
711 |
+
#include <ATen/ops/fractional_max_pool2d_backward.h>
|
712 |
+
#include <ATen/ops/fractional_max_pool3d.h>
|
713 |
+
#include <ATen/ops/fractional_max_pool3d_backward.h>
|
714 |
+
#include <ATen/ops/frexp.h>
|
715 |
+
#include <ATen/ops/frobenius_norm.h>
|
716 |
+
#include <ATen/ops/from_file.h>
|
717 |
+
#include <ATen/ops/full.h>
|
718 |
+
#include <ATen/ops/full_like.h>
|
719 |
+
#include <ATen/ops/fused_moving_avg_obs_fake_quant.h>
|
720 |
+
#include <ATen/ops/gather.h>
|
721 |
+
#include <ATen/ops/gather_backward.h>
|
722 |
+
#include <ATen/ops/gcd.h>
|
723 |
+
#include <ATen/ops/ge.h>
|
724 |
+
#include <ATen/ops/gelu.h>
|
725 |
+
#include <ATen/ops/gelu_backward.h>
|
726 |
+
#include <ATen/ops/geometric.h>
|
727 |
+
#include <ATen/ops/geqrf.h>
|
728 |
+
#include <ATen/ops/ger.h>
|
729 |
+
#include <ATen/ops/glu.h>
|
730 |
+
#include <ATen/ops/glu_backward.h>
|
731 |
+
#include <ATen/ops/glu_backward_jvp.h>
|
732 |
+
#include <ATen/ops/glu_jvp.h>
|
733 |
+
#include <ATen/ops/gradient.h>
|
734 |
+
#include <ATen/ops/greater.h>
|
735 |
+
#include <ATen/ops/greater_equal.h>
|
736 |
+
#include <ATen/ops/grid_sampler.h>
|
737 |
+
#include <ATen/ops/grid_sampler_2d.h>
|
738 |
+
#include <ATen/ops/grid_sampler_2d_backward.h>
|
739 |
+
#include <ATen/ops/grid_sampler_3d.h>
|
740 |
+
#include <ATen/ops/grid_sampler_3d_backward.h>
|
741 |
+
#include <ATen/ops/group_norm.h>
|
742 |
+
#include <ATen/ops/gru.h>
|
743 |
+
#include <ATen/ops/gru_cell.h>
|
744 |
+
#include <ATen/ops/gt.h>
|
745 |
+
#include <ATen/ops/hamming_window.h>
|
746 |
+
#include <ATen/ops/hann_window.h>
|
747 |
+
#include <ATen/ops/hardshrink.h>
|
748 |
+
#include <ATen/ops/hardshrink_backward.h>
|
749 |
+
#include <ATen/ops/hardsigmoid.h>
|
750 |
+
#include <ATen/ops/hardsigmoid_backward.h>
|
751 |
+
#include <ATen/ops/hardswish.h>
|
752 |
+
#include <ATen/ops/hardswish_backward.h>
|
753 |
+
#include <ATen/ops/hardtanh.h>
|
754 |
+
#include <ATen/ops/hardtanh_backward.h>
|
755 |
+
#include <ATen/ops/heaviside.h>
|
756 |
+
#include <ATen/ops/hinge_embedding_loss.h>
|
757 |
+
#include <ATen/ops/histc.h>
|
758 |
+
#include <ATen/ops/histogram.h>
|
759 |
+
#include <ATen/ops/histogramdd.h>
|
760 |
+
#include <ATen/ops/hsplit.h>
|
761 |
+
#include <ATen/ops/hspmm.h>
|
762 |
+
#include <ATen/ops/hstack.h>
|
763 |
+
#include <ATen/ops/huber_loss.h>
|
764 |
+
#include <ATen/ops/huber_loss_backward.h>
|
765 |
+
#include <ATen/ops/hypot.h>
|
766 |
+
#include <ATen/ops/i0.h>
|
767 |
+
#include <ATen/ops/igamma.h>
|
768 |
+
#include <ATen/ops/igammac.h>
|
769 |
+
#include <ATen/ops/im2col.h>
|
770 |
+
#include <ATen/ops/imag.h>
|
771 |
+
#include <ATen/ops/index.h>
|
772 |
+
#include <ATen/ops/index_add.h>
|
773 |
+
#include <ATen/ops/index_copy.h>
|
774 |
+
#include <ATen/ops/index_fill.h>
|
775 |
+
#include <ATen/ops/index_put.h>
|
776 |
+
#include <ATen/ops/index_reduce.h>
|
777 |
+
#include <ATen/ops/index_select.h>
|
778 |
+
#include <ATen/ops/index_select_backward.h>
|
779 |
+
#include <ATen/ops/indices.h>
|
780 |
+
#include <ATen/ops/indices_copy.h>
|
781 |
+
#include <ATen/ops/infinitely_differentiable_gelu_backward.h>
|
782 |
+
#include <ATen/ops/inner.h>
|
783 |
+
#include <ATen/ops/instance_norm.h>
|
784 |
+
#include <ATen/ops/int_repr.h>
|
785 |
+
#include <ATen/ops/inverse.h>
|
786 |
+
#include <ATen/ops/is_coalesced.h>
|
787 |
+
#include <ATen/ops/is_complex.h>
|
788 |
+
#include <ATen/ops/is_conj.h>
|
789 |
+
#include <ATen/ops/is_distributed.h>
|
790 |
+
#include <ATen/ops/is_floating_point.h>
|
791 |
+
#include <ATen/ops/is_inference.h>
|
792 |
+
#include <ATen/ops/is_leaf.h>
|
793 |
+
#include <ATen/ops/is_neg.h>
|
794 |
+
#include <ATen/ops/is_nonzero.h>
|
795 |
+
#include <ATen/ops/is_pinned.h>
|
796 |
+
#include <ATen/ops/is_same_size.h>
|
797 |
+
#include <ATen/ops/is_set_to.h>
|
798 |
+
#include <ATen/ops/is_signed.h>
|
799 |
+
#include <ATen/ops/is_vulkan_available.h>
|
800 |
+
#include <ATen/ops/isclose.h>
|
801 |
+
#include <ATen/ops/isfinite.h>
|
802 |
+
#include <ATen/ops/isin.h>
|
803 |
+
#include <ATen/ops/isinf.h>
|
804 |
+
#include <ATen/ops/isnan.h>
|
805 |
+
#include <ATen/ops/isneginf.h>
|
806 |
+
#include <ATen/ops/isposinf.h>
|
807 |
+
#include <ATen/ops/isreal.h>
|
808 |
+
#include <ATen/ops/istft.h>
|
809 |
+
#include <ATen/ops/item.h>
|
810 |
+
#include <ATen/ops/kaiser_window.h>
|
811 |
+
#include <ATen/ops/kl_div.h>
|
812 |
+
#include <ATen/ops/kron.h>
|
813 |
+
#include <ATen/ops/kthvalue.h>
|
814 |
+
#include <ATen/ops/l1_loss.h>
|
815 |
+
#include <ATen/ops/layer_norm.h>
|
816 |
+
#include <ATen/ops/lcm.h>
|
817 |
+
#include <ATen/ops/ldexp.h>
|
818 |
+
#include <ATen/ops/le.h>
|
819 |
+
#include <ATen/ops/leaky_relu.h>
|
820 |
+
#include <ATen/ops/leaky_relu_backward.h>
|
821 |
+
#include <ATen/ops/lerp.h>
|
822 |
+
#include <ATen/ops/less.h>
|
823 |
+
#include <ATen/ops/less_equal.h>
|
824 |
+
#include <ATen/ops/lgamma.h>
|
825 |
+
#include <ATen/ops/lift.h>
|
826 |
+
#include <ATen/ops/lift_fresh.h>
|
827 |
+
#include <ATen/ops/lift_fresh_copy.h>
|
828 |
+
#include <ATen/ops/linalg_cholesky.h>
|
829 |
+
#include <ATen/ops/linalg_cholesky_ex.h>
|
830 |
+
#include <ATen/ops/linalg_cond.h>
|
831 |
+
#include <ATen/ops/linalg_cross.h>
|
832 |
+
#include <ATen/ops/linalg_det.h>
|
833 |
+
#include <ATen/ops/linalg_diagonal.h>
|
834 |
+
#include <ATen/ops/linalg_eig.h>
|
835 |
+
#include <ATen/ops/linalg_eigh.h>
|
836 |
+
#include <ATen/ops/linalg_eigvals.h>
|
837 |
+
#include <ATen/ops/linalg_eigvalsh.h>
|
838 |
+
#include <ATen/ops/linalg_householder_product.h>
|
839 |
+
#include <ATen/ops/linalg_inv.h>
|
840 |
+
#include <ATen/ops/linalg_inv_ex.h>
|
841 |
+
#include <ATen/ops/linalg_ldl_factor.h>
|
842 |
+
#include <ATen/ops/linalg_ldl_factor_ex.h>
|
843 |
+
#include <ATen/ops/linalg_ldl_solve.h>
|
844 |
+
#include <ATen/ops/linalg_lstsq.h>
|
845 |
+
#include <ATen/ops/linalg_lu.h>
|
846 |
+
#include <ATen/ops/linalg_lu_factor.h>
|
847 |
+
#include <ATen/ops/linalg_lu_factor_ex.h>
|
848 |
+
#include <ATen/ops/linalg_lu_solve.h>
|
849 |
+
#include <ATen/ops/linalg_matmul.h>
|
850 |
+
#include <ATen/ops/linalg_matrix_exp.h>
|
851 |
+
#include <ATen/ops/linalg_matrix_norm.h>
|
852 |
+
#include <ATen/ops/linalg_matrix_power.h>
|
853 |
+
#include <ATen/ops/linalg_matrix_rank.h>
|
854 |
+
#include <ATen/ops/linalg_multi_dot.h>
|
855 |
+
#include <ATen/ops/linalg_norm.h>
|
856 |
+
#include <ATen/ops/linalg_pinv.h>
|
857 |
+
#include <ATen/ops/linalg_qr.h>
|
858 |
+
#include <ATen/ops/linalg_slogdet.h>
|
859 |
+
#include <ATen/ops/linalg_solve.h>
|
860 |
+
#include <ATen/ops/linalg_solve_ex.h>
|
861 |
+
#include <ATen/ops/linalg_solve_triangular.h>
|
862 |
+
#include <ATen/ops/linalg_svd.h>
|
863 |
+
#include <ATen/ops/linalg_svdvals.h>
|
864 |
+
#include <ATen/ops/linalg_tensorinv.h>
|
865 |
+
#include <ATen/ops/linalg_tensorsolve.h>
|
866 |
+
#include <ATen/ops/linalg_vander.h>
|
867 |
+
#include <ATen/ops/linalg_vecdot.h>
|
868 |
+
#include <ATen/ops/linalg_vector_norm.h>
|
869 |
+
#include <ATen/ops/linear.h>
|
870 |
+
#include <ATen/ops/linear_backward.h>
|
871 |
+
#include <ATen/ops/linspace.h>
|
872 |
+
#include <ATen/ops/log.h>
|
873 |
+
#include <ATen/ops/log10.h>
|
874 |
+
#include <ATen/ops/log1p.h>
|
875 |
+
#include <ATen/ops/log2.h>
|
876 |
+
#include <ATen/ops/log_normal.h>
|
877 |
+
#include <ATen/ops/log_sigmoid.h>
|
878 |
+
#include <ATen/ops/log_sigmoid_backward.h>
|
879 |
+
#include <ATen/ops/log_sigmoid_forward.h>
|
880 |
+
#include <ATen/ops/log_softmax.h>
|
881 |
+
#include <ATen/ops/logaddexp.h>
|
882 |
+
#include <ATen/ops/logaddexp2.h>
|
883 |
+
#include <ATen/ops/logcumsumexp.h>
|
884 |
+
#include <ATen/ops/logdet.h>
|
885 |
+
#include <ATen/ops/logical_and.h>
|
886 |
+
#include <ATen/ops/logical_not.h>
|
887 |
+
#include <ATen/ops/logical_or.h>
|
888 |
+
#include <ATen/ops/logical_xor.h>
|
889 |
+
#include <ATen/ops/logit.h>
|
890 |
+
#include <ATen/ops/logit_backward.h>
|
891 |
+
#include <ATen/ops/logspace.h>
|
892 |
+
#include <ATen/ops/logsumexp.h>
|
893 |
+
#include <ATen/ops/lshift.h>
|
894 |
+
#include <ATen/ops/lstm.h>
|
895 |
+
#include <ATen/ops/lstm_cell.h>
|
896 |
+
#include <ATen/ops/lstm_mps_backward.h>
|
897 |
+
#include <ATen/ops/lt.h>
|
898 |
+
#include <ATen/ops/lu_solve.h>
|
899 |
+
#include <ATen/ops/lu_unpack.h>
|
900 |
+
#include <ATen/ops/mH.h>
|
901 |
+
#include <ATen/ops/mT.h>
|
902 |
+
#include <ATen/ops/margin_ranking_loss.h>
|
903 |
+
#include <ATen/ops/masked_fill.h>
|
904 |
+
#include <ATen/ops/masked_scatter.h>
|
905 |
+
#include <ATen/ops/masked_scatter_backward.h>
|
906 |
+
#include <ATen/ops/masked_select.h>
|
907 |
+
#include <ATen/ops/masked_select_backward.h>
|
908 |
+
#include <ATen/ops/matmul.h>
|
909 |
+
#include <ATen/ops/matmul_backward.h>
|
910 |
+
#include <ATen/ops/matrix_H.h>
|
911 |
+
#include <ATen/ops/matrix_exp.h>
|
912 |
+
#include <ATen/ops/matrix_exp_backward.h>
|
913 |
+
#include <ATen/ops/matrix_power.h>
|
914 |
+
#include <ATen/ops/max.h>
|
915 |
+
#include <ATen/ops/max_pool1d.h>
|
916 |
+
#include <ATen/ops/max_pool1d_with_indices.h>
|
917 |
+
#include <ATen/ops/max_pool2d.h>
|
918 |
+
#include <ATen/ops/max_pool2d_backward.h>
|
919 |
+
#include <ATen/ops/max_pool2d_with_indices.h>
|
920 |
+
#include <ATen/ops/max_pool2d_with_indices_backward.h>
|
921 |
+
#include <ATen/ops/max_pool3d.h>
|
922 |
+
#include <ATen/ops/max_pool3d_with_indices.h>
|
923 |
+
#include <ATen/ops/max_pool3d_with_indices_backward.h>
|
924 |
+
#include <ATen/ops/max_unpool2d.h>
|
925 |
+
#include <ATen/ops/max_unpool3d.h>
|
926 |
+
#include <ATen/ops/maximum.h>
|
927 |
+
#include <ATen/ops/mean.h>
|
928 |
+
#include <ATen/ops/median.h>
|
929 |
+
#include <ATen/ops/meshgrid.h>
|
930 |
+
#include <ATen/ops/min.h>
|
931 |
+
#include <ATen/ops/minimum.h>
|
932 |
+
#include <ATen/ops/miopen_batch_norm.h>
|
933 |
+
#include <ATen/ops/miopen_batch_norm_backward.h>
|
934 |
+
#include <ATen/ops/miopen_convolution.h>
|
935 |
+
#include <ATen/ops/miopen_convolution_add_relu.h>
|
936 |
+
#include <ATen/ops/miopen_convolution_relu.h>
|
937 |
+
#include <ATen/ops/miopen_convolution_transpose.h>
|
938 |
+
#include <ATen/ops/miopen_depthwise_convolution.h>
|
939 |
+
#include <ATen/ops/miopen_rnn.h>
|
940 |
+
#include <ATen/ops/miopen_rnn_backward.h>
|
941 |
+
#include <ATen/ops/mish.h>
|
942 |
+
#include <ATen/ops/mish_backward.h>
|
943 |
+
#include <ATen/ops/mkldnn_adaptive_avg_pool2d.h>
|
944 |
+
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h>
|
945 |
+
#include <ATen/ops/mkldnn_convolution.h>
|
946 |
+
#include <ATen/ops/mkldnn_linear.h>
|
947 |
+
#include <ATen/ops/mkldnn_linear_backward.h>
|
948 |
+
#include <ATen/ops/mkldnn_linear_backward_input.h>
|
949 |
+
#include <ATen/ops/mkldnn_linear_backward_weights.h>
|
950 |
+
#include <ATen/ops/mkldnn_max_pool2d.h>
|
951 |
+
#include <ATen/ops/mkldnn_max_pool2d_backward.h>
|
952 |
+
#include <ATen/ops/mkldnn_max_pool3d.h>
|
953 |
+
#include <ATen/ops/mkldnn_max_pool3d_backward.h>
|
954 |
+
#include <ATen/ops/mkldnn_reorder_conv2d_weight.h>
|
955 |
+
#include <ATen/ops/mkldnn_reorder_conv3d_weight.h>
|
956 |
+
#include <ATen/ops/mkldnn_rnn_layer.h>
|
957 |
+
#include <ATen/ops/mkldnn_rnn_layer_backward.h>
|
958 |
+
#include <ATen/ops/mm.h>
|
959 |
+
#include <ATen/ops/mode.h>
|
960 |
+
#include <ATen/ops/moveaxis.h>
|
961 |
+
#include <ATen/ops/movedim.h>
|
962 |
+
#include <ATen/ops/mps_convolution_backward.h>
|
963 |
+
#include <ATen/ops/mps_convolution_transpose_backward.h>
|
964 |
+
#include <ATen/ops/mse_loss.h>
|
965 |
+
#include <ATen/ops/mse_loss_backward.h>
|
966 |
+
#include <ATen/ops/msort.h>
|
967 |
+
#include <ATen/ops/mul.h>
|
968 |
+
#include <ATen/ops/multi_margin_loss.h>
|
969 |
+
#include <ATen/ops/multi_margin_loss_backward.h>
|
970 |
+
#include <ATen/ops/multilabel_margin_loss.h>
|
971 |
+
#include <ATen/ops/multilabel_margin_loss_backward.h>
|
972 |
+
#include <ATen/ops/multilabel_margin_loss_forward.h>
|
973 |
+
#include <ATen/ops/multinomial.h>
|
974 |
+
#include <ATen/ops/multiply.h>
|
975 |
+
#include <ATen/ops/mv.h>
|
976 |
+
#include <ATen/ops/mvlgamma.h>
|
977 |
+
#include <ATen/ops/nan_to_num.h>
|
978 |
+
#include <ATen/ops/nanmean.h>
|
979 |
+
#include <ATen/ops/nanmedian.h>
|
980 |
+
#include <ATen/ops/nanquantile.h>
|
981 |
+
#include <ATen/ops/nansum.h>
|
982 |
+
#include <ATen/ops/narrow.h>
|
983 |
+
#include <ATen/ops/narrow_copy.h>
|
984 |
+
#include <ATen/ops/native_batch_norm.h>
|
985 |
+
#include <ATen/ops/native_batch_norm_backward.h>
|
986 |
+
#include <ATen/ops/native_channel_shuffle.h>
|
987 |
+
#include <ATen/ops/native_dropout.h>
|
988 |
+
#include <ATen/ops/native_dropout_backward.h>
|
989 |
+
#include <ATen/ops/native_group_norm.h>
|
990 |
+
#include <ATen/ops/native_group_norm_backward.h>
|
991 |
+
#include <ATen/ops/native_layer_norm.h>
|
992 |
+
#include <ATen/ops/native_layer_norm_backward.h>
|
993 |
+
#include <ATen/ops/native_norm.h>
|
994 |
+
#include <ATen/ops/ne.h>
|
995 |
+
#include <ATen/ops/neg.h>
|
996 |
+
#include <ATen/ops/negative.h>
|
997 |
+
#include <ATen/ops/nested_to_padded_tensor.h>
|
998 |
+
#include <ATen/ops/new_empty.h>
|
999 |
+
#include <ATen/ops/new_empty_strided.h>
|
1000 |
+
#include <ATen/ops/new_full.h>
|
1001 |
+
#include <ATen/ops/new_ones.h>
|
1002 |
+
#include <ATen/ops/new_zeros.h>
|
1003 |
+
#include <ATen/ops/nextafter.h>
|
1004 |
+
#include <ATen/ops/nll_loss.h>
|
1005 |
+
#include <ATen/ops/nll_loss2d.h>
|
1006 |
+
#include <ATen/ops/nll_loss2d_backward.h>
|
1007 |
+
#include <ATen/ops/nll_loss2d_forward.h>
|
1008 |
+
#include <ATen/ops/nll_loss_backward.h>
|
1009 |
+
#include <ATen/ops/nll_loss_forward.h>
|
1010 |
+
#include <ATen/ops/nll_loss_nd.h>
|
1011 |
+
#include <ATen/ops/nonzero.h>
|
1012 |
+
#include <ATen/ops/nonzero_numpy.h>
|
1013 |
+
#include <ATen/ops/nonzero_static.h>
|
1014 |
+
#include <ATen/ops/norm.h>
|
1015 |
+
#include <ATen/ops/norm_except_dim.h>
|
1016 |
+
#include <ATen/ops/normal.h>
|
1017 |
+
#include <ATen/ops/not_equal.h>
|
1018 |
+
#include <ATen/ops/nuclear_norm.h>
|
1019 |
+
#include <ATen/ops/numpy_T.h>
|
1020 |
+
#include <ATen/ops/one_hot.h>
|
1021 |
+
#include <ATen/ops/ones.h>
|
1022 |
+
#include <ATen/ops/ones_like.h>
|
1023 |
+
#include <ATen/ops/or.h>
|
1024 |
+
#include <ATen/ops/orgqr.h>
|
1025 |
+
#include <ATen/ops/ormqr.h>
|
1026 |
+
#include <ATen/ops/outer.h>
|
1027 |
+
#include <ATen/ops/output_nr.h>
|
1028 |
+
#include <ATen/ops/pad.h>
|
1029 |
+
#include <ATen/ops/pad_sequence.h>
|
1030 |
+
#include <ATen/ops/pairwise_distance.h>
|
1031 |
+
#include <ATen/ops/pdist.h>
|
1032 |
+
#include <ATen/ops/permute.h>
|
1033 |
+
#include <ATen/ops/permute_copy.h>
|
1034 |
+
#include <ATen/ops/pin_memory.h>
|
1035 |
+
#include <ATen/ops/pinverse.h>
|
1036 |
+
#include <ATen/ops/pixel_shuffle.h>
|
1037 |
+
#include <ATen/ops/pixel_unshuffle.h>
|
1038 |
+
#include <ATen/ops/poisson.h>
|
1039 |
+
#include <ATen/ops/poisson_nll_loss.h>
|
1040 |
+
#include <ATen/ops/polar.h>
|
1041 |
+
#include <ATen/ops/polygamma.h>
|
1042 |
+
#include <ATen/ops/positive.h>
|
1043 |
+
#include <ATen/ops/pow.h>
|
1044 |
+
#include <ATen/ops/prelu.h>
|
1045 |
+
#include <ATen/ops/prod.h>
|
1046 |
+
#include <ATen/ops/promote_types.h>
|
1047 |
+
#include <ATen/ops/put.h>
|
1048 |
+
#include <ATen/ops/q_per_channel_axis.h>
|
1049 |
+
#include <ATen/ops/q_per_channel_scales.h>
|
1050 |
+
#include <ATen/ops/q_per_channel_zero_points.h>
|
1051 |
+
#include <ATen/ops/q_scale.h>
|
1052 |
+
#include <ATen/ops/q_zero_point.h>
|
1053 |
+
#include <ATen/ops/qr.h>
|
1054 |
+
#include <ATen/ops/qscheme.h>
|
1055 |
+
#include <ATen/ops/quantile.h>
|
1056 |
+
#include <ATen/ops/quantize_per_channel.h>
|
1057 |
+
#include <ATen/ops/quantize_per_tensor.h>
|
1058 |
+
#include <ATen/ops/quantize_per_tensor_dynamic.h>
|
1059 |
+
#include <ATen/ops/quantized_batch_norm.h>
|
1060 |
+
#include <ATen/ops/quantized_gru_cell.h>
|
1061 |
+
#include <ATen/ops/quantized_lstm_cell.h>
|
1062 |
+
#include <ATen/ops/quantized_max_pool1d.h>
|
1063 |
+
#include <ATen/ops/quantized_max_pool2d.h>
|
1064 |
+
#include <ATen/ops/quantized_max_pool3d.h>
|
1065 |
+
#include <ATen/ops/quantized_rnn_relu_cell.h>
|
1066 |
+
#include <ATen/ops/quantized_rnn_tanh_cell.h>
|
1067 |
+
#include <ATen/ops/rad2deg.h>
|
1068 |
+
#include <ATen/ops/rand.h>
|
1069 |
+
#include <ATen/ops/rand_like.h>
|
1070 |
+
#include <ATen/ops/randint.h>
|
1071 |
+
#include <ATen/ops/randint_like.h>
|
1072 |
+
#include <ATen/ops/randn.h>
|
1073 |
+
#include <ATen/ops/randn_like.h>
|
1074 |
+
#include <ATen/ops/random.h>
|
1075 |
+
#include <ATen/ops/randperm.h>
|
1076 |
+
#include <ATen/ops/range.h>
|
1077 |
+
#include <ATen/ops/ravel.h>
|
1078 |
+
#include <ATen/ops/real.h>
|
1079 |
+
#include <ATen/ops/reciprocal.h>
|
1080 |
+
#include <ATen/ops/record_stream.h>
|
1081 |
+
#include <ATen/ops/refine_names.h>
|
1082 |
+
#include <ATen/ops/reflection_pad1d.h>
|
1083 |
+
#include <ATen/ops/reflection_pad1d_backward.h>
|
1084 |
+
#include <ATen/ops/reflection_pad2d.h>
|
1085 |
+
#include <ATen/ops/reflection_pad2d_backward.h>
|
1086 |
+
#include <ATen/ops/reflection_pad3d.h>
|
1087 |
+
#include <ATen/ops/reflection_pad3d_backward.h>
|
1088 |
+
#include <ATen/ops/relu.h>
|
1089 |
+
#include <ATen/ops/relu6.h>
|
1090 |
+
#include <ATen/ops/remainder.h>
|
1091 |
+
#include <ATen/ops/rename.h>
|
1092 |
+
#include <ATen/ops/renorm.h>
|
1093 |
+
#include <ATen/ops/repeat.h>
|
1094 |
+
#include <ATen/ops/repeat_interleave.h>
|
1095 |
+
#include <ATen/ops/replication_pad1d.h>
|
1096 |
+
#include <ATen/ops/replication_pad1d_backward.h>
|
1097 |
+
#include <ATen/ops/replication_pad2d.h>
|
1098 |
+
#include <ATen/ops/replication_pad2d_backward.h>
|
1099 |
+
#include <ATen/ops/replication_pad3d.h>
|
1100 |
+
#include <ATen/ops/replication_pad3d_backward.h>
|
1101 |
+
#include <ATen/ops/requires_grad.h>
|
1102 |
+
#include <ATen/ops/reshape.h>
|
1103 |
+
#include <ATen/ops/reshape_as.h>
|
1104 |
+
#include <ATen/ops/resize.h>
|
1105 |
+
#include <ATen/ops/resize_as.h>
|
1106 |
+
#include <ATen/ops/resize_as_sparse.h>
|
1107 |
+
#include <ATen/ops/resolve_conj.h>
|
1108 |
+
#include <ATen/ops/resolve_neg.h>
|
1109 |
+
#include <ATen/ops/result_type.h>
|
1110 |
+
#include <ATen/ops/retain_grad.h>
|
1111 |
+
#include <ATen/ops/retains_grad.h>
|
1112 |
+
#include <ATen/ops/rnn_relu.h>
|
1113 |
+
#include <ATen/ops/rnn_relu_cell.h>
|
1114 |
+
#include <ATen/ops/rnn_tanh.h>
|
1115 |
+
#include <ATen/ops/rnn_tanh_cell.h>
|
1116 |
+
#include <ATen/ops/roll.h>
|
1117 |
+
#include <ATen/ops/rot90.h>
|
1118 |
+
#include <ATen/ops/round.h>
|
1119 |
+
#include <ATen/ops/row_indices.h>
|
1120 |
+
#include <ATen/ops/row_indices_copy.h>
|
1121 |
+
#include <ATen/ops/row_stack.h>
|
1122 |
+
#include <ATen/ops/rrelu.h>
|
1123 |
+
#include <ATen/ops/rrelu_with_noise.h>
|
1124 |
+
#include <ATen/ops/rrelu_with_noise_backward.h>
|
1125 |
+
#include <ATen/ops/rshift.h>
|
1126 |
+
#include <ATen/ops/rsqrt.h>
|
1127 |
+
#include <ATen/ops/rsub.h>
|
1128 |
+
#include <ATen/ops/scalar_tensor.h>
|
1129 |
+
#include <ATen/ops/scaled_dot_product_attention.h>
|
1130 |
+
#include <ATen/ops/scatter.h>
|
1131 |
+
#include <ATen/ops/scatter_add.h>
|
1132 |
+
#include <ATen/ops/scatter_reduce.h>
|
1133 |
+
#include <ATen/ops/searchsorted.h>
|
1134 |
+
#include <ATen/ops/segment_reduce.h>
|
1135 |
+
#include <ATen/ops/select.h>
|
1136 |
+
#include <ATen/ops/select_backward.h>
|
1137 |
+
#include <ATen/ops/select_copy.h>
|
1138 |
+
#include <ATen/ops/select_scatter.h>
|
1139 |
+
#include <ATen/ops/selu.h>
|
1140 |
+
#include <ATen/ops/set.h>
|
1141 |
+
#include <ATen/ops/set_data.h>
|
1142 |
+
#include <ATen/ops/sgn.h>
|
1143 |
+
#include <ATen/ops/sigmoid.h>
|
1144 |
+
#include <ATen/ops/sigmoid_backward.h>
|
1145 |
+
#include <ATen/ops/sign.h>
|
1146 |
+
#include <ATen/ops/signbit.h>
|
1147 |
+
#include <ATen/ops/silu.h>
|
1148 |
+
#include <ATen/ops/silu_backward.h>
|
1149 |
+
#include <ATen/ops/sin.h>
|
1150 |
+
#include <ATen/ops/sinc.h>
|
1151 |
+
#include <ATen/ops/sinh.h>
|
1152 |
+
#include <ATen/ops/size.h>
|
1153 |
+
#include <ATen/ops/slice.h>
|
1154 |
+
#include <ATen/ops/slice_backward.h>
|
1155 |
+
#include <ATen/ops/slice_copy.h>
|
1156 |
+
#include <ATen/ops/slice_inverse.h>
|
1157 |
+
#include <ATen/ops/slice_scatter.h>
|
1158 |
+
#include <ATen/ops/slogdet.h>
|
1159 |
+
#include <ATen/ops/slow_conv3d.h>
|
1160 |
+
#include <ATen/ops/slow_conv3d_forward.h>
|
1161 |
+
#include <ATen/ops/slow_conv_dilated2d.h>
|
1162 |
+
#include <ATen/ops/slow_conv_dilated3d.h>
|
1163 |
+
#include <ATen/ops/slow_conv_transpose2d.h>
|
1164 |
+
#include <ATen/ops/slow_conv_transpose3d.h>
|
1165 |
+
#include <ATen/ops/smm.h>
|
1166 |
+
#include <ATen/ops/smooth_l1_loss.h>
|
1167 |
+
#include <ATen/ops/smooth_l1_loss_backward.h>
|
1168 |
+
#include <ATen/ops/soft_margin_loss.h>
|
1169 |
+
#include <ATen/ops/soft_margin_loss_backward.h>
|
1170 |
+
#include <ATen/ops/softmax.h>
|
1171 |
+
#include <ATen/ops/softplus.h>
|
1172 |
+
#include <ATen/ops/softplus_backward.h>
|
1173 |
+
#include <ATen/ops/softshrink.h>
|
1174 |
+
#include <ATen/ops/softshrink_backward.h>
|
1175 |
+
#include <ATen/ops/sort.h>
|
1176 |
+
#include <ATen/ops/sparse_bsc_tensor.h>
|
1177 |
+
#include <ATen/ops/sparse_bsr_tensor.h>
|
1178 |
+
#include <ATen/ops/sparse_compressed_tensor.h>
|
1179 |
+
#include <ATen/ops/sparse_coo_tensor.h>
|
1180 |
+
#include <ATen/ops/sparse_csc_tensor.h>
|
1181 |
+
#include <ATen/ops/sparse_csr_tensor.h>
|
1182 |
+
#include <ATen/ops/sparse_dim.h>
|
1183 |
+
#include <ATen/ops/sparse_mask.h>
|
1184 |
+
#include <ATen/ops/sparse_resize.h>
|
1185 |
+
#include <ATen/ops/sparse_resize_and_clear.h>
|
1186 |
+
#include <ATen/ops/sparse_sampled_addmm.h>
|
1187 |
+
#include <ATen/ops/special_airy_ai.h>
|
1188 |
+
#include <ATen/ops/special_bessel_j0.h>
|
1189 |
+
#include <ATen/ops/special_bessel_j1.h>
|
1190 |
+
#include <ATen/ops/special_bessel_y0.h>
|
1191 |
+
#include <ATen/ops/special_bessel_y1.h>
|
1192 |
+
#include <ATen/ops/special_chebyshev_polynomial_t.h>
|
1193 |
+
#include <ATen/ops/special_chebyshev_polynomial_u.h>
|
1194 |
+
#include <ATen/ops/special_chebyshev_polynomial_v.h>
|
1195 |
+
#include <ATen/ops/special_chebyshev_polynomial_w.h>
|
1196 |
+
#include <ATen/ops/special_digamma.h>
|
1197 |
+
#include <ATen/ops/special_entr.h>
|
1198 |
+
#include <ATen/ops/special_erf.h>
|
1199 |
+
#include <ATen/ops/special_erfc.h>
|
1200 |
+
#include <ATen/ops/special_erfcx.h>
|
1201 |
+
#include <ATen/ops/special_erfinv.h>
|
1202 |
+
#include <ATen/ops/special_exp2.h>
|
1203 |
+
#include <ATen/ops/special_expit.h>
|
1204 |
+
#include <ATen/ops/special_expm1.h>
|
1205 |
+
#include <ATen/ops/special_gammainc.h>
|
1206 |
+
#include <ATen/ops/special_gammaincc.h>
|
1207 |
+
#include <ATen/ops/special_gammaln.h>
|
1208 |
+
#include <ATen/ops/special_hermite_polynomial_h.h>
|
1209 |
+
#include <ATen/ops/special_hermite_polynomial_he.h>
|
1210 |
+
#include <ATen/ops/special_i0.h>
|
1211 |
+
#include <ATen/ops/special_i0e.h>
|
1212 |
+
#include <ATen/ops/special_i1.h>
|
1213 |
+
#include <ATen/ops/special_i1e.h>
|
1214 |
+
#include <ATen/ops/special_laguerre_polynomial_l.h>
|
1215 |
+
#include <ATen/ops/special_legendre_polynomial_p.h>
|
1216 |
+
#include <ATen/ops/special_log1p.h>
|
1217 |
+
#include <ATen/ops/special_log_ndtr.h>
|
1218 |
+
#include <ATen/ops/special_log_softmax.h>
|
1219 |
+
#include <ATen/ops/special_logit.h>
|
1220 |
+
#include <ATen/ops/special_logsumexp.h>
|
1221 |
+
#include <ATen/ops/special_modified_bessel_i0.h>
|
1222 |
+
#include <ATen/ops/special_modified_bessel_i1.h>
|
1223 |
+
#include <ATen/ops/special_modified_bessel_k0.h>
|
1224 |
+
#include <ATen/ops/special_modified_bessel_k1.h>
|
1225 |
+
#include <ATen/ops/special_multigammaln.h>
|
1226 |
+
#include <ATen/ops/special_ndtr.h>
|
1227 |
+
#include <ATen/ops/special_ndtri.h>
|
1228 |
+
#include <ATen/ops/special_polygamma.h>
|
1229 |
+
#include <ATen/ops/special_psi.h>
|
1230 |
+
#include <ATen/ops/special_round.h>
|
1231 |
+
#include <ATen/ops/special_scaled_modified_bessel_k0.h>
|
1232 |
+
#include <ATen/ops/special_scaled_modified_bessel_k1.h>
|
1233 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
|
1234 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
|
1235 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
|
1236 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
|
1237 |
+
#include <ATen/ops/special_sinc.h>
|
1238 |
+
#include <ATen/ops/special_softmax.h>
|
1239 |
+
#include <ATen/ops/special_spherical_bessel_j0.h>
|
1240 |
+
#include <ATen/ops/special_xlog1py.h>
|
1241 |
+
#include <ATen/ops/special_xlogy.h>
|
1242 |
+
#include <ATen/ops/special_zeta.h>
|
1243 |
+
#include <ATen/ops/split.h>
|
1244 |
+
#include <ATen/ops/split_copy.h>
|
1245 |
+
#include <ATen/ops/split_with_sizes.h>
|
1246 |
+
#include <ATen/ops/split_with_sizes_copy.h>
|
1247 |
+
#include <ATen/ops/sqrt.h>
|
1248 |
+
#include <ATen/ops/square.h>
|
1249 |
+
#include <ATen/ops/squeeze.h>
|
1250 |
+
#include <ATen/ops/squeeze_copy.h>
|
1251 |
+
#include <ATen/ops/sspaddmm.h>
|
1252 |
+
#include <ATen/ops/stack.h>
|
1253 |
+
#include <ATen/ops/std.h>
|
1254 |
+
#include <ATen/ops/std_mean.h>
|
1255 |
+
#include <ATen/ops/stft.h>
|
1256 |
+
#include <ATen/ops/stride.h>
|
1257 |
+
#include <ATen/ops/sub.h>
|
1258 |
+
#include <ATen/ops/subtract.h>
|
1259 |
+
#include <ATen/ops/sum.h>
|
1260 |
+
#include <ATen/ops/sum_to_size.h>
|
1261 |
+
#include <ATen/ops/svd.h>
|
1262 |
+
#include <ATen/ops/swapaxes.h>
|
1263 |
+
#include <ATen/ops/swapdims.h>
|
1264 |
+
#include <ATen/ops/sym_constrain_range.h>
|
1265 |
+
#include <ATen/ops/sym_constrain_range_for_size.h>
|
1266 |
+
#include <ATen/ops/sym_numel.h>
|
1267 |
+
#include <ATen/ops/sym_size.h>
|
1268 |
+
#include <ATen/ops/sym_storage_offset.h>
|
1269 |
+
#include <ATen/ops/sym_stride.h>
|
1270 |
+
#include <ATen/ops/t.h>
|
1271 |
+
#include <ATen/ops/t_copy.h>
|
1272 |
+
#include <ATen/ops/take.h>
|
1273 |
+
#include <ATen/ops/take_along_dim.h>
|
1274 |
+
#include <ATen/ops/tan.h>
|
1275 |
+
#include <ATen/ops/tanh.h>
|
1276 |
+
#include <ATen/ops/tanh_backward.h>
|
1277 |
+
#include <ATen/ops/tensor_split.h>
|
1278 |
+
#include <ATen/ops/tensordot.h>
|
1279 |
+
#include <ATen/ops/thnn_conv2d.h>
|
1280 |
+
#include <ATen/ops/threshold.h>
|
1281 |
+
#include <ATen/ops/threshold_backward.h>
|
1282 |
+
#include <ATen/ops/tile.h>
|
1283 |
+
#include <ATen/ops/to.h>
|
1284 |
+
#include <ATen/ops/to_dense.h>
|
1285 |
+
#include <ATen/ops/to_dense_backward.h>
|
1286 |
+
#include <ATen/ops/to_mkldnn.h>
|
1287 |
+
#include <ATen/ops/to_mkldnn_backward.h>
|
1288 |
+
#include <ATen/ops/to_padded_tensor.h>
|
1289 |
+
#include <ATen/ops/to_sparse.h>
|
1290 |
+
#include <ATen/ops/to_sparse_bsc.h>
|
1291 |
+
#include <ATen/ops/to_sparse_bsr.h>
|
1292 |
+
#include <ATen/ops/to_sparse_csc.h>
|
1293 |
+
#include <ATen/ops/to_sparse_csr.h>
|
1294 |
+
#include <ATen/ops/topk.h>
|
1295 |
+
#include <ATen/ops/trace.h>
|
1296 |
+
#include <ATen/ops/trace_backward.h>
|
1297 |
+
#include <ATen/ops/transpose.h>
|
1298 |
+
#include <ATen/ops/transpose_copy.h>
|
1299 |
+
#include <ATen/ops/trapezoid.h>
|
1300 |
+
#include <ATen/ops/trapz.h>
|
1301 |
+
#include <ATen/ops/triangular_solve.h>
|
1302 |
+
#include <ATen/ops/tril.h>
|
1303 |
+
#include <ATen/ops/tril_indices.h>
|
1304 |
+
#include <ATen/ops/triplet_margin_loss.h>
|
1305 |
+
#include <ATen/ops/triu.h>
|
1306 |
+
#include <ATen/ops/triu_indices.h>
|
1307 |
+
#include <ATen/ops/true_divide.h>
|
1308 |
+
#include <ATen/ops/trunc.h>
|
1309 |
+
#include <ATen/ops/type_as.h>
|
1310 |
+
#include <ATen/ops/unbind.h>
|
1311 |
+
#include <ATen/ops/unbind_copy.h>
|
1312 |
+
#include <ATen/ops/unflatten.h>
|
1313 |
+
#include <ATen/ops/unflatten_dense_tensors.h>
|
1314 |
+
#include <ATen/ops/unfold.h>
|
1315 |
+
#include <ATen/ops/unfold_backward.h>
|
1316 |
+
#include <ATen/ops/unfold_copy.h>
|
1317 |
+
#include <ATen/ops/uniform.h>
|
1318 |
+
#include <ATen/ops/unique_consecutive.h>
|
1319 |
+
#include <ATen/ops/unique_dim.h>
|
1320 |
+
#include <ATen/ops/unique_dim_consecutive.h>
|
1321 |
+
#include <ATen/ops/unsafe_chunk.h>
|
1322 |
+
#include <ATen/ops/unsafe_split.h>
|
1323 |
+
#include <ATen/ops/unsafe_split_with_sizes.h>
|
1324 |
+
#include <ATen/ops/unsqueeze.h>
|
1325 |
+
#include <ATen/ops/unsqueeze_copy.h>
|
1326 |
+
#include <ATen/ops/upsample_bicubic2d.h>
|
1327 |
+
#include <ATen/ops/upsample_bicubic2d_backward.h>
|
1328 |
+
#include <ATen/ops/upsample_bilinear2d.h>
|
1329 |
+
#include <ATen/ops/upsample_bilinear2d_backward.h>
|
1330 |
+
#include <ATen/ops/upsample_linear1d.h>
|
1331 |
+
#include <ATen/ops/upsample_linear1d_backward.h>
|
1332 |
+
#include <ATen/ops/upsample_nearest1d.h>
|
1333 |
+
#include <ATen/ops/upsample_nearest1d_backward.h>
|
1334 |
+
#include <ATen/ops/upsample_nearest2d.h>
|
1335 |
+
#include <ATen/ops/upsample_nearest2d_backward.h>
|
1336 |
+
#include <ATen/ops/upsample_nearest3d.h>
|
1337 |
+
#include <ATen/ops/upsample_nearest3d_backward.h>
|
1338 |
+
#include <ATen/ops/upsample_trilinear3d.h>
|
1339 |
+
#include <ATen/ops/upsample_trilinear3d_backward.h>
|
1340 |
+
#include <ATen/ops/value_selecting_reduction_backward.h>
|
1341 |
+
#include <ATen/ops/values.h>
|
1342 |
+
#include <ATen/ops/values_copy.h>
|
1343 |
+
#include <ATen/ops/vander.h>
|
1344 |
+
#include <ATen/ops/var.h>
|
1345 |
+
#include <ATen/ops/var_mean.h>
|
1346 |
+
#include <ATen/ops/vdot.h>
|
1347 |
+
#include <ATen/ops/view.h>
|
1348 |
+
#include <ATen/ops/view_as.h>
|
1349 |
+
#include <ATen/ops/view_as_complex.h>
|
1350 |
+
#include <ATen/ops/view_as_complex_copy.h>
|
1351 |
+
#include <ATen/ops/view_as_real.h>
|
1352 |
+
#include <ATen/ops/view_as_real_copy.h>
|
1353 |
+
#include <ATen/ops/view_copy.h>
|
1354 |
+
#include <ATen/ops/vsplit.h>
|
1355 |
+
#include <ATen/ops/vstack.h>
|
1356 |
+
#include <ATen/ops/where.h>
|
1357 |
+
#include <ATen/ops/xlogy.h>
|
1358 |
+
#include <ATen/ops/xor.h>
|
1359 |
+
#include <ATen/ops/zero.h>
|
1360 |
+
#include <ATen/ops/zeros.h>
|
1361 |
+
#include <ATen/ops/zeros_like.h>
|
1362 |
+
|
1363 |
+
namespace at {
|
1364 |
+
|
1365 |
+
|
1366 |
+
|
1367 |
+
// Special C++ only overloads for std()-like functions (See gh-40287)
|
1368 |
+
// These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
|
1369 |
+
// So, for example std(0) would select the std(unbiased=False) overload
|
1370 |
+
TORCH_API inline Tensor var(const Tensor& self, int dim) {
|
1371 |
+
return at::var(self, IntArrayRef{dim});
|
1372 |
+
}
|
1373 |
+
TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
|
1374 |
+
return at::var_mean(self, IntArrayRef{dim});
|
1375 |
+
}
|
1376 |
+
TORCH_API inline Tensor std(const Tensor& self, int dim) {
|
1377 |
+
return at::std(self, IntArrayRef{dim});
|
1378 |
+
}
|
1379 |
+
TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
|
1380 |
+
return at::std_mean(self, IntArrayRef{dim});
|
1381 |
+
}
|
1382 |
+
|
1383 |
+
inline int64_t numel(const Tensor& tensor) {
|
1384 |
+
return tensor.numel();
|
1385 |
+
}
|
1386 |
+
|
1387 |
+
inline int64_t size(const Tensor& tensor, int64_t dim) {
|
1388 |
+
return tensor.size(dim);
|
1389 |
+
}
|
1390 |
+
|
1391 |
+
inline int64_t stride(const Tensor& tensor, int64_t dim) {
|
1392 |
+
return tensor.stride(dim);
|
1393 |
+
}
|
1394 |
+
|
1395 |
+
inline bool is_complex(const Tensor& tensor) {
|
1396 |
+
return tensor.is_complex();
|
1397 |
+
}
|
1398 |
+
|
1399 |
+
inline bool is_floating_point(const Tensor& tensor) {
|
1400 |
+
return tensor.is_floating_point();
|
1401 |
+
}
|
1402 |
+
|
1403 |
+
inline bool is_signed(const Tensor& tensor) {
|
1404 |
+
return tensor.is_signed();
|
1405 |
+
}
|
1406 |
+
|
1407 |
+
inline bool is_inference(const Tensor& tensor) {
|
1408 |
+
return tensor.is_inference();
|
1409 |
+
}
|
1410 |
+
|
1411 |
+
inline bool _is_zerotensor(const Tensor& tensor) {
|
1412 |
+
return tensor._is_zerotensor();
|
1413 |
+
}
|
1414 |
+
|
1415 |
+
inline bool is_conj(const Tensor& tensor) {
|
1416 |
+
return tensor.is_conj();
|
1417 |
+
}
|
1418 |
+
|
1419 |
+
inline Tensor conj(const Tensor& tensor) {
|
1420 |
+
return tensor.conj();
|
1421 |
+
}
|
1422 |
+
|
1423 |
+
inline bool is_neg(const Tensor& tensor) {
|
1424 |
+
return tensor.is_neg();
|
1425 |
+
}
|
1426 |
+
|
1427 |
+
}
|
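The four `TORCH_API` overloads above exist because of the C++ overload-resolution quirk the comment names: a bare `int` argument converts to `bool` more readily than to `IntArrayRef`, so without them `at::std(t, 0)` would resolve to the `std(unbiased=false)` overload instead of reducing over dim 0. A minimal sketch of the two call paths (the tensor `t` and its sizes are illustrative):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::randn({4, 5});
  // Whole-tensor standard deviation; this is the overload a bare integer
  // literal would otherwise select via the int -> bool conversion.
  at::Tensor whole = at::std(t, /*unbiased=*/false); // 0-dim result
  // With the inline `int dim` overload above, an integer argument instead
  // forwards to at::std(t, IntArrayRef{0}) and reduces over dim 0.
  at::Tensor per_col = at::std(t, /*dim=*/0); // shape [5]
  return 0;
}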
venv/lib/python3.10/site-packages/torch/include/ATen/Generator.h
ADDED
@@ -0,0 +1,2 @@
#pragma once
#include <ATen/core/Generator.h>
venv/lib/python3.10/site-packages/torch/include/ATen/InferSize.h
ADDED
@@ -0,0 +1,87 @@
#pragma once

#include <ATen/DimVector.h>
#include <c10/core/ScalarType.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/util/DimVector.h>
#include <c10/util/Optional.h>
#include <sstream>
#include <vector>

namespace at {

// Infers the size of a dim with size -1, if it exists. Also checks that new
// shape is compatible with the number of elements.
//
// templated to handle std::vector<int64_t> and DimVector use cases, see
// below
//
template <typename InputArrayRef, typename NumelType, typename ResultVec>
inline void infer_size_impl(
    InputArrayRef shape,
    NumelType numel,
    ResultVec& res) {
  NumelType newsize = 1;
  // N.B. this is an index, not a sym dim!
  auto infer_dim = c10::optional<int64_t>();
  for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) {
    if (shape[dim] == -1) {
      if (infer_dim) {
        throw std::runtime_error("only one dimension can be inferred");
      }
      infer_dim = dim;
    } else if (shape[dim] >= 0) {
      newsize *= shape[dim];
    } else {
      AT_ERROR("invalid shape dimension ", shape[dim]);
    }
  }

  if (numel == newsize || (infer_dim && newsize > 0 && numel % newsize == 0)) {
    if (infer_dim) {
      // We have a degree of freedom here to select the dimension size; follow
      // NumPy semantics and just bail. However, a nice error message is needed
      // because users often use `view` as a way to flatten & unflatten
      // dimensions and will otherwise be confused why
      //   empty_tensor.view( 0, 0)
      // works yet
      //   empty_tensor.view(-1, 0)
      // doesn't.
      TORCH_CHECK(
          newsize != 0,
          "cannot reshape tensor of 0 elements into shape ",
          shape,
          " because the unspecified dimension size -1 can be any "
          "value and is ambiguous");
      res[*infer_dim] = numel / newsize;
    }
    return;
  }

  std::ostringstream ss;
  ss << "shape '" << shape << "' is invalid for input of size " << numel;
  throw std::runtime_error(ss.str());
}

inline std::vector<int64_t> infer_size(IntArrayRef shape, int64_t numel) {
  auto res = shape.vec();
  infer_size_impl(shape, numel, res);
  return res;
}

inline at::DimVector infer_size_dv(IntArrayRef shape, int64_t numel) {
  auto res = at::DimVector(shape);
  infer_size_impl(shape, numel, res);
  return res;
}

inline at::SymDimVector infer_size_dv(
    c10::SymIntArrayRef shape,
    c10::SymInt numel) {
  auto res = at::SymDimVector(shape);
  infer_size_impl<c10::SymIntArrayRef, c10::SymInt, at::SymDimVector>(
      shape, std::move(numel), res);
  return res;
}

} // namespace at
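`infer_size` is the helper behind the `-1` placeholder in `view`/`reshape`-style shape arguments: it multiplies the explicit dims, checks divisibility against `numel`, and fills in the inferred slot. A short sketch of the behavior implemented above (the printed values follow directly from the algorithm):

#include <ATen/InferSize.h>
#include <iostream>

int main() {
  // 12 elements into shape {3, -1}: newsize = 3, so dim 1 becomes 12 / 3 = 4.
  std::vector<int64_t> sizes = at::infer_size({3, -1}, /*numel=*/12);
  std::cout << sizes[0] << " " << sizes[1] << std::endl; // prints "3 4"
  // at::infer_size({5, -1}, 12) would throw instead: 12 % 5 != 0, so the
  // shape is invalid for the input size.
  return 0;
}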
venv/lib/python3.10/site-packages/torch/include/ATen/LegacyBatchedFallback.h
ADDED
@@ -0,0 +1,25 @@
#pragma once
#include <ATen/ATen.h>
#include <ATen/core/op_registration/op_registration.h>
#include <torch/library.h>

namespace at {

// If an operator doesn't have a batching rule implemented then we fallback
// to this implementation. The fallback only works on out-of-place operators
// that return only tensors with new memory. (e.g., no in-place operators, no
// view operations).
//
// The fallback effectively takes all of the BatchedTensors in `stack`, slices
// them, and runs `op` on all of the corresponding slices to produce slices
// of the outputs. The output slices then get `torch.stack`ed to create the
// final returns.
//
// The performance of the fallback is not very good because it introduces an
// extra copy from stacking the sliced outputs. Because of this, we prefer to
// write batching rules for operators whenever possible.
void batchedTensorForLoopFallback(
    const c10::OperatorHandle& op,
    torch::jit::Stack* stack);

} // namespace at
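The comment above fully determines the shape of the fallback: slice each batched input along its batch dim, run the op per example, and stack the results. The real implementation operates on the dispatcher's `torch::jit::Stack`; the sketch below only conveys that slice/apply/stack structure, with `unary_op` as a hypothetical stand-in for an operator lacking a batching rule:

#include <ATen/ATen.h>
#include <vector>

// Hypothetical stand-in for an out-of-place operator without a batching rule.
at::Tensor unary_op(const at::Tensor& x) { return at::sin(x); }

at::Tensor for_loop_fallback(const at::Tensor& batched, int64_t bdim) {
  std::vector<at::Tensor> results;
  results.reserve(batched.size(bdim));
  // Slice along the batch dim and run the op on each example...
  for (int64_t i = 0; i < batched.size(bdim); ++i) {
    results.push_back(unary_op(batched.select(bdim, i)));
  }
  // ...then stack the per-example outputs back up. This stack is the extra
  // copy the comment above blames for the fallback's poor performance.
  return at::stack(results, bdim);
}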
venv/lib/python3.10/site-packages/torch/include/ATen/LegacyBatchedTensorImpl.h
ADDED
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <bitset>
|
4 |
+
|
5 |
+
#include <ATen/ArrayRef.h>
|
6 |
+
#include <ATen/SmallVector.h>
|
7 |
+
#include <ATen/Tensor.h>
|
8 |
+
|
9 |
+
namespace at {
|
10 |
+
|
11 |
+
// We assume this in a few other places in the codebase,
|
12 |
+
// but there isn't a centralized definition.
|
13 |
+
constexpr int64_t kVmapMaxTensorDims = 64;
|
14 |
+
|
15 |
+
// The valid vmap levels range from [0, 64). This effectively means that we
|
16 |
+
// support a maximum of 64 nested vmaps.
|
17 |
+
constexpr int64_t kVmapNumLevels = 64;
|
18 |
+
|
19 |
+
// Store this number of elements of BatchDims on the stack. Most people will
|
20 |
+
// probably use <= 5 nested vmaps, but adjust this number as necessary.
|
21 |
+
constexpr int64_t kBatchDimsStackSize = 5;
|
22 |
+
|
23 |
+
// a BatchDim represents a "private" dimension on a Tensor created inside of
|
24 |
+
// vmap. It is a (level, dim) tuple, with the `dim` indicating which dimension
|
25 |
+
// is being vmap'ed over and the `level` being an identifier for which vmap
|
26 |
+
// said dimension was created inside. The `dim` corresponds to a "physical
|
27 |
+
// dim" - it is a dimension index on the underlying physical tensor that is
|
28 |
+
// being vmapped over.
|
29 |
+
struct BatchDim {
|
30 |
+
BatchDim(int64_t level, int64_t dim) : dim_(dim), level_(level) {}
|
31 |
+
int64_t dim() const {
|
32 |
+
return dim_;
|
33 |
+
}
|
34 |
+
int64_t level() const {
|
35 |
+
return level_;
|
36 |
+
}
|
37 |
+
|
38 |
+
private:
|
39 |
+
int64_t dim_;
|
40 |
+
int64_t level_;
|
41 |
+
};
|
42 |
+
|
43 |
+
using BatchDims = SmallVector<BatchDim, kBatchDimsStackSize>;
|
44 |
+
using BatchDimsRef = ArrayRef<BatchDim>;
|
45 |
+
|
46 |
+
// A BatchedTensorImpl holds an underlying Tensor and a list of BatchDim
|
47 |
+
// NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a
|
48 |
+
// BatchedTensorImpl.
|
49 |
+
//
|
50 |
+
// The batch dimensions are treated as being "private"; they are not
|
51 |
+
// user-visible. For example, in the following Tensor,
|
52 |
+
// bt = BatchedTensorImpl(ones(2, 3, 5, 7), [(lvl=1, dim=0), (lvl=2, dim=1)])
|
53 |
+
// dimensions 0 and 1 are batch dimensions.
|
54 |
+
//
|
55 |
+
// bt.sizes() returns (5, 7); bt.sum(0) performs a reduction over the (public)
|
56 |
+
// dim 0, which is equivalent to dim 3 in the underlying ones(2, 3, 5, 7)
|
57 |
+
// tensor.
|
58 |
+
struct TORCH_API BatchedTensorImpl : public c10::TensorImpl {
|
59 |
+
explicit BatchedTensorImpl(Tensor value, BatchDims bdims);
|
60 |
+
|
61 |
+
// Returns a reference to BatchDims that represent which dimensions of this
|
62 |
+
// tensor are private.
|
63 |
+
BatchDimsRef bdims() const {
|
64 |
+
    return bdims_;
  }

  // BatchedTensorImpl wraps a Tensor
  const Tensor& value() const {
    return value_;
  }

  // Given a public dimension index, return the dimension index in the
  // underlying value() tensor. For example, if we have
  //   bt = BatchedTensorImpl(ones(2, 3, 5, 7), [(lvl=1, dim=0), (lvl=2, dim=2)])
  //   bt.actualDim(0) -> 1
  //   bt.actualDim(1) -> 3
  //   bt.actualDim(2) -> Error
  int64_t actualDim(int64_t dim, bool wrap_dim = true) const;

  // We have to override this because we opted into CustomStrides
  IntArrayRef strides_custom() const override;
  // Override a bunch of methods inherited from TensorImpl to return error
  // messages.
  bool is_contiguous_custom(at::MemoryFormat memory_format) const override;
  void set_size(int64_t dim, int64_t new_size) override;
  void set_stride(int64_t dim, int64_t new_stride) override;
  void set_storage_offset(int64_t storage_offset) override;
#ifdef DEBUG
  bool has_storage() const override;
#endif

 private:
  // see NOTE: [BatchedTensorImpl levels invariant]
  void checkInvariants() const;
  const char* tensorimpl_type_name() const override;

  Tensor value_;

  // Note: [BatchedTensorImpl levels invariant]
  // There is an invariant that the BatchDims must be stored in increasing
  // `level` order. That is, for i < j, bdims_[i].level must be less than
  // bdims_[j].level.
  BatchDims bdims_;
};

// NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a
// BatchedTensorImpl.
inline bool isBatchedTensor(const Tensor& tensor) {
  return tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::Batched);
}

// It is unsafe to call this on a Tensor that is not backed by a
// BatchedTensorImpl. Please use `maybeGetBatchedImpl` whenever possible.
inline BatchedTensorImpl* unsafeGetBatchedImpl(const Tensor& tensor) {
  return static_cast<BatchedTensorImpl*>(tensor.unsafeGetTensorImpl());
}

inline BatchedTensorImpl* maybeGetBatchedImpl(const Tensor& tensor) {
  if (!isBatchedTensor(tensor)) {
    return nullptr;
  }
  return unsafeGetBatchedImpl(tensor);
}

// Returns a bitset. If bit i is set, then that means dim i is a batchdim.
inline std::bitset<kVmapMaxTensorDims> createBatchDimBitset(
    BatchDimsRef bdims) {
  std::bitset<kVmapMaxTensorDims> is_bdim;
  for (const auto& bdim : bdims) {
    is_bdim.set(bdim.dim());
  }
  return is_bdim;
}

// Creates a bitset for all of the levels present in `bdims`
inline std::bitset<kVmapNumLevels> createVmapLevelsBitset(BatchDimsRef bdims) {
  std::bitset<kVmapNumLevels> result;
  for (const auto& bdim : bdims) {
    result.set(bdim.level());
  }
  return result;
}

inline std::ostream& operator<<(std::ostream& out, const BatchDim& bdim) {
  out << "(lvl=" << bdim.level() << ", dim=" << bdim.dim() << ")";
  return out;
}

// Use this to construct a BatchedTensor from a regular Tensor
TORCH_API Tensor makeBatched(const Tensor& tensor, BatchDims bdims);

// Adds a batch dim to `tensor`, returning a BatchedTensor
TORCH_API Tensor addBatchDim(const Tensor& tensor, int64_t level, int64_t dim);

// Checks if an inplace operation on self and other is "vmap compatible".
// See NOTE: [vmap-incompatible in-place operations] for the definition of this.
TORCH_API bool inplaceIsVmapCompatible(const Tensor& self, const Tensor& other);

} // namespace at
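A minimal usage sketch of the accessors declared above (not part of the header; it assumes a translation unit linked against libtorch, and inspect_batched is a hypothetical helper):

// Minimal usage sketch (assumptions: a TU linked against libtorch;
// inspect_batched is a hypothetical helper, not part of ATen).
#include <ATen/ATen.h>
#include <ATen/LegacyBatchedTensorImpl.h>
#include <iostream>

void inspect_batched(const at::Tensor& t) {
  // maybeGetBatchedImpl is the safe accessor: nullptr for plain tensors.
  if (auto* impl = at::maybeGetBatchedImpl(t)) {
    for (const auto& bdim : impl->bdims()) {
      std::cout << bdim << '\n'; // operator<< above prints "(lvl=..., dim=...)"
    }
    // With one batch dim at physical dim 0, logical dim 0 is physical dim 1.
    std::cout << "physical dim: " << impl->actualDim(0) << '\n';
  }
}

int main() {
  at::Tensor base = at::ones({2, 3, 5});
  // Wrap physical dim 0 as vmap level 1; the result carries DispatchKey::Batched.
  at::Tensor batched = at::addBatchDim(base, /*level=*/1, /*dim=*/0);
  inspect_batched(batched);
  return 0;
}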
venv/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapMode.h
ADDED
@@ -0,0 +1,26 @@
#pragma once

#include <c10/core/impl/LocalDispatchKeySet.h>

namespace at::impl {

// VmapMode contains a thread-local count of how many nested vmaps
// we are currently inside. That number is known as the `vmap level`.
// VmapMode is used in the implementation of the Python `torch.vmap` API.
//
// NOTE: this is NOT the C++ API for torch.vmap. That doesn't exist yet.

struct TORCH_API VmapMode {
  // Returns the vmap level, aka the count of how many nested vmaps we're in.
  static int64_t current_vmap_level();

  // Increments the count of nested vmaps. If this causes the vmap level to be
  // greater than 0, then it enables DispatchKey::VmapMode on all tensors.
  static int64_t increment_nesting();

  // Decrements the count of nested vmaps. If this causes the vmap level to be
  // equal to 0, then it disables DispatchKey::VmapMode on all tensors.
  static int64_t decrement_nesting();
};

} // namespace at::impl
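Because every increment must be matched by a decrement, callers typically pair the two in an RAII guard. A sketch under that assumption follows; VmapLevelGuard is hypothetical and not part of this header:

// Hypothetical RAII guard over the counter above (not part of this header);
// it keeps increment/decrement calls balanced across early returns.
#include <ATen/LegacyVmapMode.h>
#include <cstdint>

class VmapLevelGuard {
 public:
  VmapLevelGuard() : level_(at::impl::VmapMode::increment_nesting()) {}
  ~VmapLevelGuard() {
    at::impl::VmapMode::decrement_nesting();
  }
  VmapLevelGuard(const VmapLevelGuard&) = delete;
  VmapLevelGuard& operator=(const VmapLevelGuard&) = delete;

  // The nesting count after entering this guard, i.e. the current vmap level.
  int64_t level() const {
    return level_;
  }

 private:
  int64_t level_;
};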
venv/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapTransforms.h
ADDED
@@ -0,0 +1,183 @@
#pragma once

#include <ATen/LegacyBatchedTensorImpl.h>
#include <ATen/core/IListRef.h>

namespace at {

// This file contains abstractions used for transforming *logical* vmap
// arguments into *physical* arguments. (Keep reading for definitions of these
// terms.)

// NOTE: [Logical vs physical args]
// Consider the following vmap.
//   vmap(vmap(func, in_dims=(2,)), in_dims=(0,))(torch.ones(2, 3, 4))
// This would produce a BatchedTensor wrapping a Tensor of size [2, 3, 4],
// with batch dims 0 and 2:
//   BatchedTensor(ones(2, 3, 4), bdims=[(lvl=1,dim=0),(lvl=2,dim=2)])
//
// We say the *logical* view of the tensor has size [3] -- tensors inside
// `func` appear to have size [3].
// However, the *physical* underlying tensor (the one passed to vmap) has size
// [2, 3, 4].
//
// This notion of logical vs physical also extends to non-tensor arguments.
// Consider the previous tensor; let's assume the user called
// `torch.sum(tensor, dim=0)` inside of `func`. Then the logical
// dimension they are reducing over is dim 0 but the physical dim is dim 1
// (the first non-batch dimension).

// Forward declared; see NOTE: [What is a VmapPhysicalView?]
struct VmapPhysicalView;

// Most PyTorch operators take 4 or fewer inputs.
constexpr int64_t kVmapTransformStaticInputSize = 4;
using VmapPhysicalViewVec =
    SmallVector<VmapPhysicalView, kVmapTransformStaticInputSize>;

// PyTorch generally advertises good performance for <= 5 dims
// (see ATen/core/DimVector.h). We add a few extra dims (~3) for vmap
// dimensions to get 8. Adjust this number as necessary.
constexpr int64_t kVmapStaticDimVecSize = 8;
using VmapDimVector = SmallVector<int64_t, kVmapStaticDimVecSize>;
using VmapSymDimVector = SmallVector<c10::SymInt, kVmapStaticDimVecSize>;

// NOTE: [What is a VmapTransform?]
// A *VmapTransform* converts logical views of tensors to physical views.
//
// Batching rules use VmapTransforms to convert logical arguments to
// physical arguments, then call one or more at:: operators that handle the
// physical arguments, and then convert the physical result back to a logical
// argument.

// VmapTransform for operators that take tensors with multiple batch dims.
// Given one or more logical views on Tensors, `logicalToPhysical`
// permutes all of the batch dims to the front of the tensor, aligns
// and expands the batch dims to match each other (according to their `level`),
// and returns a VmapPhysicalView on the tensor(s).
struct TORCH_API MultiBatchVmapTransform {
  static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor);
  static VmapPhysicalViewVec logicalToPhysical(ITensorListRef logical_tensors);
};

// VmapTransform for operators that broadcast all inputs.
// Given some logical views on Tensors, `logicalToPhysical`:
// - permutes all of the batch dims to the front of the tensors
// - aligns all the batch dims to the collective levels of all of the tensors.
//   If a tensor does not have a batch dim for a vmap level, then it receives
//   a size-one dimension for said level.
// - aligns the non-batch dims to have the same dimensionality, adding extra
//   size-1 dimensions in between the batch dimensions and the non-batch
//   dimensions so that the batch dimensions are lined up from the right.
//
// For example: given inputs of size (B, 2) and (B, 3, 2) where B is the batch
// dimension, BroadcastingVmapTransform returns VmapPhysicalViews that wrap
// tensors of size (B, 1, 2) and (B, 3, 2).
//
// Given inputs of size (B, 2) and (2,), BroadcastingVmapTransform returns
// VmapPhysicalViews wrapping tensors of size (B, 2) and (1, 2). We don't
// actually *need* to return a tensor of size (1, 2) for the second tensor
// because the broadcasting operation takes care of that for us, but we do
// it anyway to keep things simple.
struct TORCH_API BroadcastingVmapTransform {
  static VmapPhysicalViewVec logicalToPhysical(TensorList logical_tensors);
};

// Forward declared; if you're reading this file head to toe, don't worry about
// it yet.
struct VmapPhysicalToLogicalMap;

// NOTE: [What is a VmapPhysicalView?]
// VmapPhysicalView represents a physical view on a Tensor.
//
// One can use it to further convert logical dimension indices, logical shapes,
// and more to their physical variants, or convert a new (physical) tensor into
// a logical BatchedTensor. (TODO(rzou): some of these are not yet implemented).
//
// VmapPhysicalView stores a physical tensor with all of its batch dimensions at
// the front and some levels that correspond to said batch dimensions.
//
// The levels bitset specifies which vmap levels correspond to the batch
// dimensions at the front of the tensor. In particular, the number of set bits
// corresponds to the number of batch dimensions on `tensor` and the rightmost
// bit of `levels` specifies the maximum number of nested vmaps we are in at
// this point in time.
// For example, given:
//   physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5, 6), levels={1, 3})
//
// The rightmost set bit of `levels` is 3, indicating that the number of nested
// vmaps is less than or equal to 3.
//   bitset: 010100
//              ^
//              |
//   levels: 012345
struct TORCH_API VmapPhysicalView {
  VmapPhysicalView(Tensor&& tensor, std::bitset<kVmapNumLevels> levels)
      : levels_(levels), tensor_(std::move(tensor)) {
    TORCH_INTERNAL_ASSERT(!isBatchedTensor(tensor_));
  }

  Tensor& tensor() {
    return tensor_;
  }
  const Tensor& tensor() const {
    return tensor_;
  }

  // Maps logical dim indices to physical dim indices. Also does dim wrapping.
  //
  // For example, given:
  //   physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5), levels={1, 3})
  //
  // Then physical_view.getPhysicalDims({0, 1}) returns {2, 3}.
  // This is because the size of levels tells us that the first two dimensions
  // of `tensor_` are batch dimensions, so a logical dim of `n` is actually
  // a physical dim of `n + 2`.
  VmapDimVector getPhysicalDims(OptionalIntArrayRef logical_dims) const;
  int64_t getPhysicalDim(int64_t logical_dim) const;

  // Returns a VmapPhysicalToLogicalMap object. This can be used for
  // mapping a physical tensor to a new logical tensor (BatchedTensor).
  VmapPhysicalToLogicalMap getPhysicalToLogicalMap() const;

  // Maps a logical shape to a physical shape by pre-pending the batch
  // sizes to the logical shape.
  VmapDimVector getPhysicalShape(IntArrayRef logical_shape) const;

  int64_t numBatchDims() const;

 private:
  int64_t numLogicalDims() const;

  std::bitset<kVmapNumLevels> levels_;
  Tensor tensor_;
};

// Convenience struct used for mapping a physical tensor (a non-BatchedTensor)
// to a logical one (BatchedTensor). It holds some levels that are used to do
// the mapping and assumes that the batch dimensions in the physical tensor all
// occur at the front of the tensor.
struct TORCH_API VmapPhysicalToLogicalMap {
  VmapPhysicalToLogicalMap(std::bitset<kVmapNumLevels> levels)
      : levels_(levels) {}

  // Maps a physical tensor to a new logical tensor (BatchedTensor).
  // Assumes that all of the "batch dimensions" are at the front
  // of the physical tensor. For example, given:
  // - x = rank-4 Tensor with size 2, 3, 5, 7
  // - levels = (2, 4)
  // Returns:
  // - BatchedTensor(x, bdims=[(dim=0,lvl=2), (dim=1, lvl=4)])
  Tensor apply(const Tensor& physical_tensor) const;

  // Given a vector of physical tensors,
  // 1. maps each tensor to a new logical tensor. Assumes that all of the
  //    "batch dimensions" are at the front of the physical tensors.
  // 2. stores the new logical tensors back into the passed-in vector. This is
  //    to avoid additional dynamic allocations.
  void applyInplace(std::vector<Tensor>& physical_tensors) const;

  std::bitset<kVmapNumLevels> levels_;
};

} // namespace at
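The dim arithmetic that getPhysicalDims documents can be sketched in isolation. The following is a hedged, standalone illustration of that rule only; the free function to_physical_dims is hypothetical, not the implementation in ATen:

// Hypothetical standalone sketch of the rule getPhysicalDims documents:
// wrap negative logical dims, then shift past the leading batch dims.
#include <cstdint>
#include <vector>

std::vector<int64_t> to_physical_dims(
    std::vector<int64_t> logical_dims,
    int64_t num_batch_dims,
    int64_t num_logical_dims) {
  for (auto& d : logical_dims) {
    if (d < 0) {
      d += num_logical_dims; // dim wrapping, as the real method also performs
    }
    d += num_batch_dims; // batch dims occupy the front of the physical tensor
  }
  return logical_dims;
}

// With levels={1, 3} (two batch dims) on a 4-d physical tensor (two logical
// dims): to_physical_dims({0, 1}, 2, 2) == {2, 3}, matching the comment above.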
venv/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h
ADDED
@@ -0,0 +1,31 @@
#pragma once

#include <c10/util/Exception.h>

#include <ostream>
#include <string>

namespace at {

enum class LinalgBackend : int8_t { Default, Cusolver, Magma };

inline std::string LinalgBackendToString(at::LinalgBackend backend) {
  switch (backend) {
    case LinalgBackend::Default:
      return "at::LinalgBackend::Default";
    case LinalgBackend::Cusolver:
      return "at::LinalgBackend::Cusolver";
    case LinalgBackend::Magma:
      return "at::LinalgBackend::Magma";
    default:
      TORCH_CHECK(false, "Unknown linalg backend");
  }
}

inline std::ostream& operator<<(
    std::ostream& stream,
    at::LinalgBackend backend) {
  return stream << LinalgBackendToString(backend);
}

} // namespace at
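A minimal sketch of how the stream operator above is used, assuming a translation unit linked against libtorch:

// Minimal usage sketch of LinalgBackendToString / operator<< above.
#include <ATen/LinalgBackend.h>
#include <iostream>

int main() {
  at::LinalgBackend backend = at::LinalgBackend::Cusolver;
  std::cout << backend << '\n'; // prints "at::LinalgBackend::Cusolver"
  return 0;
}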
venv/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions_inl.h
ADDED
@@ -0,0 +1,324 @@
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the \
file will need to be re-compiled every time an operator is changed or added. \
Consider including a specific operator from \
<ATen/ops/{my_operator}_meta_dispatch.h>. \
See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif

#include <ATen/ops/_add_relu_meta_dispatch.h>
#include <ATen/ops/_addmm_activation_meta_dispatch.h>
#include <ATen/ops/_amp_update_scale_meta_dispatch.h>
#include <ATen/ops/_coalesced_meta_dispatch.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_meta_dispatch.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_meta_dispatch.h>
#include <ATen/ops/_ctc_loss_meta_dispatch.h>
#include <ATen/ops/_efficientzerotensor_meta_dispatch.h>
#include <ATen/ops/_fill_mem_eff_dropout_mask_meta_dispatch.h>
#include <ATen/ops/_fused_sdp_choice_meta_dispatch.h>
#include <ATen/ops/_index_put_impl_meta_dispatch.h>
#include <ATen/ops/_linalg_det_meta_dispatch.h>
#include <ATen/ops/_linalg_eigh_meta_dispatch.h>
#include <ATen/ops/_linalg_slogdet_meta_dispatch.h>
#include <ATen/ops/_linalg_solve_ex_meta_dispatch.h>
#include <ATen/ops/_linalg_svd_meta_dispatch.h>
#include <ATen/ops/_log_softmax_meta_dispatch.h>
#include <ATen/ops/_log_softmax_backward_data_meta_dispatch.h>
#include <ATen/ops/_mkldnn_transpose_meta_dispatch.h>
#include <ATen/ops/_reshape_alias_meta_dispatch.h>
#include <ATen/ops/_resize_output_meta_dispatch.h>
#include <ATen/ops/_softmax_meta_dispatch.h>
#include <ATen/ops/_softmax_backward_data_meta_dispatch.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_meta_dispatch.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_meta_dispatch.h>
#include <ATen/ops/_upsample_bicubic2d_aa_meta_dispatch.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_meta_dispatch.h>
#include <ATen/ops/_upsample_bilinear2d_aa_meta_dispatch.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_meta_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact1d_meta_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_meta_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact2d_meta_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_meta_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact3d_meta_dispatch.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_meta_dispatch.h>
#include <ATen/ops/acos_meta_dispatch.h>
#include <ATen/ops/acosh_meta_dispatch.h>
#include <ATen/ops/adaptive_max_pool2d_meta_dispatch.h>
#include <ATen/ops/adaptive_max_pool2d_backward_meta_dispatch.h>
#include <ATen/ops/adaptive_max_pool3d_meta_dispatch.h>
#include <ATen/ops/adaptive_max_pool3d_backward_meta_dispatch.h>
#include <ATen/ops/add_meta_dispatch.h>
#include <ATen/ops/addbmm_meta_dispatch.h>
#include <ATen/ops/addcdiv_meta_dispatch.h>
#include <ATen/ops/addcmul_meta_dispatch.h>
#include <ATen/ops/addmm_meta_dispatch.h>
#include <ATen/ops/addmv_meta_dispatch.h>
#include <ATen/ops/all_meta_dispatch.h>
#include <ATen/ops/amax_meta_dispatch.h>
#include <ATen/ops/amin_meta_dispatch.h>
#include <ATen/ops/aminmax_meta_dispatch.h>
#include <ATen/ops/any_meta_dispatch.h>
#include <ATen/ops/arange_meta_dispatch.h>
#include <ATen/ops/argmax_meta_dispatch.h>
#include <ATen/ops/argmin_meta_dispatch.h>
#include <ATen/ops/as_strided_meta_dispatch.h>
#include <ATen/ops/asin_meta_dispatch.h>
#include <ATen/ops/asinh_meta_dispatch.h>
#include <ATen/ops/atan_meta_dispatch.h>
#include <ATen/ops/atan2_meta_dispatch.h>
#include <ATen/ops/atanh_meta_dispatch.h>
#include <ATen/ops/avg_pool2d_meta_dispatch.h>
#include <ATen/ops/avg_pool2d_backward_meta_dispatch.h>
#include <ATen/ops/avg_pool3d_meta_dispatch.h>
#include <ATen/ops/avg_pool3d_backward_meta_dispatch.h>
#include <ATen/ops/baddbmm_meta_dispatch.h>
#include <ATen/ops/bernoulli_meta_dispatch.h>
#include <ATen/ops/bitwise_and_meta_dispatch.h>
#include <ATen/ops/bitwise_left_shift_meta_dispatch.h>
#include <ATen/ops/bitwise_not_meta_dispatch.h>
#include <ATen/ops/bitwise_or_meta_dispatch.h>
#include <ATen/ops/bitwise_right_shift_meta_dispatch.h>
#include <ATen/ops/bitwise_xor_meta_dispatch.h>
#include <ATen/ops/bmm_meta_dispatch.h>
#include <ATen/ops/cat_meta_dispatch.h>
#include <ATen/ops/cauchy_meta_dispatch.h>
#include <ATen/ops/ceil_meta_dispatch.h>
#include <ATen/ops/clamp_meta_dispatch.h>
#include <ATen/ops/clamp_max_meta_dispatch.h>
#include <ATen/ops/clamp_min_meta_dispatch.h>
#include <ATen/ops/copy_sparse_to_sparse_meta_dispatch.h>
#include <ATen/ops/copysign_meta_dispatch.h>
#include <ATen/ops/cos_meta_dispatch.h>
#include <ATen/ops/cosh_meta_dispatch.h>
#include <ATen/ops/cumprod_meta_dispatch.h>
#include <ATen/ops/cumsum_meta_dispatch.h>
#include <ATen/ops/digamma_meta_dispatch.h>
#include <ATen/ops/div_meta_dispatch.h>
#include <ATen/ops/elu_meta_dispatch.h>
#include <ATen/ops/elu_backward_meta_dispatch.h>
#include <ATen/ops/embedding_renorm_meta_dispatch.h>
#include <ATen/ops/empty_meta_dispatch.h>
#include <ATen/ops/empty_strided_meta_dispatch.h>
#include <ATen/ops/eq_meta_dispatch.h>
#include <ATen/ops/erf_meta_dispatch.h>
#include <ATen/ops/erfc_meta_dispatch.h>
#include <ATen/ops/erfinv_meta_dispatch.h>
#include <ATen/ops/exp_meta_dispatch.h>
#include <ATen/ops/exp2_meta_dispatch.h>
#include <ATen/ops/expm1_meta_dispatch.h>
#include <ATen/ops/exponential_meta_dispatch.h>
#include <ATen/ops/eye_meta_dispatch.h>
#include <ATen/ops/fill_meta_dispatch.h>
#include <ATen/ops/floor_meta_dispatch.h>
#include <ATen/ops/floor_divide_meta_dispatch.h>
#include <ATen/ops/fmax_meta_dispatch.h>
#include <ATen/ops/fmin_meta_dispatch.h>
#include <ATen/ops/fmod_meta_dispatch.h>
#include <ATen/ops/frac_meta_dispatch.h>
#include <ATen/ops/fractional_max_pool2d_meta_dispatch.h>
#include <ATen/ops/fractional_max_pool2d_backward_meta_dispatch.h>
#include <ATen/ops/fractional_max_pool3d_meta_dispatch.h>
#include <ATen/ops/gather_meta_dispatch.h>
#include <ATen/ops/gcd_meta_dispatch.h>
#include <ATen/ops/ge_meta_dispatch.h>
#include <ATen/ops/gelu_meta_dispatch.h>
#include <ATen/ops/gelu_backward_meta_dispatch.h>
#include <ATen/ops/geometric_meta_dispatch.h>
#include <ATen/ops/glu_meta_dispatch.h>
#include <ATen/ops/gt_meta_dispatch.h>
#include <ATen/ops/hardshrink_meta_dispatch.h>
#include <ATen/ops/hardshrink_backward_meta_dispatch.h>
#include <ATen/ops/hardsigmoid_meta_dispatch.h>
#include <ATen/ops/hardsigmoid_backward_meta_dispatch.h>
#include <ATen/ops/hardswish_meta_dispatch.h>
#include <ATen/ops/hardtanh_meta_dispatch.h>
#include <ATen/ops/heaviside_meta_dispatch.h>
#include <ATen/ops/hypot_meta_dispatch.h>
#include <ATen/ops/i0_meta_dispatch.h>
#include <ATen/ops/igamma_meta_dispatch.h>
#include <ATen/ops/igammac_meta_dispatch.h>
#include <ATen/ops/index_meta_dispatch.h>
#include <ATen/ops/index_add_meta_dispatch.h>
#include <ATen/ops/index_copy_meta_dispatch.h>
#include <ATen/ops/index_fill_meta_dispatch.h>
#include <ATen/ops/index_reduce_meta_dispatch.h>
#include <ATen/ops/isin_meta_dispatch.h>
#include <ATen/ops/isneginf_meta_dispatch.h>
#include <ATen/ops/isposinf_meta_dispatch.h>
#include <ATen/ops/lcm_meta_dispatch.h>
#include <ATen/ops/le_meta_dispatch.h>
#include <ATen/ops/leaky_relu_meta_dispatch.h>
#include <ATen/ops/leaky_relu_backward_meta_dispatch.h>
#include <ATen/ops/lerp_meta_dispatch.h>
#include <ATen/ops/lgamma_meta_dispatch.h>
#include <ATen/ops/linalg_cholesky_ex_meta_dispatch.h>
#include <ATen/ops/linalg_cross_meta_dispatch.h>
#include <ATen/ops/linalg_inv_ex_meta_dispatch.h>
#include <ATen/ops/linalg_ldl_factor_ex_meta_dispatch.h>
#include <ATen/ops/linalg_ldl_solve_meta_dispatch.h>
#include <ATen/ops/linalg_lu_meta_dispatch.h>
#include <ATen/ops/linalg_lu_factor_ex_meta_dispatch.h>
#include <ATen/ops/linalg_lu_solve_meta_dispatch.h>
#include <ATen/ops/linalg_qr_meta_dispatch.h>
#include <ATen/ops/linalg_vector_norm_meta_dispatch.h>
#include <ATen/ops/linspace_meta_dispatch.h>
#include <ATen/ops/log_meta_dispatch.h>
#include <ATen/ops/log10_meta_dispatch.h>
#include <ATen/ops/log1p_meta_dispatch.h>
#include <ATen/ops/log2_meta_dispatch.h>
#include <ATen/ops/log_normal_meta_dispatch.h>
#include <ATen/ops/logaddexp_meta_dispatch.h>
#include <ATen/ops/logaddexp2_meta_dispatch.h>
#include <ATen/ops/logit_meta_dispatch.h>
#include <ATen/ops/logit_backward_meta_dispatch.h>
#include <ATen/ops/logspace_meta_dispatch.h>
#include <ATen/ops/lshift_meta_dispatch.h>
#include <ATen/ops/lt_meta_dispatch.h>
#include <ATen/ops/lu_unpack_meta_dispatch.h>
#include <ATen/ops/masked_fill_meta_dispatch.h>
#include <ATen/ops/masked_scatter_meta_dispatch.h>
#include <ATen/ops/max_meta_dispatch.h>
#include <ATen/ops/max_pool2d_with_indices_meta_dispatch.h>
#include <ATen/ops/max_pool2d_with_indices_backward_meta_dispatch.h>
#include <ATen/ops/maximum_meta_dispatch.h>
#include <ATen/ops/mean_meta_dispatch.h>
#include <ATen/ops/min_meta_dispatch.h>
#include <ATen/ops/minimum_meta_dispatch.h>
#include <ATen/ops/mish_meta_dispatch.h>
#include <ATen/ops/mm_meta_dispatch.h>
#include <ATen/ops/mse_loss_meta_dispatch.h>
#include <ATen/ops/mul_meta_dispatch.h>
#include <ATen/ops/ne_meta_dispatch.h>
#include <ATen/ops/neg_meta_dispatch.h>
#include <ATen/ops/nextafter_meta_dispatch.h>
#include <ATen/ops/nll_loss_backward_meta_dispatch.h>
#include <ATen/ops/nll_loss_forward_meta_dispatch.h>
#include <ATen/ops/norm_meta_dispatch.h>
#include <ATen/ops/normal_meta_dispatch.h>
#include <ATen/ops/polygamma_meta_dispatch.h>
#include <ATen/ops/pow_meta_dispatch.h>
#include <ATen/ops/prod_meta_dispatch.h>
#include <ATen/ops/put_meta_dispatch.h>
#include <ATen/ops/random_meta_dispatch.h>
#include <ATen/ops/range_meta_dispatch.h>
#include <ATen/ops/reciprocal_meta_dispatch.h>
#include <ATen/ops/reflection_pad1d_meta_dispatch.h>
#include <ATen/ops/reflection_pad1d_backward_meta_dispatch.h>
#include <ATen/ops/reflection_pad3d_meta_dispatch.h>
#include <ATen/ops/reflection_pad3d_backward_meta_dispatch.h>
#include <ATen/ops/relu_meta_dispatch.h>
#include <ATen/ops/remainder_meta_dispatch.h>
#include <ATen/ops/renorm_meta_dispatch.h>
#include <ATen/ops/replication_pad1d_meta_dispatch.h>
#include <ATen/ops/replication_pad1d_backward_meta_dispatch.h>
#include <ATen/ops/replication_pad2d_meta_dispatch.h>
#include <ATen/ops/replication_pad3d_meta_dispatch.h>
#include <ATen/ops/resize_meta_dispatch.h>
#include <ATen/ops/resize_as_sparse_meta_dispatch.h>
#include <ATen/ops/round_meta_dispatch.h>
#include <ATen/ops/rrelu_with_noise_meta_dispatch.h>
#include <ATen/ops/rshift_meta_dispatch.h>
#include <ATen/ops/rsqrt_meta_dispatch.h>
#include <ATen/ops/scatter_meta_dispatch.h>
#include <ATen/ops/scatter_add_meta_dispatch.h>
#include <ATen/ops/scatter_reduce_meta_dispatch.h>
#include <ATen/ops/set_meta_dispatch.h>
#include <ATen/ops/sgn_meta_dispatch.h>
#include <ATen/ops/sigmoid_meta_dispatch.h>
#include <ATen/ops/sigmoid_backward_meta_dispatch.h>
#include <ATen/ops/sign_meta_dispatch.h>
#include <ATen/ops/signbit_meta_dispatch.h>
#include <ATen/ops/silu_meta_dispatch.h>
#include <ATen/ops/silu_backward_meta_dispatch.h>
#include <ATen/ops/sin_meta_dispatch.h>
#include <ATen/ops/sinc_meta_dispatch.h>
#include <ATen/ops/sinh_meta_dispatch.h>
#include <ATen/ops/slow_conv_transpose2d_meta_dispatch.h>
#include <ATen/ops/smooth_l1_loss_meta_dispatch.h>
#include <ATen/ops/softplus_meta_dispatch.h>
#include <ATen/ops/softplus_backward_meta_dispatch.h>
#include <ATen/ops/softshrink_meta_dispatch.h>
#include <ATen/ops/softshrink_backward_meta_dispatch.h>
#include <ATen/ops/sort_meta_dispatch.h>
#include <ATen/ops/sparse_resize_meta_dispatch.h>
#include <ATen/ops/sparse_resize_and_clear_meta_dispatch.h>
#include <ATen/ops/special_airy_ai_meta_dispatch.h>
#include <ATen/ops/special_bessel_j0_meta_dispatch.h>
#include <ATen/ops/special_bessel_j1_meta_dispatch.h>
#include <ATen/ops/special_bessel_y0_meta_dispatch.h>
#include <ATen/ops/special_bessel_y1_meta_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_t_meta_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_u_meta_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_v_meta_dispatch.h>
#include <ATen/ops/special_chebyshev_polynomial_w_meta_dispatch.h>
#include <ATen/ops/special_entr_meta_dispatch.h>
#include <ATen/ops/special_erfcx_meta_dispatch.h>
#include <ATen/ops/special_hermite_polynomial_h_meta_dispatch.h>
#include <ATen/ops/special_hermite_polynomial_he_meta_dispatch.h>
#include <ATen/ops/special_i0e_meta_dispatch.h>
#include <ATen/ops/special_i1_meta_dispatch.h>
#include <ATen/ops/special_i1e_meta_dispatch.h>
#include <ATen/ops/special_laguerre_polynomial_l_meta_dispatch.h>
#include <ATen/ops/special_legendre_polynomial_p_meta_dispatch.h>
#include <ATen/ops/special_log_ndtr_meta_dispatch.h>
#include <ATen/ops/special_modified_bessel_i0_meta_dispatch.h>
#include <ATen/ops/special_modified_bessel_i1_meta_dispatch.h>
#include <ATen/ops/special_modified_bessel_k0_meta_dispatch.h>
#include <ATen/ops/special_modified_bessel_k1_meta_dispatch.h>
#include <ATen/ops/special_ndtri_meta_dispatch.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_meta_dispatch.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_meta_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_meta_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_meta_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_meta_dispatch.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_meta_dispatch.h>
#include <ATen/ops/special_spherical_bessel_j0_meta_dispatch.h>
#include <ATen/ops/special_xlog1py_meta_dispatch.h>
#include <ATen/ops/special_zeta_meta_dispatch.h>
#include <ATen/ops/sqrt_meta_dispatch.h>
#include <ATen/ops/sub_meta_dispatch.h>
#include <ATen/ops/sum_meta_dispatch.h>
#include <ATen/ops/tan_meta_dispatch.h>
#include <ATen/ops/tanh_meta_dispatch.h>
#include <ATen/ops/tanh_backward_meta_dispatch.h>
#include <ATen/ops/threshold_meta_dispatch.h>
#include <ATen/ops/threshold_backward_meta_dispatch.h>
#include <ATen/ops/topk_meta_dispatch.h>
#include <ATen/ops/triangular_solve_meta_dispatch.h>
#include <ATen/ops/tril_meta_dispatch.h>
#include <ATen/ops/triu_meta_dispatch.h>
#include <ATen/ops/trunc_meta_dispatch.h>
#include <ATen/ops/unfold_meta_dispatch.h>
#include <ATen/ops/uniform_meta_dispatch.h>
#include <ATen/ops/upsample_bicubic2d_meta_dispatch.h>
#include <ATen/ops/upsample_bicubic2d_backward_meta_dispatch.h>
#include <ATen/ops/upsample_bilinear2d_meta_dispatch.h>
#include <ATen/ops/upsample_bilinear2d_backward_meta_dispatch.h>
#include <ATen/ops/upsample_linear1d_meta_dispatch.h>
#include <ATen/ops/upsample_linear1d_backward_meta_dispatch.h>
#include <ATen/ops/upsample_nearest1d_meta_dispatch.h>
#include <ATen/ops/upsample_nearest1d_backward_meta_dispatch.h>
#include <ATen/ops/upsample_nearest2d_meta_dispatch.h>
#include <ATen/ops/upsample_nearest2d_backward_meta_dispatch.h>
#include <ATen/ops/upsample_nearest3d_meta_dispatch.h>
#include <ATen/ops/upsample_nearest3d_backward_meta_dispatch.h>
#include <ATen/ops/upsample_trilinear3d_meta_dispatch.h>
#include <ATen/ops/upsample_trilinear3d_backward_meta_dispatch.h>
#include <ATen/ops/view_meta_dispatch.h>
#include <ATen/ops/view_as_complex_meta_dispatch.h>
#include <ATen/ops/view_as_real_meta_dispatch.h>
#include <ATen/ops/xlogy_meta_dispatch.h>
#include <ATen/ops/zero_meta_dispatch.h>
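The #error above enforces the per-operator-header discipline for fine-grained builds. A sketch of the recommended pattern for a single operator follows; the at::meta::add signature is assumed from torchgen's naming convention rather than quoted from the generated header:

// Sketch of the per-operator include pattern the #error recommends; the
// at::meta::add signature here is an assumption, not quoted from the
// generated header.
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/ops/add_meta_dispatch.h>

// Meta kernels perform shape/dtype inference without allocating real storage.
at::Tensor add_shapes_only(const at::Tensor& a, const at::Tensor& b) {
  return at::meta::add(a, b, /*alpha=*/1);
}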
venv/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h
ADDED
@@ -0,0 +1,215 @@
#pragma once
#include <ATen/NamedTensor.h>
#include <ATen/TensorNames.h>
#include <ATen/WrapDimUtilsMulti.h>

#include <ATen/core/DimVector.h>
#include <ATen/core/Tensor.h>
#include <functional>

namespace at {

using NameVector = SmallVector<Dimname, kDimVectorStaticSize>;

inline bool has_names(const ITensorListRef& tensors) {
  return std::any_of(tensors.begin(), tensors.end(), [](const Tensor& t) {
    return t.has_names();
  });
}

// Converts dim to a positional index. Errors if `dim` cannot be used to
// refer to any dimension of tensor.
TORCH_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim);
TORCH_API std::vector<int64_t> dimnames_to_positions(
    const Tensor& tensor,
    DimnameList dims);

// Unifies two DimnameList to produce a third. This is useful for implementing
// the named inference rule for binary broadcasting operations like add.
//
// There are three main constraints:
// 1) Check matching: Names must match positionally from the right.
// 2) Check misaligned: If a name `n` is in `names`, then it must appear at
//    the same index from the right in `other`.
// 3) The output names are obtained by unifying the names individually from the
//    right.
TORCH_API std::vector<Dimname> unify_from_right(
    DimnameList names,
    DimnameList other,
    const char* action = "broadcast");

[[noreturn]] inline void reportNYIDimnameOverload(const char* op_name) {
  TORCH_CHECK(
      false,
      op_name,
      ": You passed a dimname (string) to this op in place of a dimension "
      "index but it does not yet support this behavior. Please pass a dimension "
      "index to work around this.");
}

// [NOTE] Writing name inference rules
//
// Operators that support named tensors are either composed of operations that
// support named tensors or implement some name inference rule. An op that
// implements its own name inference rule generally looks like the following:
//
// Tensor op(...) {
//   perform_shape_checks(...);
//   # (1)
//   auto maybe_outnames = compute_outnames(...);
//   auto result = [&]() {
//     NoNamesGuard guard;
//     return op_impl(...);
//   }();
//   # (2)
//   propagate_names_if_nonempty(result, maybe_outnames);
// }
//
// Each op has (1) a compute outnames step and (2) a propagate names step.
//
// compute_outnames is responsible for checking that input names match and
// determining what the output names should be. It returns either:
// - {} (if the input tensors are all unnamed)
// - non-empty outnames.
//
// propagate_names_if_nonempty propagates the outnames if they exist to the
// result tensors.
//
// The {} case is an optimization; if the user does not use named tensors they
// pay no perf cost for it.

namespace namedinference {

const Tensor& propagate_names_if_present_and_nonempty(
    const Tensor& result,
    c10::optional<DimnameList> maybe_names,
    bool validate_names = false);
// Propagates `names` to `result` if `names` is not empty.
// `names` can be empty; see [NOTE] Writing name inference rules
// If `names` is not empty, `names.size()` should equal `result.dim()`.
// When in doubt, use this overload instead of the others.
TORCH_API const Tensor& propagate_names_if_nonempty(
    const Tensor& result,
    DimnameList maybe_names,
    bool validate_names = false);

// Propagates `names` to `result`. Only use this if we are certain that there
// are names to propagate (that names is not empty).
TORCH_API const Tensor& propagate_names(
    const Tensor& result,
    DimnameList names,
    bool validate_names = false);

// Propagates all names from src to result.
TORCH_API void propagate_names(const Tensor& result, const Tensor& src);

// Propagates all names except for those at the excluded_idxs.
TORCH_API void propagate_names_except(
    const Tensor& result,
    const Tensor& src,
    IntArrayRef excluded_idxs);

// Used for reduction ops that have a `keepdim` arg.
TORCH_API void propagate_names_for_reduction(
    const Tensor& result,
    const Tensor& src,
    IntArrayRef excluded_idxs,
    bool keepdim);

TORCH_API void propagate_names_for_expand(
    const Tensor& result,
    const Tensor& self);

TORCH_API std::vector<Dimname> compute_cat_outnames(
    const MaterializedITensorListRef& tensors);

TORCH_API std::vector<Dimname> compute_broadcast_outnames(
    const Tensor& self,
    const Tensor& other);

TORCH_API std::vector<Dimname> broadcast_to_outnames(
    const Tensor& tensor,
    const Tensor& reference_tensor,
    const char* op_name);

TORCH_API std::vector<Dimname> compute_matmul_outnames(
    const Tensor& self,
    const Tensor& other);

TORCH_API std::vector<Dimname> compute_cdist_outnames(
    const Tensor& self,
    const Tensor& other);

TORCH_API std::vector<Dimname> compute_bmm_outnames(
    const Tensor& result,
    const Tensor& self,
    const Tensor& other);

TORCH_API std::vector<Dimname> compute_squeeze_outnames(const Tensor& tensor);
TORCH_API std::vector<Dimname> compute_squeeze_outnames(
    const Tensor& tensor,
    std::bitset<dim_bitset_size> dims);

std::vector<Dimname> compute_diagonal_outnames(
    const Tensor& tensor,
    int64_t dim1,
    int64_t dim2);

// TensorImpl* overloads for Legacy TH/THC code. Use these sparingly.

TORCH_API TensorImpl* propagate_names_if_nonempty(
    TensorImpl* result,
    DimnameList maybe_names,
    bool validate_names = false);

TORCH_API TensorImpl* propagate_names(
    TensorImpl* result,
    DimnameList names,
    bool validate_names = false);

TORCH_API void propagate_names(TensorImpl* result, /*const */ TensorImpl* src);

TORCH_API inline void propagate_names(
    const TensorBase& result,
    DimnameList names,
    bool validate_names = false) {
  propagate_names(result.unsafeGetTensorImpl(), names, validate_names);
}

TORCH_API inline void propagate_names_if_nonempty(
    const TensorBase& result,
    DimnameList names,
    bool validate_names = false) {
  propagate_names_if_nonempty(
      result.unsafeGetTensorImpl(), names, validate_names);
}

TORCH_API inline void propagate_names(
    const TensorBase& result,
    const TensorBase& src) {
  propagate_names(result.unsafeGetTensorImpl(), src.unsafeGetTensorImpl());
}

// result = m1 @ m2 + bias
TORCH_API std::vector<Dimname> propagate_names_for_addmm(
    const Tensor& m1,
    const Tensor& m2,
    const Tensor& bias);

TORCH_API std::vector<Dimname> propagate_names_for_addmv(
    const Tensor& mat,
    const Tensor& vec,
    const Tensor& bias);

TORCH_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2);

TORCH_API std::vector<Dimname> compute_baddbmm_outnames(
    const Tensor& result,
    const Tensor& self,
    const Tensor& other,
    const Tensor& bias);

TORCH_API bool are_names_equal(TensorImpl* self, TensorImpl* other);

} // namespace namedinference

} // namespace at
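A concrete instance of the two-step pattern from [NOTE] Writing name inference rules, using the declarations above; my_op is a hypothetical stand-in and self.add(other) substitutes for the real unnamed kernel:

// Concrete instance of the two-step pattern above; my_op is a hypothetical
// stand-in and self.add(other) substitutes for the real unnamed kernel.
#include <ATen/ATen.h>
#include <ATen/NamedTensorUtils.h>

at::Tensor my_op(const at::Tensor& self, const at::Tensor& other) {
  // (1) compute outnames: unify both name lists from the right, as binary
  // broadcasting ops do.
  auto outnames = at::namedinference::compute_broadcast_outnames(self, other);
  auto result = [&]() {
    at::NoNamesGuard guard; // run the underlying op with names suppressed
    return self.add(other);
  }();
  // (2) propagate: a no-op when outnames is empty (all inputs unnamed).
  at::namedinference::propagate_names_if_nonempty(result, outnames);
  return result;
}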
venv/lib/python3.10/site-packages/torch/include/ATen/NativeFunctions.h
ADDED
@@ -0,0 +1,1317 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// @generated by torchgen/gen.py from NativeFunctions.h
|
4 |
+
|
5 |
+
#ifdef TORCH_ASSERT_NO_OPERATORS
|
6 |
+
#error This change adds a dependency on native_functions.yaml, \
|
7 |
+
meaning the file will need to be re-compiled every time an operator \
|
8 |
+
is changed or added. Consider if your change would be better placed in \
|
9 |
+
another file, or if a more specific header might achieve the same goal. \
|
10 |
+
See NOTE: [Tensor vs. TensorBase]
|
11 |
+
#endif
|
12 |
+
|
13 |
+
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
|
14 |
+
#error This change adds a dependency on all pytorch operators, meaning the \
|
15 |
+
file will need to be re-compiled every time an operator is changed or added. \
|
16 |
+
Consider including a specific operator from <ATen/ops/{my_operator}_native.h> \
|
17 |
+
and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
|
18 |
+
#endif
|
19 |
+
|
20 |
+
#include <c10/core/Scalar.h>
|
21 |
+
#include <c10/core/Storage.h>
|
22 |
+
#include <c10/core/TensorOptions.h>
|
23 |
+
#include <c10/util/Deprecated.h>
|
24 |
+
#include <c10/util/Optional.h>
|
25 |
+
#include <c10/core/QScheme.h>
|
26 |
+
#include <ATen/core/Reduction.h>
|
27 |
+
#include <ATen/core/Tensor.h>
|
28 |
+
#include <tuple>
|
29 |
+
#include <vector>
|
30 |
+
|
31 |
+
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
|
32 |
+
#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
|
33 |
+
#include <ATen/ops/_adaptive_avg_pool3d_native.h>
|
34 |
+
#include <ATen/ops/_adaptive_avg_pool3d_backward_native.h>
|
35 |
+
#include <ATen/ops/_add_batch_dim_native.h>
|
36 |
+
#include <ATen/ops/_add_relu_native.h>
|
37 |
+
#include <ATen/ops/_addmm_activation_native.h>
|
38 |
+
#include <ATen/ops/_aminmax_native.h>
|
39 |
+
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h>
|
40 |
+
#include <ATen/ops/_amp_update_scale_native.h>
|
41 |
+
#include <ATen/ops/_assert_async_native.h>
|
42 |
+
#include <ATen/ops/_assert_scalar_native.h>
|
43 |
+
#include <ATen/ops/_assert_tensor_metadata_native.h>
|
44 |
+
#include <ATen/ops/_autocast_to_full_precision_native.h>
|
45 |
+
#include <ATen/ops/_autocast_to_reduced_precision_native.h>
|
46 |
+
#include <ATen/ops/_backward_native.h>
|
47 |
+
#include <ATen/ops/_batch_norm_impl_index_native.h>
|
48 |
+
#include <ATen/ops/_batch_norm_impl_index_backward_native.h>
|
49 |
+
#include <ATen/ops/_cast_Byte_native.h>
|
50 |
+
#include <ATen/ops/_cast_Char_native.h>
|
51 |
+
#include <ATen/ops/_cast_Double_native.h>
|
52 |
+
#include <ATen/ops/_cast_Float_native.h>
|
53 |
+
#include <ATen/ops/_cast_Half_native.h>
|
54 |
+
#include <ATen/ops/_cast_Int_native.h>
|
55 |
+
#include <ATen/ops/_cast_Long_native.h>
|
56 |
+
#include <ATen/ops/_cast_Short_native.h>
|
57 |
+
#include <ATen/ops/_cdist_backward_native.h>
|
58 |
+
#include <ATen/ops/_cdist_forward_native.h>
|
59 |
+
#include <ATen/ops/_cholesky_solve_helper_native.h>
|
60 |
+
#include <ATen/ops/_choose_qparams_per_tensor_native.h>
|
61 |
+
#include <ATen/ops/_chunk_cat_native.h>
|
62 |
+
#include <ATen/ops/_coalesce_native.h>
|
63 |
+
#include <ATen/ops/_coalesced_native.h>
|
64 |
+
#include <ATen/ops/_compute_linear_combination_native.h>
|
65 |
+
#include <ATen/ops/_conj_native.h>
|
66 |
+
#include <ATen/ops/_conj_copy_native.h>
|
67 |
+
#include <ATen/ops/_conj_physical_native.h>
|
68 |
+
#include <ATen/ops/_conv_depthwise2d_native.h>
|
69 |
+
#include <ATen/ops/_convert_indices_from_coo_to_csr_native.h>
|
70 |
+
#include <ATen/ops/_convert_indices_from_csr_to_coo_native.h>
|
71 |
+
#include <ATen/ops/_convert_weight_to_int4pack_native.h>
|
72 |
+
#include <ATen/ops/_convolution_native.h>
|
73 |
+
#include <ATen/ops/_convolution_double_backward_native.h>
|
74 |
+
#include <ATen/ops/_convolution_mode_native.h>
|
75 |
+
#include <ATen/ops/_copy_from_native.h>
|
76 |
+
#include <ATen/ops/_copy_from_and_resize_native.h>
|
77 |
+
#include <ATen/ops/_cslt_compress_native.h>
|
78 |
+
#include <ATen/ops/_cslt_sparse_mm_native.h>
|
79 |
+
#include <ATen/ops/_cslt_sparse_mm_search_native.h>
|
80 |
+
#include <ATen/ops/_ctc_loss_native.h>
|
81 |
+
#include <ATen/ops/_ctc_loss_backward_native.h>
|
82 |
+
#include <ATen/ops/_cudnn_ctc_loss_native.h>
|
83 |
+
#include <ATen/ops/_cudnn_init_dropout_state_native.h>
|
84 |
+
#include <ATen/ops/_cudnn_rnn_native.h>
|
85 |
+
#include <ATen/ops/_cudnn_rnn_backward_native.h>
|
86 |
+
#include <ATen/ops/_cudnn_rnn_flatten_weight_native.h>
|
87 |
+
#include <ATen/ops/_cufft_clear_plan_cache_native.h>
|
88 |
+
#include <ATen/ops/_cufft_get_plan_cache_max_size_native.h>
|
89 |
+
#include <ATen/ops/_cufft_get_plan_cache_size_native.h>
|
90 |
+
#include <ATen/ops/_cufft_set_plan_cache_max_size_native.h>
|
91 |
+
#include <ATen/ops/_cummax_helper_native.h>
|
92 |
+
#include <ATen/ops/_cummin_helper_native.h>
|
93 |
+
#include <ATen/ops/_debug_has_internal_overlap_native.h>
|
94 |
+
#include <ATen/ops/_dimI_native.h>
|
95 |
+
#include <ATen/ops/_dimV_native.h>
+#include <ATen/ops/_dim_arange_native.h>
+#include <ATen/ops/_dirichlet_grad_native.h>
+#include <ATen/ops/_efficient_attention_backward_native.h>
+#include <ATen/ops/_efficient_attention_forward_native.h>
+#include <ATen/ops/_efficientzerotensor_native.h>
+#include <ATen/ops/_embedding_bag_native.h>
+#include <ATen/ops/_embedding_bag_backward_native.h>
+#include <ATen/ops/_embedding_bag_dense_backward_native.h>
+#include <ATen/ops/_embedding_bag_forward_only_native.h>
+#include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h>
+#include <ATen/ops/_embedding_bag_sparse_backward_native.h>
+#include <ATen/ops/_empty_affine_quantized_native.h>
+#include <ATen/ops/_empty_per_channel_affine_quantized_native.h>
+#include <ATen/ops/_euclidean_dist_native.h>
+#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_native.h>
+#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_native.h>
+#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_native.h>
+#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_native.h>
+#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_native.h>
+#include <ATen/ops/_fft_c2c_native.h>
+#include <ATen/ops/_fft_c2r_native.h>
+#include <ATen/ops/_fft_r2c_native.h>
+#include <ATen/ops/_fill_mem_eff_dropout_mask_native.h>
+#include <ATen/ops/_flash_attention_backward_native.h>
+#include <ATen/ops/_flash_attention_forward_native.h>
+#include <ATen/ops/_foobar_native.h>
+#include <ATen/ops/_foreach_abs_native.h>
+#include <ATen/ops/_foreach_acos_native.h>
+#include <ATen/ops/_foreach_add_native.h>
+#include <ATen/ops/_foreach_addcdiv_native.h>
+#include <ATen/ops/_foreach_addcmul_native.h>
+#include <ATen/ops/_foreach_asin_native.h>
+#include <ATen/ops/_foreach_atan_native.h>
+#include <ATen/ops/_foreach_ceil_native.h>
+#include <ATen/ops/_foreach_clamp_max_native.h>
+#include <ATen/ops/_foreach_clamp_min_native.h>
+#include <ATen/ops/_foreach_copy_native.h>
+#include <ATen/ops/_foreach_cos_native.h>
+#include <ATen/ops/_foreach_cosh_native.h>
+#include <ATen/ops/_foreach_div_native.h>
+#include <ATen/ops/_foreach_erf_native.h>
+#include <ATen/ops/_foreach_erfc_native.h>
+#include <ATen/ops/_foreach_exp_native.h>
+#include <ATen/ops/_foreach_expm1_native.h>
+#include <ATen/ops/_foreach_floor_native.h>
+#include <ATen/ops/_foreach_frac_native.h>
+#include <ATen/ops/_foreach_lerp_native.h>
+#include <ATen/ops/_foreach_lgamma_native.h>
+#include <ATen/ops/_foreach_log_native.h>
+#include <ATen/ops/_foreach_log10_native.h>
+#include <ATen/ops/_foreach_log1p_native.h>
+#include <ATen/ops/_foreach_log2_native.h>
+#include <ATen/ops/_foreach_maximum_native.h>
+#include <ATen/ops/_foreach_minimum_native.h>
+#include <ATen/ops/_foreach_mul_native.h>
+#include <ATen/ops/_foreach_neg_native.h>
+#include <ATen/ops/_foreach_norm_native.h>
+#include <ATen/ops/_foreach_pow_native.h>
+#include <ATen/ops/_foreach_reciprocal_native.h>
+#include <ATen/ops/_foreach_round_native.h>
+#include <ATen/ops/_foreach_sigmoid_native.h>
+#include <ATen/ops/_foreach_sign_native.h>
+#include <ATen/ops/_foreach_sin_native.h>
+#include <ATen/ops/_foreach_sinh_native.h>
+#include <ATen/ops/_foreach_sqrt_native.h>
+#include <ATen/ops/_foreach_sub_native.h>
+#include <ATen/ops/_foreach_tan_native.h>
+#include <ATen/ops/_foreach_tanh_native.h>
+#include <ATen/ops/_foreach_trunc_native.h>
+#include <ATen/ops/_foreach_zero_native.h>
+#include <ATen/ops/_functional_assert_async_native.h>
+#include <ATen/ops/_functional_assert_scalar_native.h>
+#include <ATen/ops/_functional_sym_constrain_range_native.h>
+#include <ATen/ops/_functional_sym_constrain_range_for_size_native.h>
+#include <ATen/ops/_fused_adam_native.h>
+#include <ATen/ops/_fused_adamw_native.h>
+#include <ATen/ops/_fused_dropout_native.h>
+#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
+#include <ATen/ops/_fused_sdp_choice_native.h>
+#include <ATen/ops/_fused_sgd_native.h>
+#include <ATen/ops/_fw_primal_native.h>
+#include <ATen/ops/_fw_primal_copy_native.h>
+#include <ATen/ops/_gather_sparse_backward_native.h>
+#include <ATen/ops/_grid_sampler_2d_cpu_fallback_native.h>
+#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_native.h>
+#include <ATen/ops/_has_compatible_shallow_copy_type_native.h>
+#include <ATen/ops/_has_same_storage_numel_native.h>
+#include <ATen/ops/_histogramdd_bin_edges_native.h>
+#include <ATen/ops/_histogramdd_from_bin_cts_native.h>
+#include <ATen/ops/_histogramdd_from_bin_tensors_native.h>
+#include <ATen/ops/_index_put_impl_native.h>
+#include <ATen/ops/_indices_native.h>
+#include <ATen/ops/_indices_copy_native.h>
+#include <ATen/ops/_int_mm_native.h>
+#include <ATen/ops/_is_all_true_native.h>
+#include <ATen/ops/_is_any_true_native.h>
+#include <ATen/ops/_is_zerotensor_native.h>
+#include <ATen/ops/_lazy_clone_native.h>
+#include <ATen/ops/_linalg_check_errors_native.h>
+#include <ATen/ops/_linalg_det_native.h>
+#include <ATen/ops/_linalg_eigh_native.h>
+#include <ATen/ops/_linalg_eigvals_native.h>
+#include <ATen/ops/_linalg_slogdet_native.h>
+#include <ATen/ops/_linalg_solve_ex_native.h>
+#include <ATen/ops/_linalg_svd_native.h>
+#include <ATen/ops/_local_scalar_dense_native.h>
+#include <ATen/ops/_log_softmax_native.h>
+#include <ATen/ops/_log_softmax_backward_data_native.h>
+#include <ATen/ops/_logcumsumexp_native.h>
+#include <ATen/ops/_lstm_mps_native.h>
+#include <ATen/ops/_lu_with_info_native.h>
+#include <ATen/ops/_make_dep_token_native.h>
+#include <ATen/ops/_make_dual_native.h>
+#include <ATen/ops/_make_dual_copy_native.h>
+#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
+#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
+#include <ATen/ops/_masked_scale_native.h>
+#include <ATen/ops/_masked_softmax_native.h>
+#include <ATen/ops/_masked_softmax_backward_native.h>
+#include <ATen/ops/_mixed_dtypes_linear_native.h>
+#include <ATen/ops/_mkldnn_reshape_native.h>
+#include <ATen/ops/_mkldnn_transpose_native.h>
+#include <ATen/ops/_mps_convolution_native.h>
+#include <ATen/ops/_mps_convolution_transpose_native.h>
+#include <ATen/ops/_native_batch_norm_legit_native.h>
+#include <ATen/ops/_native_batch_norm_legit_no_training_native.h>
+#include <ATen/ops/_native_multi_head_attention_native.h>
+#include <ATen/ops/_neg_view_native.h>
+#include <ATen/ops/_neg_view_copy_native.h>
+#include <ATen/ops/_nested_from_padded_native.h>
+#include <ATen/ops/_nested_from_padded_and_nested_example_native.h>
+#include <ATen/ops/_nested_get_jagged_dummy_native.h>
+#include <ATen/ops/_nested_get_lengths_native.h>
+#include <ATen/ops/_nested_get_offsets_native.h>
+#include <ATen/ops/_nested_get_ragged_idx_native.h>
+#include <ATen/ops/_nested_get_values_native.h>
+#include <ATen/ops/_nested_get_values_copy_native.h>
+#include <ATen/ops/_nested_select_backward_native.h>
+#include <ATen/ops/_nested_sum_backward_native.h>
+#include <ATen/ops/_nested_tensor_from_mask_native.h>
+#include <ATen/ops/_nested_tensor_from_mask_left_aligned_native.h>
+#include <ATen/ops/_nested_tensor_from_tensor_list_native.h>
+#include <ATen/ops/_nested_tensor_size_native.h>
+#include <ATen/ops/_nested_tensor_softmax_with_shape_native.h>
+#include <ATen/ops/_nested_tensor_storage_offsets_native.h>
+#include <ATen/ops/_nested_tensor_strides_native.h>
+#include <ATen/ops/_nested_view_from_buffer_native.h>
+#include <ATen/ops/_nested_view_from_buffer_copy_native.h>
+#include <ATen/ops/_nested_view_from_jagged_native.h>
+#include <ATen/ops/_nested_view_from_jagged_copy_native.h>
+#include <ATen/ops/_new_zeros_with_same_feature_meta_native.h>
+#include <ATen/ops/_nnpack_available_native.h>
+#include <ATen/ops/_nnpack_spatial_convolution_native.h>
+#include <ATen/ops/_nnz_native.h>
+#include <ATen/ops/_pack_padded_sequence_native.h>
+#include <ATen/ops/_pack_padded_sequence_backward_native.h>
+#include <ATen/ops/_pad_circular_native.h>
+#include <ATen/ops/_pad_enum_native.h>
+#include <ATen/ops/_pad_packed_sequence_native.h>
+#include <ATen/ops/_pdist_backward_native.h>
+#include <ATen/ops/_pdist_forward_native.h>
+#include <ATen/ops/_pin_memory_native.h>
+#include <ATen/ops/_prelu_kernel_native.h>
+#include <ATen/ops/_prelu_kernel_backward_native.h>
+#include <ATen/ops/_print_native.h>
+#include <ATen/ops/_propagate_xla_data_native.h>
+#include <ATen/ops/_remove_batch_dim_native.h>
+#include <ATen/ops/_reshape_alias_native.h>
+#include <ATen/ops/_reshape_alias_copy_native.h>
+#include <ATen/ops/_reshape_copy_native.h>
+#include <ATen/ops/_reshape_from_tensor_native.h>
+#include <ATen/ops/_resize_output_native.h>
+#include <ATen/ops/_rowwise_prune_native.h>
+#include <ATen/ops/_sample_dirichlet_native.h>
+#include <ATen/ops/_saturate_weight_to_fp16_native.h>
+#include <ATen/ops/_scaled_dot_product_attention_math_native.h>
+#include <ATen/ops/_scaled_dot_product_cudnn_attention_native.h>
+#include <ATen/ops/_scaled_dot_product_efficient_attention_native.h>
+#include <ATen/ops/_scaled_dot_product_efficient_attention_backward_native.h>
+#include <ATen/ops/_scaled_dot_product_flash_attention_native.h>
+#include <ATen/ops/_scaled_dot_product_flash_attention_backward_native.h>
+#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_native.h>
+#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_native.h>
+#include <ATen/ops/_scaled_mm_native.h>
+#include <ATen/ops/_segment_reduce_backward_native.h>
+#include <ATen/ops/_shape_as_tensor_native.h>
+#include <ATen/ops/_slow_conv2d_backward_native.h>
+#include <ATen/ops/_slow_conv2d_forward_native.h>
+#include <ATen/ops/_sobol_engine_draw_native.h>
+#include <ATen/ops/_sobol_engine_ff_native.h>
+#include <ATen/ops/_sobol_engine_initialize_state_native.h>
+#include <ATen/ops/_sobol_engine_scramble_native.h>
+#include <ATen/ops/_softmax_native.h>
+#include <ATen/ops/_softmax_backward_data_native.h>
+#include <ATen/ops/_sparse_addmm_native.h>
+#include <ATen/ops/_sparse_broadcast_to_native.h>
+#include <ATen/ops/_sparse_broadcast_to_copy_native.h>
+#include <ATen/ops/_sparse_bsc_tensor_unsafe_native.h>
+#include <ATen/ops/_sparse_bsr_tensor_unsafe_native.h>
+#include <ATen/ops/_sparse_compressed_tensor_unsafe_native.h>
+#include <ATen/ops/_sparse_coo_tensor_unsafe_native.h>
+#include <ATen/ops/_sparse_coo_tensor_with_dims_native.h>
+#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h>
+#include <ATen/ops/_sparse_csc_tensor_unsafe_native.h>
+#include <ATen/ops/_sparse_csr_prod_native.h>
+#include <ATen/ops/_sparse_csr_sum_native.h>
+#include <ATen/ops/_sparse_csr_tensor_unsafe_native.h>
+#include <ATen/ops/_sparse_log_softmax_native.h>
+#include <ATen/ops/_sparse_log_softmax_backward_data_native.h>
+#include <ATen/ops/_sparse_mask_projection_native.h>
+#include <ATen/ops/_sparse_mm_native.h>
+#include <ATen/ops/_sparse_mm_reduce_impl_native.h>
+#include <ATen/ops/_sparse_mm_reduce_impl_backward_native.h>
+#include <ATen/ops/_sparse_semi_structured_linear_native.h>
+#include <ATen/ops/_sparse_softmax_native.h>
+#include <ATen/ops/_sparse_softmax_backward_data_native.h>
+#include <ATen/ops/_sparse_sparse_matmul_native.h>
+#include <ATen/ops/_sparse_sum_native.h>
+#include <ATen/ops/_sparse_sum_backward_native.h>
+#include <ATen/ops/_spdiags_native.h>
+#include <ATen/ops/_stack_native.h>
+#include <ATen/ops/_standard_gamma_native.h>
+#include <ATen/ops/_standard_gamma_grad_native.h>
+#include <ATen/ops/_test_ambiguous_defaults_native.h>
+#include <ATen/ops/_test_autograd_multiple_dispatch_native.h>
+#include <ATen/ops/_test_autograd_multiple_dispatch_view_native.h>
+#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_native.h>
+#include <ATen/ops/_test_check_tensor_native.h>
+#include <ATen/ops/_test_functorch_fallback_native.h>
+#include <ATen/ops/_test_optional_filled_intlist_native.h>
+#include <ATen/ops/_test_optional_floatlist_native.h>
+#include <ATen/ops/_test_optional_intlist_native.h>
+#include <ATen/ops/_test_parallel_materialize_native.h>
+#include <ATen/ops/_test_serialization_subcmul_native.h>
+#include <ATen/ops/_test_string_default_native.h>
+#include <ATen/ops/_test_warn_in_autograd_native.h>
+#include <ATen/ops/_thnn_differentiable_gru_cell_backward_native.h>
+#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_native.h>
+#include <ATen/ops/_thnn_fused_gru_cell_native.h>
+#include <ATen/ops/_thnn_fused_gru_cell_backward_native.h>
+#include <ATen/ops/_thnn_fused_lstm_cell_native.h>
+#include <ATen/ops/_thnn_fused_lstm_cell_backward_native.h>
+#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_native.h>
+#include <ATen/ops/_to_copy_native.h>
+#include <ATen/ops/_to_cpu_native.h>
+#include <ATen/ops/_to_dense_native.h>
+#include <ATen/ops/_to_sparse_native.h>
+#include <ATen/ops/_to_sparse_bsc_native.h>
+#include <ATen/ops/_to_sparse_bsr_native.h>
+#include <ATen/ops/_to_sparse_csc_native.h>
+#include <ATen/ops/_to_sparse_csr_native.h>
+#include <ATen/ops/_to_sparse_semi_structured_native.h>
+#include <ATen/ops/_transform_bias_rescale_qkv_native.h>
+#include <ATen/ops/_transformer_encoder_layer_fwd_native.h>
+#include <ATen/ops/_trilinear_native.h>
+#include <ATen/ops/_triton_multi_head_attention_native.h>
+#include <ATen/ops/_triton_scaled_dot_attention_native.h>
+#include <ATen/ops/_unique_native.h>
+#include <ATen/ops/_unique2_native.h>
+#include <ATen/ops/_unpack_dual_native.h>
+#include <ATen/ops/_unsafe_index_native.h>
+#include <ATen/ops/_unsafe_index_put_native.h>
+#include <ATen/ops/_unsafe_view_native.h>
+#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
+#include <ATen/ops/_upsample_bicubic2d_aa_backward_native.h>
+#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
+#include <ATen/ops/_upsample_bilinear2d_aa_backward_native.h>
+#include <ATen/ops/_upsample_nearest_exact1d_native.h>
+#include <ATen/ops/_upsample_nearest_exact1d_backward_native.h>
+#include <ATen/ops/_upsample_nearest_exact2d_native.h>
+#include <ATen/ops/_upsample_nearest_exact2d_backward_native.h>
+#include <ATen/ops/_upsample_nearest_exact3d_native.h>
+#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
+#include <ATen/ops/_use_cudnn_ctc_loss_native.h>
+#include <ATen/ops/_use_cudnn_rnn_flatten_weight_native.h>
+#include <ATen/ops/_validate_compressed_sparse_indices_native.h>
+#include <ATen/ops/_validate_sparse_bsc_tensor_args_native.h>
+#include <ATen/ops/_validate_sparse_bsr_tensor_args_native.h>
+#include <ATen/ops/_validate_sparse_compressed_tensor_args_native.h>
+#include <ATen/ops/_validate_sparse_coo_tensor_args_native.h>
+#include <ATen/ops/_validate_sparse_csc_tensor_args_native.h>
+#include <ATen/ops/_validate_sparse_csr_tensor_args_native.h>
+#include <ATen/ops/_values_native.h>
+#include <ATen/ops/_values_copy_native.h>
+#include <ATen/ops/_version_native.h>
+#include <ATen/ops/_weight_int4pack_mm_native.h>
+#include <ATen/ops/_weight_int8pack_mm_native.h>
+#include <ATen/ops/_weight_norm_native.h>
+#include <ATen/ops/_weight_norm_differentiable_backward_native.h>
+#include <ATen/ops/_weight_norm_interface_native.h>
+#include <ATen/ops/_weight_norm_interface_backward_native.h>
+#include <ATen/ops/abs_native.h>
+#include <ATen/ops/absolute_native.h>
+#include <ATen/ops/acos_native.h>
+#include <ATen/ops/acosh_native.h>
+#include <ATen/ops/adaptive_avg_pool1d_native.h>
+#include <ATen/ops/adaptive_avg_pool2d_native.h>
+#include <ATen/ops/adaptive_avg_pool3d_native.h>
+#include <ATen/ops/adaptive_avg_pool3d_backward_native.h>
+#include <ATen/ops/adaptive_max_pool1d_native.h>
+#include <ATen/ops/adaptive_max_pool2d_native.h>
+#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
+#include <ATen/ops/adaptive_max_pool3d_native.h>
+#include <ATen/ops/adaptive_max_pool3d_backward_native.h>
+#include <ATen/ops/add_native.h>
+#include <ATen/ops/addbmm_native.h>
+#include <ATen/ops/addcdiv_native.h>
+#include <ATen/ops/addcmul_native.h>
+#include <ATen/ops/addmm_native.h>
+#include <ATen/ops/addmv_native.h>
+#include <ATen/ops/addr_native.h>
+#include <ATen/ops/adjoint_native.h>
+#include <ATen/ops/affine_grid_generator_native.h>
+#include <ATen/ops/affine_grid_generator_backward_native.h>
+#include <ATen/ops/alias_native.h>
+#include <ATen/ops/alias_copy_native.h>
+#include <ATen/ops/align_as_native.h>
+#include <ATen/ops/align_tensors_native.h>
+#include <ATen/ops/align_to_native.h>
+#include <ATen/ops/all_native.h>
+#include <ATen/ops/allclose_native.h>
+#include <ATen/ops/alpha_dropout_native.h>
+#include <ATen/ops/amax_native.h>
+#include <ATen/ops/amin_native.h>
+#include <ATen/ops/aminmax_native.h>
+#include <ATen/ops/and_native.h>
+#include <ATen/ops/angle_native.h>
+#include <ATen/ops/any_native.h>
+#include <ATen/ops/arange_native.h>
+#include <ATen/ops/arccos_native.h>
+#include <ATen/ops/arccosh_native.h>
+#include <ATen/ops/arcsin_native.h>
+#include <ATen/ops/arcsinh_native.h>
+#include <ATen/ops/arctan_native.h>
+#include <ATen/ops/arctan2_native.h>
+#include <ATen/ops/arctanh_native.h>
+#include <ATen/ops/argmax_native.h>
+#include <ATen/ops/argmin_native.h>
+#include <ATen/ops/argsort_native.h>
+#include <ATen/ops/argwhere_native.h>
+#include <ATen/ops/as_strided_native.h>
+#include <ATen/ops/as_strided_copy_native.h>
+#include <ATen/ops/as_strided_scatter_native.h>
+#include <ATen/ops/asin_native.h>
+#include <ATen/ops/asinh_native.h>
+#include <ATen/ops/atan_native.h>
+#include <ATen/ops/atan2_native.h>
+#include <ATen/ops/atanh_native.h>
+#include <ATen/ops/atleast_1d_native.h>
+#include <ATen/ops/atleast_2d_native.h>
+#include <ATen/ops/atleast_3d_native.h>
+#include <ATen/ops/avg_pool1d_native.h>
+#include <ATen/ops/avg_pool2d_native.h>
+#include <ATen/ops/avg_pool2d_backward_native.h>
+#include <ATen/ops/avg_pool3d_native.h>
+#include <ATen/ops/avg_pool3d_backward_native.h>
+#include <ATen/ops/baddbmm_native.h>
+#include <ATen/ops/bartlett_window_native.h>
+#include <ATen/ops/batch_norm_native.h>
+#include <ATen/ops/batch_norm_backward_elemt_native.h>
+#include <ATen/ops/batch_norm_backward_reduce_native.h>
+#include <ATen/ops/batch_norm_elemt_native.h>
+#include <ATen/ops/batch_norm_gather_stats_native.h>
+#include <ATen/ops/batch_norm_gather_stats_with_counts_native.h>
+#include <ATen/ops/batch_norm_stats_native.h>
+#include <ATen/ops/batch_norm_update_stats_native.h>
+#include <ATen/ops/bernoulli_native.h>
+#include <ATen/ops/bilinear_native.h>
+#include <ATen/ops/binary_cross_entropy_native.h>
+#include <ATen/ops/binary_cross_entropy_backward_native.h>
+#include <ATen/ops/binary_cross_entropy_with_logits_native.h>
+#include <ATen/ops/bincount_native.h>
+#include <ATen/ops/binomial_native.h>
+#include <ATen/ops/bitwise_and_native.h>
+#include <ATen/ops/bitwise_left_shift_native.h>
+#include <ATen/ops/bitwise_not_native.h>
+#include <ATen/ops/bitwise_or_native.h>
+#include <ATen/ops/bitwise_right_shift_native.h>
+#include <ATen/ops/bitwise_xor_native.h>
+#include <ATen/ops/blackman_window_native.h>
+#include <ATen/ops/block_diag_native.h>
+#include <ATen/ops/bmm_native.h>
+#include <ATen/ops/broadcast_tensors_native.h>
+#include <ATen/ops/broadcast_to_native.h>
+#include <ATen/ops/bucketize_native.h>
+#include <ATen/ops/can_cast_native.h>
+#include <ATen/ops/cartesian_prod_native.h>
+#include <ATen/ops/cat_native.h>
+#include <ATen/ops/cauchy_native.h>
+#include <ATen/ops/ccol_indices_native.h>
+#include <ATen/ops/ccol_indices_copy_native.h>
+#include <ATen/ops/cdist_native.h>
+#include <ATen/ops/ceil_native.h>
+#include <ATen/ops/celu_native.h>
+#include <ATen/ops/chain_matmul_native.h>
+#include <ATen/ops/chalf_native.h>
+#include <ATen/ops/channel_shuffle_native.h>
+#include <ATen/ops/cholesky_native.h>
+#include <ATen/ops/cholesky_inverse_native.h>
+#include <ATen/ops/cholesky_solve_native.h>
+#include <ATen/ops/choose_qparams_optimized_native.h>
+#include <ATen/ops/chunk_native.h>
+#include <ATen/ops/clamp_native.h>
+#include <ATen/ops/clamp_max_native.h>
+#include <ATen/ops/clamp_min_native.h>
+#include <ATen/ops/clip_native.h>
+#include <ATen/ops/clone_native.h>
+#include <ATen/ops/coalesce_native.h>
+#include <ATen/ops/col2im_native.h>
+#include <ATen/ops/col_indices_native.h>
+#include <ATen/ops/col_indices_copy_native.h>
+#include <ATen/ops/column_stack_native.h>
+#include <ATen/ops/combinations_native.h>
+#include <ATen/ops/complex_native.h>
+#include <ATen/ops/concat_native.h>
+#include <ATen/ops/concatenate_native.h>
+#include <ATen/ops/conj_native.h>
+#include <ATen/ops/conj_physical_native.h>
+#include <ATen/ops/constant_pad_nd_native.h>
+#include <ATen/ops/contiguous_native.h>
+#include <ATen/ops/conv1d_native.h>
+#include <ATen/ops/conv2d_native.h>
+#include <ATen/ops/conv3d_native.h>
+#include <ATen/ops/conv_depthwise3d_native.h>
+#include <ATen/ops/conv_tbc_native.h>
+#include <ATen/ops/conv_tbc_backward_native.h>
+#include <ATen/ops/conv_transpose1d_native.h>
+#include <ATen/ops/conv_transpose2d_native.h>
+#include <ATen/ops/conv_transpose3d_native.h>
+#include <ATen/ops/convolution_native.h>
+#include <ATen/ops/convolution_backward_native.h>
+#include <ATen/ops/convolution_backward_overrideable_native.h>
+#include <ATen/ops/convolution_overrideable_native.h>
+#include <ATen/ops/copy_native.h>
+#include <ATen/ops/copy_sparse_to_sparse_native.h>
+#include <ATen/ops/copysign_native.h>
+#include <ATen/ops/corrcoef_native.h>
+#include <ATen/ops/cos_native.h>
+#include <ATen/ops/cosh_native.h>
+#include <ATen/ops/cosine_embedding_loss_native.h>
+#include <ATen/ops/cosine_similarity_native.h>
+#include <ATen/ops/count_nonzero_native.h>
+#include <ATen/ops/cov_native.h>
+#include <ATen/ops/cross_native.h>
+#include <ATen/ops/cross_entropy_loss_native.h>
+#include <ATen/ops/crow_indices_native.h>
+#include <ATen/ops/crow_indices_copy_native.h>
+#include <ATen/ops/ctc_loss_native.h>
+#include <ATen/ops/cudnn_affine_grid_generator_native.h>
+#include <ATen/ops/cudnn_affine_grid_generator_backward_native.h>
+#include <ATen/ops/cudnn_batch_norm_native.h>
+#include <ATen/ops/cudnn_batch_norm_backward_native.h>
+#include <ATen/ops/cudnn_convolution_native.h>
+#include <ATen/ops/cudnn_convolution_add_relu_native.h>
+#include <ATen/ops/cudnn_convolution_relu_native.h>
+#include <ATen/ops/cudnn_convolution_transpose_native.h>
+#include <ATen/ops/cudnn_grid_sampler_native.h>
+#include <ATen/ops/cudnn_grid_sampler_backward_native.h>
+#include <ATen/ops/cudnn_is_acceptable_native.h>
+#include <ATen/ops/cummax_native.h>
+#include <ATen/ops/cummaxmin_backward_native.h>
+#include <ATen/ops/cummin_native.h>
+#include <ATen/ops/cumprod_native.h>
+#include <ATen/ops/cumprod_backward_native.h>
+#include <ATen/ops/cumsum_native.h>
+#include <ATen/ops/cumulative_trapezoid_native.h>
+#include <ATen/ops/data_native.h>
+#include <ATen/ops/deg2rad_native.h>
+#include <ATen/ops/dense_dim_native.h>
+#include <ATen/ops/dequantize_native.h>
+#include <ATen/ops/det_native.h>
+#include <ATen/ops/detach_native.h>
+#include <ATen/ops/detach_copy_native.h>
+#include <ATen/ops/diag_native.h>
+#include <ATen/ops/diag_embed_native.h>
+#include <ATen/ops/diagflat_native.h>
+#include <ATen/ops/diagonal_native.h>
+#include <ATen/ops/diagonal_backward_native.h>
+#include <ATen/ops/diagonal_copy_native.h>
+#include <ATen/ops/diagonal_scatter_native.h>
+#include <ATen/ops/diff_native.h>
+#include <ATen/ops/digamma_native.h>
+#include <ATen/ops/dist_native.h>
+#include <ATen/ops/div_native.h>
+#include <ATen/ops/divide_native.h>
+#include <ATen/ops/dot_native.h>
+#include <ATen/ops/dropout_native.h>
+#include <ATen/ops/dsplit_native.h>
+#include <ATen/ops/dstack_native.h>
+#include <ATen/ops/einsum_native.h>
+#include <ATen/ops/elu_native.h>
+#include <ATen/ops/elu_backward_native.h>
+#include <ATen/ops/embedding_native.h>
+#include <ATen/ops/embedding_backward_native.h>
+#include <ATen/ops/embedding_bag_native.h>
+#include <ATen/ops/embedding_dense_backward_native.h>
+#include <ATen/ops/embedding_renorm_native.h>
+#include <ATen/ops/embedding_sparse_backward_native.h>
+#include <ATen/ops/empty_native.h>
+#include <ATen/ops/empty_like_native.h>
+#include <ATen/ops/empty_permuted_native.h>
+#include <ATen/ops/empty_quantized_native.h>
+#include <ATen/ops/empty_strided_native.h>
+#include <ATen/ops/eq_native.h>
+#include <ATen/ops/equal_native.h>
+#include <ATen/ops/erf_native.h>
+#include <ATen/ops/erfc_native.h>
+#include <ATen/ops/erfinv_native.h>
+#include <ATen/ops/exp_native.h>
+#include <ATen/ops/exp2_native.h>
+#include <ATen/ops/expand_native.h>
+#include <ATen/ops/expand_as_native.h>
+#include <ATen/ops/expand_copy_native.h>
+#include <ATen/ops/expm1_native.h>
+#include <ATen/ops/exponential_native.h>
+#include <ATen/ops/eye_native.h>
+#include <ATen/ops/fake_quantize_per_channel_affine_native.h>
+#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_native.h>
+#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_native.h>
+#include <ATen/ops/fake_quantize_per_tensor_affine_native.h>
+#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h>
+#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h>
+#include <ATen/ops/fbgemm_linear_fp16_weight_native.h>
+#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_native.h>
+#include <ATen/ops/fbgemm_linear_int8_weight_native.h>
+#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_native.h>
+#include <ATen/ops/fbgemm_linear_quantize_weight_native.h>
+#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_native.h>
+#include <ATen/ops/fbgemm_pack_quantized_matrix_native.h>
+#include <ATen/ops/feature_alpha_dropout_native.h>
+#include <ATen/ops/feature_dropout_native.h>
+#include <ATen/ops/fft_fft_native.h>
+#include <ATen/ops/fft_fft2_native.h>
+#include <ATen/ops/fft_fftfreq_native.h>
+#include <ATen/ops/fft_fftn_native.h>
+#include <ATen/ops/fft_fftshift_native.h>
+#include <ATen/ops/fft_hfft_native.h>
+#include <ATen/ops/fft_hfft2_native.h>
+#include <ATen/ops/fft_hfftn_native.h>
+#include <ATen/ops/fft_ifft_native.h>
+#include <ATen/ops/fft_ifft2_native.h>
+#include <ATen/ops/fft_ifftn_native.h>
+#include <ATen/ops/fft_ifftshift_native.h>
+#include <ATen/ops/fft_ihfft_native.h>
+#include <ATen/ops/fft_ihfft2_native.h>
+#include <ATen/ops/fft_ihfftn_native.h>
+#include <ATen/ops/fft_irfft_native.h>
+#include <ATen/ops/fft_irfft2_native.h>
+#include <ATen/ops/fft_irfftn_native.h>
+#include <ATen/ops/fft_rfft_native.h>
+#include <ATen/ops/fft_rfft2_native.h>
+#include <ATen/ops/fft_rfftfreq_native.h>
+#include <ATen/ops/fft_rfftn_native.h>
+#include <ATen/ops/fill_native.h>
+#include <ATen/ops/fill_diagonal_native.h>
+#include <ATen/ops/fix_native.h>
+#include <ATen/ops/flatten_native.h>
+#include <ATen/ops/flatten_dense_tensors_native.h>
+#include <ATen/ops/flip_native.h>
+#include <ATen/ops/fliplr_native.h>
+#include <ATen/ops/flipud_native.h>
+#include <ATen/ops/float_power_native.h>
+#include <ATen/ops/floor_native.h>
+#include <ATen/ops/floor_divide_native.h>
+#include <ATen/ops/fmax_native.h>
+#include <ATen/ops/fmin_native.h>
+#include <ATen/ops/fmod_native.h>
+#include <ATen/ops/frac_native.h>
+#include <ATen/ops/fractional_max_pool2d_native.h>
+#include <ATen/ops/fractional_max_pool2d_backward_native.h>
+#include <ATen/ops/fractional_max_pool3d_native.h>
+#include <ATen/ops/fractional_max_pool3d_backward_native.h>
+#include <ATen/ops/frexp_native.h>
+#include <ATen/ops/frobenius_norm_native.h>
+#include <ATen/ops/from_file_native.h>
+#include <ATen/ops/full_native.h>
+#include <ATen/ops/full_like_native.h>
+#include <ATen/ops/fused_moving_avg_obs_fake_quant_native.h>
+#include <ATen/ops/gather_native.h>
+#include <ATen/ops/gather_backward_native.h>
+#include <ATen/ops/gcd_native.h>
+#include <ATen/ops/ge_native.h>
+#include <ATen/ops/gelu_native.h>
+#include <ATen/ops/gelu_backward_native.h>
+#include <ATen/ops/geometric_native.h>
+#include <ATen/ops/geqrf_native.h>
+#include <ATen/ops/ger_native.h>
+#include <ATen/ops/glu_native.h>
+#include <ATen/ops/glu_backward_native.h>
+#include <ATen/ops/glu_backward_jvp_native.h>
+#include <ATen/ops/glu_jvp_native.h>
+#include <ATen/ops/gradient_native.h>
+#include <ATen/ops/greater_native.h>
+#include <ATen/ops/greater_equal_native.h>
+#include <ATen/ops/grid_sampler_native.h>
+#include <ATen/ops/grid_sampler_2d_native.h>
+#include <ATen/ops/grid_sampler_2d_backward_native.h>
+#include <ATen/ops/grid_sampler_3d_native.h>
+#include <ATen/ops/grid_sampler_3d_backward_native.h>
+#include <ATen/ops/group_norm_native.h>
+#include <ATen/ops/gru_native.h>
+#include <ATen/ops/gru_cell_native.h>
+#include <ATen/ops/gt_native.h>
+#include <ATen/ops/hamming_window_native.h>
+#include <ATen/ops/hann_window_native.h>
+#include <ATen/ops/hardshrink_native.h>
+#include <ATen/ops/hardshrink_backward_native.h>
+#include <ATen/ops/hardsigmoid_native.h>
+#include <ATen/ops/hardsigmoid_backward_native.h>
+#include <ATen/ops/hardswish_native.h>
+#include <ATen/ops/hardswish_backward_native.h>
+#include <ATen/ops/hardtanh_native.h>
+#include <ATen/ops/hardtanh_backward_native.h>
+#include <ATen/ops/heaviside_native.h>
+#include <ATen/ops/hinge_embedding_loss_native.h>
+#include <ATen/ops/histc_native.h>
+#include <ATen/ops/histogram_native.h>
+#include <ATen/ops/histogramdd_native.h>
+#include <ATen/ops/hsplit_native.h>
+#include <ATen/ops/hspmm_native.h>
+#include <ATen/ops/hstack_native.h>
+#include <ATen/ops/huber_loss_native.h>
+#include <ATen/ops/huber_loss_backward_native.h>
+#include <ATen/ops/hypot_native.h>
+#include <ATen/ops/i0_native.h>
+#include <ATen/ops/igamma_native.h>
+#include <ATen/ops/igammac_native.h>
+#include <ATen/ops/im2col_native.h>
+#include <ATen/ops/imag_native.h>
+#include <ATen/ops/index_native.h>
+#include <ATen/ops/index_add_native.h>
+#include <ATen/ops/index_copy_native.h>
+#include <ATen/ops/index_fill_native.h>
+#include <ATen/ops/index_put_native.h>
+#include <ATen/ops/index_reduce_native.h>
+#include <ATen/ops/index_select_native.h>
+#include <ATen/ops/index_select_backward_native.h>
+#include <ATen/ops/indices_native.h>
+#include <ATen/ops/indices_copy_native.h>
+#include <ATen/ops/infinitely_differentiable_gelu_backward_native.h>
+#include <ATen/ops/inner_native.h>
+#include <ATen/ops/instance_norm_native.h>
+#include <ATen/ops/int_repr_native.h>
+#include <ATen/ops/inverse_native.h>
+#include <ATen/ops/is_coalesced_native.h>
+#include <ATen/ops/is_complex_native.h>
+#include <ATen/ops/is_conj_native.h>
+#include <ATen/ops/is_distributed_native.h>
+#include <ATen/ops/is_floating_point_native.h>
+#include <ATen/ops/is_inference_native.h>
+#include <ATen/ops/is_leaf_native.h>
+#include <ATen/ops/is_neg_native.h>
+#include <ATen/ops/is_nonzero_native.h>
+#include <ATen/ops/is_pinned_native.h>
+#include <ATen/ops/is_same_size_native.h>
+#include <ATen/ops/is_set_to_native.h>
+#include <ATen/ops/is_signed_native.h>
+#include <ATen/ops/is_vulkan_available_native.h>
+#include <ATen/ops/isclose_native.h>
+#include <ATen/ops/isfinite_native.h>
+#include <ATen/ops/isin_native.h>
+#include <ATen/ops/isinf_native.h>
+#include <ATen/ops/isnan_native.h>
+#include <ATen/ops/isneginf_native.h>
+#include <ATen/ops/isposinf_native.h>
+#include <ATen/ops/isreal_native.h>
+#include <ATen/ops/istft_native.h>
+#include <ATen/ops/item_native.h>
+#include <ATen/ops/kaiser_window_native.h>
+#include <ATen/ops/kl_div_native.h>
+#include <ATen/ops/kron_native.h>
+#include <ATen/ops/kthvalue_native.h>
+#include <ATen/ops/l1_loss_native.h>
+#include <ATen/ops/layer_norm_native.h>
+#include <ATen/ops/lcm_native.h>
+#include <ATen/ops/ldexp_native.h>
+#include <ATen/ops/le_native.h>
+#include <ATen/ops/leaky_relu_native.h>
+#include <ATen/ops/leaky_relu_backward_native.h>
+#include <ATen/ops/lerp_native.h>
+#include <ATen/ops/less_native.h>
+#include <ATen/ops/less_equal_native.h>
+#include <ATen/ops/lgamma_native.h>
+#include <ATen/ops/lift_native.h>
+#include <ATen/ops/lift_fresh_native.h>
+#include <ATen/ops/lift_fresh_copy_native.h>
+#include <ATen/ops/linalg_cholesky_native.h>
+#include <ATen/ops/linalg_cholesky_ex_native.h>
+#include <ATen/ops/linalg_cond_native.h>
+#include <ATen/ops/linalg_cross_native.h>
+#include <ATen/ops/linalg_det_native.h>
+#include <ATen/ops/linalg_diagonal_native.h>
+#include <ATen/ops/linalg_eig_native.h>
+#include <ATen/ops/linalg_eigh_native.h>
+#include <ATen/ops/linalg_eigvals_native.h>
+#include <ATen/ops/linalg_eigvalsh_native.h>
+#include <ATen/ops/linalg_householder_product_native.h>
+#include <ATen/ops/linalg_inv_native.h>
+#include <ATen/ops/linalg_inv_ex_native.h>
+#include <ATen/ops/linalg_ldl_factor_native.h>
+#include <ATen/ops/linalg_ldl_factor_ex_native.h>
+#include <ATen/ops/linalg_ldl_solve_native.h>
+#include <ATen/ops/linalg_lstsq_native.h>
+#include <ATen/ops/linalg_lu_native.h>
+#include <ATen/ops/linalg_lu_factor_native.h>
+#include <ATen/ops/linalg_lu_factor_ex_native.h>
+#include <ATen/ops/linalg_lu_solve_native.h>
+#include <ATen/ops/linalg_matmul_native.h>
+#include <ATen/ops/linalg_matrix_exp_native.h>
+#include <ATen/ops/linalg_matrix_norm_native.h>
+#include <ATen/ops/linalg_matrix_power_native.h>
+#include <ATen/ops/linalg_matrix_rank_native.h>
+#include <ATen/ops/linalg_multi_dot_native.h>
+#include <ATen/ops/linalg_norm_native.h>
+#include <ATen/ops/linalg_pinv_native.h>
+#include <ATen/ops/linalg_qr_native.h>
+#include <ATen/ops/linalg_slogdet_native.h>
+#include <ATen/ops/linalg_solve_native.h>
+#include <ATen/ops/linalg_solve_ex_native.h>
+#include <ATen/ops/linalg_solve_triangular_native.h>
+#include <ATen/ops/linalg_svd_native.h>
+#include <ATen/ops/linalg_svdvals_native.h>
+#include <ATen/ops/linalg_tensorinv_native.h>
+#include <ATen/ops/linalg_tensorsolve_native.h>
+#include <ATen/ops/linalg_vander_native.h>
+#include <ATen/ops/linalg_vecdot_native.h>
+#include <ATen/ops/linalg_vector_norm_native.h>
+#include <ATen/ops/linear_native.h>
+#include <ATen/ops/linear_backward_native.h>
+#include <ATen/ops/linspace_native.h>
+#include <ATen/ops/log_native.h>
+#include <ATen/ops/log10_native.h>
+#include <ATen/ops/log1p_native.h>
+#include <ATen/ops/log2_native.h>
+#include <ATen/ops/log_normal_native.h>
+#include <ATen/ops/log_sigmoid_native.h>
+#include <ATen/ops/log_sigmoid_backward_native.h>
+#include <ATen/ops/log_sigmoid_forward_native.h>
+#include <ATen/ops/log_softmax_native.h>
+#include <ATen/ops/logaddexp_native.h>
+#include <ATen/ops/logaddexp2_native.h>
+#include <ATen/ops/logcumsumexp_native.h>
+#include <ATen/ops/logdet_native.h>
+#include <ATen/ops/logical_and_native.h>
+#include <ATen/ops/logical_not_native.h>
+#include <ATen/ops/logical_or_native.h>
+#include <ATen/ops/logical_xor_native.h>
+#include <ATen/ops/logit_native.h>
+#include <ATen/ops/logit_backward_native.h>
+#include <ATen/ops/logspace_native.h>
+#include <ATen/ops/logsumexp_native.h>
+#include <ATen/ops/lshift_native.h>
+#include <ATen/ops/lstm_native.h>
+#include <ATen/ops/lstm_cell_native.h>
+#include <ATen/ops/lstm_mps_backward_native.h>
+#include <ATen/ops/lt_native.h>
+#include <ATen/ops/lu_solve_native.h>
+#include <ATen/ops/lu_unpack_native.h>
+#include <ATen/ops/mH_native.h>
+#include <ATen/ops/mT_native.h>
+#include <ATen/ops/margin_ranking_loss_native.h>
+#include <ATen/ops/masked_fill_native.h>
+#include <ATen/ops/masked_scatter_native.h>
+#include <ATen/ops/masked_scatter_backward_native.h>
+#include <ATen/ops/masked_select_native.h>
+#include <ATen/ops/masked_select_backward_native.h>
+#include <ATen/ops/matmul_native.h>
+#include <ATen/ops/matmul_backward_native.h>
+#include <ATen/ops/matrix_H_native.h>
+#include <ATen/ops/matrix_exp_native.h>
+#include <ATen/ops/matrix_exp_backward_native.h>
+#include <ATen/ops/matrix_power_native.h>
+#include <ATen/ops/max_native.h>
+#include <ATen/ops/max_pool1d_native.h>
+#include <ATen/ops/max_pool1d_with_indices_native.h>
+#include <ATen/ops/max_pool2d_native.h>
+#include <ATen/ops/max_pool2d_backward_native.h>
+#include <ATen/ops/max_pool2d_with_indices_native.h>
+#include <ATen/ops/max_pool2d_with_indices_backward_native.h>
+#include <ATen/ops/max_pool3d_native.h>
+#include <ATen/ops/max_pool3d_with_indices_native.h>
+#include <ATen/ops/max_pool3d_with_indices_backward_native.h>
+#include <ATen/ops/max_unpool2d_native.h>
+#include <ATen/ops/max_unpool3d_native.h>
+#include <ATen/ops/maximum_native.h>
+#include <ATen/ops/mean_native.h>
+#include <ATen/ops/median_native.h>
+#include <ATen/ops/meshgrid_native.h>
+#include <ATen/ops/min_native.h>
+#include <ATen/ops/minimum_native.h>
+#include <ATen/ops/miopen_batch_norm_native.h>
+#include <ATen/ops/miopen_batch_norm_backward_native.h>
+#include <ATen/ops/miopen_convolution_native.h>
+#include <ATen/ops/miopen_convolution_add_relu_native.h>
+#include <ATen/ops/miopen_convolution_relu_native.h>
+#include <ATen/ops/miopen_convolution_transpose_native.h>
+#include <ATen/ops/miopen_depthwise_convolution_native.h>
+#include <ATen/ops/miopen_rnn_native.h>
+#include <ATen/ops/miopen_rnn_backward_native.h>
+#include <ATen/ops/mish_native.h>
+#include <ATen/ops/mish_backward_native.h>
+#include <ATen/ops/mkldnn_adaptive_avg_pool2d_native.h>
+#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_native.h>
+#include <ATen/ops/mkldnn_convolution_native.h>
+#include <ATen/ops/mkldnn_linear_native.h>
+#include <ATen/ops/mkldnn_linear_backward_native.h>
+#include <ATen/ops/mkldnn_linear_backward_input_native.h>
+#include <ATen/ops/mkldnn_linear_backward_weights_native.h>
+#include <ATen/ops/mkldnn_max_pool2d_native.h>
+#include <ATen/ops/mkldnn_max_pool2d_backward_native.h>
+#include <ATen/ops/mkldnn_max_pool3d_native.h>
+#include <ATen/ops/mkldnn_max_pool3d_backward_native.h>
+#include <ATen/ops/mkldnn_reorder_conv2d_weight_native.h>
+#include <ATen/ops/mkldnn_reorder_conv3d_weight_native.h>
+#include <ATen/ops/mkldnn_rnn_layer_native.h>
+#include <ATen/ops/mkldnn_rnn_layer_backward_native.h>
+#include <ATen/ops/mm_native.h>
+#include <ATen/ops/mode_native.h>
+#include <ATen/ops/moveaxis_native.h>
+#include <ATen/ops/movedim_native.h>
+#include <ATen/ops/mps_convolution_backward_native.h>
+#include <ATen/ops/mps_convolution_transpose_backward_native.h>
+#include <ATen/ops/mse_loss_native.h>
+#include <ATen/ops/mse_loss_backward_native.h>
+#include <ATen/ops/msort_native.h>
+#include <ATen/ops/mul_native.h>
+#include <ATen/ops/multi_margin_loss_native.h>
+#include <ATen/ops/multi_margin_loss_backward_native.h>
+#include <ATen/ops/multilabel_margin_loss_native.h>
+#include <ATen/ops/multilabel_margin_loss_backward_native.h>
+#include <ATen/ops/multilabel_margin_loss_forward_native.h>
+#include <ATen/ops/multinomial_native.h>
+#include <ATen/ops/multiply_native.h>
+#include <ATen/ops/mv_native.h>
+#include <ATen/ops/mvlgamma_native.h>
+#include <ATen/ops/nan_to_num_native.h>
+#include <ATen/ops/nanmean_native.h>
+#include <ATen/ops/nanmedian_native.h>
+#include <ATen/ops/nanquantile_native.h>
+#include <ATen/ops/nansum_native.h>
+#include <ATen/ops/narrow_native.h>
+#include <ATen/ops/narrow_copy_native.h>
+#include <ATen/ops/native_batch_norm_native.h>
+#include <ATen/ops/native_batch_norm_backward_native.h>
+#include <ATen/ops/native_channel_shuffle_native.h>
+#include <ATen/ops/native_dropout_native.h>
+#include <ATen/ops/native_dropout_backward_native.h>
+#include <ATen/ops/native_group_norm_native.h>
+#include <ATen/ops/native_group_norm_backward_native.h>
+#include <ATen/ops/native_layer_norm_native.h>
+#include <ATen/ops/native_layer_norm_backward_native.h>
+#include <ATen/ops/native_norm_native.h>
+#include <ATen/ops/ne_native.h>
+#include <ATen/ops/neg_native.h>
+#include <ATen/ops/negative_native.h>
+#include <ATen/ops/nested_to_padded_tensor_native.h>
+#include <ATen/ops/new_empty_native.h>
+#include <ATen/ops/new_empty_strided_native.h>
+#include <ATen/ops/new_full_native.h>
+#include <ATen/ops/new_ones_native.h>
+#include <ATen/ops/new_zeros_native.h>
+#include <ATen/ops/nextafter_native.h>
+#include <ATen/ops/nll_loss_native.h>
+#include <ATen/ops/nll_loss2d_native.h>
+#include <ATen/ops/nll_loss2d_backward_native.h>
+#include <ATen/ops/nll_loss2d_forward_native.h>
+#include <ATen/ops/nll_loss_backward_native.h>
+#include <ATen/ops/nll_loss_forward_native.h>
+#include <ATen/ops/nll_loss_nd_native.h>
+#include <ATen/ops/nonzero_native.h>
+#include <ATen/ops/nonzero_numpy_native.h>
+#include <ATen/ops/nonzero_static_native.h>
+#include <ATen/ops/norm_native.h>
+#include <ATen/ops/norm_except_dim_native.h>
+#include <ATen/ops/normal_native.h>
+#include <ATen/ops/not_equal_native.h>
+#include <ATen/ops/nuclear_norm_native.h>
+#include <ATen/ops/numpy_T_native.h>
+#include <ATen/ops/one_hot_native.h>
+#include <ATen/ops/ones_native.h>
+#include <ATen/ops/ones_like_native.h>
+#include <ATen/ops/or_native.h>
+#include <ATen/ops/orgqr_native.h>
+#include <ATen/ops/ormqr_native.h>
+#include <ATen/ops/outer_native.h>
+#include <ATen/ops/output_nr_native.h>
+#include <ATen/ops/pad_native.h>
+#include <ATen/ops/pad_sequence_native.h>
+#include <ATen/ops/pairwise_distance_native.h>
+#include <ATen/ops/pdist_native.h>
+#include <ATen/ops/permute_native.h>
+#include <ATen/ops/permute_copy_native.h>
+#include <ATen/ops/pin_memory_native.h>
+#include <ATen/ops/pinverse_native.h>
+#include <ATen/ops/pixel_shuffle_native.h>
+#include <ATen/ops/pixel_unshuffle_native.h>
+#include <ATen/ops/poisson_native.h>
+#include <ATen/ops/poisson_nll_loss_native.h>
+#include <ATen/ops/polar_native.h>
+#include <ATen/ops/polygamma_native.h>
+#include <ATen/ops/positive_native.h>
+#include <ATen/ops/pow_native.h>
+#include <ATen/ops/prelu_native.h>
+#include <ATen/ops/prod_native.h>
+#include <ATen/ops/promote_types_native.h>
+#include <ATen/ops/put_native.h>
+#include <ATen/ops/q_per_channel_axis_native.h>
+#include <ATen/ops/q_per_channel_scales_native.h>
+#include <ATen/ops/q_per_channel_zero_points_native.h>
+#include <ATen/ops/q_scale_native.h>
+#include <ATen/ops/q_zero_point_native.h>
+#include <ATen/ops/qr_native.h>
+#include <ATen/ops/qscheme_native.h>
+#include <ATen/ops/quantile_native.h>
+#include <ATen/ops/quantize_per_channel_native.h>
+#include <ATen/ops/quantize_per_tensor_native.h>
+#include <ATen/ops/quantize_per_tensor_dynamic_native.h>
+#include <ATen/ops/quantized_batch_norm_native.h>
+#include <ATen/ops/quantized_gru_cell_native.h>
+#include <ATen/ops/quantized_lstm_cell_native.h>
+#include <ATen/ops/quantized_max_pool1d_native.h>
+#include <ATen/ops/quantized_max_pool2d_native.h>
+#include <ATen/ops/quantized_max_pool3d_native.h>
+#include <ATen/ops/quantized_rnn_relu_cell_native.h>
+#include <ATen/ops/quantized_rnn_tanh_cell_native.h>
+#include <ATen/ops/rad2deg_native.h>
+#include <ATen/ops/rand_native.h>
+#include <ATen/ops/rand_like_native.h>
+#include <ATen/ops/randint_native.h>
+#include <ATen/ops/randint_like_native.h>
+#include <ATen/ops/randn_native.h>
+#include <ATen/ops/randn_like_native.h>
+#include <ATen/ops/random_native.h>
+#include <ATen/ops/randperm_native.h>
+#include <ATen/ops/range_native.h>
+#include <ATen/ops/ravel_native.h>
+#include <ATen/ops/real_native.h>
+#include <ATen/ops/reciprocal_native.h>
+#include <ATen/ops/record_stream_native.h>
+#include <ATen/ops/refine_names_native.h>
+#include <ATen/ops/reflection_pad1d_native.h>
+#include <ATen/ops/reflection_pad1d_backward_native.h>
+#include <ATen/ops/reflection_pad2d_native.h>
+#include <ATen/ops/reflection_pad2d_backward_native.h>
+#include <ATen/ops/reflection_pad3d_native.h>
+#include <ATen/ops/reflection_pad3d_backward_native.h>
+#include <ATen/ops/relu_native.h>
+#include <ATen/ops/relu6_native.h>
+#include <ATen/ops/remainder_native.h>
+#include <ATen/ops/rename_native.h>
+#include <ATen/ops/renorm_native.h>
+#include <ATen/ops/repeat_native.h>
+#include <ATen/ops/repeat_interleave_native.h>
+#include <ATen/ops/replication_pad1d_native.h>
+#include <ATen/ops/replication_pad1d_backward_native.h>
+#include <ATen/ops/replication_pad2d_native.h>
+#include <ATen/ops/replication_pad2d_backward_native.h>
+#include <ATen/ops/replication_pad3d_native.h>
+#include <ATen/ops/replication_pad3d_backward_native.h>
+#include <ATen/ops/requires_grad_native.h>
+#include <ATen/ops/reshape_native.h>
+#include <ATen/ops/reshape_as_native.h>
+#include <ATen/ops/resize_native.h>
+#include <ATen/ops/resize_as_native.h>
+#include <ATen/ops/resize_as_sparse_native.h>
+#include <ATen/ops/resolve_conj_native.h>
+#include <ATen/ops/resolve_neg_native.h>
+#include <ATen/ops/result_type_native.h>
+#include <ATen/ops/retain_grad_native.h>
+#include <ATen/ops/retains_grad_native.h>
+#include <ATen/ops/rnn_relu_native.h>
+#include <ATen/ops/rnn_relu_cell_native.h>
+#include <ATen/ops/rnn_tanh_native.h>
+#include <ATen/ops/rnn_tanh_cell_native.h>
+#include <ATen/ops/roll_native.h>
+#include <ATen/ops/rot90_native.h>
+#include <ATen/ops/round_native.h>
+#include <ATen/ops/row_indices_native.h>
+#include <ATen/ops/row_indices_copy_native.h>
+#include <ATen/ops/row_stack_native.h>
+#include <ATen/ops/rrelu_native.h>
+#include <ATen/ops/rrelu_with_noise_native.h>
+#include <ATen/ops/rrelu_with_noise_backward_native.h>
+#include <ATen/ops/rshift_native.h>
+#include <ATen/ops/rsqrt_native.h>
+#include <ATen/ops/rsub_native.h>
+#include <ATen/ops/scalar_tensor_native.h>
+#include <ATen/ops/scaled_dot_product_attention_native.h>
+#include <ATen/ops/scatter_native.h>
+#include <ATen/ops/scatter_add_native.h>
+#include <ATen/ops/scatter_reduce_native.h>
+#include <ATen/ops/searchsorted_native.h>
+#include <ATen/ops/segment_reduce_native.h>
+#include <ATen/ops/select_native.h>
+#include <ATen/ops/select_backward_native.h>
+#include <ATen/ops/select_copy_native.h>
+#include <ATen/ops/select_scatter_native.h>
+#include <ATen/ops/selu_native.h>
+#include <ATen/ops/set_native.h>
+#include <ATen/ops/set_data_native.h>
+#include <ATen/ops/sgn_native.h>
+#include <ATen/ops/sigmoid_native.h>
+#include <ATen/ops/sigmoid_backward_native.h>
+#include <ATen/ops/sign_native.h>
+#include <ATen/ops/signbit_native.h>
+#include <ATen/ops/silu_native.h>
+#include <ATen/ops/silu_backward_native.h>
+#include <ATen/ops/sin_native.h>
+#include <ATen/ops/sinc_native.h>
+#include <ATen/ops/sinh_native.h>
+#include <ATen/ops/size_native.h>
+#include <ATen/ops/slice_native.h>
+#include <ATen/ops/slice_backward_native.h>
+#include <ATen/ops/slice_copy_native.h>
+#include <ATen/ops/slice_inverse_native.h>
+#include <ATen/ops/slice_scatter_native.h>
+#include <ATen/ops/slogdet_native.h>
+#include <ATen/ops/slow_conv3d_native.h>
+#include <ATen/ops/slow_conv3d_forward_native.h>
+#include <ATen/ops/slow_conv_dilated2d_native.h>
+#include <ATen/ops/slow_conv_dilated3d_native.h>
+#include <ATen/ops/slow_conv_transpose2d_native.h>
+#include <ATen/ops/slow_conv_transpose3d_native.h>
+#include <ATen/ops/smm_native.h>
+#include <ATen/ops/smooth_l1_loss_native.h>
+#include <ATen/ops/smooth_l1_loss_backward_native.h>
+#include <ATen/ops/soft_margin_loss_native.h>
+#include <ATen/ops/soft_margin_loss_backward_native.h>
+#include <ATen/ops/softmax_native.h>
+#include <ATen/ops/softplus_native.h>
+#include <ATen/ops/softplus_backward_native.h>
+#include <ATen/ops/softshrink_native.h>
+#include <ATen/ops/softshrink_backward_native.h>
+#include <ATen/ops/sort_native.h>
+#include <ATen/ops/sparse_bsc_tensor_native.h>
+#include <ATen/ops/sparse_bsr_tensor_native.h>
+#include <ATen/ops/sparse_compressed_tensor_native.h>
+#include <ATen/ops/sparse_coo_tensor_native.h>
+#include <ATen/ops/sparse_csc_tensor_native.h>
+#include <ATen/ops/sparse_csr_tensor_native.h>
+#include <ATen/ops/sparse_dim_native.h>
+#include <ATen/ops/sparse_mask_native.h>
+#include <ATen/ops/sparse_resize_native.h>
+#include <ATen/ops/sparse_resize_and_clear_native.h>
+#include <ATen/ops/sparse_sampled_addmm_native.h>
+#include <ATen/ops/special_airy_ai_native.h>
+#include <ATen/ops/special_bessel_j0_native.h>
+#include <ATen/ops/special_bessel_j1_native.h>
+#include <ATen/ops/special_bessel_y0_native.h>
+#include <ATen/ops/special_bessel_y1_native.h>
+#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
+#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
+#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
+#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
+#include <ATen/ops/special_digamma_native.h>
+#include <ATen/ops/special_entr_native.h>
+#include <ATen/ops/special_erf_native.h>
+#include <ATen/ops/special_erfc_native.h>
+#include <ATen/ops/special_erfcx_native.h>
+#include <ATen/ops/special_erfinv_native.h>
+#include <ATen/ops/special_exp2_native.h>
+#include <ATen/ops/special_expit_native.h>
+#include <ATen/ops/special_expm1_native.h>
+#include <ATen/ops/special_gammainc_native.h>
+#include <ATen/ops/special_gammaincc_native.h>
+#include <ATen/ops/special_gammaln_native.h>
+#include <ATen/ops/special_hermite_polynomial_h_native.h>
+#include <ATen/ops/special_hermite_polynomial_he_native.h>
+#include <ATen/ops/special_i0_native.h>
+#include <ATen/ops/special_i0e_native.h>
+#include <ATen/ops/special_i1_native.h>
+#include <ATen/ops/special_i1e_native.h>
+#include <ATen/ops/special_laguerre_polynomial_l_native.h>
+#include <ATen/ops/special_legendre_polynomial_p_native.h>
+#include <ATen/ops/special_log1p_native.h>
+#include <ATen/ops/special_log_ndtr_native.h>
+#include <ATen/ops/special_log_softmax_native.h>
+#include <ATen/ops/special_logit_native.h>
+#include <ATen/ops/special_logsumexp_native.h>
+#include <ATen/ops/special_modified_bessel_i0_native.h>
+#include <ATen/ops/special_modified_bessel_i1_native.h>
+#include <ATen/ops/special_modified_bessel_k0_native.h>
+#include <ATen/ops/special_modified_bessel_k1_native.h>
+#include <ATen/ops/special_multigammaln_native.h>
+#include <ATen/ops/special_ndtr_native.h>
+#include <ATen/ops/special_ndtri_native.h>
+#include <ATen/ops/special_polygamma_native.h>
+#include <ATen/ops/special_psi_native.h>
+#include <ATen/ops/special_round_native.h>
+#include <ATen/ops/special_scaled_modified_bessel_k0_native.h>
+#include <ATen/ops/special_scaled_modified_bessel_k1_native.h>
+#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
+#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
+#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
+#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
+#include <ATen/ops/special_sinc_native.h>
+#include <ATen/ops/special_softmax_native.h>
+#include <ATen/ops/special_spherical_bessel_j0_native.h>
+#include <ATen/ops/special_xlog1py_native.h>
+#include <ATen/ops/special_xlogy_native.h>
+#include <ATen/ops/special_zeta_native.h>
+#include <ATen/ops/split_native.h>
+#include <ATen/ops/split_copy_native.h>
+#include <ATen/ops/split_with_sizes_native.h>
+#include <ATen/ops/split_with_sizes_copy_native.h>
+#include <ATen/ops/sqrt_native.h>
+#include <ATen/ops/square_native.h>
+#include <ATen/ops/squeeze_native.h>
+#include <ATen/ops/squeeze_copy_native.h>
+#include <ATen/ops/sspaddmm_native.h>
+#include <ATen/ops/stack_native.h>
+#include <ATen/ops/std_native.h>
+#include <ATen/ops/std_mean_native.h>
+#include <ATen/ops/stft_native.h>
+#include <ATen/ops/stride_native.h>
+#include <ATen/ops/sub_native.h>
+#include <ATen/ops/subtract_native.h>
+#include <ATen/ops/sum_native.h>
+#include <ATen/ops/sum_to_size_native.h>
+#include <ATen/ops/svd_native.h>
+#include <ATen/ops/swapaxes_native.h>
+#include <ATen/ops/swapdims_native.h>
+#include <ATen/ops/sym_constrain_range_native.h>
+#include <ATen/ops/sym_constrain_range_for_size_native.h>
+#include <ATen/ops/sym_numel_native.h>
+#include <ATen/ops/sym_size_native.h>
+#include <ATen/ops/sym_storage_offset_native.h>
+#include <ATen/ops/sym_stride_native.h>
+#include <ATen/ops/t_native.h>
+#include <ATen/ops/t_copy_native.h>
+#include <ATen/ops/take_native.h>
+#include <ATen/ops/take_along_dim_native.h>
+#include <ATen/ops/tan_native.h>
+#include <ATen/ops/tanh_native.h>
+#include <ATen/ops/tanh_backward_native.h>
+#include <ATen/ops/tensor_split_native.h>
+#include <ATen/ops/tensordot_native.h>
+#include <ATen/ops/thnn_conv2d_native.h>
+#include <ATen/ops/threshold_native.h>
+#include <ATen/ops/threshold_backward_native.h>
+#include <ATen/ops/tile_native.h>
+#include <ATen/ops/to_native.h>
+#include <ATen/ops/to_dense_native.h>
+#include <ATen/ops/to_dense_backward_native.h>
+#include <ATen/ops/to_mkldnn_native.h>
+#include <ATen/ops/to_mkldnn_backward_native.h>
+#include <ATen/ops/to_padded_tensor_native.h>
+#include <ATen/ops/to_sparse_native.h>
+#include <ATen/ops/to_sparse_bsc_native.h>
+#include <ATen/ops/to_sparse_bsr_native.h>
+#include <ATen/ops/to_sparse_csc_native.h>
+#include <ATen/ops/to_sparse_csr_native.h>
+#include <ATen/ops/topk_native.h>
+#include <ATen/ops/trace_native.h>
+#include <ATen/ops/trace_backward_native.h>
+#include <ATen/ops/transpose_native.h>
+#include <ATen/ops/transpose_copy_native.h>
+#include <ATen/ops/trapezoid_native.h>
+#include <ATen/ops/trapz_native.h>
+#include <ATen/ops/triangular_solve_native.h>
+#include <ATen/ops/tril_native.h>
+#include <ATen/ops/tril_indices_native.h>
+#include <ATen/ops/triplet_margin_loss_native.h>
+#include <ATen/ops/triu_native.h>
+#include <ATen/ops/triu_indices_native.h>
+#include <ATen/ops/true_divide_native.h>
+#include <ATen/ops/trunc_native.h>
|
1263 |
+
#include <ATen/ops/type_as_native.h>
|
1264 |
+
#include <ATen/ops/unbind_native.h>
|
1265 |
+
#include <ATen/ops/unbind_copy_native.h>
|
1266 |
+
#include <ATen/ops/unflatten_native.h>
|
1267 |
+
#include <ATen/ops/unflatten_dense_tensors_native.h>
|
1268 |
+
#include <ATen/ops/unfold_native.h>
|
1269 |
+
#include <ATen/ops/unfold_backward_native.h>
|
1270 |
+
#include <ATen/ops/unfold_copy_native.h>
|
1271 |
+
#include <ATen/ops/uniform_native.h>
|
1272 |
+
#include <ATen/ops/unique_consecutive_native.h>
|
1273 |
+
#include <ATen/ops/unique_dim_native.h>
|
1274 |
+
#include <ATen/ops/unique_dim_consecutive_native.h>
|
1275 |
+
#include <ATen/ops/unsafe_chunk_native.h>
|
1276 |
+
#include <ATen/ops/unsafe_split_native.h>
|
1277 |
+
#include <ATen/ops/unsafe_split_with_sizes_native.h>
|
1278 |
+
#include <ATen/ops/unsqueeze_native.h>
|
1279 |
+
#include <ATen/ops/unsqueeze_copy_native.h>
|
1280 |
+
#include <ATen/ops/upsample_bicubic2d_native.h>
|
1281 |
+
#include <ATen/ops/upsample_bicubic2d_backward_native.h>
|
1282 |
+
#include <ATen/ops/upsample_bilinear2d_native.h>
|
1283 |
+
#include <ATen/ops/upsample_bilinear2d_backward_native.h>
|
1284 |
+
#include <ATen/ops/upsample_linear1d_native.h>
|
1285 |
+
#include <ATen/ops/upsample_linear1d_backward_native.h>
|
1286 |
+
#include <ATen/ops/upsample_nearest1d_native.h>
|
1287 |
+
#include <ATen/ops/upsample_nearest1d_backward_native.h>
|
1288 |
+
#include <ATen/ops/upsample_nearest2d_native.h>
|
1289 |
+
#include <ATen/ops/upsample_nearest2d_backward_native.h>
|
1290 |
+
#include <ATen/ops/upsample_nearest3d_native.h>
|
1291 |
+
#include <ATen/ops/upsample_nearest3d_backward_native.h>
|
1292 |
+
#include <ATen/ops/upsample_trilinear3d_native.h>
|
1293 |
+
#include <ATen/ops/upsample_trilinear3d_backward_native.h>
|
1294 |
+
#include <ATen/ops/value_selecting_reduction_backward_native.h>
|
1295 |
+
#include <ATen/ops/values_native.h>
|
1296 |
+
#include <ATen/ops/values_copy_native.h>
|
1297 |
+
#include <ATen/ops/vander_native.h>
|
1298 |
+
#include <ATen/ops/var_native.h>
|
1299 |
+
#include <ATen/ops/var_mean_native.h>
|
1300 |
+
#include <ATen/ops/vdot_native.h>
|
1301 |
+
#include <ATen/ops/view_native.h>
|
1302 |
+
#include <ATen/ops/view_as_native.h>
|
1303 |
+
#include <ATen/ops/view_as_complex_native.h>
|
1304 |
+
#include <ATen/ops/view_as_complex_copy_native.h>
|
1305 |
+
#include <ATen/ops/view_as_real_native.h>
|
1306 |
+
#include <ATen/ops/view_as_real_copy_native.h>
|
1307 |
+
#include <ATen/ops/view_copy_native.h>
|
1308 |
+
#include <ATen/ops/vsplit_native.h>
|
1309 |
+
#include <ATen/ops/vstack_native.h>
|
1310 |
+
#include <ATen/ops/where_native.h>
|
1311 |
+
#include <ATen/ops/xlogy_native.h>
|
1312 |
+
#include <ATen/ops/xor_native.h>
|
1313 |
+
#include <ATen/ops/zero_native.h>
|
1314 |
+
#include <ATen/ops/zeros_native.h>
|
1315 |
+
#include <ATen/ops/zeros_like_native.h>
|
1316 |
+
|
1317 |
+
|
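That completes the per-operator _native.h include manifest for NativeFunctions.h. The next file adds the parallel _meta.h aggregator, whose per-operator headers declare the shape/dtype-inference ("meta") half of each structured operator. As a rough, non-authoritative sketch of how these aggregated headers get exercised downstream (a toy C++ program, not part of this upload; it assumes a standard libtorch install whose umbrella header ATen/ATen.h is on the include path):

    #include <ATen/ATen.h>  // umbrella ATen header (assumed available)
    #include <iostream>

    int main() {
      // Ordinary CPU tensors: at::add dispatches to a kernel whose
      // declaration sits behind the _native.h headers listed above.
      at::Tensor a = at::ones({2, 3});
      at::Tensor b = at::full({2, 3}, 2.0);
      std::cout << at::add(a, b) << std::endl;

      // A "meta" tensor carries shape/dtype but no storage. Running an op
      // on it exercises only the shape-inference half of the operator,
      // which is what the *_meta.h headers in the next file declare.
      at::Tensor m = at::empty({2, 3}, at::TensorOptions().device(at::kMeta));
      at::Tensor n = at::add(m, m);
      std::cout << n.sizes() << " on " << n.device() << std::endl;  // [2, 3] on meta
      return 0;
    }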
venv/lib/python3.10/site-packages/torch/include/ATen/NativeMetaFunctions.h
ADDED
@@ -0,0 +1,1303 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunctions.h
+
+#include <ATen/core/Tensor.h>
+#include <ATen/core/IListRef.h>
+#include <ATen/TensorMeta.h>
+#include <ATen/TensorIterator.h>
+
+#include <ATen/ops/_adaptive_avg_pool2d_meta.h>
+#include <ATen/ops/_adaptive_avg_pool2d_backward_meta.h>
+#include <ATen/ops/_adaptive_avg_pool3d_meta.h>
+#include <ATen/ops/_adaptive_avg_pool3d_backward_meta.h>
+#include <ATen/ops/_add_batch_dim_meta.h>
+#include <ATen/ops/_add_relu_meta.h>
+#include <ATen/ops/_addmm_activation_meta.h>
+#include <ATen/ops/_aminmax_meta.h>
+#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_meta.h>
+#include <ATen/ops/_amp_update_scale_meta.h>
+#include <ATen/ops/_assert_async_meta.h>
+#include <ATen/ops/_assert_scalar_meta.h>
+#include <ATen/ops/_assert_tensor_metadata_meta.h>
+#include <ATen/ops/_autocast_to_full_precision_meta.h>
+#include <ATen/ops/_autocast_to_reduced_precision_meta.h>
+#include <ATen/ops/_backward_meta.h>
+#include <ATen/ops/_batch_norm_impl_index_meta.h>
+#include <ATen/ops/_batch_norm_impl_index_backward_meta.h>
+#include <ATen/ops/_cast_Byte_meta.h>
+#include <ATen/ops/_cast_Char_meta.h>
+#include <ATen/ops/_cast_Double_meta.h>
+#include <ATen/ops/_cast_Float_meta.h>
+#include <ATen/ops/_cast_Half_meta.h>
+#include <ATen/ops/_cast_Int_meta.h>
+#include <ATen/ops/_cast_Long_meta.h>
+#include <ATen/ops/_cast_Short_meta.h>
+#include <ATen/ops/_cdist_backward_meta.h>
+#include <ATen/ops/_cdist_forward_meta.h>
+#include <ATen/ops/_cholesky_solve_helper_meta.h>
+#include <ATen/ops/_choose_qparams_per_tensor_meta.h>
+#include <ATen/ops/_chunk_cat_meta.h>
+#include <ATen/ops/_coalesce_meta.h>
+#include <ATen/ops/_coalesced_meta.h>
+#include <ATen/ops/_compute_linear_combination_meta.h>
+#include <ATen/ops/_conj_meta.h>
+#include <ATen/ops/_conj_copy_meta.h>
+#include <ATen/ops/_conj_physical_meta.h>
+#include <ATen/ops/_conv_depthwise2d_meta.h>
+#include <ATen/ops/_convert_indices_from_coo_to_csr_meta.h>
+#include <ATen/ops/_convert_indices_from_csr_to_coo_meta.h>
+#include <ATen/ops/_convert_weight_to_int4pack_meta.h>
+#include <ATen/ops/_convolution_meta.h>
+#include <ATen/ops/_convolution_double_backward_meta.h>
+#include <ATen/ops/_convolution_mode_meta.h>
+#include <ATen/ops/_copy_from_meta.h>
+#include <ATen/ops/_copy_from_and_resize_meta.h>
+#include <ATen/ops/_cslt_compress_meta.h>
+#include <ATen/ops/_cslt_sparse_mm_meta.h>
+#include <ATen/ops/_cslt_sparse_mm_search_meta.h>
+#include <ATen/ops/_ctc_loss_meta.h>
+#include <ATen/ops/_ctc_loss_backward_meta.h>
+#include <ATen/ops/_cudnn_ctc_loss_meta.h>
+#include <ATen/ops/_cudnn_init_dropout_state_meta.h>
+#include <ATen/ops/_cudnn_rnn_meta.h>
+#include <ATen/ops/_cudnn_rnn_backward_meta.h>
+#include <ATen/ops/_cudnn_rnn_flatten_weight_meta.h>
+#include <ATen/ops/_cufft_clear_plan_cache_meta.h>
+#include <ATen/ops/_cufft_get_plan_cache_max_size_meta.h>
+#include <ATen/ops/_cufft_get_plan_cache_size_meta.h>
+#include <ATen/ops/_cufft_set_plan_cache_max_size_meta.h>
+#include <ATen/ops/_cummax_helper_meta.h>
+#include <ATen/ops/_cummin_helper_meta.h>
+#include <ATen/ops/_debug_has_internal_overlap_meta.h>
+#include <ATen/ops/_dimI_meta.h>
+#include <ATen/ops/_dimV_meta.h>
+#include <ATen/ops/_dim_arange_meta.h>
+#include <ATen/ops/_dirichlet_grad_meta.h>
+#include <ATen/ops/_efficient_attention_backward_meta.h>
+#include <ATen/ops/_efficient_attention_forward_meta.h>
+#include <ATen/ops/_efficientzerotensor_meta.h>
+#include <ATen/ops/_embedding_bag_meta.h>
+#include <ATen/ops/_embedding_bag_backward_meta.h>
+#include <ATen/ops/_embedding_bag_dense_backward_meta.h>
+#include <ATen/ops/_embedding_bag_forward_only_meta.h>
+#include <ATen/ops/_embedding_bag_per_sample_weights_backward_meta.h>
+#include <ATen/ops/_embedding_bag_sparse_backward_meta.h>
+#include <ATen/ops/_empty_affine_quantized_meta.h>
+#include <ATen/ops/_empty_per_channel_affine_quantized_meta.h>
+#include <ATen/ops/_euclidean_dist_meta.h>
+#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_meta.h>
+#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_meta.h>
+#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_meta.h>
+#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_meta.h>
+#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_meta.h>
+#include <ATen/ops/_fft_c2c_meta.h>
+#include <ATen/ops/_fft_c2r_meta.h>
+#include <ATen/ops/_fft_r2c_meta.h>
+#include <ATen/ops/_fill_mem_eff_dropout_mask_meta.h>
+#include <ATen/ops/_flash_attention_backward_meta.h>
+#include <ATen/ops/_flash_attention_forward_meta.h>
+#include <ATen/ops/_foobar_meta.h>
+#include <ATen/ops/_foreach_abs_meta.h>
+#include <ATen/ops/_foreach_acos_meta.h>
+#include <ATen/ops/_foreach_add_meta.h>
+#include <ATen/ops/_foreach_addcdiv_meta.h>
+#include <ATen/ops/_foreach_addcmul_meta.h>
+#include <ATen/ops/_foreach_asin_meta.h>
+#include <ATen/ops/_foreach_atan_meta.h>
+#include <ATen/ops/_foreach_ceil_meta.h>
+#include <ATen/ops/_foreach_clamp_max_meta.h>
+#include <ATen/ops/_foreach_clamp_min_meta.h>
+#include <ATen/ops/_foreach_copy_meta.h>
+#include <ATen/ops/_foreach_cos_meta.h>
+#include <ATen/ops/_foreach_cosh_meta.h>
+#include <ATen/ops/_foreach_div_meta.h>
+#include <ATen/ops/_foreach_erf_meta.h>
+#include <ATen/ops/_foreach_erfc_meta.h>
+#include <ATen/ops/_foreach_exp_meta.h>
+#include <ATen/ops/_foreach_expm1_meta.h>
+#include <ATen/ops/_foreach_floor_meta.h>
+#include <ATen/ops/_foreach_frac_meta.h>
+#include <ATen/ops/_foreach_lerp_meta.h>
+#include <ATen/ops/_foreach_lgamma_meta.h>
+#include <ATen/ops/_foreach_log_meta.h>
+#include <ATen/ops/_foreach_log10_meta.h>
+#include <ATen/ops/_foreach_log1p_meta.h>
+#include <ATen/ops/_foreach_log2_meta.h>
+#include <ATen/ops/_foreach_maximum_meta.h>
+#include <ATen/ops/_foreach_minimum_meta.h>
+#include <ATen/ops/_foreach_mul_meta.h>
+#include <ATen/ops/_foreach_neg_meta.h>
+#include <ATen/ops/_foreach_norm_meta.h>
+#include <ATen/ops/_foreach_pow_meta.h>
+#include <ATen/ops/_foreach_reciprocal_meta.h>
+#include <ATen/ops/_foreach_round_meta.h>
+#include <ATen/ops/_foreach_sigmoid_meta.h>
+#include <ATen/ops/_foreach_sign_meta.h>
+#include <ATen/ops/_foreach_sin_meta.h>
+#include <ATen/ops/_foreach_sinh_meta.h>
+#include <ATen/ops/_foreach_sqrt_meta.h>
+#include <ATen/ops/_foreach_sub_meta.h>
+#include <ATen/ops/_foreach_tan_meta.h>
+#include <ATen/ops/_foreach_tanh_meta.h>
+#include <ATen/ops/_foreach_trunc_meta.h>
+#include <ATen/ops/_foreach_zero_meta.h>
+#include <ATen/ops/_functional_assert_async_meta.h>
+#include <ATen/ops/_functional_assert_scalar_meta.h>
+#include <ATen/ops/_functional_sym_constrain_range_meta.h>
+#include <ATen/ops/_functional_sym_constrain_range_for_size_meta.h>
+#include <ATen/ops/_fused_adam_meta.h>
+#include <ATen/ops/_fused_adamw_meta.h>
+#include <ATen/ops/_fused_dropout_meta.h>
+#include <ATen/ops/_fused_moving_avg_obs_fq_helper_meta.h>
+#include <ATen/ops/_fused_sdp_choice_meta.h>
+#include <ATen/ops/_fused_sgd_meta.h>
+#include <ATen/ops/_fw_primal_meta.h>
+#include <ATen/ops/_fw_primal_copy_meta.h>
+#include <ATen/ops/_gather_sparse_backward_meta.h>
+#include <ATen/ops/_grid_sampler_2d_cpu_fallback_meta.h>
+#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_meta.h>
+#include <ATen/ops/_has_compatible_shallow_copy_type_meta.h>
+#include <ATen/ops/_has_same_storage_numel_meta.h>
+#include <ATen/ops/_histogramdd_bin_edges_meta.h>
+#include <ATen/ops/_histogramdd_from_bin_cts_meta.h>
+#include <ATen/ops/_histogramdd_from_bin_tensors_meta.h>
+#include <ATen/ops/_index_put_impl_meta.h>
+#include <ATen/ops/_indices_meta.h>
+#include <ATen/ops/_indices_copy_meta.h>
+#include <ATen/ops/_int_mm_meta.h>
+#include <ATen/ops/_is_all_true_meta.h>
+#include <ATen/ops/_is_any_true_meta.h>
+#include <ATen/ops/_is_zerotensor_meta.h>
+#include <ATen/ops/_lazy_clone_meta.h>
+#include <ATen/ops/_linalg_check_errors_meta.h>
+#include <ATen/ops/_linalg_det_meta.h>
+#include <ATen/ops/_linalg_eigh_meta.h>
+#include <ATen/ops/_linalg_eigvals_meta.h>
+#include <ATen/ops/_linalg_slogdet_meta.h>
+#include <ATen/ops/_linalg_solve_ex_meta.h>
+#include <ATen/ops/_linalg_svd_meta.h>
+#include <ATen/ops/_local_scalar_dense_meta.h>
+#include <ATen/ops/_log_softmax_meta.h>
+#include <ATen/ops/_log_softmax_backward_data_meta.h>
+#include <ATen/ops/_logcumsumexp_meta.h>
+#include <ATen/ops/_lstm_mps_meta.h>
+#include <ATen/ops/_lu_with_info_meta.h>
+#include <ATen/ops/_make_dep_token_meta.h>
+#include <ATen/ops/_make_dual_meta.h>
+#include <ATen/ops/_make_dual_copy_meta.h>
+#include <ATen/ops/_make_per_channel_quantized_tensor_meta.h>
+#include <ATen/ops/_make_per_tensor_quantized_tensor_meta.h>
+#include <ATen/ops/_masked_scale_meta.h>
+#include <ATen/ops/_masked_softmax_meta.h>
+#include <ATen/ops/_masked_softmax_backward_meta.h>
+#include <ATen/ops/_mixed_dtypes_linear_meta.h>
+#include <ATen/ops/_mkldnn_reshape_meta.h>
+#include <ATen/ops/_mkldnn_transpose_meta.h>
+#include <ATen/ops/_mps_convolution_meta.h>
+#include <ATen/ops/_mps_convolution_transpose_meta.h>
+#include <ATen/ops/_native_batch_norm_legit_meta.h>
+#include <ATen/ops/_native_batch_norm_legit_no_training_meta.h>
+#include <ATen/ops/_native_multi_head_attention_meta.h>
+#include <ATen/ops/_neg_view_meta.h>
+#include <ATen/ops/_neg_view_copy_meta.h>
+#include <ATen/ops/_nested_from_padded_meta.h>
+#include <ATen/ops/_nested_from_padded_and_nested_example_meta.h>
+#include <ATen/ops/_nested_get_jagged_dummy_meta.h>
+#include <ATen/ops/_nested_get_lengths_meta.h>
+#include <ATen/ops/_nested_get_offsets_meta.h>
+#include <ATen/ops/_nested_get_ragged_idx_meta.h>
+#include <ATen/ops/_nested_get_values_meta.h>
+#include <ATen/ops/_nested_get_values_copy_meta.h>
+#include <ATen/ops/_nested_select_backward_meta.h>
+#include <ATen/ops/_nested_sum_backward_meta.h>
+#include <ATen/ops/_nested_tensor_from_mask_meta.h>
+#include <ATen/ops/_nested_tensor_from_mask_left_aligned_meta.h>
+#include <ATen/ops/_nested_tensor_from_tensor_list_meta.h>
+#include <ATen/ops/_nested_tensor_size_meta.h>
+#include <ATen/ops/_nested_tensor_softmax_with_shape_meta.h>
+#include <ATen/ops/_nested_tensor_storage_offsets_meta.h>
+#include <ATen/ops/_nested_tensor_strides_meta.h>
+#include <ATen/ops/_nested_view_from_buffer_meta.h>
+#include <ATen/ops/_nested_view_from_buffer_copy_meta.h>
+#include <ATen/ops/_nested_view_from_jagged_meta.h>
+#include <ATen/ops/_nested_view_from_jagged_copy_meta.h>
+#include <ATen/ops/_new_zeros_with_same_feature_meta_meta.h>
+#include <ATen/ops/_nnpack_available_meta.h>
+#include <ATen/ops/_nnpack_spatial_convolution_meta.h>
+#include <ATen/ops/_nnz_meta.h>
+#include <ATen/ops/_pack_padded_sequence_meta.h>
+#include <ATen/ops/_pack_padded_sequence_backward_meta.h>
+#include <ATen/ops/_pad_circular_meta.h>
+#include <ATen/ops/_pad_enum_meta.h>
+#include <ATen/ops/_pad_packed_sequence_meta.h>
+#include <ATen/ops/_pdist_backward_meta.h>
+#include <ATen/ops/_pdist_forward_meta.h>
+#include <ATen/ops/_pin_memory_meta.h>
+#include <ATen/ops/_prelu_kernel_meta.h>
+#include <ATen/ops/_prelu_kernel_backward_meta.h>
+#include <ATen/ops/_print_meta.h>
+#include <ATen/ops/_propagate_xla_data_meta.h>
+#include <ATen/ops/_remove_batch_dim_meta.h>
+#include <ATen/ops/_reshape_alias_meta.h>
+#include <ATen/ops/_reshape_alias_copy_meta.h>
+#include <ATen/ops/_reshape_copy_meta.h>
+#include <ATen/ops/_reshape_from_tensor_meta.h>
+#include <ATen/ops/_resize_output_meta.h>
+#include <ATen/ops/_rowwise_prune_meta.h>
+#include <ATen/ops/_sample_dirichlet_meta.h>
+#include <ATen/ops/_saturate_weight_to_fp16_meta.h>
+#include <ATen/ops/_scaled_dot_product_attention_math_meta.h>
+#include <ATen/ops/_scaled_dot_product_cudnn_attention_meta.h>
+#include <ATen/ops/_scaled_dot_product_efficient_attention_meta.h>
+#include <ATen/ops/_scaled_dot_product_efficient_attention_backward_meta.h>
+#include <ATen/ops/_scaled_dot_product_flash_attention_meta.h>
+#include <ATen/ops/_scaled_dot_product_flash_attention_backward_meta.h>
+#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_meta.h>
+#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_meta.h>
+#include <ATen/ops/_scaled_mm_meta.h>
+#include <ATen/ops/_segment_reduce_backward_meta.h>
+#include <ATen/ops/_shape_as_tensor_meta.h>
+#include <ATen/ops/_slow_conv2d_backward_meta.h>
+#include <ATen/ops/_slow_conv2d_forward_meta.h>
+#include <ATen/ops/_sobol_engine_draw_meta.h>
+#include <ATen/ops/_sobol_engine_ff_meta.h>
+#include <ATen/ops/_sobol_engine_initialize_state_meta.h>
+#include <ATen/ops/_sobol_engine_scramble_meta.h>
+#include <ATen/ops/_softmax_meta.h>
+#include <ATen/ops/_softmax_backward_data_meta.h>
+#include <ATen/ops/_sparse_addmm_meta.h>
+#include <ATen/ops/_sparse_broadcast_to_meta.h>
+#include <ATen/ops/_sparse_broadcast_to_copy_meta.h>
+#include <ATen/ops/_sparse_bsc_tensor_unsafe_meta.h>
+#include <ATen/ops/_sparse_bsr_tensor_unsafe_meta.h>
+#include <ATen/ops/_sparse_compressed_tensor_unsafe_meta.h>
+#include <ATen/ops/_sparse_coo_tensor_unsafe_meta.h>
+#include <ATen/ops/_sparse_coo_tensor_with_dims_meta.h>
+#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_meta.h>
+#include <ATen/ops/_sparse_csc_tensor_unsafe_meta.h>
+#include <ATen/ops/_sparse_csr_prod_meta.h>
+#include <ATen/ops/_sparse_csr_sum_meta.h>
+#include <ATen/ops/_sparse_csr_tensor_unsafe_meta.h>
+#include <ATen/ops/_sparse_log_softmax_meta.h>
+#include <ATen/ops/_sparse_log_softmax_backward_data_meta.h>
+#include <ATen/ops/_sparse_mask_projection_meta.h>
+#include <ATen/ops/_sparse_mm_meta.h>
+#include <ATen/ops/_sparse_mm_reduce_impl_meta.h>
+#include <ATen/ops/_sparse_mm_reduce_impl_backward_meta.h>
+#include <ATen/ops/_sparse_semi_structured_linear_meta.h>
+#include <ATen/ops/_sparse_softmax_meta.h>
+#include <ATen/ops/_sparse_softmax_backward_data_meta.h>
+#include <ATen/ops/_sparse_sparse_matmul_meta.h>
+#include <ATen/ops/_sparse_sum_meta.h>
+#include <ATen/ops/_sparse_sum_backward_meta.h>
+#include <ATen/ops/_spdiags_meta.h>
+#include <ATen/ops/_stack_meta.h>
+#include <ATen/ops/_standard_gamma_meta.h>
+#include <ATen/ops/_standard_gamma_grad_meta.h>
+#include <ATen/ops/_test_ambiguous_defaults_meta.h>
+#include <ATen/ops/_test_autograd_multiple_dispatch_meta.h>
+#include <ATen/ops/_test_autograd_multiple_dispatch_view_meta.h>
+#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_meta.h>
+#include <ATen/ops/_test_check_tensor_meta.h>
+#include <ATen/ops/_test_functorch_fallback_meta.h>
+#include <ATen/ops/_test_optional_filled_intlist_meta.h>
+#include <ATen/ops/_test_optional_floatlist_meta.h>
+#include <ATen/ops/_test_optional_intlist_meta.h>
+#include <ATen/ops/_test_parallel_materialize_meta.h>
+#include <ATen/ops/_test_serialization_subcmul_meta.h>
+#include <ATen/ops/_test_string_default_meta.h>
+#include <ATen/ops/_test_warn_in_autograd_meta.h>
+#include <ATen/ops/_thnn_differentiable_gru_cell_backward_meta.h>
+#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_meta.h>
+#include <ATen/ops/_thnn_fused_gru_cell_meta.h>
+#include <ATen/ops/_thnn_fused_gru_cell_backward_meta.h>
+#include <ATen/ops/_thnn_fused_lstm_cell_meta.h>
+#include <ATen/ops/_thnn_fused_lstm_cell_backward_meta.h>
+#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_meta.h>
+#include <ATen/ops/_to_copy_meta.h>
+#include <ATen/ops/_to_cpu_meta.h>
+#include <ATen/ops/_to_dense_meta.h>
+#include <ATen/ops/_to_sparse_meta.h>
+#include <ATen/ops/_to_sparse_bsc_meta.h>
+#include <ATen/ops/_to_sparse_bsr_meta.h>
+#include <ATen/ops/_to_sparse_csc_meta.h>
+#include <ATen/ops/_to_sparse_csr_meta.h>
+#include <ATen/ops/_to_sparse_semi_structured_meta.h>
+#include <ATen/ops/_transform_bias_rescale_qkv_meta.h>
+#include <ATen/ops/_transformer_encoder_layer_fwd_meta.h>
+#include <ATen/ops/_trilinear_meta.h>
+#include <ATen/ops/_triton_multi_head_attention_meta.h>
+#include <ATen/ops/_triton_scaled_dot_attention_meta.h>
+#include <ATen/ops/_unique_meta.h>
+#include <ATen/ops/_unique2_meta.h>
+#include <ATen/ops/_unpack_dual_meta.h>
+#include <ATen/ops/_unsafe_index_meta.h>
+#include <ATen/ops/_unsafe_index_put_meta.h>
+#include <ATen/ops/_unsafe_view_meta.h>
+#include <ATen/ops/_upsample_bicubic2d_aa_meta.h>
+#include <ATen/ops/_upsample_bicubic2d_aa_backward_meta.h>
+#include <ATen/ops/_upsample_bilinear2d_aa_meta.h>
+#include <ATen/ops/_upsample_bilinear2d_aa_backward_meta.h>
+#include <ATen/ops/_upsample_nearest_exact1d_meta.h>
+#include <ATen/ops/_upsample_nearest_exact1d_backward_meta.h>
+#include <ATen/ops/_upsample_nearest_exact2d_meta.h>
+#include <ATen/ops/_upsample_nearest_exact2d_backward_meta.h>
+#include <ATen/ops/_upsample_nearest_exact3d_meta.h>
+#include <ATen/ops/_upsample_nearest_exact3d_backward_meta.h>
+#include <ATen/ops/_use_cudnn_ctc_loss_meta.h>
+#include <ATen/ops/_use_cudnn_rnn_flatten_weight_meta.h>
+#include <ATen/ops/_validate_compressed_sparse_indices_meta.h>
+#include <ATen/ops/_validate_sparse_bsc_tensor_args_meta.h>
+#include <ATen/ops/_validate_sparse_bsr_tensor_args_meta.h>
+#include <ATen/ops/_validate_sparse_compressed_tensor_args_meta.h>
+#include <ATen/ops/_validate_sparse_coo_tensor_args_meta.h>
+#include <ATen/ops/_validate_sparse_csc_tensor_args_meta.h>
+#include <ATen/ops/_validate_sparse_csr_tensor_args_meta.h>
+#include <ATen/ops/_values_meta.h>
+#include <ATen/ops/_values_copy_meta.h>
+#include <ATen/ops/_version_meta.h>
+#include <ATen/ops/_weight_int4pack_mm_meta.h>
+#include <ATen/ops/_weight_int8pack_mm_meta.h>
+#include <ATen/ops/_weight_norm_meta.h>
+#include <ATen/ops/_weight_norm_differentiable_backward_meta.h>
+#include <ATen/ops/_weight_norm_interface_meta.h>
+#include <ATen/ops/_weight_norm_interface_backward_meta.h>
+#include <ATen/ops/abs_meta.h>
+#include <ATen/ops/absolute_meta.h>
+#include <ATen/ops/acos_meta.h>
+#include <ATen/ops/acosh_meta.h>
+#include <ATen/ops/adaptive_avg_pool1d_meta.h>
+#include <ATen/ops/adaptive_avg_pool2d_meta.h>
+#include <ATen/ops/adaptive_avg_pool3d_meta.h>
+#include <ATen/ops/adaptive_avg_pool3d_backward_meta.h>
+#include <ATen/ops/adaptive_max_pool1d_meta.h>
+#include <ATen/ops/adaptive_max_pool2d_meta.h>
+#include <ATen/ops/adaptive_max_pool2d_backward_meta.h>
+#include <ATen/ops/adaptive_max_pool3d_meta.h>
+#include <ATen/ops/adaptive_max_pool3d_backward_meta.h>
+#include <ATen/ops/add_meta.h>
+#include <ATen/ops/addbmm_meta.h>
+#include <ATen/ops/addcdiv_meta.h>
+#include <ATen/ops/addcmul_meta.h>
+#include <ATen/ops/addmm_meta.h>
+#include <ATen/ops/addmv_meta.h>
+#include <ATen/ops/addr_meta.h>
+#include <ATen/ops/adjoint_meta.h>
+#include <ATen/ops/affine_grid_generator_meta.h>
+#include <ATen/ops/affine_grid_generator_backward_meta.h>
+#include <ATen/ops/alias_meta.h>
+#include <ATen/ops/alias_copy_meta.h>
+#include <ATen/ops/align_as_meta.h>
+#include <ATen/ops/align_tensors_meta.h>
+#include <ATen/ops/align_to_meta.h>
+#include <ATen/ops/all_meta.h>
+#include <ATen/ops/allclose_meta.h>
+#include <ATen/ops/alpha_dropout_meta.h>
+#include <ATen/ops/amax_meta.h>
+#include <ATen/ops/amin_meta.h>
+#include <ATen/ops/aminmax_meta.h>
+#include <ATen/ops/and_meta.h>
+#include <ATen/ops/angle_meta.h>
+#include <ATen/ops/any_meta.h>
+#include <ATen/ops/arange_meta.h>
+#include <ATen/ops/arccos_meta.h>
+#include <ATen/ops/arccosh_meta.h>
+#include <ATen/ops/arcsin_meta.h>
+#include <ATen/ops/arcsinh_meta.h>
+#include <ATen/ops/arctan_meta.h>
+#include <ATen/ops/arctan2_meta.h>
+#include <ATen/ops/arctanh_meta.h>
+#include <ATen/ops/argmax_meta.h>
+#include <ATen/ops/argmin_meta.h>
+#include <ATen/ops/argsort_meta.h>
+#include <ATen/ops/argwhere_meta.h>
+#include <ATen/ops/as_strided_meta.h>
+#include <ATen/ops/as_strided_copy_meta.h>
+#include <ATen/ops/as_strided_scatter_meta.h>
+#include <ATen/ops/asin_meta.h>
+#include <ATen/ops/asinh_meta.h>
+#include <ATen/ops/atan_meta.h>
+#include <ATen/ops/atan2_meta.h>
+#include <ATen/ops/atanh_meta.h>
+#include <ATen/ops/atleast_1d_meta.h>
+#include <ATen/ops/atleast_2d_meta.h>
+#include <ATen/ops/atleast_3d_meta.h>
+#include <ATen/ops/avg_pool1d_meta.h>
+#include <ATen/ops/avg_pool2d_meta.h>
+#include <ATen/ops/avg_pool2d_backward_meta.h>
+#include <ATen/ops/avg_pool3d_meta.h>
+#include <ATen/ops/avg_pool3d_backward_meta.h>
+#include <ATen/ops/baddbmm_meta.h>
+#include <ATen/ops/bartlett_window_meta.h>
+#include <ATen/ops/batch_norm_meta.h>
+#include <ATen/ops/batch_norm_backward_elemt_meta.h>
+#include <ATen/ops/batch_norm_backward_reduce_meta.h>
+#include <ATen/ops/batch_norm_elemt_meta.h>
+#include <ATen/ops/batch_norm_gather_stats_meta.h>
+#include <ATen/ops/batch_norm_gather_stats_with_counts_meta.h>
+#include <ATen/ops/batch_norm_stats_meta.h>
+#include <ATen/ops/batch_norm_update_stats_meta.h>
+#include <ATen/ops/bernoulli_meta.h>
+#include <ATen/ops/bilinear_meta.h>
+#include <ATen/ops/binary_cross_entropy_meta.h>
+#include <ATen/ops/binary_cross_entropy_backward_meta.h>
+#include <ATen/ops/binary_cross_entropy_with_logits_meta.h>
+#include <ATen/ops/bincount_meta.h>
+#include <ATen/ops/binomial_meta.h>
+#include <ATen/ops/bitwise_and_meta.h>
+#include <ATen/ops/bitwise_left_shift_meta.h>
+#include <ATen/ops/bitwise_not_meta.h>
+#include <ATen/ops/bitwise_or_meta.h>
+#include <ATen/ops/bitwise_right_shift_meta.h>
+#include <ATen/ops/bitwise_xor_meta.h>
+#include <ATen/ops/blackman_window_meta.h>
+#include <ATen/ops/block_diag_meta.h>
+#include <ATen/ops/bmm_meta.h>
+#include <ATen/ops/broadcast_tensors_meta.h>
+#include <ATen/ops/broadcast_to_meta.h>
+#include <ATen/ops/bucketize_meta.h>
+#include <ATen/ops/can_cast_meta.h>
+#include <ATen/ops/cartesian_prod_meta.h>
+#include <ATen/ops/cat_meta.h>
+#include <ATen/ops/cauchy_meta.h>
+#include <ATen/ops/ccol_indices_meta.h>
+#include <ATen/ops/ccol_indices_copy_meta.h>
+#include <ATen/ops/cdist_meta.h>
+#include <ATen/ops/ceil_meta.h>
+#include <ATen/ops/celu_meta.h>
+#include <ATen/ops/chain_matmul_meta.h>
+#include <ATen/ops/chalf_meta.h>
+#include <ATen/ops/channel_shuffle_meta.h>
+#include <ATen/ops/cholesky_meta.h>
+#include <ATen/ops/cholesky_inverse_meta.h>
+#include <ATen/ops/cholesky_solve_meta.h>
+#include <ATen/ops/choose_qparams_optimized_meta.h>
+#include <ATen/ops/chunk_meta.h>
+#include <ATen/ops/clamp_meta.h>
+#include <ATen/ops/clamp_max_meta.h>
+#include <ATen/ops/clamp_min_meta.h>
+#include <ATen/ops/clip_meta.h>
+#include <ATen/ops/clone_meta.h>
+#include <ATen/ops/coalesce_meta.h>
+#include <ATen/ops/col2im_meta.h>
+#include <ATen/ops/col_indices_meta.h>
+#include <ATen/ops/col_indices_copy_meta.h>
+#include <ATen/ops/column_stack_meta.h>
+#include <ATen/ops/combinations_meta.h>
+#include <ATen/ops/complex_meta.h>
+#include <ATen/ops/concat_meta.h>
+#include <ATen/ops/concatenate_meta.h>
+#include <ATen/ops/conj_meta.h>
+#include <ATen/ops/conj_physical_meta.h>
+#include <ATen/ops/constant_pad_nd_meta.h>
+#include <ATen/ops/contiguous_meta.h>
+#include <ATen/ops/conv1d_meta.h>
+#include <ATen/ops/conv2d_meta.h>
+#include <ATen/ops/conv3d_meta.h>
+#include <ATen/ops/conv_depthwise3d_meta.h>
+#include <ATen/ops/conv_tbc_meta.h>
+#include <ATen/ops/conv_tbc_backward_meta.h>
+#include <ATen/ops/conv_transpose1d_meta.h>
+#include <ATen/ops/conv_transpose2d_meta.h>
+#include <ATen/ops/conv_transpose3d_meta.h>
+#include <ATen/ops/convolution_meta.h>
+#include <ATen/ops/convolution_backward_meta.h>
+#include <ATen/ops/convolution_backward_overrideable_meta.h>
+#include <ATen/ops/convolution_overrideable_meta.h>
+#include <ATen/ops/copy_meta.h>
+#include <ATen/ops/copy_sparse_to_sparse_meta.h>
+#include <ATen/ops/copysign_meta.h>
+#include <ATen/ops/corrcoef_meta.h>
+#include <ATen/ops/cos_meta.h>
+#include <ATen/ops/cosh_meta.h>
+#include <ATen/ops/cosine_embedding_loss_meta.h>
+#include <ATen/ops/cosine_similarity_meta.h>
+#include <ATen/ops/count_nonzero_meta.h>
+#include <ATen/ops/cov_meta.h>
+#include <ATen/ops/cross_meta.h>
+#include <ATen/ops/cross_entropy_loss_meta.h>
+#include <ATen/ops/crow_indices_meta.h>
+#include <ATen/ops/crow_indices_copy_meta.h>
+#include <ATen/ops/ctc_loss_meta.h>
+#include <ATen/ops/cudnn_affine_grid_generator_meta.h>
+#include <ATen/ops/cudnn_affine_grid_generator_backward_meta.h>
+#include <ATen/ops/cudnn_batch_norm_meta.h>
+#include <ATen/ops/cudnn_batch_norm_backward_meta.h>
+#include <ATen/ops/cudnn_convolution_meta.h>
+#include <ATen/ops/cudnn_convolution_add_relu_meta.h>
+#include <ATen/ops/cudnn_convolution_relu_meta.h>
+#include <ATen/ops/cudnn_convolution_transpose_meta.h>
+#include <ATen/ops/cudnn_grid_sampler_meta.h>
+#include <ATen/ops/cudnn_grid_sampler_backward_meta.h>
+#include <ATen/ops/cudnn_is_acceptable_meta.h>
+#include <ATen/ops/cummax_meta.h>
+#include <ATen/ops/cummaxmin_backward_meta.h>
+#include <ATen/ops/cummin_meta.h>
+#include <ATen/ops/cumprod_meta.h>
+#include <ATen/ops/cumprod_backward_meta.h>
+#include <ATen/ops/cumsum_meta.h>
+#include <ATen/ops/cumulative_trapezoid_meta.h>
+#include <ATen/ops/data_meta.h>
+#include <ATen/ops/deg2rad_meta.h>
+#include <ATen/ops/dense_dim_meta.h>
+#include <ATen/ops/dequantize_meta.h>
+#include <ATen/ops/det_meta.h>
+#include <ATen/ops/detach_meta.h>
+#include <ATen/ops/detach_copy_meta.h>
+#include <ATen/ops/diag_meta.h>
+#include <ATen/ops/diag_embed_meta.h>
+#include <ATen/ops/diagflat_meta.h>
+#include <ATen/ops/diagonal_meta.h>
+#include <ATen/ops/diagonal_backward_meta.h>
+#include <ATen/ops/diagonal_copy_meta.h>
+#include <ATen/ops/diagonal_scatter_meta.h>
+#include <ATen/ops/diff_meta.h>
+#include <ATen/ops/digamma_meta.h>
+#include <ATen/ops/dist_meta.h>
+#include <ATen/ops/div_meta.h>
+#include <ATen/ops/divide_meta.h>
+#include <ATen/ops/dot_meta.h>
+#include <ATen/ops/dropout_meta.h>
+#include <ATen/ops/dsplit_meta.h>
+#include <ATen/ops/dstack_meta.h>
+#include <ATen/ops/einsum_meta.h>
+#include <ATen/ops/elu_meta.h>
+#include <ATen/ops/elu_backward_meta.h>
+#include <ATen/ops/embedding_meta.h>
+#include <ATen/ops/embedding_backward_meta.h>
+#include <ATen/ops/embedding_bag_meta.h>
+#include <ATen/ops/embedding_dense_backward_meta.h>
+#include <ATen/ops/embedding_renorm_meta.h>
+#include <ATen/ops/embedding_sparse_backward_meta.h>
+#include <ATen/ops/empty_meta.h>
+#include <ATen/ops/empty_like_meta.h>
+#include <ATen/ops/empty_permuted_meta.h>
+#include <ATen/ops/empty_quantized_meta.h>
+#include <ATen/ops/empty_strided_meta.h>
+#include <ATen/ops/eq_meta.h>
+#include <ATen/ops/equal_meta.h>
+#include <ATen/ops/erf_meta.h>
+#include <ATen/ops/erfc_meta.h>
+#include <ATen/ops/erfinv_meta.h>
+#include <ATen/ops/exp_meta.h>
+#include <ATen/ops/exp2_meta.h>
+#include <ATen/ops/expand_meta.h>
+#include <ATen/ops/expand_as_meta.h>
+#include <ATen/ops/expand_copy_meta.h>
+#include <ATen/ops/expm1_meta.h>
+#include <ATen/ops/exponential_meta.h>
+#include <ATen/ops/eye_meta.h>
+#include <ATen/ops/fake_quantize_per_channel_affine_meta.h>
+#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_meta.h>
+#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_meta.h>
+#include <ATen/ops/fake_quantize_per_tensor_affine_meta.h>
+#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_meta.h>
+#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_meta.h>
+#include <ATen/ops/fbgemm_linear_fp16_weight_meta.h>
+#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_meta.h>
+#include <ATen/ops/fbgemm_linear_int8_weight_meta.h>
+#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_meta.h>
+#include <ATen/ops/fbgemm_linear_quantize_weight_meta.h>
+#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_meta.h>
+#include <ATen/ops/fbgemm_pack_quantized_matrix_meta.h>
+#include <ATen/ops/feature_alpha_dropout_meta.h>
+#include <ATen/ops/feature_dropout_meta.h>
+#include <ATen/ops/fft_fft_meta.h>
+#include <ATen/ops/fft_fft2_meta.h>
+#include <ATen/ops/fft_fftfreq_meta.h>
+#include <ATen/ops/fft_fftn_meta.h>
+#include <ATen/ops/fft_fftshift_meta.h>
+#include <ATen/ops/fft_hfft_meta.h>
+#include <ATen/ops/fft_hfft2_meta.h>
+#include <ATen/ops/fft_hfftn_meta.h>
+#include <ATen/ops/fft_ifft_meta.h>
+#include <ATen/ops/fft_ifft2_meta.h>
+#include <ATen/ops/fft_ifftn_meta.h>
+#include <ATen/ops/fft_ifftshift_meta.h>
+#include <ATen/ops/fft_ihfft_meta.h>
+#include <ATen/ops/fft_ihfft2_meta.h>
+#include <ATen/ops/fft_ihfftn_meta.h>
+#include <ATen/ops/fft_irfft_meta.h>
+#include <ATen/ops/fft_irfft2_meta.h>
+#include <ATen/ops/fft_irfftn_meta.h>
+#include <ATen/ops/fft_rfft_meta.h>
+#include <ATen/ops/fft_rfft2_meta.h>
+#include <ATen/ops/fft_rfftfreq_meta.h>
+#include <ATen/ops/fft_rfftn_meta.h>
+#include <ATen/ops/fill_meta.h>
+#include <ATen/ops/fill_diagonal_meta.h>
+#include <ATen/ops/fix_meta.h>
+#include <ATen/ops/flatten_meta.h>
+#include <ATen/ops/flatten_dense_tensors_meta.h>
+#include <ATen/ops/flip_meta.h>
+#include <ATen/ops/fliplr_meta.h>
+#include <ATen/ops/flipud_meta.h>
+#include <ATen/ops/float_power_meta.h>
+#include <ATen/ops/floor_meta.h>
+#include <ATen/ops/floor_divide_meta.h>
+#include <ATen/ops/fmax_meta.h>
+#include <ATen/ops/fmin_meta.h>
+#include <ATen/ops/fmod_meta.h>
+#include <ATen/ops/frac_meta.h>
+#include <ATen/ops/fractional_max_pool2d_meta.h>
+#include <ATen/ops/fractional_max_pool2d_backward_meta.h>
+#include <ATen/ops/fractional_max_pool3d_meta.h>
+#include <ATen/ops/fractional_max_pool3d_backward_meta.h>
+#include <ATen/ops/frexp_meta.h>
+#include <ATen/ops/frobenius_norm_meta.h>
+#include <ATen/ops/from_file_meta.h>
+#include <ATen/ops/full_meta.h>
+#include <ATen/ops/full_like_meta.h>
+#include <ATen/ops/fused_moving_avg_obs_fake_quant_meta.h>
+#include <ATen/ops/gather_meta.h>
+#include <ATen/ops/gather_backward_meta.h>
+#include <ATen/ops/gcd_meta.h>
+#include <ATen/ops/ge_meta.h>
+#include <ATen/ops/gelu_meta.h>
+#include <ATen/ops/gelu_backward_meta.h>
+#include <ATen/ops/geometric_meta.h>
+#include <ATen/ops/geqrf_meta.h>
+#include <ATen/ops/ger_meta.h>
+#include <ATen/ops/glu_meta.h>
+#include <ATen/ops/glu_backward_meta.h>
+#include <ATen/ops/glu_backward_jvp_meta.h>
+#include <ATen/ops/glu_jvp_meta.h>
+#include <ATen/ops/gradient_meta.h>
+#include <ATen/ops/greater_meta.h>
+#include <ATen/ops/greater_equal_meta.h>
+#include <ATen/ops/grid_sampler_meta.h>
+#include <ATen/ops/grid_sampler_2d_meta.h>
+#include <ATen/ops/grid_sampler_2d_backward_meta.h>
+#include <ATen/ops/grid_sampler_3d_meta.h>
+#include <ATen/ops/grid_sampler_3d_backward_meta.h>
+#include <ATen/ops/group_norm_meta.h>
+#include <ATen/ops/gru_meta.h>
+#include <ATen/ops/gru_cell_meta.h>
+#include <ATen/ops/gt_meta.h>
+#include <ATen/ops/hamming_window_meta.h>
+#include <ATen/ops/hann_window_meta.h>
+#include <ATen/ops/hardshrink_meta.h>
+#include <ATen/ops/hardshrink_backward_meta.h>
+#include <ATen/ops/hardsigmoid_meta.h>
+#include <ATen/ops/hardsigmoid_backward_meta.h>
+#include <ATen/ops/hardswish_meta.h>
+#include <ATen/ops/hardswish_backward_meta.h>
+#include <ATen/ops/hardtanh_meta.h>
+#include <ATen/ops/hardtanh_backward_meta.h>
+#include <ATen/ops/heaviside_meta.h>
+#include <ATen/ops/hinge_embedding_loss_meta.h>
+#include <ATen/ops/histc_meta.h>
+#include <ATen/ops/histogram_meta.h>
+#include <ATen/ops/histogramdd_meta.h>
+#include <ATen/ops/hsplit_meta.h>
+#include <ATen/ops/hspmm_meta.h>
+#include <ATen/ops/hstack_meta.h>
+#include <ATen/ops/huber_loss_meta.h>
+#include <ATen/ops/huber_loss_backward_meta.h>
+#include <ATen/ops/hypot_meta.h>
+#include <ATen/ops/i0_meta.h>
+#include <ATen/ops/igamma_meta.h>
+#include <ATen/ops/igammac_meta.h>
+#include <ATen/ops/im2col_meta.h>
+#include <ATen/ops/imag_meta.h>
+#include <ATen/ops/index_meta.h>
+#include <ATen/ops/index_add_meta.h>
+#include <ATen/ops/index_copy_meta.h>
+#include <ATen/ops/index_fill_meta.h>
+#include <ATen/ops/index_put_meta.h>
+#include <ATen/ops/index_reduce_meta.h>
+#include <ATen/ops/index_select_meta.h>
+#include <ATen/ops/index_select_backward_meta.h>
+#include <ATen/ops/indices_meta.h>
+#include <ATen/ops/indices_copy_meta.h>
+#include <ATen/ops/infinitely_differentiable_gelu_backward_meta.h>
+#include <ATen/ops/inner_meta.h>
+#include <ATen/ops/instance_norm_meta.h>
+#include <ATen/ops/int_repr_meta.h>
+#include <ATen/ops/inverse_meta.h>
+#include <ATen/ops/is_coalesced_meta.h>
+#include <ATen/ops/is_complex_meta.h>
+#include <ATen/ops/is_conj_meta.h>
+#include <ATen/ops/is_distributed_meta.h>
+#include <ATen/ops/is_floating_point_meta.h>
+#include <ATen/ops/is_inference_meta.h>
+#include <ATen/ops/is_leaf_meta.h>
+#include <ATen/ops/is_neg_meta.h>
+#include <ATen/ops/is_nonzero_meta.h>
+#include <ATen/ops/is_pinned_meta.h>
+#include <ATen/ops/is_same_size_meta.h>
+#include <ATen/ops/is_set_to_meta.h>
+#include <ATen/ops/is_signed_meta.h>
+#include <ATen/ops/is_vulkan_available_meta.h>
+#include <ATen/ops/isclose_meta.h>
+#include <ATen/ops/isfinite_meta.h>
+#include <ATen/ops/isin_meta.h>
+#include <ATen/ops/isinf_meta.h>
+#include <ATen/ops/isnan_meta.h>
+#include <ATen/ops/isneginf_meta.h>
+#include <ATen/ops/isposinf_meta.h>
+#include <ATen/ops/isreal_meta.h>
+#include <ATen/ops/istft_meta.h>
+#include <ATen/ops/item_meta.h>
+#include <ATen/ops/kaiser_window_meta.h>
+#include <ATen/ops/kl_div_meta.h>
+#include <ATen/ops/kron_meta.h>
+#include <ATen/ops/kthvalue_meta.h>
+#include <ATen/ops/l1_loss_meta.h>
+#include <ATen/ops/layer_norm_meta.h>
+#include <ATen/ops/lcm_meta.h>
+#include <ATen/ops/ldexp_meta.h>
+#include <ATen/ops/le_meta.h>
+#include <ATen/ops/leaky_relu_meta.h>
+#include <ATen/ops/leaky_relu_backward_meta.h>
+#include <ATen/ops/lerp_meta.h>
+#include <ATen/ops/less_meta.h>
+#include <ATen/ops/less_equal_meta.h>
+#include <ATen/ops/lgamma_meta.h>
+#include <ATen/ops/lift_meta.h>
+#include <ATen/ops/lift_fresh_meta.h>
+#include <ATen/ops/lift_fresh_copy_meta.h>
+#include <ATen/ops/linalg_cholesky_meta.h>
+#include <ATen/ops/linalg_cholesky_ex_meta.h>
+#include <ATen/ops/linalg_cond_meta.h>
+#include <ATen/ops/linalg_cross_meta.h>
+#include <ATen/ops/linalg_det_meta.h>
+#include <ATen/ops/linalg_diagonal_meta.h>
+#include <ATen/ops/linalg_eig_meta.h>
+#include <ATen/ops/linalg_eigh_meta.h>
+#include <ATen/ops/linalg_eigvals_meta.h>
+#include <ATen/ops/linalg_eigvalsh_meta.h>
+#include <ATen/ops/linalg_householder_product_meta.h>
+#include <ATen/ops/linalg_inv_meta.h>
+#include <ATen/ops/linalg_inv_ex_meta.h>
+#include <ATen/ops/linalg_ldl_factor_meta.h>
+#include <ATen/ops/linalg_ldl_factor_ex_meta.h>
+#include <ATen/ops/linalg_ldl_solve_meta.h>
+#include <ATen/ops/linalg_lstsq_meta.h>
+#include <ATen/ops/linalg_lu_meta.h>
+#include <ATen/ops/linalg_lu_factor_meta.h>
+#include <ATen/ops/linalg_lu_factor_ex_meta.h>
+#include <ATen/ops/linalg_lu_solve_meta.h>
+#include <ATen/ops/linalg_matmul_meta.h>
+#include <ATen/ops/linalg_matrix_exp_meta.h>
+#include <ATen/ops/linalg_matrix_norm_meta.h>
+#include <ATen/ops/linalg_matrix_power_meta.h>
+#include <ATen/ops/linalg_matrix_rank_meta.h>
+#include <ATen/ops/linalg_multi_dot_meta.h>
+#include <ATen/ops/linalg_norm_meta.h>
+#include <ATen/ops/linalg_pinv_meta.h>
+#include <ATen/ops/linalg_qr_meta.h>
+#include <ATen/ops/linalg_slogdet_meta.h>
+#include <ATen/ops/linalg_solve_meta.h>
+#include <ATen/ops/linalg_solve_ex_meta.h>
+#include <ATen/ops/linalg_solve_triangular_meta.h>
+#include <ATen/ops/linalg_svd_meta.h>
+#include <ATen/ops/linalg_svdvals_meta.h>
+#include <ATen/ops/linalg_tensorinv_meta.h>
+#include <ATen/ops/linalg_tensorsolve_meta.h>
+#include <ATen/ops/linalg_vander_meta.h>
+#include <ATen/ops/linalg_vecdot_meta.h>
+#include <ATen/ops/linalg_vector_norm_meta.h>
+#include <ATen/ops/linear_meta.h>
+#include <ATen/ops/linear_backward_meta.h>
+#include <ATen/ops/linspace_meta.h>
+#include <ATen/ops/log_meta.h>
+#include <ATen/ops/log10_meta.h>
+#include <ATen/ops/log1p_meta.h>
+#include <ATen/ops/log2_meta.h>
+#include <ATen/ops/log_normal_meta.h>
+#include <ATen/ops/log_sigmoid_meta.h>
+#include <ATen/ops/log_sigmoid_backward_meta.h>
+#include <ATen/ops/log_sigmoid_forward_meta.h>
+#include <ATen/ops/log_softmax_meta.h>
+#include <ATen/ops/logaddexp_meta.h>
+#include <ATen/ops/logaddexp2_meta.h>
+#include <ATen/ops/logcumsumexp_meta.h>
+#include <ATen/ops/logdet_meta.h>
+#include <ATen/ops/logical_and_meta.h>
+#include <ATen/ops/logical_not_meta.h>
+#include <ATen/ops/logical_or_meta.h>
+#include <ATen/ops/logical_xor_meta.h>
+#include <ATen/ops/logit_meta.h>
+#include <ATen/ops/logit_backward_meta.h>
+#include <ATen/ops/logspace_meta.h>
+#include <ATen/ops/logsumexp_meta.h>
+#include <ATen/ops/lshift_meta.h>
+#include <ATen/ops/lstm_meta.h>
+#include <ATen/ops/lstm_cell_meta.h>
+#include <ATen/ops/lstm_mps_backward_meta.h>
+#include <ATen/ops/lt_meta.h>
+#include <ATen/ops/lu_solve_meta.h>
+#include <ATen/ops/lu_unpack_meta.h>
+#include <ATen/ops/mH_meta.h>
+#include <ATen/ops/mT_meta.h>
+#include <ATen/ops/margin_ranking_loss_meta.h>
+#include <ATen/ops/masked_fill_meta.h>
+#include <ATen/ops/masked_scatter_meta.h>
+#include <ATen/ops/masked_scatter_backward_meta.h>
+#include <ATen/ops/masked_select_meta.h>
+#include <ATen/ops/masked_select_backward_meta.h>
+#include <ATen/ops/matmul_meta.h>
+#include <ATen/ops/matmul_backward_meta.h>
+#include <ATen/ops/matrix_H_meta.h>
+#include <ATen/ops/matrix_exp_meta.h>
+#include <ATen/ops/matrix_exp_backward_meta.h>
+#include <ATen/ops/matrix_power_meta.h>
+#include <ATen/ops/max_meta.h>
+#include <ATen/ops/max_pool1d_meta.h>
+#include <ATen/ops/max_pool1d_with_indices_meta.h>
+#include <ATen/ops/max_pool2d_meta.h>
+#include <ATen/ops/max_pool2d_backward_meta.h>
+#include <ATen/ops/max_pool2d_with_indices_meta.h>
+#include <ATen/ops/max_pool2d_with_indices_backward_meta.h>
+#include <ATen/ops/max_pool3d_meta.h>
+#include <ATen/ops/max_pool3d_with_indices_meta.h>
+#include <ATen/ops/max_pool3d_with_indices_backward_meta.h>
+#include <ATen/ops/max_unpool2d_meta.h>
+#include <ATen/ops/max_unpool3d_meta.h>
+#include <ATen/ops/maximum_meta.h>
+#include <ATen/ops/mean_meta.h>
+#include <ATen/ops/median_meta.h>
+#include <ATen/ops/meshgrid_meta.h>
+#include <ATen/ops/min_meta.h>
+#include <ATen/ops/minimum_meta.h>
+#include <ATen/ops/miopen_batch_norm_meta.h>
+#include <ATen/ops/miopen_batch_norm_backward_meta.h>
+#include <ATen/ops/miopen_convolution_meta.h>
+#include <ATen/ops/miopen_convolution_add_relu_meta.h>
+#include <ATen/ops/miopen_convolution_relu_meta.h>
+#include <ATen/ops/miopen_convolution_transpose_meta.h>
+#include <ATen/ops/miopen_depthwise_convolution_meta.h>
+#include <ATen/ops/miopen_rnn_meta.h>
+#include <ATen/ops/miopen_rnn_backward_meta.h>
+#include <ATen/ops/mish_meta.h>
+#include <ATen/ops/mish_backward_meta.h>
+#include <ATen/ops/mkldnn_adaptive_avg_pool2d_meta.h>
+#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_meta.h>
+#include <ATen/ops/mkldnn_convolution_meta.h>
+#include <ATen/ops/mkldnn_linear_meta.h>
+#include <ATen/ops/mkldnn_linear_backward_meta.h>
+#include <ATen/ops/mkldnn_linear_backward_input_meta.h>
+#include <ATen/ops/mkldnn_linear_backward_weights_meta.h>
+#include <ATen/ops/mkldnn_max_pool2d_meta.h>
+#include <ATen/ops/mkldnn_max_pool2d_backward_meta.h>
+#include <ATen/ops/mkldnn_max_pool3d_meta.h>
+#include <ATen/ops/mkldnn_max_pool3d_backward_meta.h>
+#include <ATen/ops/mkldnn_reorder_conv2d_weight_meta.h>
+#include <ATen/ops/mkldnn_reorder_conv3d_weight_meta.h>
+#include <ATen/ops/mkldnn_rnn_layer_meta.h>
+#include <ATen/ops/mkldnn_rnn_layer_backward_meta.h>
+#include <ATen/ops/mm_meta.h>
+#include <ATen/ops/mode_meta.h>
+#include <ATen/ops/moveaxis_meta.h>
+#include <ATen/ops/movedim_meta.h>
+#include <ATen/ops/mps_convolution_backward_meta.h>
+#include <ATen/ops/mps_convolution_transpose_backward_meta.h>
+#include <ATen/ops/mse_loss_meta.h>
+#include <ATen/ops/mse_loss_backward_meta.h>
+#include <ATen/ops/msort_meta.h>
+#include <ATen/ops/mul_meta.h>
+#include <ATen/ops/multi_margin_loss_meta.h>
+#include <ATen/ops/multi_margin_loss_backward_meta.h>
+#include <ATen/ops/multilabel_margin_loss_meta.h>
+#include <ATen/ops/multilabel_margin_loss_backward_meta.h>
+#include <ATen/ops/multilabel_margin_loss_forward_meta.h>
+#include <ATen/ops/multinomial_meta.h>
+#include <ATen/ops/multiply_meta.h>
+#include <ATen/ops/mv_meta.h>
+#include <ATen/ops/mvlgamma_meta.h>
+#include <ATen/ops/nan_to_num_meta.h>
+#include <ATen/ops/nanmean_meta.h>
+#include <ATen/ops/nanmedian_meta.h>
+#include <ATen/ops/nanquantile_meta.h>
+#include <ATen/ops/nansum_meta.h>
915 |
+
#include <ATen/ops/narrow_meta.h>
|
916 |
+
#include <ATen/ops/narrow_copy_meta.h>
|
917 |
+
#include <ATen/ops/native_batch_norm_meta.h>
|
918 |
+
#include <ATen/ops/native_batch_norm_backward_meta.h>
|
919 |
+
#include <ATen/ops/native_channel_shuffle_meta.h>
|
920 |
+
#include <ATen/ops/native_dropout_meta.h>
|
921 |
+
#include <ATen/ops/native_dropout_backward_meta.h>
|
922 |
+
#include <ATen/ops/native_group_norm_meta.h>
|
923 |
+
#include <ATen/ops/native_group_norm_backward_meta.h>
|
924 |
+
#include <ATen/ops/native_layer_norm_meta.h>
|
925 |
+
#include <ATen/ops/native_layer_norm_backward_meta.h>
|
926 |
+
#include <ATen/ops/native_norm_meta.h>
|
927 |
+
#include <ATen/ops/ne_meta.h>
|
928 |
+
#include <ATen/ops/neg_meta.h>
|
929 |
+
#include <ATen/ops/negative_meta.h>
|
930 |
+
#include <ATen/ops/nested_to_padded_tensor_meta.h>
|
931 |
+
#include <ATen/ops/new_empty_meta.h>
|
932 |
+
#include <ATen/ops/new_empty_strided_meta.h>
|
933 |
+
#include <ATen/ops/new_full_meta.h>
|
934 |
+
#include <ATen/ops/new_ones_meta.h>
|
935 |
+
#include <ATen/ops/new_zeros_meta.h>
|
936 |
+
#include <ATen/ops/nextafter_meta.h>
|
937 |
+
#include <ATen/ops/nll_loss_meta.h>
|
938 |
+
#include <ATen/ops/nll_loss2d_meta.h>
|
939 |
+
#include <ATen/ops/nll_loss2d_backward_meta.h>
|
940 |
+
#include <ATen/ops/nll_loss2d_forward_meta.h>
|
941 |
+
#include <ATen/ops/nll_loss_backward_meta.h>
|
942 |
+
#include <ATen/ops/nll_loss_forward_meta.h>
|
943 |
+
#include <ATen/ops/nll_loss_nd_meta.h>
|
944 |
+
#include <ATen/ops/nonzero_meta.h>
|
945 |
+
#include <ATen/ops/nonzero_numpy_meta.h>
|
946 |
+
#include <ATen/ops/nonzero_static_meta.h>
|
947 |
+
#include <ATen/ops/norm_meta.h>
|
948 |
+
#include <ATen/ops/norm_except_dim_meta.h>
|
949 |
+
#include <ATen/ops/normal_meta.h>
|
950 |
+
#include <ATen/ops/not_equal_meta.h>
|
951 |
+
#include <ATen/ops/nuclear_norm_meta.h>
|
952 |
+
#include <ATen/ops/numpy_T_meta.h>
|
953 |
+
#include <ATen/ops/one_hot_meta.h>
|
954 |
+
#include <ATen/ops/ones_meta.h>
|
955 |
+
#include <ATen/ops/ones_like_meta.h>
|
956 |
+
#include <ATen/ops/or_meta.h>
|
957 |
+
#include <ATen/ops/orgqr_meta.h>
|
958 |
+
#include <ATen/ops/ormqr_meta.h>
|
959 |
+
#include <ATen/ops/outer_meta.h>
|
960 |
+
#include <ATen/ops/output_nr_meta.h>
|
961 |
+
#include <ATen/ops/pad_meta.h>
|
962 |
+
#include <ATen/ops/pad_sequence_meta.h>
|
963 |
+
#include <ATen/ops/pairwise_distance_meta.h>
|
964 |
+
#include <ATen/ops/pdist_meta.h>
|
965 |
+
#include <ATen/ops/permute_meta.h>
|
966 |
+
#include <ATen/ops/permute_copy_meta.h>
|
967 |
+
#include <ATen/ops/pin_memory_meta.h>
|
968 |
+
#include <ATen/ops/pinverse_meta.h>
|
969 |
+
#include <ATen/ops/pixel_shuffle_meta.h>
|
970 |
+
#include <ATen/ops/pixel_unshuffle_meta.h>
|
971 |
+
#include <ATen/ops/poisson_meta.h>
|
972 |
+
#include <ATen/ops/poisson_nll_loss_meta.h>
|
973 |
+
#include <ATen/ops/polar_meta.h>
|
974 |
+
#include <ATen/ops/polygamma_meta.h>
|
975 |
+
#include <ATen/ops/positive_meta.h>
|
976 |
+
#include <ATen/ops/pow_meta.h>
|
977 |
+
#include <ATen/ops/prelu_meta.h>
|
978 |
+
#include <ATen/ops/prod_meta.h>
|
979 |
+
#include <ATen/ops/promote_types_meta.h>
|
980 |
+
#include <ATen/ops/put_meta.h>
|
981 |
+
#include <ATen/ops/q_per_channel_axis_meta.h>
|
982 |
+
#include <ATen/ops/q_per_channel_scales_meta.h>
|
983 |
+
#include <ATen/ops/q_per_channel_zero_points_meta.h>
|
984 |
+
#include <ATen/ops/q_scale_meta.h>
|
985 |
+
#include <ATen/ops/q_zero_point_meta.h>
|
986 |
+
#include <ATen/ops/qr_meta.h>
|
987 |
+
#include <ATen/ops/qscheme_meta.h>
|
988 |
+
#include <ATen/ops/quantile_meta.h>
|
989 |
+
#include <ATen/ops/quantize_per_channel_meta.h>
|
990 |
+
#include <ATen/ops/quantize_per_tensor_meta.h>
|
991 |
+
#include <ATen/ops/quantize_per_tensor_dynamic_meta.h>
|
992 |
+
#include <ATen/ops/quantized_batch_norm_meta.h>
|
993 |
+
#include <ATen/ops/quantized_gru_cell_meta.h>
|
994 |
+
#include <ATen/ops/quantized_lstm_cell_meta.h>
|
995 |
+
#include <ATen/ops/quantized_max_pool1d_meta.h>
|
996 |
+
#include <ATen/ops/quantized_max_pool2d_meta.h>
|
997 |
+
#include <ATen/ops/quantized_max_pool3d_meta.h>
|
998 |
+
#include <ATen/ops/quantized_rnn_relu_cell_meta.h>
|
999 |
+
#include <ATen/ops/quantized_rnn_tanh_cell_meta.h>
|
1000 |
+
#include <ATen/ops/rad2deg_meta.h>
|
1001 |
+
#include <ATen/ops/rand_meta.h>
|
1002 |
+
#include <ATen/ops/rand_like_meta.h>
|
1003 |
+
#include <ATen/ops/randint_meta.h>
|
1004 |
+
#include <ATen/ops/randint_like_meta.h>
|
1005 |
+
#include <ATen/ops/randn_meta.h>
|
1006 |
+
#include <ATen/ops/randn_like_meta.h>
|
1007 |
+
#include <ATen/ops/random_meta.h>
|
1008 |
+
#include <ATen/ops/randperm_meta.h>
|
1009 |
+
#include <ATen/ops/range_meta.h>
|
1010 |
+
#include <ATen/ops/ravel_meta.h>
|
1011 |
+
#include <ATen/ops/real_meta.h>
|
1012 |
+
#include <ATen/ops/reciprocal_meta.h>
|
1013 |
+
#include <ATen/ops/record_stream_meta.h>
|
1014 |
+
#include <ATen/ops/refine_names_meta.h>
|
1015 |
+
#include <ATen/ops/reflection_pad1d_meta.h>
|
1016 |
+
#include <ATen/ops/reflection_pad1d_backward_meta.h>
|
1017 |
+
#include <ATen/ops/reflection_pad2d_meta.h>
|
1018 |
+
#include <ATen/ops/reflection_pad2d_backward_meta.h>
|
1019 |
+
#include <ATen/ops/reflection_pad3d_meta.h>
|
1020 |
+
#include <ATen/ops/reflection_pad3d_backward_meta.h>
|
1021 |
+
#include <ATen/ops/relu_meta.h>
|
1022 |
+
#include <ATen/ops/relu6_meta.h>
|
1023 |
+
#include <ATen/ops/remainder_meta.h>
|
1024 |
+
#include <ATen/ops/rename_meta.h>
|
1025 |
+
#include <ATen/ops/renorm_meta.h>
|
1026 |
+
#include <ATen/ops/repeat_meta.h>
|
1027 |
+
#include <ATen/ops/repeat_interleave_meta.h>
|
1028 |
+
#include <ATen/ops/replication_pad1d_meta.h>
|
1029 |
+
#include <ATen/ops/replication_pad1d_backward_meta.h>
|
1030 |
+
#include <ATen/ops/replication_pad2d_meta.h>
|
1031 |
+
#include <ATen/ops/replication_pad2d_backward_meta.h>
|
1032 |
+
#include <ATen/ops/replication_pad3d_meta.h>
|
1033 |
+
#include <ATen/ops/replication_pad3d_backward_meta.h>
|
1034 |
+
#include <ATen/ops/requires_grad_meta.h>
|
1035 |
+
#include <ATen/ops/reshape_meta.h>
|
1036 |
+
#include <ATen/ops/reshape_as_meta.h>
|
1037 |
+
#include <ATen/ops/resize_meta.h>
|
1038 |
+
#include <ATen/ops/resize_as_meta.h>
|
1039 |
+
#include <ATen/ops/resize_as_sparse_meta.h>
|
1040 |
+
#include <ATen/ops/resolve_conj_meta.h>
|
1041 |
+
#include <ATen/ops/resolve_neg_meta.h>
|
1042 |
+
#include <ATen/ops/result_type_meta.h>
|
1043 |
+
#include <ATen/ops/retain_grad_meta.h>
|
1044 |
+
#include <ATen/ops/retains_grad_meta.h>
|
1045 |
+
#include <ATen/ops/rnn_relu_meta.h>
|
1046 |
+
#include <ATen/ops/rnn_relu_cell_meta.h>
|
1047 |
+
#include <ATen/ops/rnn_tanh_meta.h>
|
1048 |
+
#include <ATen/ops/rnn_tanh_cell_meta.h>
|
1049 |
+
#include <ATen/ops/roll_meta.h>
|
1050 |
+
#include <ATen/ops/rot90_meta.h>
|
1051 |
+
#include <ATen/ops/round_meta.h>
|
1052 |
+
#include <ATen/ops/row_indices_meta.h>
|
1053 |
+
#include <ATen/ops/row_indices_copy_meta.h>
|
1054 |
+
#include <ATen/ops/row_stack_meta.h>
|
1055 |
+
#include <ATen/ops/rrelu_meta.h>
|
1056 |
+
#include <ATen/ops/rrelu_with_noise_meta.h>
|
1057 |
+
#include <ATen/ops/rrelu_with_noise_backward_meta.h>
|
1058 |
+
#include <ATen/ops/rshift_meta.h>
|
1059 |
+
#include <ATen/ops/rsqrt_meta.h>
|
1060 |
+
#include <ATen/ops/rsub_meta.h>
|
1061 |
+
#include <ATen/ops/scalar_tensor_meta.h>
|
1062 |
+
#include <ATen/ops/scaled_dot_product_attention_meta.h>
|
1063 |
+
#include <ATen/ops/scatter_meta.h>
|
1064 |
+
#include <ATen/ops/scatter_add_meta.h>
|
1065 |
+
#include <ATen/ops/scatter_reduce_meta.h>
|
1066 |
+
#include <ATen/ops/searchsorted_meta.h>
|
1067 |
+
#include <ATen/ops/segment_reduce_meta.h>
|
1068 |
+
#include <ATen/ops/select_meta.h>
|
1069 |
+
#include <ATen/ops/select_backward_meta.h>
|
1070 |
+
#include <ATen/ops/select_copy_meta.h>
|
1071 |
+
#include <ATen/ops/select_scatter_meta.h>
|
1072 |
+
#include <ATen/ops/selu_meta.h>
|
1073 |
+
#include <ATen/ops/set_meta.h>
|
1074 |
+
#include <ATen/ops/set_data_meta.h>
|
1075 |
+
#include <ATen/ops/sgn_meta.h>
|
1076 |
+
#include <ATen/ops/sigmoid_meta.h>
|
1077 |
+
#include <ATen/ops/sigmoid_backward_meta.h>
|
1078 |
+
#include <ATen/ops/sign_meta.h>
|
1079 |
+
#include <ATen/ops/signbit_meta.h>
|
1080 |
+
#include <ATen/ops/silu_meta.h>
|
1081 |
+
#include <ATen/ops/silu_backward_meta.h>
|
1082 |
+
#include <ATen/ops/sin_meta.h>
|
1083 |
+
#include <ATen/ops/sinc_meta.h>
|
1084 |
+
#include <ATen/ops/sinh_meta.h>
|
1085 |
+
#include <ATen/ops/size_meta.h>
|
1086 |
+
#include <ATen/ops/slice_meta.h>
|
1087 |
+
#include <ATen/ops/slice_backward_meta.h>
|
1088 |
+
#include <ATen/ops/slice_copy_meta.h>
|
1089 |
+
#include <ATen/ops/slice_inverse_meta.h>
|
1090 |
+
#include <ATen/ops/slice_scatter_meta.h>
|
1091 |
+
#include <ATen/ops/slogdet_meta.h>
|
1092 |
+
#include <ATen/ops/slow_conv3d_meta.h>
|
1093 |
+
#include <ATen/ops/slow_conv3d_forward_meta.h>
|
1094 |
+
#include <ATen/ops/slow_conv_dilated2d_meta.h>
|
1095 |
+
#include <ATen/ops/slow_conv_dilated3d_meta.h>
|
1096 |
+
#include <ATen/ops/slow_conv_transpose2d_meta.h>
|
1097 |
+
#include <ATen/ops/slow_conv_transpose3d_meta.h>
|
1098 |
+
#include <ATen/ops/smm_meta.h>
|
1099 |
+
#include <ATen/ops/smooth_l1_loss_meta.h>
|
1100 |
+
#include <ATen/ops/smooth_l1_loss_backward_meta.h>
|
1101 |
+
#include <ATen/ops/soft_margin_loss_meta.h>
|
1102 |
+
#include <ATen/ops/soft_margin_loss_backward_meta.h>
|
1103 |
+
#include <ATen/ops/softmax_meta.h>
|
1104 |
+
#include <ATen/ops/softplus_meta.h>
|
1105 |
+
#include <ATen/ops/softplus_backward_meta.h>
|
1106 |
+
#include <ATen/ops/softshrink_meta.h>
|
1107 |
+
#include <ATen/ops/softshrink_backward_meta.h>
|
1108 |
+
#include <ATen/ops/sort_meta.h>
|
1109 |
+
#include <ATen/ops/sparse_bsc_tensor_meta.h>
|
1110 |
+
#include <ATen/ops/sparse_bsr_tensor_meta.h>
|
1111 |
+
#include <ATen/ops/sparse_compressed_tensor_meta.h>
|
1112 |
+
#include <ATen/ops/sparse_coo_tensor_meta.h>
|
1113 |
+
#include <ATen/ops/sparse_csc_tensor_meta.h>
|
1114 |
+
#include <ATen/ops/sparse_csr_tensor_meta.h>
|
1115 |
+
#include <ATen/ops/sparse_dim_meta.h>
|
1116 |
+
#include <ATen/ops/sparse_mask_meta.h>
|
1117 |
+
#include <ATen/ops/sparse_resize_meta.h>
|
1118 |
+
#include <ATen/ops/sparse_resize_and_clear_meta.h>
|
1119 |
+
#include <ATen/ops/sparse_sampled_addmm_meta.h>
|
1120 |
+
#include <ATen/ops/special_airy_ai_meta.h>
|
1121 |
+
#include <ATen/ops/special_bessel_j0_meta.h>
|
1122 |
+
#include <ATen/ops/special_bessel_j1_meta.h>
|
1123 |
+
#include <ATen/ops/special_bessel_y0_meta.h>
|
1124 |
+
#include <ATen/ops/special_bessel_y1_meta.h>
|
1125 |
+
#include <ATen/ops/special_chebyshev_polynomial_t_meta.h>
|
1126 |
+
#include <ATen/ops/special_chebyshev_polynomial_u_meta.h>
|
1127 |
+
#include <ATen/ops/special_chebyshev_polynomial_v_meta.h>
|
1128 |
+
#include <ATen/ops/special_chebyshev_polynomial_w_meta.h>
|
1129 |
+
#include <ATen/ops/special_digamma_meta.h>
|
1130 |
+
#include <ATen/ops/special_entr_meta.h>
|
1131 |
+
#include <ATen/ops/special_erf_meta.h>
|
1132 |
+
#include <ATen/ops/special_erfc_meta.h>
|
1133 |
+
#include <ATen/ops/special_erfcx_meta.h>
|
1134 |
+
#include <ATen/ops/special_erfinv_meta.h>
|
1135 |
+
#include <ATen/ops/special_exp2_meta.h>
|
1136 |
+
#include <ATen/ops/special_expit_meta.h>
|
1137 |
+
#include <ATen/ops/special_expm1_meta.h>
|
1138 |
+
#include <ATen/ops/special_gammainc_meta.h>
|
1139 |
+
#include <ATen/ops/special_gammaincc_meta.h>
|
1140 |
+
#include <ATen/ops/special_gammaln_meta.h>
|
1141 |
+
#include <ATen/ops/special_hermite_polynomial_h_meta.h>
|
1142 |
+
#include <ATen/ops/special_hermite_polynomial_he_meta.h>
|
1143 |
+
#include <ATen/ops/special_i0_meta.h>
|
1144 |
+
#include <ATen/ops/special_i0e_meta.h>
|
1145 |
+
#include <ATen/ops/special_i1_meta.h>
|
1146 |
+
#include <ATen/ops/special_i1e_meta.h>
|
1147 |
+
#include <ATen/ops/special_laguerre_polynomial_l_meta.h>
|
1148 |
+
#include <ATen/ops/special_legendre_polynomial_p_meta.h>
|
1149 |
+
#include <ATen/ops/special_log1p_meta.h>
|
1150 |
+
#include <ATen/ops/special_log_ndtr_meta.h>
|
1151 |
+
#include <ATen/ops/special_log_softmax_meta.h>
|
1152 |
+
#include <ATen/ops/special_logit_meta.h>
|
1153 |
+
#include <ATen/ops/special_logsumexp_meta.h>
|
1154 |
+
#include <ATen/ops/special_modified_bessel_i0_meta.h>
|
1155 |
+
#include <ATen/ops/special_modified_bessel_i1_meta.h>
|
1156 |
+
#include <ATen/ops/special_modified_bessel_k0_meta.h>
|
1157 |
+
#include <ATen/ops/special_modified_bessel_k1_meta.h>
|
1158 |
+
#include <ATen/ops/special_multigammaln_meta.h>
|
1159 |
+
#include <ATen/ops/special_ndtr_meta.h>
|
1160 |
+
#include <ATen/ops/special_ndtri_meta.h>
|
1161 |
+
#include <ATen/ops/special_polygamma_meta.h>
|
1162 |
+
#include <ATen/ops/special_psi_meta.h>
|
1163 |
+
#include <ATen/ops/special_round_meta.h>
|
1164 |
+
#include <ATen/ops/special_scaled_modified_bessel_k0_meta.h>
|
1165 |
+
#include <ATen/ops/special_scaled_modified_bessel_k1_meta.h>
|
1166 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_meta.h>
|
1167 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_meta.h>
|
1168 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_meta.h>
|
1169 |
+
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_meta.h>
|
1170 |
+
#include <ATen/ops/special_sinc_meta.h>
|
1171 |
+
#include <ATen/ops/special_softmax_meta.h>
|
1172 |
+
#include <ATen/ops/special_spherical_bessel_j0_meta.h>
|
1173 |
+
#include <ATen/ops/special_xlog1py_meta.h>
|
1174 |
+
#include <ATen/ops/special_xlogy_meta.h>
|
1175 |
+
#include <ATen/ops/special_zeta_meta.h>
|
1176 |
+
#include <ATen/ops/split_meta.h>
|
1177 |
+
#include <ATen/ops/split_copy_meta.h>
|
1178 |
+
#include <ATen/ops/split_with_sizes_meta.h>
|
1179 |
+
#include <ATen/ops/split_with_sizes_copy_meta.h>
|
1180 |
+
#include <ATen/ops/sqrt_meta.h>
|
1181 |
+
#include <ATen/ops/square_meta.h>
|
1182 |
+
#include <ATen/ops/squeeze_meta.h>
|
1183 |
+
#include <ATen/ops/squeeze_copy_meta.h>
|
1184 |
+
#include <ATen/ops/sspaddmm_meta.h>
|
1185 |
+
#include <ATen/ops/stack_meta.h>
|
1186 |
+
#include <ATen/ops/std_meta.h>
|
1187 |
+
#include <ATen/ops/std_mean_meta.h>
|
1188 |
+
#include <ATen/ops/stft_meta.h>
|
1189 |
+
#include <ATen/ops/stride_meta.h>
|
1190 |
+
#include <ATen/ops/sub_meta.h>
|
1191 |
+
#include <ATen/ops/subtract_meta.h>
|
1192 |
+
#include <ATen/ops/sum_meta.h>
|
1193 |
+
#include <ATen/ops/sum_to_size_meta.h>
|
1194 |
+
#include <ATen/ops/svd_meta.h>
|
1195 |
+
#include <ATen/ops/swapaxes_meta.h>
|
1196 |
+
#include <ATen/ops/swapdims_meta.h>
|
1197 |
+
#include <ATen/ops/sym_constrain_range_meta.h>
|
1198 |
+
#include <ATen/ops/sym_constrain_range_for_size_meta.h>
|
1199 |
+
#include <ATen/ops/sym_numel_meta.h>
|
1200 |
+
#include <ATen/ops/sym_size_meta.h>
|
1201 |
+
#include <ATen/ops/sym_storage_offset_meta.h>
|
1202 |
+
#include <ATen/ops/sym_stride_meta.h>
|
1203 |
+
#include <ATen/ops/t_meta.h>
|
1204 |
+
#include <ATen/ops/t_copy_meta.h>
|
1205 |
+
#include <ATen/ops/take_meta.h>
|
1206 |
+
#include <ATen/ops/take_along_dim_meta.h>
|
1207 |
+
#include <ATen/ops/tan_meta.h>
|
1208 |
+
#include <ATen/ops/tanh_meta.h>
|
1209 |
+
#include <ATen/ops/tanh_backward_meta.h>
|
1210 |
+
#include <ATen/ops/tensor_split_meta.h>
|
1211 |
+
#include <ATen/ops/tensordot_meta.h>
|
1212 |
+
#include <ATen/ops/thnn_conv2d_meta.h>
|
1213 |
+
#include <ATen/ops/threshold_meta.h>
|
1214 |
+
#include <ATen/ops/threshold_backward_meta.h>
|
1215 |
+
#include <ATen/ops/tile_meta.h>
|
1216 |
+
#include <ATen/ops/to_meta.h>
|
1217 |
+
#include <ATen/ops/to_dense_meta.h>
|
1218 |
+
#include <ATen/ops/to_dense_backward_meta.h>
|
1219 |
+
#include <ATen/ops/to_mkldnn_meta.h>
|
1220 |
+
#include <ATen/ops/to_mkldnn_backward_meta.h>
|
1221 |
+
#include <ATen/ops/to_padded_tensor_meta.h>
|
1222 |
+
#include <ATen/ops/to_sparse_meta.h>
|
1223 |
+
#include <ATen/ops/to_sparse_bsc_meta.h>
|
1224 |
+
#include <ATen/ops/to_sparse_bsr_meta.h>
|
1225 |
+
#include <ATen/ops/to_sparse_csc_meta.h>
|
1226 |
+
#include <ATen/ops/to_sparse_csr_meta.h>
|
1227 |
+
#include <ATen/ops/topk_meta.h>
|
1228 |
+
#include <ATen/ops/trace_meta.h>
|
1229 |
+
#include <ATen/ops/trace_backward_meta.h>
|
1230 |
+
#include <ATen/ops/transpose_meta.h>
|
1231 |
+
#include <ATen/ops/transpose_copy_meta.h>
|
1232 |
+
#include <ATen/ops/trapezoid_meta.h>
|
1233 |
+
#include <ATen/ops/trapz_meta.h>
|
1234 |
+
#include <ATen/ops/triangular_solve_meta.h>
|
1235 |
+
#include <ATen/ops/tril_meta.h>
|
1236 |
+
#include <ATen/ops/tril_indices_meta.h>
|
1237 |
+
#include <ATen/ops/triplet_margin_loss_meta.h>
|
1238 |
+
#include <ATen/ops/triu_meta.h>
|
1239 |
+
#include <ATen/ops/triu_indices_meta.h>
|
1240 |
+
#include <ATen/ops/true_divide_meta.h>
|
1241 |
+
#include <ATen/ops/trunc_meta.h>
|
1242 |
+
#include <ATen/ops/type_as_meta.h>
|
1243 |
+
#include <ATen/ops/unbind_meta.h>
|
1244 |
+
#include <ATen/ops/unbind_copy_meta.h>
|
1245 |
+
#include <ATen/ops/unflatten_meta.h>
|
1246 |
+
#include <ATen/ops/unflatten_dense_tensors_meta.h>
|
1247 |
+
#include <ATen/ops/unfold_meta.h>
|
1248 |
+
#include <ATen/ops/unfold_backward_meta.h>
|
1249 |
+
#include <ATen/ops/unfold_copy_meta.h>
|
1250 |
+
#include <ATen/ops/uniform_meta.h>
|
1251 |
+
#include <ATen/ops/unique_consecutive_meta.h>
|
1252 |
+
#include <ATen/ops/unique_dim_meta.h>
|
1253 |
+
#include <ATen/ops/unique_dim_consecutive_meta.h>
|
1254 |
+
#include <ATen/ops/unsafe_chunk_meta.h>
|
1255 |
+
#include <ATen/ops/unsafe_split_meta.h>
|
1256 |
+
#include <ATen/ops/unsafe_split_with_sizes_meta.h>
|
1257 |
+
#include <ATen/ops/unsqueeze_meta.h>
|
1258 |
+
#include <ATen/ops/unsqueeze_copy_meta.h>
|
1259 |
+
#include <ATen/ops/upsample_bicubic2d_meta.h>
|
1260 |
+
#include <ATen/ops/upsample_bicubic2d_backward_meta.h>
|
1261 |
+
#include <ATen/ops/upsample_bilinear2d_meta.h>
|
1262 |
+
#include <ATen/ops/upsample_bilinear2d_backward_meta.h>
|
1263 |
+
#include <ATen/ops/upsample_linear1d_meta.h>
|
1264 |
+
#include <ATen/ops/upsample_linear1d_backward_meta.h>
|
1265 |
+
#include <ATen/ops/upsample_nearest1d_meta.h>
|
1266 |
+
#include <ATen/ops/upsample_nearest1d_backward_meta.h>
|
1267 |
+
#include <ATen/ops/upsample_nearest2d_meta.h>
|
1268 |
+
#include <ATen/ops/upsample_nearest2d_backward_meta.h>
|
1269 |
+
#include <ATen/ops/upsample_nearest3d_meta.h>
|
1270 |
+
#include <ATen/ops/upsample_nearest3d_backward_meta.h>
|
1271 |
+
#include <ATen/ops/upsample_trilinear3d_meta.h>
|
1272 |
+
#include <ATen/ops/upsample_trilinear3d_backward_meta.h>
|
1273 |
+
#include <ATen/ops/value_selecting_reduction_backward_meta.h>
|
1274 |
+
#include <ATen/ops/values_meta.h>
|
1275 |
+
#include <ATen/ops/values_copy_meta.h>
|
1276 |
+
#include <ATen/ops/vander_meta.h>
|
1277 |
+
#include <ATen/ops/var_meta.h>
|
1278 |
+
#include <ATen/ops/var_mean_meta.h>
|
1279 |
+
#include <ATen/ops/vdot_meta.h>
|
1280 |
+
#include <ATen/ops/view_meta.h>
|
1281 |
+
#include <ATen/ops/view_as_meta.h>
|
1282 |
+
#include <ATen/ops/view_as_complex_meta.h>
|
1283 |
+
#include <ATen/ops/view_as_complex_copy_meta.h>
|
1284 |
+
#include <ATen/ops/view_as_real_meta.h>
|
1285 |
+
#include <ATen/ops/view_as_real_copy_meta.h>
|
1286 |
+
#include <ATen/ops/view_copy_meta.h>
|
1287 |
+
#include <ATen/ops/vsplit_meta.h>
|
1288 |
+
#include <ATen/ops/vstack_meta.h>
|
1289 |
+
#include <ATen/ops/where_meta.h>
|
1290 |
+
#include <ATen/ops/xlogy_meta.h>
|
1291 |
+
#include <ATen/ops/xor_meta.h>
|
1292 |
+
#include <ATen/ops/zero_meta.h>
|
1293 |
+
#include <ATen/ops/zeros_meta.h>
|
1294 |
+
#include <ATen/ops/zeros_like_meta.h>
|
1295 |
+
|
1296 |
+
namespace at {
|
1297 |
+
|
1298 |
+
namespace meta {
|
1299 |
+
|
1300 |
+
|
1301 |
+
|
1302 |
+
} // namespace meta
|
1303 |
+
} // namespace at
|
venv/lib/python3.10/site-packages/torch/include/ATen/NumericUtils.h
ADDED
@@ -0,0 +1,203 @@
+#pragma once
+
+#ifdef __HIPCC__
+#include <hip/hip_runtime.h>
+#endif
+
+#include <c10/macros/Macros.h>
+#include <c10/util/BFloat16.h>
+#include <c10/util/Float8_e4m3fn.h>
+#include <c10/util/Float8_e4m3fnuz.h>
+#include <c10/util/Float8_e5m2.h>
+#include <c10/util/Float8_e5m2fnuz.h>
+#include <c10/util/Half.h>
+#include <c10/util/complex.h>
+
+#include <cmath>
+#include <type_traits>
+
+namespace at {
+
+// std::isnan isn't performant to use on integral types; it will
+// (uselessly) convert to floating point and then do the test.
+// This function is.
+
+template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
+inline C10_HOST_DEVICE bool _isnan(T /*val*/) {
+  return false;
+}
+
+template <typename T, std::enable_if_t<std::is_floating_point_v<T>, int> = 0>
+inline C10_HOST_DEVICE bool _isnan(T val) {
+#if defined(__CUDACC__) || defined(__HIPCC__)
+  return ::isnan(val);
+#else
+  return std::isnan(val);
+#endif
+}
+
+template <typename T, std::enable_if_t<c10::is_complex<T>::value, int> = 0>
+inline C10_HOST_DEVICE bool _isnan(T val) {
+  return std::isnan(val.real()) || std::isnan(val.imag());
+}
+
+template <typename T, std::enable_if_t<std::is_same_v<T, at::Half>, int> = 0>
+inline C10_HOST_DEVICE bool _isnan(T val) {
+  return at::_isnan(static_cast<float>(val));
+}
+
+template <
+    typename T,
+    std::enable_if_t<std::is_same_v<T, at::BFloat16>, int> = 0>
+inline C10_HOST_DEVICE bool _isnan(at::BFloat16 val) {
+  return at::_isnan(static_cast<float>(val));
+}
+
+inline C10_HOST_DEVICE bool _isnan(at::BFloat16 val) {
+  return at::_isnan(static_cast<float>(val));
+}
+
+template <
+    typename T,
+    std::enable_if_t<std::is_same_v<T, at::Float8_e5m2>, int> = 0>
+inline C10_HOST_DEVICE bool _isnan(T val) {
+  return val.isnan();
+}
+
+template <
+    typename T,
+    std::enable_if_t<std::is_same_v<T, at::Float8_e4m3fn>, int> = 0>
+inline C10_HOST_DEVICE bool _isnan(T val) {
+  return val.isnan();
+}
+
+template <
+    typename T,
+    std::enable_if_t<std::is_same_v<T, at::Float8_e5m2fnuz>, int> = 0>
+inline C10_HOST_DEVICE bool _isnan(T val) {
+  return val.isnan();
+}
+
+template <
+    typename T,
+    std::enable_if_t<std::is_same_v<T, at::Float8_e4m3fnuz>, int> = 0>
+inline C10_HOST_DEVICE bool _isnan(T val) {
+  return val.isnan();
+}
+
+// std::isinf isn't performant to use on integral types; it will
+// (uselessly) convert to floating point and then do the test.
+// This function is.
+
+template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
+inline C10_HOST_DEVICE bool _isinf(T /*val*/) {
+  return false;
+}
+
+template <typename T, std::enable_if_t<std::is_floating_point_v<T>, int> = 0>
+inline C10_HOST_DEVICE bool _isinf(T val) {
+#if defined(__CUDACC__) || defined(__HIPCC__)
+  return ::isinf(val);
+#else
+  return std::isinf(val);
+#endif
+}
+
+inline C10_HOST_DEVICE bool _isinf(at::Half val) {
+  return at::_isinf(static_cast<float>(val));
+}
+
+inline C10_HOST_DEVICE bool _isinf(at::BFloat16 val) {
+  return at::_isinf(static_cast<float>(val));
+}
+
+inline C10_HOST_DEVICE bool _isinf(at::Float8_e5m2 val) {
+  return val.isinf();
+}
+
+inline C10_HOST_DEVICE bool _isinf(at::Float8_e4m3fn val) {
+  return false;
+}
+
+inline C10_HOST_DEVICE bool _isinf(at::Float8_e5m2fnuz val) {
+  return false;
+}
+
+inline C10_HOST_DEVICE bool _isinf(at::Float8_e4m3fnuz val) {
+  return false;
+}
+
+template <typename T>
+C10_HOST_DEVICE inline T exp(T x) {
+  static_assert(
+      !std::is_same_v<T, double>,
+      "this template must be used with float or less precise type");
+#if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
+  // use __expf fast approximation for peak bandwidth
+  return __expf(x);
+#else
+  return ::exp(x);
+#endif
+}
+
+template <>
+C10_HOST_DEVICE inline double exp<double>(double x) {
+  return ::exp(x);
+}
+
+template <typename T>
+C10_HOST_DEVICE inline T log(T x) {
+  static_assert(
+      !std::is_same_v<T, double>,
+      "this template must be used with float or less precise type");
+#if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
+  // use __logf fast approximation for peak bandwidth
+  return __logf(x);
+#else
+  return ::log(x);
+#endif
+}
+
+template <>
+C10_HOST_DEVICE inline double log<double>(double x) {
+  return ::log(x);
+}
+
+template <typename T>
+C10_HOST_DEVICE inline T log1p(T x) {
+  static_assert(
+      !std::is_same_v<T, double>,
+      "this template must be used with float or less precise type");
+#if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
+  // use __logf fast approximation for peak bandwidth
+  // NOTE: There is no __log1pf so unfortunately we lose precision.
+  return __logf(1.0f + x);
+#else
+  return ::log1p(x);
+#endif
+}
+
+template <>
+C10_HOST_DEVICE inline double log1p<double>(double x) {
+  return ::log1p(x);
+}
+
+template <typename T>
+C10_HOST_DEVICE inline T tan(T x) {
+  static_assert(
+      !std::is_same_v<T, double>,
+      "this template must be used with float or less precise type");
+#if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
+  // use __tanf fast approximation for peak bandwidth
+  return __tanf(x);
+#else
+  return ::tan(x);
+#endif
+}
+
+template <>
+C10_HOST_DEVICE inline double tan<double>(double x) {
+  return ::tan(x);
+}
+
+} // namespace at
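
A minimal host-side sketch of how these overloads resolve, assuming a program compiled against libtorch (the printed values are illustrative):

#include <ATen/NumericUtils.h>
#include <cmath>
#include <iostream>

int main() {
  // Integral overload: compiles to a constant `false`, no float conversion.
  std::cout << at::_isnan(42) << "\n";                 // 0
  // Floating-point overload: forwards to std::isnan on the host.
  std::cout << at::_isnan(std::nan("")) << "\n";       // 1
  // Half/BFloat16 overloads widen to float before testing.
  std::cout << at::_isnan(at::Half(1.0f)) << "\n";     // 0
  std::cout << at::_isinf(at::BFloat16(1.0f)) << "\n"; // 0
  return 0;
}
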
venv/lib/python3.10/site-packages/torch/include/ATen/PTThreadPool.h
ADDED
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <ATen/Parallel.h>
+#include <c10/core/thread_pool.h>
+
+namespace at {
+
+class TORCH_API PTThreadPool : public c10::ThreadPool {
+ public:
+  explicit PTThreadPool(int pool_size, int numa_node_id = -1)
+      : c10::ThreadPool(pool_size, numa_node_id, []() {
+          c10::setThreadName("PTThreadPool");
+          at::init_num_threads();
+        }) {}
+};
+
+} // namespace at
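
A usage sketch: PTThreadPool is a c10::ThreadPool whose workers name themselves and call at::init_num_threads() on startup. This assumes the run()/waitWorkComplete() interface inherited from c10::ThreadPool:

#include <ATen/PTThreadPool.h>
#include <atomic>
#include <iostream>

int main() {
  at::PTThreadPool pool(/*pool_size=*/4);
  std::atomic<int> counter{0};
  for (int i = 0; i < 8; ++i) {
    // Each task runs on one of the four initialized worker threads.
    pool.run([&counter] { counter.fetch_add(1); });
  }
  pool.waitWorkComplete();
  std::cout << counter.load() << "\n"; // 8
  return 0;
}
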
venv/lib/python3.10/site-packages/torch/include/ATen/PadNd.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+#include <c10/util/Exception.h>
+#include <c10/util/string_view.h>
+
+namespace at {
+
+enum class padding_mode {
+  reflect,
+  replicate,
+  circular,
+  constant,
+};
+
+static inline c10::string_view padding_mode_string(padding_mode m) {
+  switch (m) {
+    case padding_mode::reflect:
+      return "reflect";
+    case padding_mode::replicate:
+      return "replicate";
+    case padding_mode::circular:
+      return "circular";
+    case padding_mode::constant:
+      return "constant";
+  }
+  TORCH_CHECK(false, "Invalid padding mode (", static_cast<int64_t>(m), ")");
+}
+
+} // namespace at
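
A small sketch of the enum-to-string helper above, assuming a program linked against libtorch:

#include <ATen/PadNd.h>
#include <iostream>

int main() {
  // padding_mode_string returns a view over a string literal, so .data()
  // is NUL-terminated and safe to stream directly.
  std::cout << at::padding_mode_string(at::padding_mode::reflect).data()
            << "\n"; // prints "reflect"
  return 0;
}
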
venv/lib/python3.10/site-packages/torch/include/ATen/Parallel-inl.h
ADDED
@@ -0,0 +1,93 @@
+#pragma once
+
+#include <c10/util/Exception.h>
+#include <c10/util/ParallelGuard.h>
+#include <c10/util/SmallVector.h>
+
+namespace at {
+
+template <class F>
+inline void parallel_for(
+    const int64_t begin,
+    const int64_t end,
+    const int64_t grain_size,
+    const F& f) {
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(grain_size >= 0);
+  if (begin >= end) {
+    return;
+  }
+
+#ifdef INTRA_OP_PARALLEL
+  at::internal::lazy_init_num_threads();
+  const auto numiter = end - begin;
+  const bool use_parallel =
+      (numiter > grain_size && numiter > 1 && !at::in_parallel_region() &&
+       at::get_num_threads() > 1);
+  if (!use_parallel) {
+    internal::ThreadIdGuard tid_guard(0);
+    c10::ParallelGuard guard(true);
+    f(begin, end);
+    return;
+  }
+
+  internal::invoke_parallel(
+      begin, end, grain_size, [&](int64_t begin, int64_t end) {
+        c10::ParallelGuard guard(true);
+        f(begin, end);
+      });
+#else
+  internal::ThreadIdGuard tid_guard(0);
+  c10::ParallelGuard guard(true);
+  f(begin, end);
+#endif
+}
+
+template <class scalar_t, class F, class SF>
+inline scalar_t parallel_reduce(
+    const int64_t begin,
+    const int64_t end,
+    const int64_t grain_size,
+    const scalar_t ident,
+    const F& f,
+    const SF& sf) {
+  TORCH_CHECK(grain_size >= 0);
+  if (begin >= end) {
+    return ident;
+  }
+
+#ifdef INTRA_OP_PARALLEL
+  at::internal::lazy_init_num_threads();
+  const auto max_threads = at::get_num_threads();
+  const bool use_parallel =
+      ((end - begin) > grain_size && !at::in_parallel_region() &&
+       max_threads > 1);
+  if (!use_parallel) {
+    internal::ThreadIdGuard tid_guard(0);
+    c10::ParallelGuard guard(true);
+    return f(begin, end, ident);
+  }
+
+  c10::SmallVector<scalar_t, 64> results(max_threads, ident);
+  internal::invoke_parallel(
+      begin,
+      end,
+      grain_size,
+      [&](const int64_t my_begin, const int64_t my_end) {
+        const auto tid = at::get_thread_num();
+        c10::ParallelGuard guard(true);
+        results[tid] = f(my_begin, my_end, ident);
+      });
+
+  scalar_t result = ident;
+  for (auto partial_result : results) {
+    result = sf(result, partial_result);
+  }
+  return result;
+#else
+  internal::ThreadIdGuard tid_guard(0);
+  c10::ParallelGuard guard(true);
+  return f(begin, end, ident);
+#endif
+}
+
+} // namespace at
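
A sketch of the canonical use of the two templates above: a parallel element-wise write and a parallel sum with per-thread partials (assuming a program linked against libtorch):

#include <ATen/Parallel.h>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<float> v(1 << 20, 1.0f);

  // parallel_for: [begin, end) is split into chunks of at least grain_size;
  // each invocation of the lambda receives its own [begin, end) sub-range.
  at::parallel_for(0, static_cast<int64_t>(v.size()), /*grain_size=*/2048,
                   [&](int64_t begin, int64_t end) {
                     for (int64_t i = begin; i < end; ++i) {
                       v[i] *= 2.0f;
                     }
                   });

  // parallel_reduce: each thread folds its sub-range starting from `ident`,
  // then the per-thread partial results are combined with `sf`.
  float total = at::parallel_reduce(
      0, static_cast<int64_t>(v.size()), /*grain_size=*/2048, /*ident=*/0.0f,
      [&](int64_t begin, int64_t end, float acc) {
        for (int64_t i = begin; i < end; ++i) {
          acc += v[i];
        }
        return acc;
      },
      [](float a, float b) { return a + b; });

  std::cout << total << "\n"; // 2 * 2^20 = 2097152
  return 0;
}
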
venv/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h
ADDED
@@ -0,0 +1,52 @@
+#pragma once
+
+#include <atomic>
+#include <cstddef>
+#include <exception>
+
+#include <c10/util/Exception.h>
+
+#ifdef _WIN32
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#endif
+#include <tbb/tbb.h>
+
+#define INTRA_OP_PARALLEL
+
+namespace at::internal {
+
+template <typename F>
+inline void invoke_parallel(
+    const int64_t begin,
+    const int64_t end,
+    const int64_t grain_size,
+    const F& f) {
+  // Choose number of tasks based on grain size and number of threads.
+  int64_t chunk_size = divup((end - begin), get_num_threads());
+  // Make sure each task is at least grain_size size.
+  chunk_size = std::max(grain_size, chunk_size);
+
+  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
+  std::exception_ptr eptr;
+  tbb::parallel_for(
+      tbb::blocked_range<int64_t>(begin, end, chunk_size),
+      [&eptr, &err_flag, f](const tbb::blocked_range<int64_t>& r) {
+        try {
+          internal::ThreadIdGuard tid_guard(
+              tbb::this_task_arena::current_thread_index());
+          f(r.begin(), r.end());
+        } catch (...) {
+          if (!err_flag.test_and_set()) {
+            eptr = std::current_exception();
+          }
+        }
+      },
+      tbb::static_partitioner{});
+  if (eptr) {
+    std::rethrow_exception(eptr);
+  }
+}
+
+} // namespace at::internal
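
The lambda passed to tbb::parallel_for may run on several workers at once, so the code above records only the first exception (the atomic_flag acts as a one-shot latch) and rethrows it on the calling thread. A standalone sketch of the same pattern, using std::thread in place of TBB:

#include <atomic>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <thread>
#include <vector>

int main() {
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;

  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    workers.emplace_back([&, i] {
      try {
        if (i == 2) {
          throw std::runtime_error("worker 2 failed");
        }
      } catch (...) {
        // test_and_set returns false only for the first caller, so only
        // the first exception is stored; later ones are dropped.
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
      }
    });
  }
  for (auto& t : workers) {
    t.join(); // join() also synchronizes the write to eptr with the reader
  }
  if (eptr) {
    try {
      std::rethrow_exception(eptr);
    } catch (const std::exception& e) {
      std::cout << "rethrown: " << e.what() << "\n";
    }
  }
  return 0;
}
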
venv/lib/python3.10/site-packages/torch/include/ATen/RedispatchFunctions.h
ADDED
(The diff for this file is too large to render; see the raw diff.)
venv/lib/python3.10/site-packages/torch/include/ATen/SavedTensorHooks.h
ADDED
@@ -0,0 +1,52 @@
+#pragma once
+
+#include <c10/macros/Export.h>
+#include <c10/util/Optional.h>
+#include <c10/util/python_stub.h>
+#include <stack>
+#include <string>
+
+#include <utility>
+
+namespace at {
+
+namespace impl {
+
+struct TORCH_API SavedTensorDefaultHooksTLS {
+  // PyObject is defined in c10/util/python_stub.h
+  std::stack<std::pair<PyObject*, PyObject*>> stack;
+
+  // See NOTE: [Disabling SavedTensorDefaultHooks] for context
+  // NOTE: [disabled_error_message invariant]
+  // disabled_error_message is nullopt IFF Saved Tensor hooks is enabled
+  // We did this for efficiency (so we didn't have to keep a separate bool
+  // around)
+  c10::optional<std::string> disabled_error_message;
+};
+
+} // namespace impl
+
+struct TORCH_API SavedTensorDefaultHooks {
+  static void push_hooks(PyObject* pack_hook, PyObject* unpack_hook);
+  static void pop_hooks();
+  static std::pair<PyObject*, PyObject*> get_hooks();
+  static void lazy_initialize();
+  static std::stack<std::pair<PyObject*, PyObject*>> get_stack();
+  static void set_stack(std::stack<std::pair<PyObject*, PyObject*>>);
+
+  static const impl::SavedTensorDefaultHooksTLS& get_tls_state();
+  static void set_tls_state(const impl::SavedTensorDefaultHooksTLS& tls);
+
+  // NOTE: [Disabling SavedTensorDefaultHooks]
+  // A developer of a PyTorch feature may choose to disable SavedTensorDefault
+  // hooks, especially if their feature does not work with it. If they are
+  // disabled, then the following will raise an error:
+  // - Attempting to push_hooks
+  // - calling disable(message) with a non-zero stack (from get_stack) size
+  static void disable(const std::string& error_message);
+  static void enable();
+  static bool is_enabled();
+  static const c10::optional<std::string>& get_disabled_error_message();
+};
+
+} // namespace at
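
A sketch of the disable/enable API declared above, leaning on the stated invariant that disabled_error_message is nullopt iff hooks are enabled (assuming a program linked against libtorch):

#include <ATen/SavedTensorHooks.h>
#include <iostream>

int main() {
  std::cout << at::SavedTensorDefaultHooks::is_enabled() << "\n"; // 1
  at::SavedTensorDefaultHooks::disable("not supported under feature X");
  std::cout << at::SavedTensorDefaultHooks::is_enabled() << "\n"; // 0
  // While disabled, push_hooks() would raise with this message:
  std::cout << *at::SavedTensorDefaultHooks::get_disabled_error_message()
            << "\n";
  at::SavedTensorDefaultHooks::enable();
  return 0;
}
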
venv/lib/python3.10/site-packages/torch/include/ATen/Scalar.h
ADDED
@@ -0,0 +1,3 @@
+#pragma once
+
+#include <ATen/core/Scalar.h>
venv/lib/python3.10/site-packages/torch/include/ATen/ScalarOps.h
ADDED
@@ -0,0 +1,53 @@
+#pragma once
+
+#include <ATen/Tensor.h>
+#include <c10/core/Scalar.h>
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/Functions.h>
+#else
+#include <ATen/ops/scalar_tensor.h>
+#endif
+
+namespace at::detail {
+// When filling a number to 1-element CPU tensor, we want to skip
+// everything but manipulate data ptr directly.
+// Ideally this fast pass should be implemented in TensorIterator,
+// but we also want to skip compute_types which is not avoidable
+// in TensorIterator for now.
+Tensor& scalar_fill(Tensor& self, const Scalar& value);
+TORCH_API Tensor scalar_tensor_static(
+    const Scalar& s,
+    c10::optional<ScalarType> dtype_opt,
+    c10::optional<Device> device_opt);
+} // namespace at::detail
+
+// This is in the c10 namespace because we use ADL to find the functions in it.
+namespace c10 {
+
+// FIXME: this should be (and was) Scalar::toTensor, but there is currently no
+// way to implement this without going through Derived Types (which are not part
+// of core).
+inline at::Tensor scalar_to_tensor(
+    const Scalar& s,
+    const Device device = at::kCPU) {
+  // This is the fast track we have for CPU scalar tensors.
+  if (device == at::kCPU) {
+    return at::detail::scalar_tensor_static(s, s.type(), at::kCPU);
+  }
+  return at::scalar_tensor(s, at::device(device).dtype(s.type()));
+}
+
+} // namespace c10
+
+namespace at::native {
+
+inline Tensor wrapped_scalar_tensor(
+    const Scalar& scalar,
+    const Device device = at::kCPU) {
+  auto tensor = scalar_to_tensor(scalar, device);
+  tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
+  return tensor;
+}
+
+} // namespace at::native
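
A sketch of the helper above: wrapped_scalar_tensor builds a 0-dim CPU tensor from a Scalar and marks it as a "wrapped number", so type promotion treats it like a Python scalar rather than a real tensor operand (assuming a program linked against libtorch):

#include <ATen/ATen.h>
#include <ATen/ScalarOps.h>
#include <iostream>

int main() {
  at::Tensor t = at::native::wrapped_scalar_tensor(at::Scalar(3.5));
  std::cout << t.dim() << "\n";                                      // 0
  std::cout << t.unsafeGetTensorImpl()->is_wrapped_number() << "\n"; // 1
  return 0;
}
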
venv/lib/python3.10/site-packages/torch/include/ATen/SmallVector.h
ADDED
@@ -0,0 +1,2 @@
+#pragma once
+#include <c10/util/SmallVector.h>
venv/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorImpl.h
ADDED
@@ -0,0 +1,186 @@
+#pragma once
+
+#include <ATen/Tensor.h>
+#include <c10/core/TensorImpl.h>
+#include <c10/util/Exception.h>
+namespace at {
+
+// Struct implementing a sparse CSR tensor. It uses three 1-D tensors for
+// denoting the data: `crow_indices_`, `col_indices_` and `values_`.
+// The `crow_indices_` tensor is an integer tensor of shape `(size(0) + 1)`
+// that represents the compressed row indices of the CSR tensor. The
+// `col_indices_` tensor is an integer tensor of shape `(nnz())`
+// that explicitly stores the column indices of each value of the sparse
+// tensor. The `values_` tensor can be of any pytorch-supported data type
+// and has shape `(nnz())`.
+//
+// Since the main advantage of the CSR format over the COO format is speed of
+// computation, care must be taken to facilitate smooth interfacing of
+// these data structures with optimized libraries such as MKL and MAGMA.
+// Since the MKL interface for pytorch currently uses indexing with int32
+// type, it is important to make sure that the `crow_indices` and `col_indices`
+// are of type int32 when calling MKL routines such as SPMM or SPMV.
+//
+// If not calling MKL, it should be alright to use 64 bit integer tensors
+// for indexing.
+struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
+  Tensor crow_indices_;
+  Tensor col_indices_;
+  Tensor values_;
+  Layout layout_;
+
+ public:
+  explicit SparseCsrTensorImpl(
+      at::DispatchKeySet,
+      at::Device device,
+      Layout layout,
+      const caffe2::TypeMeta);
+
+  void resize_(int64_t nnz, IntArrayRef size);
+  void resize_and_clear_(
+      int64_t sparse_dim,
+      int64_t dense_dim,
+      IntArrayRef size);
+  void resize_as_sparse_compressed_tensor_(const Tensor& src);
+  void set_member_tensors(
+      const Tensor& crow_indices,
+      const Tensor& col_indices,
+      const Tensor& values,
+      c10::SymIntArrayRef size);
+  void set_member_tensors(
+      const Tensor& crow_indices,
+      const Tensor& col_indices,
+      const Tensor& values,
+      IntArrayRef size);
+  const Tensor& compressed_indices() const {
+    return crow_indices_;
+  }
+  const Tensor& plain_indices() const {
+    return col_indices_;
+  }
+  const Tensor& values() const {
+    return values_;
+  }
+  int64_t nnz() {
+    return col_indices_.size(-1);
+  }
+
+  inline int64_t batch_dim() const noexcept {
+    return crow_indices_.dim() - 1;
+  }
+
+  inline int64_t sparse_dim() const noexcept {
+    return 2;
+  }
+
+  inline int64_t dense_dim() const noexcept {
+    return values_.dim() - batch_dim() - block_dim() - 1;
+  }
+
+ private:
+  inline int64_t block_dim() const noexcept {
+    return (layout_ == kSparseBsr || layout_ == kSparseBsc ? 2 : 0);
+  }
+
+ protected:
+  IntArrayRef strides_custom() const override;
+  SymIntArrayRef sym_strides_custom() const override;
+  bool is_contiguous_custom(MemoryFormat) const override;
+
+ public:
+  void set_size(int64_t dim, int64_t new_size) override;
+  void set_stride(int64_t dim, int64_t new_stride) override;
+  void set_storage_offset(int64_t storage_offset) override;
+  Layout layout_impl() const override {
+    return layout_;
+  }
+  void set_layout(Layout layout) {
+    switch (layout) {
+      case kSparseCsr:
+      case kSparseCsc:
+      case kSparseBsr:
+      case kSparseBsc:
+        layout_ = layout;
+        break;
+      default:
+        TORCH_CHECK(false, "unsupported layout ", layout);
+    }
+  }
+
+  /**
+   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`,
+   * see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      const c10::VariableVersion& version_counter,
+      bool allow_tensor_metadata_change) const override {
+    auto impl = c10::make_intrusive<SparseCsrTensorImpl>(
+        key_set(), device(), layout_impl(), dtype());
+    copy_tensor_metadata(
+        /*src_sparse_impl=*/this,
+        /*dest_sparse_impl=*/impl.get(),
+        /*version_counter=*/version_counter,
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+    impl->refresh_numel();
+    return impl;
+  }
+
+  /**
+   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`,
+   * see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      c10::VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) const override {
+    auto impl = c10::make_intrusive<SparseCsrTensorImpl>(
+        key_set(), device(), layout_impl(), dtype());
+    copy_tensor_metadata(
+        /*src_sparse_impl=*/this,
+        /*dest_sparse_impl=*/impl.get(),
+        /*version_counter=*/std::move(version_counter),
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+    impl->refresh_numel();
+    return impl;
+  }
+
+ private:
+  explicit SparseCsrTensorImpl(
+      at::DispatchKeySet key_set,
+      const caffe2::TypeMeta data_type,
+      at::Tensor crow_indices,
+      at::Tensor col_indices,
+      at::Tensor values,
+      at::Layout layout);
+
+  const char* tensorimpl_type_name() const override;
+
+  /**
+   * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
+   * storage_offset) from one TensorImpl to another TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
+   * [ TensorImpl Shallow-Copying ].
+   */
+  static void copy_tensor_metadata(
+      const SparseCsrTensorImpl* src_sparse_impl,
+      SparseCsrTensorImpl* dest_sparse_impl,
+      c10::VariableVersion version_counter,
+      bool allow_tensor_metadata_change) {
+    TensorImpl::copy_tensor_metadata(
+        src_sparse_impl,
+        dest_sparse_impl,
+        std::move(version_counter),
+        allow_tensor_metadata_change);
+
+    // Sparse-specific fields
+    dest_sparse_impl->crow_indices_ = src_sparse_impl->compressed_indices();
+    dest_sparse_impl->col_indices_ = src_sparse_impl->plain_indices();
+    dest_sparse_impl->values_ = src_sparse_impl->values();
+    dest_sparse_impl->layout_ = src_sparse_impl->layout_impl();
+  }
+};
+} // namespace at
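
A worked example of the three member tensors described in the comment block above, for the 3x4 matrix [[0, 2, 0, 0], [0, 0, 0, 0], [5, 0, 0, 7]]: crow_indices has size(0) + 1 = 4 entries, and nnz() == col_indices.size(-1) == 3. The sketch below assumes the standard ATen factory at::sparse_csr_tensor and a program linked against libtorch:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  // Row i's values live in the half-open range [crow[i], crow[i+1]).
  auto crow = at::tensor({0, 1, 1, 3}, at::kLong);
  // Column of each stored value, in row-major order.
  auto col = at::tensor({1, 0, 3}, at::kLong);
  auto vals = at::tensor({2.0, 5.0, 7.0});
  auto csr = at::sparse_csr_tensor(
      crow, col, vals, {3, 4}, at::TensorOptions().dtype(at::kDouble));
  std::cout << csr.to_dense() << "\n"; // recovers the dense 3x4 matrix
  return 0;
}
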
venv/lib/python3.10/site-packages/torch/include/ATen/SparseTensorImpl.h
ADDED
@@ -0,0 +1,400 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/Tensor.h>
|
4 |
+
#include <c10/core/TensorImpl.h>
|
5 |
+
#include <c10/util/Exception.h>
|
6 |
+
#include <c10/util/irange.h>
|
7 |
+
|
8 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
9 |
+
#include <ATen/Functions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/resize.h>
#endif

namespace at {
struct TORCH_API SparseTensorImpl : public TensorImpl {
  // Stored in COO format, indices + values.

  // INVARIANTS:
  // sparse_dim: range [0, len(shape)]; sparse_dim + dense_dim = len(shape)
  // dense_dim : range [0, len(shape)]; sparse_dim + dense_dim = len(shape)
  // _indices.shape: dimensionality: 2, shape: (sparse_dim, nnz)
  // _values.shape: dimensionality: 1 + dense_dim. shape: (nnz,
  // shape[sparse_dim:])

  int64_t sparse_dim_ = 0; // number of sparse dimensions
  int64_t dense_dim_ = 0; // number of dense dimensions

  Tensor indices_; // always a LongTensor
  Tensor values_;

  // A sparse tensor is 'coalesced' if every index occurs at most once in
  // the indices tensor, and the indices are in sorted order. (This means
  // that it is very easy to convert a coalesced tensor to CSR format: you
  // need only compute CSR format indices.)
  //
  // Most math operations can only be performed on coalesced sparse tensors,
  // because many algorithms proceed by merging two sorted lists (of indices).
  bool coalesced_ = false;

  // compute_numel with integer multiplication overflow check, see gh-57542
  void refresh_numel() {
    TensorImpl::safe_refresh_numel();
  }

 public:
  // Public for now...
  explicit SparseTensorImpl(at::DispatchKeySet, const caffe2::TypeMeta);

  void release_resources() override;

  int64_t nnz() const {
    return values_.size(0);
  }

  c10::SymInt sym_nnz() const {
    return values_.sym_size(0);
  }
  int64_t sparse_dim() const {
    return sparse_dim_;
  }
  int64_t dense_dim() const {
    return dense_dim_;
  }
  bool coalesced() const {
    return coalesced_;
  }
  Tensor indices() const {
    return indices_;
  }
  Tensor values() const {
    return values_;
  }

  void set_size(int64_t dim, int64_t new_size) override;
  void set_stride(int64_t dim, int64_t new_stride) override;
  void set_storage_offset(int64_t storage_offset) override;

#ifdef DEBUG
  bool has_storage() const override;
#endif

  // WARNING: This function does NOT preserve invariants of sparse_dim/dense_dim
  // with respect to indices and values
  void raw_resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "raw_resize_ ",
        err_msg_tensor_metadata_change_not_allowed);
    TORCH_CHECK(
        !has_symbolic_sizes_strides_,
        "raw_resize_ called on tensor with symbolic shape")
    set_sizes_and_strides(size, std::vector<int64_t>(size.size()));
    sparse_dim_ = sparse_dim;
    dense_dim_ = dense_dim;
    refresh_numel();
  }

  // NOTE: This function preserves invariants of sparse_dim/dense_dim with
  // respect to indices and values.
  //
  // NOTE: This function supports the following cases:
  // 1. When we keep the number of dense dimensions unchanged, and NOT shrinking
  // the size of any of the dense dimensions.
  // 2. When we keep the number of sparse dimensions unchanged, and NOT
  // shrinking the size of any of the sparse dimensions.
  // 3. When the sparse tensor has zero nnz, in which case we are free to change
  // the shapes of both its sparse and dense dimensions.
  //
  // This function DOESN'T support (and will throw an error) the following
  // cases:
  // 1. When we attempt to change the number of sparse dimensions on a non-empty
  // sparse tensor (such an operation will invalidate the indices stored).
  // 2. When we attempt to change the number of dense dimensions on a non-empty
  // sparse tensor (such an operation will behave differently from an equivalent
  // dense tensor's resize method, and for API consistency we don't support it).
  // 3. When we attempt to shrink the size of any of the dense dimensions on a
  // non-empty sparse tensor (such an operation will behave differently from an
  // equivalent dense tensor's resize method, and for API consistency we don't
  // support it).
  // 4. When we attempt to shrink the size of any of the sparse dimensions on a
  // non-empty sparse tensor (this could make some of the stored indices
  // out-of-bound and thus unsafe).
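  //
  // Illustrative example (editor's sketch, not part of the upstream header):
  // given a non-empty tensor with sparse_dim = 2, dense_dim = 1 and size
  // (4, 4, 2), the rules above mean:
  //   _resize_(2, 1, {8, 8, 2})  -> OK    (sparse dims grow, dense unchanged)
  //   _resize_(2, 1, {4, 4, 4})  -> OK    (dense dim grows)
  //   _resize_(1, 2, {4, 4, 2})  -> error (changes the sparse/dense split)
  //   _resize_(2, 1, {2, 4, 2})  -> error (shrinks a sparse dimension)
  // If nnz() == 0, all four calls succeed.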
  template <typename T>
  void _resize_(int64_t sparse_dim, int64_t dense_dim, ArrayRef<T> size) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "resize_ ",
        err_msg_tensor_metadata_change_not_allowed);
    TORCH_CHECK(
        !has_symbolic_sizes_strides_,
        "resize_ called on tensor with symbolic shape")
    TORCH_CHECK(
        sparse_dim + dense_dim == static_cast<int64_t>(size.size()),
        "number of dimensions must be sparse_dim (",
        sparse_dim,
        ") + dense_dim (",
        dense_dim,
        "), but got ",
        size.size());
    if (nnz() > 0) {
      auto alt_options_msg =
          "You could try the following options:\n\
1. If you need an empty sparse tensor of this size, call `x = torch.sparse_coo_tensor(size)`.\n\
2. If you need to resize this tensor, you have the following options:\n\
    1. For both sparse and dense dimensions, keep the number of them constant and the size of them non-shrinking, and then try the same call again.\n\
    2. Or, create a new sparse tensor with the correct indices and values from this sparse tensor.";

      TORCH_CHECK(
          sparse_dim == sparse_dim_,
          "changing the number of sparse dimensions (from ",
          sparse_dim_,
          " to ",
          sparse_dim,
          ") on a non-empty sparse tensor is not supported.\n",
          alt_options_msg);

      TORCH_CHECK(
          dense_dim == dense_dim_,
          "changing the number of dense dimensions (from ",
          dense_dim_,
          " to ",
          dense_dim,
          ") on a non-empty sparse tensor is not supported.\n",
          alt_options_msg);

      bool shrinking_sparse_dims = false;
      bool shrinking_dense_dim = false;
      auto sparse_size_original = generic_sizes<T>().slice(0, sparse_dim);
      auto sparse_size_new = size.slice(0, sparse_dim);
      for (const auto i : c10::irange(sparse_dim)) {
        if (sparse_size_new[i] < sparse_size_original[i]) {
          shrinking_sparse_dims = true;
          break;
        }
      }
      auto dense_size_original = generic_sizes<T>().slice(sparse_dim);
      auto dense_size_new = size.slice(sparse_dim);
      for (const auto i : c10::irange(dense_dim)) {
        if (dense_size_new[i] < dense_size_original[i]) {
          shrinking_dense_dim = true;
          break;
        }
      }

      TORCH_CHECK(
          !shrinking_sparse_dims,
          "shrinking the size of sparse dimensions (from ",
          sparse_size_original,
          " to ",
          sparse_size_new,
          ") on a non-empty sparse tensor is not supported.\n",
          alt_options_msg);

      TORCH_CHECK(
          !shrinking_dense_dim,
          "shrinking the size of dense dimensions (from ",
          dense_size_original,
          " to ",
          dense_size_new,
          ") on a non-empty sparse tensor is not supported.\n",
          alt_options_msg);
    }

    auto sizes_and_strides = generic_sizes<T>();
    const bool size_equals_sizes = std::equal(
        size.begin(),
        size.end(),
        sizes_and_strides.begin(),
        sizes_and_strides.end());
    if ((!size_equals_sizes) || (sparse_dim != sparse_dim_) ||
        (dense_dim != dense_dim_)) {
      auto nnz = at::symint::sizes<T>(values())[0];
      std::vector<T> values_size = {nnz};
      auto dense_size = size.slice(sparse_dim);
      values_size.insert(
          values_size.end(), dense_size.begin(), dense_size.end());
      at::symint::resize_<T>(values_, values_size);
      at::symint::resize_<T>(indices_, {T(sparse_dim), nnz});
    }

    if (!size_equals_sizes) {
      set_sizes_and_strides(size, std::vector<T>(size.size()));
    }
    sparse_dim_ = sparse_dim;
    dense_dim_ = dense_dim;
    refresh_numel();
  }

  void resize_(int64_t sparse_dim, int64_t dense_dim, ArrayRef<int64_t> size) {
    return _resize_(sparse_dim, dense_dim, size);
  }

  void resize_(
      int64_t sparse_dim,
      int64_t dense_dim,
      ArrayRef<c10::SymInt> size) {
    return _resize_(sparse_dim, dense_dim, size);
  }

  // NOTE: this function will resize the sparse tensor and also set `indices`
  // and `values` to empty.
  void resize_and_clear_(
      int64_t sparse_dim,
      int64_t dense_dim,
      IntArrayRef size) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "resize_and_clear_ ",
        err_msg_tensor_metadata_change_not_allowed);
    TORCH_CHECK(
        !has_symbolic_sizes_strides_,
        "resize_and_clear_ called on tensor with symbolic shape")
    TORCH_CHECK(
        sparse_dim + dense_dim == static_cast<int64_t>(size.size()),
        "number of dimensions must be sparse_dim (",
        sparse_dim,
        ") + dense_dim (",
        dense_dim,
        "), but got ",
        size.size());

    set_sizes_and_strides(size, std::vector<int64_t>(size.size()));
    sparse_dim_ = sparse_dim;
    dense_dim_ = dense_dim;

    auto empty_indices = at::empty({sparse_dim, 0}, indices().options());
    std::vector<int64_t> values_size = {0};
    auto dense_size = sizes().slice(sparse_dim);
    values_size.insert(values_size.end(), dense_size.begin(), dense_size.end());
    auto empty_values = at::empty(values_size, values().options());
    set_indices_and_values_unsafe(empty_indices, empty_values);
    refresh_numel();
  }

  void set_coalesced(bool coalesced) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "set_coalesced ",
        err_msg_tensor_metadata_change_not_allowed);
    coalesced_ = coalesced;
  }

  // NOTE: this function is only used internally and not exposed to Python
  // frontend
  void set_nnz_and_narrow(int64_t new_nnz) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "set_nnz_and_narrow ",
        err_msg_tensor_metadata_change_not_allowed);
    AT_ASSERT(new_nnz <= nnz());
    indices_ = indices_.narrow(1, 0, new_nnz);
    values_ = values_.narrow(0, 0, new_nnz);
    if (new_nnz < 2) {
      coalesced_ = true;
    }
  }

  // Takes indices and values and directly puts them into the sparse tensor, no
  // copy. NOTE: this function is unsafe because it doesn't check whether any
  // indices are out of boundaries of `sizes`, so it should ONLY be used where
  // we know that the indices are guaranteed to be within bounds. This used to
  // be called THSTensor_(_move). NB: This used to be able to avoid a refcount
  // bump, but I was too lazy to make it happen
  void set_indices_and_values_unsafe(
      const Tensor& indices,
      const Tensor& values);

  /**
   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`,
   * see NOTE [ TensorImpl Shallow-Copying ].
   */
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      const c10::VariableVersion& version_counter,
      bool allow_tensor_metadata_change) const override {
    auto impl = c10::make_intrusive<SparseTensorImpl>(key_set(), dtype());
    copy_tensor_metadata(
        /*src_sparse_impl=*/this,
        /*dest_sparse_impl=*/impl.get(),
        /*version_counter=*/version_counter,
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
    impl->refresh_numel();
    return impl;
  }

  /**
   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`,
   * see NOTE [ TensorImpl Shallow-Copying ].
   */
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      c10::VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const override {
    auto impl = c10::make_intrusive<SparseTensorImpl>(key_set(), dtype());
    copy_tensor_metadata(
        /*src_sparse_impl=*/this,
        /*dest_sparse_impl=*/impl.get(),
        /*version_counter=*/std::move(version_counter),
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
    impl->refresh_numel();
    return impl;
  }

  /**
   * Shallow-copies data from another TensorImpl into this TensorImpl.
   *
   * For why this function doesn't check this TensorImpl's
   * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ].
   */
  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
    AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set()));
    auto sparse_impl = static_cast<const SparseTensorImpl*>(impl.get());
    copy_tensor_metadata(
        /*src_sparse_impl=*/sparse_impl,
        /*dest_sparse_impl=*/this,
        /*version_counter=*/version_counter(),
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
    refresh_numel();
  }

 private:
  explicit SparseTensorImpl(
      at::DispatchKeySet,
      const caffe2::TypeMeta,
      at::Tensor indices,
      at::Tensor values);

  /**
   * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
   * storage_offset) from one TensorImpl to another TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
   * [ TensorImpl Shallow-Copying ].
   */
  static void copy_tensor_metadata(
      const SparseTensorImpl* src_sparse_impl,
      SparseTensorImpl* dest_sparse_impl,
      c10::VariableVersion version_counter,
      bool allow_tensor_metadata_change) {
    TensorImpl::copy_tensor_metadata(
        src_sparse_impl,
        dest_sparse_impl,
        std::move(version_counter),
        allow_tensor_metadata_change);

    // Sparse-specific fields
    dest_sparse_impl->sparse_dim_ = src_sparse_impl->sparse_dim();
    dest_sparse_impl->dense_dim_ = src_sparse_impl->dense_dim();
    dest_sparse_impl->indices_ = src_sparse_impl->indices();
    dest_sparse_impl->values_ = src_sparse_impl->values();
    dest_sparse_impl->coalesced_ = src_sparse_impl->coalesced();
  }

  const char* tensorimpl_type_name() const override;
};

} // namespace at
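A minimal sketch of the COO invariants and the 'coalesced' property described in the header above, written against the public libtorch C++ API (editor's example; the printed values in the comments are illustrative):

#include <torch/torch.h>
#include <iostream>

int main() {
  // sparse_dim = 2, dense_dim = 0, so indices has shape
  // (sparse_dim, nnz) = (2, 3) and values has shape (nnz,) = (3,).
  auto indices = torch::tensor({{0, 1, 1}, {2, 0, 0}}); // kLong, as required
  auto values = torch::tensor({3.0, 4.0, 5.0});
  auto t = torch::sparse_coo_tensor(indices, values, {2, 3});

  // The entry (1, 0) appears twice, so the tensor is not coalesced.
  std::cout << t.is_coalesced() << "\n"; // 0
  std::cout << t._nnz() << "\n";         // 3 (values_.size(0))

  // coalesce() sums duplicate entries and sorts the indices, which is
  // what makes the subsequent conversion to CSR cheap.
  auto c = t.coalesce();
  std::cout << c._nnz() << "\n";    // 2
  std::cout << c._values() << "\n"; // [3, 9]
}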
venv/lib/python3.10/site-packages/torch/include/ATen/StorageUtils.h
ADDED
@@ -0,0 +1,49 @@
#pragma once

#include <c10/core/Storage.h>
#include <c10/core/StorageImpl.h>
#include <c10/util/intrusive_ptr.h>

namespace at {

class TensorBase;

// Here we define a series of utils to create/manipulate ATen backed
// c10 storage implementations.

/**
 * Create a new shared memory storage impl managed by a file descriptor.
 *
 * @param size size in bytes
 */
C10_EXPORT c10::intrusive_ptr<c10::StorageImpl> new_shm_fd_storage(size_t size);

/**
 * Copy src to dst.
 * Caller must guarantee the validity of the storage objects
 * during the entire copy process, esp. when it's async.
 *
 * This can probably live in the c10 namespace later if needed,
 * but for now keep it in at to keep the implementation simple.
 *
 * @param dst dst tensor
 * @param src src tensor
 * @param non_blocking (default false) whether this operation blocks the caller
 */
C10_EXPORT void storage_copy(
    c10::Storage& dst,
    const c10::Storage& src,
    bool non_blocking = false);

/**
 * In-place change the storage to be shm-based.
 *
 * This is only applicable to CPU tensors not already shared.
 * Otherwise, it's a no-op to mirror the THP tensor behavior:
 * https://pytorch.org/docs/stable/generated/torch.Tensor.share_memory_.html
 *
 * @param t a tensor
 */
C10_EXPORT void share_memory_(TensorBase& t);

} // namespace at
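A short usage sketch for the two tensor-facing helpers declared above (editor's example, hypothetical driver code; it only uses the at::share_memory_ and at::storage_copy declarations from this header):

#include <torch/torch.h>
#include <ATen/StorageUtils.h>

int main() {
  // Move a CPU tensor's storage into fd-backed shared memory, in place.
  // Per the doc comment, this is a no-op if the storage is already shared.
  auto t = torch::ones({4});
  at::share_memory_(t);

  // Copy one storage's bytes into another. The local Storage handle shares
  // the same StorageImpl as `dst`, so the copy is visible through `dst`.
  auto src = torch::arange(4.0);
  auto dst = torch::empty({4});
  c10::Storage dst_storage = dst.storage();
  at::storage_copy(dst_storage, src.storage(), /*non_blocking=*/false);
}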
venv/lib/python3.10/site-packages/torch/include/ATen/TensorIndexing.h
ADDED
@@ -0,0 +1,735 @@
#pragma once

#include <ATen/ExpandUtils.h>
#include <ATen/ScalarOps.h>
#include <ATen/core/Tensor.h>
#include <ATen/core/TensorBody.h>
#include <c10/core/SymInt.h>
#include <c10/util/Optional.h>
#include <c10/util/irange.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/alias.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/scalar_tensor.h>
#include <ATen/ops/zeros.h>
#endif

#include <ATen/core/List.h>

#include <utility>

namespace at::indexing {

constexpr int64_t INDEX_MIN = c10::SymInt::min_representable_int();
constexpr int64_t INDEX_MAX = -(INDEX_MIN + 1);

enum class TensorIndexType { None, Ellipsis, SymInt, Boolean, Slice, Tensor };

constexpr c10::nullopt_t None = c10::nullopt;

struct TORCH_API EllipsisIndexType final {
  EllipsisIndexType() = default;
};
TORCH_API extern const EllipsisIndexType Ellipsis;

struct TORCH_API Slice final {
 public:
  Slice(
      c10::optional<c10::SymInt> start_index = c10::nullopt,
      c10::optional<c10::SymInt> stop_index = c10::nullopt,
      c10::optional<c10::SymInt> step_index = c10::nullopt) {
    if (!step_index.has_value()) {
      step_ = c10::SymInt(1);
    } else {
      step_ = std::move(step_index).value();
    }

    TORCH_CHECK_VALUE(step_ != 0, "slice step cannot be zero");

    if (!start_index.has_value()) {
      start_ = c10::SymInt(step_ < 0 ? INDEX_MAX : 0);
    } else {
      start_ = std::move(start_index).value();
    }

    if (!stop_index.has_value()) {
      stop_ = c10::SymInt(step_ < 0 ? INDEX_MIN : INDEX_MAX);
    } else {
      stop_ = std::move(stop_index).value();
    }
  }

  inline c10::SymInt start() const {
    return start_;
  }

  inline c10::SymInt stop() const {
    return stop_;
  }

  inline c10::SymInt step() const {
    return step_;
  }

 private:
  c10::SymInt start_;
  c10::SymInt stop_;
  c10::SymInt step_;
};

TORCH_API std::ostream& operator<<(std::ostream& stream, const Slice& slice);

// `at::indexing::TensorIndex` is used for converting C++ tensor indices such as
// `{None, "...", Ellipsis, 0, true, Slice(1, None, 2), torch::tensor({1, 2})}`
// into its equivalent `std::vector<TensorIndex>`, so that further tensor
// indexing operations can be performed using the supplied indices.
//
// There is a one-to-one correspondence between Python and C++ tensor index
// types:
// Python                  | C++
// -----------------------------------------------------
// `None`                  | `at::indexing::None`
// `Ellipsis`              | `at::indexing::Ellipsis`
// `...`                   | `"..."`
// `123`                   | `123`
// `True` / `False`        | `true` / `false`
// `:`                     | `Slice()` / `Slice(None, None)`
// `::`                    | `Slice()` / `Slice(None, None, None)`
// `1:`                    | `Slice(1, None)`
// `1::`                   | `Slice(1, None, None)`
// `:3`                    | `Slice(None, 3)`
// `:3:`                   | `Slice(None, 3, None)`
// `::2`                   | `Slice(None, None, 2)`
// `1:3`                   | `Slice(1, 3)`
// `1::2`                  | `Slice(1, None, 2)`
// `:3:2`                  | `Slice(None, 3, 2)`
// `1:3:2`                 | `Slice(1, 3, 2)`
// `torch.tensor([1, 2])`  | `torch::tensor({1, 2})`
struct TORCH_API TensorIndex final {
  // Case 1: `at::indexing::None`
  TensorIndex(c10::nullopt_t) : type_(TensorIndexType::None) {}

  // Case 2: "..." / `at::indexing::Ellipsis`
  TensorIndex(at::indexing::EllipsisIndexType)
      : type_(TensorIndexType::Ellipsis) {}
  TensorIndex(const char* str) : TensorIndex(at::indexing::Ellipsis) {
    TORCH_CHECK_VALUE(
        strcmp(str, "...") == 0,
        "Expected \"...\" to represent an ellipsis index, but got \"",
        str,
        "\"");
  }

  // Case 3: (Sym) Integer value
  TensorIndex(SymInt integer)
      : integer_(std::move(integer)), type_(TensorIndexType::SymInt) {}
  TensorIndex(int64_t integer) : TensorIndex(SymInt(integer)) {}
  TensorIndex(int integer) : TensorIndex(SymInt(integer)) {}

  // Case 4: Boolean value
  template <class T, class = std::enable_if_t<std::is_same_v<bool, T>>>
  TensorIndex(T boolean) : boolean_(boolean), type_(TensorIndexType::Boolean) {}

  // Case 5: Slice represented in `at::indexing::Slice` form
  TensorIndex(Slice slice)
      : slice_(std::move(slice)), type_(TensorIndexType::Slice) {}

  // Case 6: Tensor value
  TensorIndex(Tensor tensor)
      : tensor_(std::move(tensor)), type_(TensorIndexType::Tensor) {}

  inline bool is_none() const {
    return type_ == TensorIndexType::None;
  }

  inline bool is_ellipsis() const {
    return type_ == TensorIndexType::Ellipsis;
  }

  inline bool is_integer() const {
    return type_ == TensorIndexType::SymInt;
  }

  inline SymInt integer() const {
    return integer_;
  }

  inline bool is_boolean() const {
    return type_ == TensorIndexType::Boolean;
  }

  inline bool boolean() const {
    return boolean_;
  }

  inline bool is_slice() const {
    return type_ == TensorIndexType::Slice;
  }

  inline const Slice& slice() const {
    return slice_;
  }

  inline bool is_tensor() const {
    return type_ == TensorIndexType::Tensor;
  }

  inline const Tensor& tensor() const {
    return tensor_;
  }

 private:
  SymInt integer_ = 0;
  bool boolean_ = false;
  Slice slice_;
  Tensor tensor_;
  TensorIndexType type_;
};

TORCH_API std::ostream& operator<<(
    std::ostream& stream,
    const TensorIndex& tensor_index);
TORCH_API std::ostream& operator<<(
    std::ostream& stream,
    const std::vector<TensorIndex>& tensor_indices);

namespace impl {
static inline Tensor applySlice(
    const Tensor& self,
    int64_t dim,
    c10::SymInt start,
    c10::SymInt stop,
    c10::SymInt step,
    bool disable_slice_optimization,
    const at::Device& self_device,
    const c10::optional<SymIntArrayRef>& self_sizes) {
  // TODO: implement negative step
  TORCH_CHECK_VALUE(step > 0, "step must be greater than zero");

  // See NOTE [nested tensor size for indexing]
  if (self_sizes.has_value()) {
    // Skip this optimization if we are tracing, as the trace may be polymorphic
    // over the shape of the `self` tensor, and we still want to record
    // the slice.
    SymInt length = (self_device == at::kCPU || self_device == at::kCUDA)
        ? (*self_sizes)[dim]
        : self.sym_size(dim);
    if (!disable_slice_optimization &&
        TORCH_GUARD_SIZE_OBLIVIOUS(start.sym_eq(0)) && length == stop &&
        step == 1) {
      return self;
    }
  }
  return self.slice_symint(
      dim, std::move(start), std::move(stop), std::move(step));
}

static inline Tensor applySelect(
    const Tensor& self,
    int64_t dim,
    SymInt index,
    int64_t real_dim,
    const at::Device& /*self_device*/,
    const c10::optional<SymIntArrayRef>& self_sizes) {
  // See NOTE [nested tensor size for indexing]
  if (self_sizes.has_value()) {
    auto maybe_index = index.maybe_as_int();
    if (maybe_index.has_value()) {
      TORCH_CHECK_INDEX(
          !(maybe_index.value() == 0 && dim == 0 && self_sizes->empty()),
          "invalid index of a 0-dim tensor. ",
          "Use `tensor.item()` in Python or `tensor.item<T>()` in C++ to convert a 0-dim tensor to a number");
    }

    auto size = (*self_sizes)[dim];
    // Note: `size >= -index` is not equivalent to `size > -1 - index` if index
    // is INT64_MIN. For std::numeric_limits<int64_t>::min() the result of
    // unary minus is undefined by the standard, but in practice it is equal to
    // the input. On the other hand, index wrapping is valid for all negative
    // int64_t values, as x[INT64_MIN] is the same as x[INT64_MAX].
    TORCH_CHECK_INDEX(
        size > -1 - index && size > index,
        "index ",
        index,
        " is out of bounds for dimension ",
        real_dim,
        " with size ",
        size);
  }

  // if the index is negative, do not normalize it because that would fix the
  // index on the current tensor size in the tracer. aten::select also works on
  // negative indices
  return self.select_symint(dim, std::move(index));
}

static inline Tensor boolToIndexingTensorCPUOrCUDA(
    const Tensor& self,
    bool value) {
  // booleans add a dimension of size 1. true indexes this dimension as if 0:,
  // false as empty.
  if (value) {
    return at::empty({1}, self.options().dtype(kLong)).fill_(0.);
  } else {
    return at::empty({0}, self.options().dtype(kLong));
  }
}

static inline Tensor boolToIndexingTensorNonNativeDeviceType(
    const Tensor& self,
    bool value) {
  // booleans add a dimension of size 1. true indexes this dimension as if 0:,
  // false as empty.
  if (value) {
    return at::zeros({1}, self.options().dtype(kLong));
  } else {
    return at::empty({0}, self.options().dtype(kLong));
  }
}

static inline Tensor boolToIndexingTensor(
    const Tensor& self,
    bool value,
    const at::Device& self_device) {
  if (self_device == at::kCPU || self_device == at::kCUDA) {
    return boolToIndexingTensorCPUOrCUDA(self, value);
  } else {
    return boolToIndexingTensorNonNativeDeviceType(self, value);
  }
}

static inline Tensor scalarToTensorNonNativeDeviceType(
    const Scalar& v,
    const TensorOptions& options) {
  return at::scalar_tensor(v, options);
}

static inline void recordTensorIndex(
    const Tensor& tensor,
    std::vector<Tensor>& outIndices,
    int64_t* dim_ptr) {
  // TODO: check scalarType
  outIndices.resize(*dim_ptr + 1);
  outIndices[*dim_ptr] = tensor;
  (*dim_ptr)++;
}

static inline c10::List<c10::optional<Tensor>> typeConvertIndices(
    const Tensor& /*self*/,
    std::vector<Tensor>&& indices) {
  c10::List<c10::optional<Tensor>> converted_inds;
  converted_inds.reserve(indices.size());
  for (auto&& i : std::move(indices)) {
    converted_inds.push_back(std::move(i));
  }
  return converted_inds;
}

// NOTE: Why do we mirror instead of replace the `count_specified_dimensions`
// function in torch/csrc/autograd/python_variable_indexing.cpp? It's because
// `count_specified_dimensions` is on the hot path of Python tensor multi-dim
// indexing (i.e. it's called by `applySlicing` which is called by
// `THPVariable_getitem` / `THPVariable_setitem` when handling indexing of more
// than one dimension). If we were to merge the Python/C++
// `count_specified_dimensions` function, on the Python side we would have to
// construct a `std::vector` container to be consumed by the C++
// `count_specified_dimensions` function, which adds 100s of nanoseconds of
// overhead and is undesirable.
static inline int64_t count_specified_dimensions(
    const ArrayRef<TensorIndex>& indices) {
  // Count the number of indexed dimensions (everything but ellipsis and None)
  int64_t count = 0;
  for (auto& obj : indices) {
    if (obj.is_tensor()) {
      auto& tensor = obj.tensor();
      if (tensor.scalar_type() == kByte || tensor.scalar_type() == kBool) {
        count += tensor.dim();
      } else {
        count++;
      }
    } else if (!obj.is_none() && !obj.is_ellipsis() && !obj.is_boolean()) {
      count++;
    }
  }
  return count;
}
} // namespace impl

// NOTE: Many functions below are only for consumption from the Python indexing
// implementation; they include:
//
// - `Tensor scalarToTensor(...)`
// - `IntArrayRef slicePrefix1sSize(...)`
// - `void copy_to(...)`
// - `Tensor handleDimInMultiDimIndexing(...)`
// - `Tensor dispatch_index(...)`
// - `Tensor dispatch_index_put_(...)`
// - `Tensor get_item(...)`
// - `void set_item(...)`
//
// The rest of the functions are in the `at::indexing::impl` namespace,
// signifying that they shouldn't be used from the Python indexing
// implementation.
static inline Tensor scalarToTensor(
    const Scalar& v,
    const TensorOptions& options,
    const at::Device& self_device) {
  if (self_device == at::kCPU && !v.isSymbolic()) {
    return at::detail::scalar_tensor_static(
        v, options.dtype_opt()->toScalarType(), self_device);
  } else {
    return impl::scalarToTensorNonNativeDeviceType(v, options);
  }
}

// To match numpy semantics:
// As a special case for backwards compatibility,
// strip away unit dimensions from the left of 'src'
static inline SymIntArrayRef slicePrefix1sSize(const SymIntArrayRef& sizes) {
  size_t first_non1_src = sizes.size();
  for (const auto i : c10::irange(sizes.size())) {
    // Unbacked SymInt has different behavior, but this is sound because
    // failing to slice will only ever cause an error, not divergent
    // behavior
    if (!sizes[i].has_hint() || sizes[i] != 1) {
      first_non1_src = i;
      break;
    }
  }

  return sizes.slice(first_non1_src);
}

static inline void copy_to(const Tensor& dst, const Tensor& src) {
  if (dst.sym_sizes().equals(src.sym_sizes())) {
    // A shortcut to avoid generating hard-coded constant sizes during tracing.
    // This is not a perfect solution: when src & dst have different shapes,
    // constants will still appear. Users can work around that case by
    // dst[index..] = src.reshape(..)
    dst.copy_(src);
    return;
  } else if (src.dim() == 0 && src.device().type() == at::kCPU) {
    dst.fill_(src);
    return;
  }
  auto src_view = src.view_symint(slicePrefix1sSize(src.sym_sizes()));
  c10::MaybeOwned<Tensor> b_src = expand_inplace(dst, src_view, "setitem");
  dst.copy_(*b_src);
}

// See NOTE [ Setting `disable_slice_optimization` when calling C++ tensor
// indexing functions from Python ]
static inline Tensor handleDimInMultiDimIndexing(
    const Tensor& prev_dim_result,
    const Tensor& original_tensor,
    const TensorIndex& index,
    int64_t* dim_ptr,
    int64_t* specified_dims_ptr,
    int64_t real_dim,
    std::vector<Tensor>& outIndices,
    bool disable_slice_optimization,
    const at::Device& original_tensor_device,
    const c10::optional<SymIntArrayRef>& prev_dim_result_sizes) {
  if (index.is_integer()) {
    return impl::applySelect(
        prev_dim_result,
        *dim_ptr,
        index.integer(),
        real_dim,
        original_tensor_device,
        prev_dim_result_sizes);
  } else if (index.is_slice()) {
    Tensor result = impl::applySlice(
        prev_dim_result,
        *dim_ptr,
        index.slice().start(),
        index.slice().stop(),
        index.slice().step(),
        /*disable_slice_optimization=*/disable_slice_optimization,
        original_tensor_device,
        prev_dim_result_sizes);
    (*dim_ptr)++;
    return result;
  } else if (index.is_ellipsis()) {
    (*dim_ptr) += original_tensor.dim() - (*specified_dims_ptr);
    return prev_dim_result;
  } else if (index.is_none()) {
    Tensor result = prev_dim_result.unsqueeze(*dim_ptr);
    (*dim_ptr)++;
    return result;
  } else if (index.is_boolean()) {
    Tensor result = prev_dim_result.unsqueeze(*dim_ptr);
    impl::recordTensorIndex(
        impl::boolToIndexingTensor(
            result, index.boolean(), original_tensor_device),
        outIndices,
        dim_ptr);
    return result;
  } else if (index.is_tensor()) {
    Tensor result = prev_dim_result;
    const Tensor& tensor = index.tensor();
    auto scalar_type = tensor.scalar_type();
    if (tensor.dim() == 0 &&
        at::isIntegralType(scalar_type, /*includeBool=*/true)) {
      if (scalar_type != at::kByte && scalar_type != at::kBool) {
        result = impl::applySelect(
            result,
            *dim_ptr,
            tensor.item<int64_t>(),
            real_dim,
            original_tensor_device,
            prev_dim_result_sizes);
      } else {
        result = result.unsqueeze(*dim_ptr);
        if (scalar_type == at::kBool) {
          impl::recordTensorIndex(
              impl::boolToIndexingTensor(
                  result, tensor.item<bool>() != 0, original_tensor_device),
              outIndices,
              dim_ptr);
        } else {
          impl::recordTensorIndex(
              impl::boolToIndexingTensor(
                  result, tensor.item<uint8_t>() != 0, original_tensor_device),
              outIndices,
              dim_ptr);
        }
      }
    } else {
      impl::recordTensorIndex(tensor, outIndices, dim_ptr);
    }
    return result;
  } else {
    TORCH_INTERNAL_ASSERT(false, "Invalid TensorIndex type");
  }
}

namespace impl {
// This mirrors `applySlicing` in
// torch/csrc/autograd/python_variable_indexing.cpp
static inline Tensor applySlicing(
    const Tensor& self,
    const ArrayRef<TensorIndex>& indices,
    std::vector<Tensor>& outIndices,
    bool disable_slice_optimization,
    const at::Device& self_device,
    const c10::optional<SymIntArrayRef>& self_sizes) {
  int64_t dim = 0;
  int64_t specified_dims = impl::count_specified_dimensions(indices);

  // See NOTE [nested tensor size for indexing]
  if (self_sizes.has_value()) {
    TORCH_CHECK_INDEX(
        specified_dims <= (int64_t)self_sizes->size(),
        "too many indices for tensor of dimension ",
        (int)self_sizes->size());
  }

  Tensor result = self;
  for (const auto i : c10::irange(indices.size())) {
    auto& obj = indices[i];
    // See NOTE [nested tensor size for indexing]
    c10::optional<SymIntArrayRef> result_sizes = result.is_nested()
        ? c10::optional<SymIntArrayRef>(c10::nullopt)
        : c10::optional<SymIntArrayRef>(result.sym_sizes());
    result = handleDimInMultiDimIndexing(
        /*prev_dim_result=*/result,
        /*original_tensor=*/self,
        /*index=*/obj,
        /*dim_ptr=*/&dim,
        /*specified_dims_ptr=*/&specified_dims,
        /*real_dim=*/static_cast<int64_t>(i),
        /*outIndices=*/outIndices,
        /*disable_slice_optimization=*/disable_slice_optimization,
        /*original_tensor_device=*/self_device,
        /*prev_dim_result_sizes=*/result_sizes);
  }
  return result;
}
} // namespace impl

static inline Tensor dispatch_index(
    const Tensor& self,
    std::vector<Tensor>&& indices) {
  return self.index(impl::typeConvertIndices(self, std::move(indices)));
}

static inline Tensor dispatch_index_put_(
    Tensor& self,
    std::vector<Tensor>&& indices,
    const Tensor& value) {
  return self.index_put_(
      impl::typeConvertIndices(self, std::move(indices)), value);
}

// NOTE [ Setting `disable_slice_optimization` when calling C++ tensor indexing
// functions from Python ]
//
// Question: When should we set `disable_slice_optimization` to `true` when
// calling C++ tensor indexing functions from Python indexing code?
//
// Answer: What "slice optimization" means: when we have a slicing expression
// like `x[0:5, 0]`, where the sliced tensor was of size 5 in dimension 0, we
// would skip dispatching the actual slice call as an optimization. However,
// here are the cases where we DON'T want this optimization:
//
// 1. When we are doing 1-D slicing (e.g. `tensor[:]`).
//    Reason: we always return a shallow copy for expressions such as
//    `tensor[:]` / `tensor[...]` / `tensor[:, :]`. (Note that for `tensor[:,
//    :]`, we return an alias of `tensor` by doing the following:
//    ```
//    Tensor sliced = impl::applySlicing(self, indices, tensorIndices,
//        disable_slice_optimization, self_device, self_sizes);
//    if (tensorIndices.empty()) {
//      if (sliced.is_same(self)) {
//        // ensure we return a shallow copy for things like x[...]
//        sliced = at::alias(sliced);
//      }
//      return sliced;
//    }
//    ```)
// 2. When we are doing JIT tracing.
//    Reason: JIT tracing needs the `self.slice(...)` call to properly trace
//    the slice operation.

// This mirrors `THPVariable_getitem` in
// torch/csrc/autograd/python_variable_indexing.cpp. See NOTE [ Setting
// `disable_slice_optimization` when calling C++ tensor indexing functions from
// Python ]
static inline Tensor get_item(
    const Tensor& self,
    const ArrayRef<TensorIndex>& indices,
    bool disable_slice_optimization = false) {
  at::Device self_device = self.device();
  // NOTE [nested tensor size for indexing]
  // nested tensor does not have a size (yet), so for now we represent its size
  // as null; this may need to be changed after we reach a better solution for
  // nested tensor size
  c10::optional<SymIntArrayRef> self_sizes = self.is_nested()
      ? c10::optional<SymIntArrayRef>(c10::nullopt)
      : c10::optional<SymIntArrayRef>(self.sym_sizes());

  // handle simple types: integers, slices, none, ellipsis, bool
  if (indices.size() == 1) {
    const TensorIndex& index = indices[0];
    if (index.is_integer()) {
      return impl::applySelect(
          self, 0, index.integer(), 0, self_device, self_sizes);
    } else if (index.is_slice()) {
      return impl::applySlice(
          self,
          0,
          index.slice().start(),
          index.slice().stop(),
          index.slice().step(),
          /*disable_slice_optimization=*/true,
          self_device,
          self_sizes);
    } else if (index.is_none()) {
      return self.unsqueeze(0);
    } else if (index.is_ellipsis()) {
      return at::alias(self);
    } else if (index.is_boolean()) {
      Tensor result = self.unsqueeze(0);
      return dispatch_index(
          result,
          std::vector<Tensor>{impl::boolToIndexingTensor(
              result, index.boolean(), self_device)});
    }
  }

  std::vector<Tensor> tensorIndices;
  Tensor sliced = impl::applySlicing(
      self,
      indices,
      tensorIndices,
      disable_slice_optimization,
      self_device,
      self_sizes);
  if (tensorIndices.empty()) {
    if (sliced.is_same(self)) {
      // ensure we return a shallow copy for things like x[...]
      sliced = at::alias(sliced);
    }
    return sliced;
  }

  // indexing by tensors ("advanced" indexing)
  return dispatch_index(sliced, std::move(tensorIndices));
}

// This mirrors `THPVariable_setitem` in
// torch/csrc/autograd/python_variable_indexing.cpp for the "the assigned value
// is a Tensor" case. See NOTE [ Setting `disable_slice_optimization` when
// calling C++ tensor indexing functions from Python ]
static inline void set_item(
    const Tensor& self,
    const ArrayRef<TensorIndex>& indices,
    const Tensor& value,
    bool disable_slice_optimization = false) {
  at::Device self_device = self.device();
  SymIntArrayRef self_sizes = self.sym_sizes();

  // handle simple types: integers, slices, ellipsis, bool
  if (indices.size() == 1) {
    const TensorIndex& index = indices[0];
    if (index.is_boolean() && !index.boolean()) {
      // do nothing for false (technically we should check the size, but we
      // don't have real 0-sized shapes).
      return;
    } else if (index.is_ellipsis()) {
      copy_to(self, value);
      return;
    } else if (index.is_none() || (index.is_boolean() && index.boolean())) {
      copy_to(self.unsqueeze(0), value);
      return;
    } else if (index.is_integer()) {
      copy_to(
          impl::applySelect(
              self, 0, index.integer(), 0, self_device, self_sizes),
          value);
      return;
    } else if (index.is_slice()) {
      copy_to(
          impl::applySlice(
              self,
              0,
              index.slice().start(),
              index.slice().stop(),
              index.slice().step(),
              /*disable_slice_optimization=*/disable_slice_optimization,
              self_device,
              self_sizes),
          value);
      return;
    }
  }

  std::vector<Tensor> tensorIndices;
  Tensor sliced = impl::applySlicing(
      self,
      indices,
      tensorIndices,
      disable_slice_optimization,
      self_device,
      self_sizes);
  if (tensorIndices.empty()) {
    copy_to(sliced, value);
    return;
  }

  SymIntArrayRef valueSizes = value.sym_sizes();
  SymIntArrayRef slicedValueSizes = slicePrefix1sSize(valueSizes);
  Tensor valuesSliced;
  if (!valueSizes.equals(slicedValueSizes)) {
    valuesSliced = value.view_symint(slicedValueSizes);
  } else {
    valuesSliced = value;
  }
  dispatch_index_put_(sliced, std::move(tensorIndices), valuesSliced);
  return;
}

} // namespace at::indexing
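A minimal sketch of the Python/C++ correspondence table above, using the public Tensor::index / Tensor::index_put_ entry points that this header ultimately serves (editor's example; the comments show the equivalent Python expressions):

#include <torch/torch.h>

int main() {
  using namespace torch::indexing;
  auto x = torch::arange(12).reshape({3, 4});

  auto a = x.index({1, 2});                       // x[1, 2]    -> 6
  auto b = x.index({Slice(), Slice(1, None, 2)}); // x[:, 1::2] -> shape (3, 2)
  auto c = x.index({Ellipsis, -1});               // x[..., -1] -> shape (3,)
  auto d = x.index({x > 5});                      // x[x > 5]   (advanced indexing)

  // Assignment goes through index_put_; dtype must match x here.
  x.index_put_({0, Slice()}, torch::zeros({4}, x.options())); // x[0, :] = 0
}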
venv/lib/python3.10/site-packages/torch/include/ATen/TensorIterator.h
ADDED
@@ -0,0 +1,1002 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/TensorMeta.h>
|
4 |
+
#include <ATen/core/Dimname.h>
|
5 |
+
#include <ATen/core/Range.h>
|
6 |
+
#include <ATen/core/TensorBase.h>
|
7 |
+
#include <c10/core/DynamicCast.h>
|
8 |
+
#include <c10/util/FunctionRef.h>
|
9 |
+
#include <c10/util/MaybeOwned.h>
|
10 |
+
#include <c10/util/SmallVector.h>
|
11 |
+
#include <c10/util/TypeCast.h>
|
12 |
+
#include <c10/util/irange.h>
|
13 |
+
|
14 |
+
#include <array>
|
15 |
+
#include <bitset>
|
16 |
+
|
17 |
+
namespace at {
|
18 |
+
class Tensor;
|
19 |
+
class OptionalTensorRef;
|
20 |
+
using NameVector = SmallVector<Dimname, kDimVectorStaticSize>;
|
21 |
+
} // namespace at
|
22 |
+
|
23 |
+
// TensorIterator is a helper class for element-wise operations, such as
|
24 |
+
// arithmetic, comparisons, and trigonometric functions. It handles
|
25 |
+
// broadcasting and type conversions of operands.
|
26 |
+
//
|
27 |
+
// This is inspired by NumPy's Array Iterator API (NpyIter).
|
28 |
+
//
|
29 |
+
// The files Loops.h and Loops.cuh provide functions to build kernels that
|
30 |
+
// use TensorIterator.
|
31 |
+
//
|
32 |
+
// Example:
|
33 |
+
//
|
34 |
+
// auto iter = TensorIteratorConfig()
|
35 |
+
// .add_output(output)
|
36 |
+
// .add_input(input)
|
37 |
+
// .build()
|
38 |
+
//
|
39 |
+
// [MyKernel.cpp / MyKernel.cu]
|
40 |
+
// cpu_kernel(iter, [](float a, float b) {
|
41 |
+
// return a + b;
|
42 |
+
// });
|
43 |
+
//
|
44 |
+
// gpu_kernel(iter, []GPU_LAMBDA(float a, float b) -> float {
|
45 |
+
// return a + b;
|
46 |
+
// });
|
47 |
+
//
|
48 |
+
// Note [Order of Construction]
|
49 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
50 |
+
// When setting up the tensor iterator configuration, the output Tensors
|
51 |
+
// have to be added first via
|
52 |
+
// TensorIteratorConfig::add_owned_output(at::Tensor). After adding all outputs,
|
53 |
+
// the inputs can be added via
|
54 |
+
// TensorIteratorConfig::add_owned_input(at::Tensor).
|
55 |
+
// Adding another output after inputs have been added will rise an exception.
|
56 |
+
//
|
57 |
+
// Note [Common Dtype Computation]
|
58 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
59 |
+
// Some operations have a natural notion of a "common dtype" or
|
60 |
+
// "computation dtype" where all inputs are cast to one dtype, the
|
61 |
+
// operation is performed, and then the results are cast to all outputs.
|
62 |
+
//
|
63 |
+
// TensorIterator infers a common dtype if all inputs have the same dtype,
|
64 |
+
// and it computes one using type promotion rules on its inputs if
|
65 |
+
// promote_inputs_to_common_dtype_ is true. Attempting to query
|
66 |
+
// a common dtype otherwise will throw an exception.
|
67 |
+
//
|
68 |
+
// Note that the outputs are not considered when computing a common dtype.
|
69 |
+
|
70 |
+
namespace at {
|
71 |
+
|
72 |
+
namespace internal {
|
73 |
+
// This parameter is heuristically chosen to determine the minimum number of
|
74 |
+
// work that warrants parallelism. For example, when summing an array, it is
|
75 |
+
// deemed inefficient to parallelise over arrays shorter than 32768. Further,
|
76 |
+
// no parallel algorithm (such as parallel_reduce) should split work into
|
77 |
+
// smaller than GRAIN_SIZE chunks.
|
78 |
+
constexpr int64_t GRAIN_SIZE = 32768;
|
79 |
+
|
80 |
+
// Storage for a non-owning Tensor, without needing to include Tensor.h
|
81 |
+
class TORCH_API OpaqueOptionalTensorRef {
|
82 |
+
alignas(alignof(TensorBase)) std::array<char, sizeof(TensorBase)> data_{};
|
83 |
+
|
84 |
+
public:
|
85 |
+
OpaqueOptionalTensorRef();
|
86 |
+
OpaqueOptionalTensorRef(const OpaqueOptionalTensorRef&) = default;
|
87 |
+
OpaqueOptionalTensorRef& operator=(const OpaqueOptionalTensorRef&) = default;
|
88 |
+
OpaqueOptionalTensorRef(OpaqueOptionalTensorRef&&) noexcept = default;
|
89 |
+
OpaqueOptionalTensorRef& operator=(OpaqueOptionalTensorRef&&) noexcept =
|
90 |
+
default;
|
91 |
+
~OpaqueOptionalTensorRef();
|
92 |
+
|
93 |
+
OptionalTensorRef* get() {
|
94 |
+
return reinterpret_cast<OptionalTensorRef*>(data_.data());
|
95 |
+
}
|
96 |
+
const OptionalTensorRef* get() const {
|
97 |
+
return reinterpret_cast<const OptionalTensorRef*>(data_.data());
|
98 |
+
}
|
99 |
+
|
100 |
+
OptionalTensorRef& operator*() {
|
101 |
+
return *get();
|
102 |
+
}
|
103 |
+
const OptionalTensorRef& operator*() const {
|
104 |
+
return *get();
|
105 |
+
}
|
106 |
+
OptionalTensorRef* operator->() {
|
107 |
+
return get();
|
108 |
+
}
|
109 |
+
const OptionalTensorRef* operator->() const {
|
110 |
+
return get();
|
111 |
+
}
|
112 |
+
|
113 |
+
const Tensor& getTensor() const;
|
114 |
+
};
|
115 |
+
} // namespace internal
|
116 |
+
|
117 |
+
struct TORCH_API OperandInfo {
  using StrideVector = SmallVector<int64_t, 6>;
  OperandInfo() = default;
  C10_ALWAYS_INLINE explicit OperandInfo(c10::MaybeOwned<TensorBase>&& t) {
    if (t->defined()) {
      device = t->device();
      target_dtype = t->scalar_type();
      current_dtype = target_dtype;
    }
    tensor(std::move(t));
    validate();
  }

  C10_ALWAYS_INLINE OperandInfo(const OperandInfo&) = default;
  C10_ALWAYS_INLINE OperandInfo& operator=(const OperandInfo&) = default;
  C10_ALWAYS_INLINE OperandInfo(OperandInfo&&) noexcept = default;
  C10_ALWAYS_INLINE OperandInfo& operator=(OperandInfo&&) noexcept = default;
  C10_ALWAYS_INLINE ~OperandInfo() = default;

  /// The data pointer. This may be different from tensor->data_ptr() if the
  /// iterator is split.
  void* data = nullptr;

  /// Stride after broadcasting. The stride is in bytes, not number of
  /// elements.
  StrideVector stride_bytes;

  /// The desired device and type for the operand. For inputs, this specifies
  /// that the input should be converted to this type if necessary. For
  /// outputs, this specifies which type to allocate. target_dtype and device
  /// are initialized with the dtype and device of the tensor, but during type
  /// promotion target_dtype can become different from the tensor's dtype.
  /// Also, during type promotion, target_dtype and device can be set for an
  /// undefined tensor so that the tensor can be properly constructed later.
  c10::optional<Device> device = c10::nullopt;
  ScalarType target_dtype = ScalarType::Undefined;
  // Caches the dtype of the tensor, because scalar_type() is an expensive
  // operation. If the dtype of the tensor is changed (e.g. as a result of
  // type promotion or in allocate_outputs), this value should be changed too.
  ScalarType current_dtype = ScalarType::Undefined;

  bool is_device_defined() const {
    return device.has_value();
  }
  bool is_type_defined() const {
    return target_dtype != ScalarType::Undefined;
  }
  TensorOptions options() const {
    return TensorOptions(target_dtype).device(device);
  }

  bool is_output = false;

  bool will_resize = false;

  bool is_read_write = false;

  bool is_const = false;

  void validate() {
    TORCH_CHECK(
        !tensor_base_->defined() || tensor_base_->layout() == kStrided,
        "unsupported tensor layout: ",
        tensor_base_->layout());
  }

  /// The tensor operand. Note that the strides, data pointer, and
  /// other attributes may differ due to dimension reordering and
  /// coalescing.
  const Tensor& tensor() const {
    return tensor_storage_.getTensor();
  }
  const TensorBase& tensor_base() const {
    return *tensor_base_;
  }
  void tensor(c10::MaybeOwned<TensorBase>&& tensor);

  // Save the original tensor operand in cases when an output is modified
  // (e.g. if dtype is changed)
  const Tensor& original_tensor() const {
    return original_tensor_storage_.getTensor();
  }
  const TensorBase& original_tensor_base() const {
    return *original_tensor_base_;
  }

  // Set tensor to a new value, and store the old tensor value in
  // original_tensor. Should only ever be called once for the lifetime of an
  // operand.
  void exchange_tensor(c10::MaybeOwned<TensorBase>&& new_tensor);

  // Move original_tensor back into tensor; exchange_tensor must have been
  // called before.
  void restore_original_tensor();

 private:
  c10::MaybeOwned<TensorBase> tensor_base_;
  c10::MaybeOwned<TensorBase> original_tensor_base_ =
      c10::MaybeOwned<TensorBase>::owned(std::in_place);

  // We store TensorBase visibly in the header to allow inline access.
  // However, we sometimes need a genuine `const Tensor &` for the
  // TensorIterator API. So, we also store a non-owning `Tensor`
  // object in these `_storage_` variables.
  internal::OpaqueOptionalTensorRef tensor_storage_;
  internal::OpaqueOptionalTensorRef original_tensor_storage_;
};

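// Illustrative sketch (assumed behavior, not code from this header): for an
// `int + float` elementwise op, type promotion computes a common dtype of
// float. The int input's OperandInfo then looks like
//
//   target_dtype  == ScalarType::Float  // what the kernel should consume
//   current_dtype == ScalarType::Int    // what the tensor currently holds
//
// On CPU, a temporary float copy of the input can be swapped in via
// exchange_tensor() (stashing the original), after which current_dtype
// matches target_dtype; restore_original_tensor() undoes the swap.
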
struct SplitUntil32Bit;

enum class FastSetupType : uint8_t {
  NONE,
  CONTIGUOUS,
  CHANNELS_LAST,
  NON_OVERLAPPING_DENSE
};

class TensorIteratorConfig;
struct TensorIterator;

struct TORCH_API TensorIteratorBase : public impl::MetaBase {
  using DimMask = std::bitset<64>;
  using PtrVector = SmallVector<char*, 4>;
  using StrideVector = SmallVector<int64_t, 6>;

  TensorIteratorBase();
  void build(TensorIteratorConfig&);

  // The inner-loop function operates on the fastest moving dimension. It
  // implements element-wise operations in terms of 1-d strided tensors.
  //
  // Arguments:
  //   data: data pointers for each operand (length `ntensors`)
  //   strides: stride for each operand (length `ntensors`)
  //   size: size of inner loop
  //
  // The `size` often matches shape[0], but may be smaller due to
  // parallelization of the inner loop.
  using loop2d_t = c10::function_ref<
      void(char** data, const int64_t* strides, int64_t size0, int64_t size1)>;

  using loop_subiter_t = c10::function_ref<void(TensorIteratorBase& subiter)>;

  void foreach_reduced_elt(loop_subiter_t loop, bool parallelize = true);

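  // Illustrative sketch (not from this header): a 1-d loop conforming to the
  // contract above, adding two float operands into a float output. Operand 0
  // is the output; strides are in bytes, which is why the pointers are char*.
  //
  //   auto add_loop = [](char** data, const int64_t* strides, int64_t n) {
  //     for (int64_t i = 0; i < n; i++) {
  //       *reinterpret_cast<float*>(data[0] + i * strides[0]) =
  //           *reinterpret_cast<float*>(data[1] + i * strides[1]) +
  //           *reinterpret_cast<float*>(data[2] + i * strides[2]);
  //     }
  //   };
  //
  // Such a loop can be passed to for_each() / serial_for_each() below.
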
  int ndim() const {
    return static_cast<int>(shape_.size());
  }
  IntArrayRef shape() const {
    return shape_;
  }
  int64_t numel() const;
  int ntensors() const {
    return static_cast<int>(operands_.size());
  }
  int noutputs() const {
    return num_outputs_;
  }
  int ninputs() const {
    return ntensors() - noutputs();
  }
  IntArrayRef view_offsets() const {
    return view_offsets_;
  }

  /// Number of elements in the output operand. This is the same as numel()
  /// for operations that are not reductions.
  int64_t num_output_elements() const;

  /// Number of reduced dimensions in a reduction operation
  int num_reduce_dims() const;

  /// 1-dimensional iteration and no buffering or type conversion
  bool is_trivial_1d() const;
  /// Reducible to 1-dimensional and all operands are contiguous
  bool is_contiguous() const;
  bool is_dim_reduced(int dim) const;

  /// Accessors for each operand
  IntArrayRef strides(int64_t arg) const {
    return operands_[arg].stride_bytes;
  }
  void* data_ptr(int64_t arg) const;
  ScalarType dtype(int64_t arg = 0) const {
    return operands_[arg].current_dtype;
  }
  ScalarType common_dtype() const {
    TORCH_INTERNAL_ASSERT(
        common_dtype_ != ScalarType::Undefined,
        "Queried for invalid common dtype!");
    return common_dtype_;
  }
  ScalarType input_dtype(int64_t arg = 0) const {
    return operands_[num_outputs_ + arg].current_dtype;
  }
  Device device(int64_t arg = 0) const {
    return operands_[arg].device.value();
  }
  c10::DeviceType device_type(int64_t arg = 0) const {
    return device(arg).type();
  }
  int64_t element_size(int64_t arg) const {
    return static_cast<int64_t>(elementSize(dtype(arg)));
  }
  bool is_scalar(int64_t arg) const;
  bool is_cpu_scalar(int64_t arg) const;

  const TensorBase& tensor_base(int64_t arg) const {
    return operands_[arg].tensor_base();
  }
  const Tensor& tensor(int64_t arg) const {
    return operands_[arg].tensor();
  }

  const TensorBase& output_base(int64_t arg = 0) const {
    AT_ASSERT(arg < num_outputs_);
    return tensor_base(arg);
  }

  const Tensor& output(int64_t arg = 0) const {
    AT_ASSERT(arg < num_outputs_);
    return tensor(arg);
  }

  const TensorBase& input_base(int64_t arg = 0) const {
    AT_ASSERT(arg >= 0 && arg < ntensors() - num_outputs_);
    return tensor_base(num_outputs_ + arg);
  }
  const Tensor& input(int64_t arg = 0) const {
    AT_ASSERT(arg >= 0 && arg < ntensors() - num_outputs_);
    return tensor(num_outputs_ + arg);
  }

  // Copies from temporary outputs back to the original outputs
  // NOTE: only used on CPU
  void cast_outputs();

  /// Removes an operand from this iterator
  void remove_operand(int64_t arg);
  /// Shrinks an iterated dimension
  void narrow(int dim, int64_t start, int64_t size);
  /// Narrows every dim after and including `start_dim` to size one.
  void select_all_keeping_dim(int start_dim, IntArrayRef starts);
  /// Replaces the data pointer for the operand at index `arg`.
  /// The new pointer should have the same sizes, strides and dtype as the
  /// original.
  void unsafe_replace_operand(int64_t arg, void* data);

  /// Splits this TensorIterator into two iterators. Together they iterate
  /// over the entire operation. Used by `with_32bit_indexing()`.
  std::unique_ptr<TensorIterator> split(int dim);

  /// Returns the dimension with the largest extent: (size[dim]-1) * stride[dim]
  int get_dim_to_split() const;

  template <typename T>
  T scalar_value(int64_t arg) {
    auto& op = operands_[arg];
    return c10::fetch_and_cast<T>(op.tensor_base().scalar_type(), op.data);
  }

  /// Return the scalar value from original_tensor_base if it is defined. When
  /// common_dtype is Half, casting a scalar input to common_dtype might
  /// overflow. If the scalar is already given in the type of Half, then
  /// return the scalar value from tensor_base.
  template <typename T>
  T original_scalar_value(int64_t arg) {
    auto& original_tensor_base = operands_[arg].original_tensor_base();
    if (original_tensor_base.defined()) {
      TORCH_INTERNAL_ASSERT(
          original_tensor_base.scalar_type() != common_dtype());
      return c10::fetch_and_cast<T>(
          original_tensor_base.scalar_type(),
          original_tensor_base.const_data_ptr());
    } else {
      return scalar_value<T>(arg);
    }
  }

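  // Illustrative sketch (assumed usage, not from this header): GPU kernels
  // commonly read a CPU scalar operand once on the host rather than
  // iterating over it. The operand index and kernel launch are hypothetical.
  //
  //   if (iter.is_cpu_scalar(1)) {
  //     float alpha = iter.scalar_value<float>(1);  // fetch + cast on host
  //     iter.remove_operand(1);
  //     // ... launch a kernel specialized on `alpha` over the rest ...
  //   }
  //
  // original_scalar_value() is the variant to prefer when type promotion may
  // have replaced the operand with a lower-precision temporary (e.g. Half).
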
 private:
  template <typename loop1d_t>
  auto loop_2d_from_1d(const loop1d_t& loop) {
    return
        [loop, ntensor = ntensors()](
            char** base, const int64_t* strides, int64_t size0, int64_t size1) {
          PtrVector data(base, base + ntensor);
          const int64_t* outer_strides = &strides[ntensor];
          for (const auto i : c10::irange(size1)) {
            if (i > 0) {
              for (const auto arg : c10::irange(ntensor)) {
                data[arg] += outer_strides[arg];
              }
            }
            loop(data.data(), strides, size0);
          }
        };
  }

 public:
  template <
      typename loop1d_t,
      std::enable_if_t<
          std::is_convertible_v<
              loop1d_t,
              c10::function_ref<
                  void(char**, const int64_t* strides, int64_t size)>>,
          int> = 0>
  void for_each(loop1d_t loop, int64_t grain_size = at::internal::GRAIN_SIZE) {
    for_each(loop_2d_from_1d(loop), grain_size);
  }

  void for_each(loop2d_t loop, int64_t grain_size = at::internal::GRAIN_SIZE);

  void parallel_reduce(loop2d_t loop);

  template <
      typename loop1d_t,
      std::enable_if_t<
          std::is_convertible_v<
              loop1d_t,
              c10::function_ref<
                  void(char**, const int64_t* strides, int64_t size)>>,
          int> = 0>
  void serial_for_each(loop1d_t loop, Range range) {
    serial_for_each(loop_2d_from_1d(loop), range);
  }

  void serial_for_each(loop2d_t loop, Range range) const;

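  // Illustrative sketch (assumed usage): the `add_loop` lambda sketched
  // earlier can be run in parallel chunks of at least `grain_size` elements,
  // or serially over an explicit sub-range. `iter` is a hypothetical,
  // already-built TensorIterator.
  //
  //   iter.for_each(add_loop);                            // parallel
  //   iter.serial_for_each(add_loop, {0, iter.numel()});  // single thread
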
  /// Create a strides array for a Tensor with shape of this iterator. The
  /// parameter `element_size` specifies the size of Tensor's data type in
  /// bytes (e.g. `4` for `float`)
  StrideVector compatible_stride(int64_t element_size) const;

  /// Inverts the re-ordering done by reorder_dimensions. This can only be
  /// called *before* coalesce_dimensions() is called.
  DimVector invert_perm(IntArrayRef input) const;

  /// Reapplies the same re-ordering as done by reorder_dimensions. This can
  /// only be called *before* coalesce_dimensions() is called.
  DimVector apply_perm_and_mul(IntArrayRef input, int mul) const;

  /// Helper functions for CPU iteration
  StrideVector get_dim_strides(int dim) const;
  StrideVector get_strides() const;
  StrideVector get_inner_strides() const {
    return get_dim_strides(0);
  }
  PtrVector get_base_ptrs() const;

  // Helper functions for advanced stride manipulations (e.g. torch.flip)
  void _unsafe_set_arg_strides(const int64_t arg, IntArrayRef strides) {
    operands_[arg].stride_bytes = strides;
  }
  void _unsafe_set_arg_data(const int64_t arg, void* data) {
    operands_[arg].data = data;
  }

  /// True if the stride computation can use 32-bit arithmetic. Used by GPU
  /// kernels.
  bool can_use_32bit_indexing() const;

  /// An "iterable" object that recursively splits this iterator into
  /// sub-iterators that can use 32-bit indexing.
  SplitUntil32Bit with_32bit_indexing() const;

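  // Illustrative sketch (assumed usage): a GPU launcher that falls back to
  // recursive splitting when 64-bit indexing would otherwise be required.
  // The helper `launch_my_kernel` is hypothetical.
  //
  //   if (iter.can_use_32bit_indexing()) {
  //     launch_my_kernel(iter);
  //   } else {
  //     for (auto& sub_iter : iter.with_32bit_indexing()) {
  //       launch_my_kernel(sub_iter);
  //     }
  //   }
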
  /// If the kernel should accumulate into the output. Only relevant for CUDA
  /// reductions.
  bool should_accumulate() const {
    return accumulate_;
  }

  /// Whether this iterator produces the actual output,
  /// as opposed to something that will be accumulated further. Only relevant
  /// for CUDA reductions.
  bool is_final_output() const {
    return final_output_;
  }

  bool has_contiguous_first_dim() const {
    if (ndim() == 0) {
      return true;
    }

    int num_tensors = ntensors();
    for (const auto i : c10::irange(num_tensors)) {
      if (strides(i)[0] != element_size(i)) {
        return false;
      }
    }
    return true;
  }

  void set_output_raw_strided(
      int64_t output_idx,
      IntArrayRef sizes,
      IntArrayRef strides,
      TensorOptions options,
      DimnameList names) override;

#define TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, maybestatic)            \
  maybestatic void methodname(                                              \
      TensorBase&& out, const TensorBase& a, const TensorBase& b) = delete; \
  maybestatic void methodname(                                              \
      const TensorBase& out, TensorBase&& a, const TensorBase& b) = delete; \
  maybestatic void methodname(                                              \
      const TensorBase& out, const TensorBase& a, TensorBase&& b) = delete; \
  maybestatic void methodname(                                              \
      TensorBase&& out, TensorBase&& a, const TensorBase& b) = delete;      \
  maybestatic void methodname(                                              \
      TensorBase&& out, const TensorBase& a, TensorBase&& b) = delete;      \
  maybestatic void methodname(                                              \
      const TensorBase& out, TensorBase&& a, TensorBase&& b) = delete;      \
  maybestatic void methodname(                                              \
      TensorBase&& out, TensorBase&& a, TensorBase&& b) = delete;

#define TORCH_DISALLOW_TEMPORARIES(methodname) \
  TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, )

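  // Illustrative note (assumed rationale): the borrowing build_* functions
  // below store references to their arguments, so passing a temporary would
  // leave a dangling reference. The deleted rvalue overloads generated by
  // this macro turn that mistake into a compile error:
  //
  //   TensorBase tmp = make_tmp();                        // hypothetical
  //   iter.build_borrowing_binary_op(out, tmp, b);        // OK: lvalue
  //   // iter.build_borrowing_binary_op(out, make_tmp(), b);  // deleted
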
  void build_binary_float_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  void build_borrowing_binary_float_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  TORCH_DISALLOW_TEMPORARIES(build_borrowing_binary_float_op)
  void build_binary_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  void build_borrowing_binary_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  TORCH_DISALLOW_TEMPORARIES(build_borrowing_binary_op)
  void build_unary_float_op(const TensorBase& out, const TensorBase& a);
  void build_borrowing_unary_float_op(
      const TensorBase& out,
      const TensorBase& a);
  TORCH_DISALLOW_TEMPORARIES(build_borrowing_unary_float_op)
  void build_unary_op(const TensorBase& out, const TensorBase& a);
  // Odd special case needed for pow. Has to borrow the output because
  // it's a structured kernel, but the argument is potentially a copy.
  void build_output_borrowing_argument_owning_unary_op(
      const TensorBase& out,
      const TensorBase& a);
  void build_borrowing_unary_op(const TensorBase& out, const TensorBase& a);
  TORCH_DISALLOW_TEMPORARIES(build_borrowing_unary_op)
  void build_borrowing_unary_force_boolean_op(
      const TensorBase& out,
      const TensorBase& a);
  TORCH_DISALLOW_TEMPORARIES(build_borrowing_unary_force_boolean_op)
  void build_comparison_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  void build_borrowing_comparison_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  TORCH_DISALLOW_TEMPORARIES(build_borrowing_comparison_op)
  // Another special case: we need to own the second argument for comparison
  // ops.
  void build_borrowing_except_last_argument_comparison_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  void build_ternary_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b,
      const TensorBase& c);

#undef TORCH_DISALLOW_TEMPORARIES
 protected:
  // Mutable reference as it moves tensors out of TensorIteratorConfig
  void populate_operands(TensorIteratorConfig&);
  void mark_outputs();
  void mark_resize_outputs(const TensorIteratorConfig&);
  void compute_mem_overlaps(const TensorIteratorConfig&);
  void compute_shape(const TensorIteratorConfig&);
  void compute_strides(const TensorIteratorConfig&);
  void reorder_dimensions();
  void permute_dimensions(IntArrayRef perm);
  void compute_types(const TensorIteratorConfig&);
  ScalarType compute_common_dtype();
  void allocate_or_resize_outputs();
  bool fast_set_up(const TensorIteratorConfig&);
  FastSetupType compute_fast_setup_type(const TensorIteratorConfig&);
  void compute_names(const TensorIteratorConfig&);
  void propagate_names_to_outputs();
  void coalesce_dimensions();

 protected:
  /// Records the "computation" shape of the output tensor. The computation
  /// shape is different from the regular shape in a few ways:
  ///
  ///   - The shape may be permuted (via permute_dimensions) so that we
  ///     process the dimensions in the most computationally efficient order
  ///     (rather than the logical order given to us by the users.)
  ///   - The shape may have adjacent dimensions collapsed (via
  ///     coalesce_dimensions) so that we minimize the number of
  ///     dimensions we have to explicitly iterate over. For example,
  ///     a pointwise operation on a contiguous tensor "computationally"
  ///     consists of only a single dimension.
  ///
  /// In other words, the computation shape is the output shape as it
  /// actually matters for implementing the kernel, but not necessarily the
  /// output shape that the user will see in the end.
  ///
  /// The lifecycle of mutations to shape_ in TensorIterator:
  ///   - declare_static_shape() sets an initial shape explicitly
  ///     provided by the user; otherwise
  ///   - compute_shape() computes the true (non-computational) shape
  ///     specified by the user.
  ///   - reorder_dimensions() reorders dimensions to improve coalescing.
  ///   - coalesce_dimensions() then coalesces adjacent dimensions when
  ///     possible.
  ///
  /// The shape may also be further modified if we create sub-TensorIterators,
  /// e.g., via narrow or select_all_keeping_dim.
  DimVector shape_;

  /// Temporarily records the permutation computed by reorder_dimensions.
  /// This permutation maps the computation output dimension (dim) to
  /// the original true output dimension (perm_[dim]). It is used by
  /// invert_perm to undo the permutation. After coalesce_dimensions is
  /// called, the permutation is no longer valid (as, in general, there
  /// is no permutation that maps computation dimensions to
  /// output dimensions); methods that manipulate perm_ are obligated
  /// to test that !has_coalesced_dimensions_.
  DimVector perm_;

  /// Has coalesce_dimensions() (or any moral equivalent, e.g., fast_build())
  /// been called? This is SOLELY used to check validity of perm_.
  bool has_coalesced_dimensions_ = false;

  /// Whether iteration must be fixed. This disables dimension permuting and
  /// also changes how for_each divides work among threads.
  bool enforce_linear_iteration_ = false;

  /// The index offsets into the original tensors for each dimension.
  /// This is only non-zero when you narrow() a TensorIterator (e.g.,
  /// when you make sub-TensorIterators).
  DimVector view_offsets_;

  /// The computed names of the output tensor. Computed by compute_names()
  NameVector names_;

  /// The operands of the TensorIterator: both the inputs and outputs. The
  /// outputs MUST come first in the operands_ list. There is always an
  /// operand for each output of the TensorIterator, even if TensorIterator
  /// will ultimately be responsible for allocating the output; in those
  /// cases, tensor is simply undefined (and will be populated later
  /// during build()).
  ///
  /// This list is initially populated prior to build(), but build() mutates
  /// OperandInfo to populate more information.
  SmallVector<OperandInfo, 4> operands_;

  /// Number of outputs in operands_ (the length of the outputs prefix
  /// in operands_).
  int num_outputs_ = 0;

  /// Whether or not all operands have the same shape and are 1d+. Having all
  /// the same shape affects whether or not the iterator is eligible for fast
  /// setup.
  bool all_ops_same_shape_ = false;
  /// Whether or not all operands are 0d; this affects type promotion.
  bool all_ops_are_scalars_ = false;

  /// The "computation" dtype of TensorIterator, specifying the dtype in
  /// which the internal computation is done. Typically, this matches the
  /// dtype of the output tensors, but not always!
  ScalarType common_dtype_ = ScalarType::Undefined;

  /// This is currently defined as kCPU, or the device of the first non-CPU
  /// tensor argument. See TensorIteratorBase::compute_types for details.
  Device common_device_ = kCPU;

  /// Set by split(), see should_accumulate() and is_final_output()
  bool accumulate_ = false;
  bool final_output_ = true;

  // From TensorIteratorConfig
  bool is_reduction_ = false;

  /// Set by populate_operands(), says if we're handling meta tensors
  bool is_meta_ = false;
};

struct TORCH_API TensorIterator final : public TensorIteratorBase {
  TensorIterator() : TensorIteratorBase() {}
  // Slicing is OK; TensorIterator is guaranteed NOT to have any fields.
  TensorIterator(const TensorIteratorBase& iter) : TensorIteratorBase(iter) {}

#define TORCH_DISALLOW_TEMPORARIES(methodname) \
  TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, static)

  static TensorIterator binary_float_op(
      TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  static TensorIterator binary_op(
      TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  static TensorIterator borrowing_binary_op(
      const TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  TORCH_DISALLOW_TEMPORARIES(borrowing_binary_op)
  static TensorIterator comparison_op(
      TensorBase& out,
      const TensorBase& a,
      const TensorBase& b);
  static TensorIterator unary_op(TensorBase& out, const TensorBase& a);
  static TensorIterator unary_float_op(TensorBase& out, const TensorBase& a);
  static TensorIterator nullary_op(TensorBase& out);
  static TensorIterator borrowing_nullary_op(const TensorBase& out);
  static TensorIterator borrowing_nullary_op(TensorBase&& out) = delete;
  static TensorIterator reduce_op(TensorBase& out, const TensorBase& a);
  static TensorIterator reduce_op(
      TensorBase& out1,
      TensorBase& out2,
      const TensorBase& a);
#undef TORCH_DISALLOW_TEMPORARIES
#undef TORCH_DISALLOW_TEMPORARIES_IMPL

  const Tensor& maybe_get_output(int64_t output_idx) override;
  void set_output_raw_strided(
      int64_t output_idx,
      IntArrayRef sizes,
      IntArrayRef strides,
      TensorOptions options,
      DimnameList names) override;
};

class TORCH_API TensorIteratorConfig final {
 public:
  friend struct TensorIteratorBase;
  friend struct TensorIterator;

  TensorIteratorConfig() = default;

  C10_DISABLE_COPY_AND_ASSIGN(TensorIteratorConfig);

  /// Construction
  // Stores input/output Tensors without incrementing the reference count.
  // Important: the outputs have to be added before the inputs.
  TensorIteratorConfig& add_output(const TensorBase& output) {
    return add_borrowed_output(output);
  }
  TensorIteratorConfig& add_input(const TensorBase& input) {
    return add_borrowed_input(input);
  }
  TensorIteratorConfig& add_const_input(const TensorBase& input) {
    return add_borrowed_const_input(input);
  }

  // Borrowing from temporaries is unlikely to go well.
  TensorIteratorConfig& add_output(TensorBase&& output) = delete;
  TensorIteratorConfig& add_input(TensorBase&& input) = delete;
  TensorIteratorConfig& add_const_input(TensorBase&& input) = delete;

  // Stores input/output Tensors while incrementing the reference count.
  // Note that add_{in,out}put are nearly always what you
  // want, and the exception (adding an unnamed temporary) won't
  // compile.
  TensorIteratorConfig& add_owned_output(const TensorBase& output);
  TensorIteratorConfig& add_owned_input(const TensorBase& input);
  TensorIteratorConfig& add_owned_const_input(const TensorBase& input);

  // Advanced API: stores input/output Tensors without incrementing
  // the reference count. The caller must ensure that these Tensors
  // live at least as long as this TensorIteratorConfig and any
  // TensorIteratorBase built from this TensorIteratorConfig.
  // Important: the outputs have to be added before the inputs.
  TensorIteratorConfig& add_borrowed_output(const TensorBase& output);
  TensorIteratorConfig& add_borrowed_input(const TensorBase& input);
  TensorIteratorConfig& add_borrowed_const_input(const TensorBase& input);

  // Borrowing from temporaries is unlikely to go well.
  TensorIteratorConfig& add_borrowed_output(TensorBase&& output) = delete;
  TensorIteratorConfig& add_borrowed_input(TensorBase&& input) = delete;
  TensorIteratorConfig& add_borrowed_const_input(TensorBase&& input) = delete;

  // Sets the check_mem_overlap_ flag, which is true by default.
  // If true, inputs are checked for partial overlap with the outputs and
  // outputs are checked for internal overlap (e.g. broadcasted views). An
  // error is raised if unacceptable overlap is detected.
  // If you're migrating an existing operator to using TensorIterator, please
  // consider if the previous implementation checked memory overlap. If it
  // did not, and if the operator is idempotent (for example, Tensor.fill_(0)),
  // then checking memory overlap is BC-breaking. Please don't check memory
  // overlap in that case.
  TensorIteratorConfig& set_check_mem_overlap(bool check_mem_overlap) {
    check_mem_overlap_ = check_mem_overlap;
    return *this;
  }

  // Sets the check_all_same_dtype_ flag, which is true by default.
  // If true, checks that all inputs and defined outputs have the same dtype.
  // Setting either promote_inputs_to_common_dtype_
  // or cast_common_dtype_to_outputs_ to true will set
  // check_all_same_dtype_ to false.
  TensorIteratorConfig& check_all_same_dtype(const bool _check_all_same_dtype) {
    check_all_same_dtype_ = _check_all_same_dtype;
    return *this;
  }

  // Sets the check_all_same_device_ flag, which is true by default.
  // If true, all operands must be on the same device, with the possible
  // exception of CPU scalars, which can be passed to some CUDA kernels
  // as kernel arguments.
  TensorIteratorConfig& check_all_same_device(
      const bool _check_all_same_device) {
    check_all_same_device_ = _check_all_same_device;
    return *this;
  }

  // Sets the enforce_safe_casting_to_output_ flag, which is false by default.
  // If true, the iterator's "common dtype" must be computable
  // (see the [Common Dtype Computation] note) and
  // canCast(common dtype, output dtype) must be true for all outputs.
  TensorIteratorConfig& enforce_safe_casting_to_output(
      const bool _enforce_safe_casting_to_output) {
    enforce_safe_casting_to_output_ = _enforce_safe_casting_to_output;
    return *this;
  }

  // Sets the enforce_linear_iteration_ flag, which is false by default.
  // If true, iteration goes in the same order as a C-contiguous tensor
  // is laid out in memory, i.e. the last dimension iterates fastest.
  //
  // This iteration order can be less efficient and may even prevent
  // vectorization. So only use it if the correctness of your kernel depends
  // on it.
  TensorIteratorConfig& enforce_linear_iteration(
      const bool _enforce_linear_iteration = true) {
    enforce_linear_iteration_ = _enforce_linear_iteration;
    return *this;
  }

  // Sets the promote_inputs_to_common_dtype_ flag, which is false by default.
  // If true, the iterator's "common dtype" is always computed (see the
  // [Common Dtype Computation] note) and, on the CPU, temporary copies of
  // the inputs in the common dtype are passed as the actual inputs to
  // the operation.
  // Setting this flag to true sets check_all_same_dtype_ to false.
  TensorIteratorConfig& promote_inputs_to_common_dtype(
      const bool _promote_inputs_to_common_dtype) {
    promote_inputs_to_common_dtype_ = _promote_inputs_to_common_dtype;
    if (_promote_inputs_to_common_dtype) {
      check_all_same_dtype_ = false;
    }
    return *this;
  }

  // Sets the promote_integer_inputs_to_float_ flag, which is false by
  // default. If set to true, promote_inputs_to_common_dtype_ must also be
  // true. If true and the iterator's "common dtype" is an integral type
  // (including bool), it is changed to the default float scalar type.
  TensorIteratorConfig& promote_integer_inputs_to_float(
      const bool _promote_integer_inputs_to_float) {
    promote_integer_inputs_to_float_ = _promote_integer_inputs_to_float;
    TORCH_INTERNAL_ASSERT(
        !promote_integer_inputs_to_float_ || promote_inputs_to_common_dtype_);
    return *this;
  }

  TensorIteratorConfig& is_reduction(const bool _is_reduction) {
    is_reduction_ = _is_reduction;
    return *this;
  }

  TensorIteratorConfig& allow_cpu_scalars(const bool _allow_cpu_scalars) {
    allow_cpu_scalars_ = _allow_cpu_scalars;
    return *this;
  }

  // Sets the cast_common_dtype_to_outputs_ flag, which is false by default.
  // If true, the iterator's "common dtype" must be computable
  // (see the [Common Dtype Computation] note) and, on the CPU, temporary
  // copies of the outputs are passed as the actual output to the operation.
  // These temporaries are then copied to the original outputs after
  // the operation is performed (see cast_outputs()).
  // Setting this flag to true sets check_all_same_dtype_ to false.
  TensorIteratorConfig& cast_common_dtype_to_outputs(
      const bool _cast_common_dtype_to_outputs) {
    cast_common_dtype_to_outputs_ = _cast_common_dtype_to_outputs;
    if (_cast_common_dtype_to_outputs) {
      check_all_same_dtype_ = false;
    }
    return *this;
  }

  TensorIteratorConfig& resize_outputs(bool resize_outputs) {
    resize_outputs_ = resize_outputs;
    return *this;
  }

  // Bypass output dtype/device computation and fix the dtype/device as
  // specified here.
  TensorIteratorConfig& declare_static_dtype_and_device(
      ScalarType dtype,
      Device device);
  TensorIteratorConfig& declare_static_dtype(ScalarType dtype);
  TensorIteratorConfig& declare_static_device(Device device);
  TensorIteratorConfig& declare_static_shape(IntArrayRef shape);
  TensorIteratorConfig& declare_static_shape(
      IntArrayRef shape,
      IntArrayRef squash_dims);

  // It would be better if this was && qualified, but this would be at the
  // cost of a lot of boilerplate above.
  TensorIterator build() {
    TensorIterator iter;
    iter.build(*this);
    return iter;
  }
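  // Illustrative sketch (assumed usage): the typical way an operator builds
  // and drives an iterator. `out`, `a`, and `b` are hypothetical tensors.
  //
  //   auto iter = TensorIteratorConfig()
  //       .add_output(out)  // outputs must be added before inputs
  //       .add_const_input(a)
  //       .add_const_input(b)
  //       .build();
  //   iter.for_each(...);   // then runs the elementwise loop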

 private:
  bool is_tensor_const(size_t idx);

  SmallVector<c10::MaybeOwned<TensorBase>, 4> tensors_;
  int num_outputs_ = 0;
  int num_inputs_ = 0;

  c10::optional<DimVector> static_shape_ = c10::nullopt;
  c10::optional<ScalarType> static_dtype_ = c10::nullopt;
  c10::optional<Device> static_device_ = c10::nullopt;
  bool check_mem_overlap_ = true;
  bool allow_cpu_scalars_ = false;
  bool is_reduction_ = false;
  bool resize_outputs_ = true;
  bool check_all_same_dtype_ = true;
  bool check_all_same_device_ = true;
  bool enforce_safe_casting_to_output_ = false;
  bool enforce_linear_iteration_ = false;
  bool promote_inputs_to_common_dtype_ = false;
  bool promote_integer_inputs_to_float_ = false;
  bool cast_common_dtype_to_outputs_ = false;

  SmallVector<size_t, 4> const_tensor_indices_;
};

/// A container-like struct that acts as if it contains splits of a
/// TensorIterator that can use 32-bit indexing. Taken together the splits
/// cover the original TensorIterator.
struct TORCH_API SplitUntil32Bit {
  struct TORCH_API iterator {
    iterator() = default;
    iterator(const TensorIteratorBase& iter);
    iterator(iterator&&) = default;

    // Guaranteed to be a TensorIterator proper!
    TensorIterator& operator*() const;
    iterator& operator++();
    bool operator==(const iterator& other) const {
      // two iterators are equal if they are the same object or they're both
      // empty
      return this == &other || (vec.empty() && other.vec.empty());
    }
    // needed for C++11 range-based for loop
    bool operator!=(const iterator& other) const {
      return !(*this == other);
    }

    /// stack of TensorIterators to be split
    std::vector<std::unique_ptr<TensorIterator>> vec;
  };

  SplitUntil32Bit(const TensorIteratorBase& iter) : iter(iter) {}

  iterator begin() const;
  iterator end() const;

 private:
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const TensorIteratorBase& iter;
};

} // namespace at