applied-ai-018 committed on
Commit 2b3dce2 · verified · 1 parent: c139e06

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ATen.h +37 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Backend.h +2 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Backtrace.h +2 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h +33 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions.h +29 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions_inl.h +569 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions_inl.h +608 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CollapseDims.h +94 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h +533 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h +29 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h +321 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions_inl.h +500 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h +25 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h +21 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Device.h +2 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Dimname.h +1 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Dispatch.h +808 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/EmptyTensor.h +160 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ExpandBase.h +30 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Formatting.h +1 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/FuncTorchTLS.h +46 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/InferSize.h +87 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h +15 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Layout.h +2 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/LegacyBatchedFallback.h +25 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapMode.h +26 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/MatrixRef.h +109 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/MemoryOverlap.h +42 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions.h +29 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions_inl.h +324 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h +215 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/NativeMetaFunctions.h +1281 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h +281 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h +59 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h +186 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/PTThreadPool.h +17 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/PadNd.h +28 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Parallel.h +160 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ParallelFuture.h +13 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h +52 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h +0 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ScalarOps.h +71 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/SequenceNumber.h +13 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/SmallVector.h +2 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorImpl.h +182 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorUtils.h +415 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/SparseTensorImpl.h +400 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/StorageUtils.h +49 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h +144 -0
.gitattributes CHANGED
@@ -204,3 +204,4 @@ env-llmeval/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/torch/lib/libc10.so filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_python.so filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/lib/python3.10/site-packages/torch/lib/libcusparseLt-f8b4a9fb.so.0 filter=lfs diff=lfs merge=lfs -text
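Note: each of these attribute lines routes one large binary through Git LFS. Such lines are typically generated rather than hand-written; for example (illustrative command, not part of this commit):

    git lfs track "env-llmeval/lib/python3.10/site-packages/torch/lib/libcusparseLt-f8b4a9fb.so.0"

appends exactly this kind of "filter=lfs diff=lfs merge=lfs -text" line to .gitattributes.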
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ATen.h ADDED
@@ -0,0 +1,37 @@
+ #pragma once
+
+ #if !defined(_MSC_VER) && __cplusplus < 201703L
+ #error C++17 or later compatible compiler is required to use ATen.
+ #endif
+
+ #include <ATen/Context.h>
+ #include <ATen/Device.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/DimVector.h>
+ #include <ATen/Dispatch.h>
+ #include <ATen/Formatting.h>
+ #include <ATen/Functions.h>
+ #include <ATen/NamedTensor.h>
+ #include <ATen/ScalarOps.h>
+ #include <ATen/Tensor.h>
+ #include <ATen/TensorGeometry.h>
+ #include <ATen/TensorIndexing.h>
+ #include <ATen/TensorOperators.h>
+ #include <ATen/Version.h>
+ #include <ATen/core/ATenGeneral.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Scalar.h>
+ #include <ATen/core/UnsafeFromTH.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <c10/core/Allocator.h>
+ #include <c10/core/InferenceMode.h>
+ #include <c10/core/Layout.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Exception.h>
+
+ // TODO: try to remove this
+ // There is some back story, see https://github.com/pytorch/pytorch/issues/48684
+ #include <ATen/NativeFunctions.h>
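Note: ATen/ATen.h is the umbrella header for the ATen C++ API, so a downstream consumer only ever needs this one include. A minimal usage sketch (illustrative only, not part of this commit; assumes a linked libtorch build):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      // Factory functions come in via <ATen/Functions.h>.
      at::Tensor a = at::ones({2, 2}, at::kFloat);
      at::Tensor b = at::randn({2, 2});
      // operator+ comes from <ATen/TensorOperators.h>,
      // operator<< for tensors from <ATen/Formatting.h>.
      at::Tensor c = a + b;
      std::cout << c << std::endl;
      return 0;
    }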
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Backend.h ADDED
@@ -0,0 +1,2 @@
+ #pragma once
+ #include <c10/core/Backend.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Backtrace.h ADDED
@@ -0,0 +1,2 @@
+ #pragma once
+ #include <ATen/core/Backtrace.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h ADDED
@@ -0,0 +1,33 @@
+ #pragma once
+
+ #include <c10/core/Allocator.h>
+ #include <c10/util/Exception.h>
+
+ // This file creates a fake allocator that just throws exceptions if
+ // it is actually used.
+
+ // state passed to the allocator is the std::function<void(void*)> called
+ // when the blob is released by ATen
+
+ namespace at {
+
+ static cpu_fixed_malloc(void*, ptrdiff_t) {
+   AT_ERROR("attempting to resize a tensor view of an external blob");
+ }
+
+ static cpu_fixed_realloc(void*, void*, ptrdiff_t) {
+   AT_ERROR("attempting to resize a tensor view of an external blob");
+ }
+
+ static cpu_fixed_free(void* state, void* allocation) {
+   auto on_release = static_cast<std::function<void(void*)>*>(state);
+   (*on_release)(allocation);
+   delete on_release;
+ }
+
+ static Allocator CPU_fixed_allocator = {
+     cpu_fixed_malloc,
+     cpu_fixed_realloc,
+     cpu_fixed_free};
+
+ } // namespace at
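Note: the "state" pattern this header describes is a heap-allocated std::function<void(void*)> that is invoked once (and then deleted) when ATen releases the blob. A standalone, runnable sketch of that same ownership dance in plain C++, with hypothetical names (not part of this commit):

    #include <cstdio>
    #include <cstdlib>
    #include <functional>

    // Mirrors cpu_fixed_free above: 'state' owns the release callback.
    static void fixed_free(void* state, void* allocation) {
      auto* on_release = static_cast<std::function<void(void*)>*>(state);
      (*on_release)(allocation);  // hand the blob back to its real owner
      delete on_release;          // the callback itself is heap-owned state
    }

    int main() {
      void* blob = std::malloc(64);  // pretend this is an external blob
      // The state is created once when the view over the blob is constructed...
      auto* state = new std::function<void(void*)>(
          [](void* p) { std::puts("releasing external blob"); std::free(p); });
      // ...and consumed exactly once when the view is destroyed.
      fixed_free(state, blob);
      return 0;
    }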
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions.h ADDED
@@ -0,0 +1,29 @@
+ #include <ATen/core/TensorBody.h>
+
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
+ //
+ // Note [Avoiding Include Cycles In Static Dispatch]
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+ //
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+ //   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+ //   directly inlined into TensorBody.h.
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+ //   which include functions that have defaultable optional<Tensor> arguments.
+ //   That requires knowing the full Tensor class definition.
+ //
+ // We break the cycle by doing the following:
+ // - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+ // - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+ // - CPUFunctions_inl.h includes everything else
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+ //   and then it includes CPUFunctions_inl.h.
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+ // - This also means that in the static dispatch build, CPUFunctions.h only needs to
+ //   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+ #include <ATen/CPUFunctions_inl.h>
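Note: the cycle-breaking recipe in that comment is generic. A toy version of the same header split, with hypothetical file names standing in for the real ones (nothing below is from the commit):

    // --- tensor_body.h (toy stand-in for TensorBody.h) ---
    #pragma once
    struct Tensor { int id; };       // the full class definition comes first
    #ifdef STATIC_DISPATCH
    #include "cpu_functions_inl.h"   // only now is it safe to inline the fastpath
    #endif

    // --- cpu_functions_inl.h (toy stand-in for CPUFunctions_inl.h) ---
    #pragma once
    // Assumes Tensor is already fully defined by whoever includes this.
    inline Tensor cpu_add(Tensor a, Tensor b) { return Tensor{a.id + b.id}; }

    // --- cpu_functions.h (toy stand-in for CPUFunctions.h) ---
    #pragma once
    #include "tensor_body.h"         // dummy header: define Tensor, then...
    #include "cpu_functions_inl.h"   // ...bring in the fastpath declarations

Either include path now sees Tensor before the inlined functions, so the cycle never forms.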
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions_inl.h ADDED
@@ -0,0 +1,569 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+ #error This change adds a dependency on all pytorch operators, meaning the \
+ file will need to be re-compiled every time an operator is changed or added. \
+ Consider including a specific operator from \
+ <ATen/ops/{my_operator}_cpu_dispatch.h>. \
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+ #endif
+
+ #include <ATen/ops/_adaptive_avg_pool2d_cpu_dispatch.h>
+ #include <ATen/ops/_adaptive_avg_pool2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/_adaptive_avg_pool3d_cpu_dispatch.h>
+ #include <ATen/ops/_adaptive_avg_pool3d_backward_cpu_dispatch.h>
+ #include <ATen/ops/_add_relu_cpu_dispatch.h>
+ #include <ATen/ops/_addmm_activation_cpu_dispatch.h>
+ #include <ATen/ops/_aminmax_cpu_dispatch.h>
+ #include <ATen/ops/_assert_async_cpu_dispatch.h>
+ #include <ATen/ops/_cdist_backward_cpu_dispatch.h>
+ #include <ATen/ops/_cdist_forward_cpu_dispatch.h>
+ #include <ATen/ops/_cholesky_solve_helper_cpu_dispatch.h>
+ #include <ATen/ops/_compute_linear_combination_cpu_dispatch.h>
+ #include <ATen/ops/_convert_indices_from_coo_to_csr_cpu_dispatch.h>
+ #include <ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h>
+ #include <ATen/ops/_ctc_loss_cpu_dispatch.h>
+ #include <ATen/ops/_ctc_loss_backward_cpu_dispatch.h>
+ #include <ATen/ops/_cummax_helper_cpu_dispatch.h>
+ #include <ATen/ops/_cummin_helper_cpu_dispatch.h>
+ #include <ATen/ops/_dirichlet_grad_cpu_dispatch.h>
+ #include <ATen/ops/_efficientzerotensor_cpu_dispatch.h>
+ #include <ATen/ops/_embedding_bag_cpu_dispatch.h>
+ #include <ATen/ops/_embedding_bag_dense_backward_cpu_dispatch.h>
+ #include <ATen/ops/_embedding_bag_forward_only_cpu_dispatch.h>
+ #include <ATen/ops/_embedding_bag_per_sample_weights_backward_cpu_dispatch.h>
+ #include <ATen/ops/_empty_affine_quantized_cpu_dispatch.h>
+ #include <ATen/ops/_empty_per_channel_affine_quantized_cpu_dispatch.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_cpu_dispatch.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_cpu_dispatch.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_cpu_dispatch.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cpu_dispatch.h>
+ #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cpu_dispatch.h>
+ #include <ATen/ops/_fft_c2c_cpu_dispatch.h>
+ #include <ATen/ops/_fft_c2r_cpu_dispatch.h>
+ #include <ATen/ops/_fft_r2c_cpu_dispatch.h>
+ #include <ATen/ops/_foobar_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_abs_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_acos_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_add_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_addcdiv_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_addcmul_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_asin_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_atan_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_ceil_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_clamp_max_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_clamp_min_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_copy_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_cos_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_cosh_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_div_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_erf_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_erfc_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_exp_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_expm1_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_floor_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_frac_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_lerp_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_lgamma_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_log_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_log10_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_log1p_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_log2_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_maximum_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_minimum_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_mul_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_neg_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_norm_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_pow_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_reciprocal_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_round_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_sigmoid_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_sign_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_sin_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_sinh_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_sqrt_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_sub_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_tan_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_tanh_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_trunc_cpu_dispatch.h>
+ #include <ATen/ops/_foreach_zero_cpu_dispatch.h>
+ #include <ATen/ops/_functional_assert_async_cpu_dispatch.h>
+ #include <ATen/ops/_fused_moving_avg_obs_fq_helper_cpu_dispatch.h>
+ #include <ATen/ops/_fused_sdp_choice_cpu_dispatch.h>
+ #include <ATen/ops/_histogramdd_bin_edges_cpu_dispatch.h>
+ #include <ATen/ops/_histogramdd_from_bin_cts_cpu_dispatch.h>
+ #include <ATen/ops/_histogramdd_from_bin_tensors_cpu_dispatch.h>
+ #include <ATen/ops/_index_put_impl_cpu_dispatch.h>
+ #include <ATen/ops/_linalg_det_cpu_dispatch.h>
+ #include <ATen/ops/_linalg_eigh_cpu_dispatch.h>
+ #include <ATen/ops/_linalg_slogdet_cpu_dispatch.h>
+ #include <ATen/ops/_linalg_solve_ex_cpu_dispatch.h>
+ #include <ATen/ops/_linalg_svd_cpu_dispatch.h>
+ #include <ATen/ops/_local_scalar_dense_cpu_dispatch.h>
+ #include <ATen/ops/_log_softmax_cpu_dispatch.h>
+ #include <ATen/ops/_log_softmax_backward_data_cpu_dispatch.h>
+ #include <ATen/ops/_logcumsumexp_cpu_dispatch.h>
+ #include <ATen/ops/_make_dep_token_cpu_dispatch.h>
+ #include <ATen/ops/_make_per_channel_quantized_tensor_cpu_dispatch.h>
+ #include <ATen/ops/_make_per_tensor_quantized_tensor_cpu_dispatch.h>
+ #include <ATen/ops/_masked_softmax_cpu_dispatch.h>
+ #include <ATen/ops/_masked_softmax_backward_cpu_dispatch.h>
+ #include <ATen/ops/_native_batch_norm_legit_cpu_dispatch.h>
+ #include <ATen/ops/_native_multi_head_attention_cpu_dispatch.h>
+ #include <ATen/ops/_nested_from_padded_cpu_dispatch.h>
+ #include <ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h>
+ #include <ATen/ops/_nested_tensor_from_mask_left_aligned_cpu_dispatch.h>
+ #include <ATen/ops/_nested_view_from_buffer_cpu_dispatch.h>
+ #include <ATen/ops/_pdist_backward_cpu_dispatch.h>
+ #include <ATen/ops/_pdist_forward_cpu_dispatch.h>
+ #include <ATen/ops/_prelu_kernel_cpu_dispatch.h>
+ #include <ATen/ops/_prelu_kernel_backward_cpu_dispatch.h>
+ #include <ATen/ops/_reshape_alias_cpu_dispatch.h>
+ #include <ATen/ops/_sample_dirichlet_cpu_dispatch.h>
+ #include <ATen/ops/_scaled_dot_product_flash_attention_cpu_dispatch.h>
+ #include <ATen/ops/_scaled_dot_product_flash_attention_backward_cpu_dispatch.h>
+ #include <ATen/ops/_segment_reduce_backward_cpu_dispatch.h>
+ #include <ATen/ops/_slow_conv2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/_slow_conv2d_forward_cpu_dispatch.h>
+ #include <ATen/ops/_softmax_cpu_dispatch.h>
+ #include <ATen/ops/_softmax_backward_data_cpu_dispatch.h>
+ #include <ATen/ops/_spdiags_cpu_dispatch.h>
+ #include <ATen/ops/_stack_cpu_dispatch.h>
+ #include <ATen/ops/_standard_gamma_cpu_dispatch.h>
+ #include <ATen/ops/_standard_gamma_grad_cpu_dispatch.h>
+ #include <ATen/ops/_test_functorch_fallback_cpu_dispatch.h>
+ #include <ATen/ops/_test_optional_filled_intlist_cpu_dispatch.h>
+ #include <ATen/ops/_test_optional_floatlist_cpu_dispatch.h>
+ #include <ATen/ops/_test_optional_intlist_cpu_dispatch.h>
+ #include <ATen/ops/_to_sparse_cpu_dispatch.h>
+ #include <ATen/ops/_to_sparse_bsc_cpu_dispatch.h>
+ #include <ATen/ops/_to_sparse_bsr_cpu_dispatch.h>
+ #include <ATen/ops/_to_sparse_csc_cpu_dispatch.h>
+ #include <ATen/ops/_to_sparse_csr_cpu_dispatch.h>
+ #include <ATen/ops/_transform_bias_rescale_qkv_cpu_dispatch.h>
+ #include <ATen/ops/_transformer_encoder_layer_fwd_cpu_dispatch.h>
+ #include <ATen/ops/_unique_cpu_dispatch.h>
+ #include <ATen/ops/_unique2_cpu_dispatch.h>
+ #include <ATen/ops/_upsample_bicubic2d_aa_cpu_dispatch.h>
+ #include <ATen/ops/_upsample_bicubic2d_aa_backward_cpu_dispatch.h>
+ #include <ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h>
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward_cpu_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact1d_cpu_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact1d_backward_cpu_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact2d_cpu_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact3d_backward_cpu_dispatch.h>
+ #include <ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h>
+ #include <ATen/ops/_weight_norm_interface_cpu_dispatch.h>
+ #include <ATen/ops/_weight_norm_interface_backward_cpu_dispatch.h>
+ #include <ATen/ops/abs_cpu_dispatch.h>
+ #include <ATen/ops/acos_cpu_dispatch.h>
+ #include <ATen/ops/acosh_cpu_dispatch.h>
+ #include <ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h>
+ #include <ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h>
+ #include <ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool2d_cpu_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool3d_cpu_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool3d_backward_cpu_dispatch.h>
+ #include <ATen/ops/add_cpu_dispatch.h>
+ #include <ATen/ops/addbmm_cpu_dispatch.h>
+ #include <ATen/ops/addcdiv_cpu_dispatch.h>
+ #include <ATen/ops/addcmul_cpu_dispatch.h>
+ #include <ATen/ops/addmm_cpu_dispatch.h>
+ #include <ATen/ops/addmv_cpu_dispatch.h>
+ #include <ATen/ops/addr_cpu_dispatch.h>
+ #include <ATen/ops/all_cpu_dispatch.h>
+ #include <ATen/ops/amax_cpu_dispatch.h>
+ #include <ATen/ops/amin_cpu_dispatch.h>
+ #include <ATen/ops/aminmax_cpu_dispatch.h>
+ #include <ATen/ops/angle_cpu_dispatch.h>
+ #include <ATen/ops/any_cpu_dispatch.h>
+ #include <ATen/ops/arange_cpu_dispatch.h>
+ #include <ATen/ops/argmax_cpu_dispatch.h>
+ #include <ATen/ops/argmin_cpu_dispatch.h>
+ #include <ATen/ops/argsort_cpu_dispatch.h>
+ #include <ATen/ops/as_strided_cpu_dispatch.h>
+ #include <ATen/ops/asin_cpu_dispatch.h>
+ #include <ATen/ops/asinh_cpu_dispatch.h>
+ #include <ATen/ops/atan_cpu_dispatch.h>
+ #include <ATen/ops/atan2_cpu_dispatch.h>
+ #include <ATen/ops/atanh_cpu_dispatch.h>
+ #include <ATen/ops/avg_pool2d_cpu_dispatch.h>
+ #include <ATen/ops/avg_pool2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/avg_pool3d_cpu_dispatch.h>
+ #include <ATen/ops/avg_pool3d_backward_cpu_dispatch.h>
+ #include <ATen/ops/baddbmm_cpu_dispatch.h>
+ #include <ATen/ops/batch_norm_update_stats_cpu_dispatch.h>
+ #include <ATen/ops/bernoulli_cpu_dispatch.h>
+ #include <ATen/ops/binary_cross_entropy_cpu_dispatch.h>
+ #include <ATen/ops/binary_cross_entropy_backward_cpu_dispatch.h>
+ #include <ATen/ops/bincount_cpu_dispatch.h>
+ #include <ATen/ops/binomial_cpu_dispatch.h>
+ #include <ATen/ops/bitwise_and_cpu_dispatch.h>
+ #include <ATen/ops/bitwise_left_shift_cpu_dispatch.h>
+ #include <ATen/ops/bitwise_not_cpu_dispatch.h>
+ #include <ATen/ops/bitwise_or_cpu_dispatch.h>
+ #include <ATen/ops/bitwise_right_shift_cpu_dispatch.h>
+ #include <ATen/ops/bitwise_xor_cpu_dispatch.h>
+ #include <ATen/ops/bmm_cpu_dispatch.h>
+ #include <ATen/ops/bucketize_cpu_dispatch.h>
+ #include <ATen/ops/cat_cpu_dispatch.h>
+ #include <ATen/ops/cauchy_cpu_dispatch.h>
+ #include <ATen/ops/ceil_cpu_dispatch.h>
+ #include <ATen/ops/channel_shuffle_cpu_dispatch.h>
+ #include <ATen/ops/cholesky_cpu_dispatch.h>
+ #include <ATen/ops/cholesky_inverse_cpu_dispatch.h>
+ #include <ATen/ops/clamp_cpu_dispatch.h>
+ #include <ATen/ops/clamp_max_cpu_dispatch.h>
+ #include <ATen/ops/clamp_min_cpu_dispatch.h>
+ #include <ATen/ops/col2im_cpu_dispatch.h>
+ #include <ATen/ops/complex_cpu_dispatch.h>
+ #include <ATen/ops/conj_physical_cpu_dispatch.h>
+ #include <ATen/ops/copysign_cpu_dispatch.h>
+ #include <ATen/ops/cos_cpu_dispatch.h>
+ #include <ATen/ops/cosh_cpu_dispatch.h>
+ #include <ATen/ops/count_nonzero_cpu_dispatch.h>
+ #include <ATen/ops/cumprod_cpu_dispatch.h>
+ #include <ATen/ops/cumsum_cpu_dispatch.h>
+ #include <ATen/ops/dense_dim_cpu_dispatch.h>
+ #include <ATen/ops/dequantize_cpu_dispatch.h>
+ #include <ATen/ops/digamma_cpu_dispatch.h>
+ #include <ATen/ops/div_cpu_dispatch.h>
+ #include <ATen/ops/dot_cpu_dispatch.h>
+ #include <ATen/ops/elu_cpu_dispatch.h>
+ #include <ATen/ops/elu_backward_cpu_dispatch.h>
+ #include <ATen/ops/embedding_dense_backward_cpu_dispatch.h>
+ #include <ATen/ops/embedding_renorm_cpu_dispatch.h>
+ #include <ATen/ops/empty_cpu_dispatch.h>
+ #include <ATen/ops/empty_strided_cpu_dispatch.h>
+ #include <ATen/ops/eq_cpu_dispatch.h>
+ #include <ATen/ops/equal_cpu_dispatch.h>
+ #include <ATen/ops/erf_cpu_dispatch.h>
+ #include <ATen/ops/erfc_cpu_dispatch.h>
+ #include <ATen/ops/erfinv_cpu_dispatch.h>
+ #include <ATen/ops/exp_cpu_dispatch.h>
+ #include <ATen/ops/exp2_cpu_dispatch.h>
+ #include <ATen/ops/expm1_cpu_dispatch.h>
+ #include <ATen/ops/exponential_cpu_dispatch.h>
+ #include <ATen/ops/eye_cpu_dispatch.h>
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_cpu_dispatch.h>
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h>
+ #include <ATen/ops/fill_cpu_dispatch.h>
+ #include <ATen/ops/flip_cpu_dispatch.h>
+ #include <ATen/ops/floor_cpu_dispatch.h>
+ #include <ATen/ops/floor_divide_cpu_dispatch.h>
+ #include <ATen/ops/fmax_cpu_dispatch.h>
+ #include <ATen/ops/fmin_cpu_dispatch.h>
+ #include <ATen/ops/fmod_cpu_dispatch.h>
+ #include <ATen/ops/frac_cpu_dispatch.h>
+ #include <ATen/ops/fractional_max_pool2d_cpu_dispatch.h>
+ #include <ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/fractional_max_pool3d_cpu_dispatch.h>
+ #include <ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h>
+ #include <ATen/ops/frexp_cpu_dispatch.h>
+ #include <ATen/ops/from_file_cpu_dispatch.h>
+ #include <ATen/ops/gather_cpu_dispatch.h>
+ #include <ATen/ops/gcd_cpu_dispatch.h>
+ #include <ATen/ops/ge_cpu_dispatch.h>
+ #include <ATen/ops/gelu_cpu_dispatch.h>
+ #include <ATen/ops/gelu_backward_cpu_dispatch.h>
+ #include <ATen/ops/geometric_cpu_dispatch.h>
+ #include <ATen/ops/geqrf_cpu_dispatch.h>
+ #include <ATen/ops/glu_cpu_dispatch.h>
+ #include <ATen/ops/glu_backward_cpu_dispatch.h>
+ #include <ATen/ops/glu_backward_jvp_cpu_dispatch.h>
+ #include <ATen/ops/glu_jvp_cpu_dispatch.h>
+ #include <ATen/ops/grid_sampler_2d_cpu_dispatch.h>
+ #include <ATen/ops/grid_sampler_2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/grid_sampler_3d_cpu_dispatch.h>
+ #include <ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h>
+ #include <ATen/ops/gt_cpu_dispatch.h>
+ #include <ATen/ops/hardshrink_cpu_dispatch.h>
+ #include <ATen/ops/hardshrink_backward_cpu_dispatch.h>
+ #include <ATen/ops/hardsigmoid_cpu_dispatch.h>
+ #include <ATen/ops/hardsigmoid_backward_cpu_dispatch.h>
+ #include <ATen/ops/hardswish_cpu_dispatch.h>
+ #include <ATen/ops/hardswish_backward_cpu_dispatch.h>
+ #include <ATen/ops/hardtanh_cpu_dispatch.h>
+ #include <ATen/ops/hardtanh_backward_cpu_dispatch.h>
+ #include <ATen/ops/heaviside_cpu_dispatch.h>
+ #include <ATen/ops/histc_cpu_dispatch.h>
+ #include <ATen/ops/histogram_cpu_dispatch.h>
+ #include <ATen/ops/huber_loss_cpu_dispatch.h>
+ #include <ATen/ops/huber_loss_backward_cpu_dispatch.h>
+ #include <ATen/ops/hypot_cpu_dispatch.h>
+ #include <ATen/ops/i0_cpu_dispatch.h>
+ #include <ATen/ops/igamma_cpu_dispatch.h>
+ #include <ATen/ops/igammac_cpu_dispatch.h>
+ #include <ATen/ops/im2col_cpu_dispatch.h>
+ #include <ATen/ops/index_cpu_dispatch.h>
+ #include <ATen/ops/index_add_cpu_dispatch.h>
+ #include <ATen/ops/index_copy_cpu_dispatch.h>
+ #include <ATen/ops/index_fill_cpu_dispatch.h>
+ #include <ATen/ops/index_reduce_cpu_dispatch.h>
+ #include <ATen/ops/index_select_cpu_dispatch.h>
+ #include <ATen/ops/is_set_to_cpu_dispatch.h>
+ #include <ATen/ops/isin_cpu_dispatch.h>
+ #include <ATen/ops/isnan_cpu_dispatch.h>
+ #include <ATen/ops/isneginf_cpu_dispatch.h>
+ #include <ATen/ops/isposinf_cpu_dispatch.h>
+ #include <ATen/ops/kthvalue_cpu_dispatch.h>
+ #include <ATen/ops/lcm_cpu_dispatch.h>
+ #include <ATen/ops/le_cpu_dispatch.h>
+ #include <ATen/ops/leaky_relu_cpu_dispatch.h>
+ #include <ATen/ops/leaky_relu_backward_cpu_dispatch.h>
+ #include <ATen/ops/lerp_cpu_dispatch.h>
+ #include <ATen/ops/lgamma_cpu_dispatch.h>
+ #include <ATen/ops/linalg_cholesky_ex_cpu_dispatch.h>
+ #include <ATen/ops/linalg_cross_cpu_dispatch.h>
+ #include <ATen/ops/linalg_eig_cpu_dispatch.h>
+ #include <ATen/ops/linalg_householder_product_cpu_dispatch.h>
+ #include <ATen/ops/linalg_inv_ex_cpu_dispatch.h>
+ #include <ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h>
+ #include <ATen/ops/linalg_ldl_solve_cpu_dispatch.h>
+ #include <ATen/ops/linalg_lstsq_cpu_dispatch.h>
+ #include <ATen/ops/linalg_lu_cpu_dispatch.h>
+ #include <ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h>
+ #include <ATen/ops/linalg_lu_solve_cpu_dispatch.h>
+ #include <ATen/ops/linalg_matrix_exp_cpu_dispatch.h>
+ #include <ATen/ops/linalg_qr_cpu_dispatch.h>
+ #include <ATen/ops/linalg_solve_triangular_cpu_dispatch.h>
+ #include <ATen/ops/linalg_vector_norm_cpu_dispatch.h>
+ #include <ATen/ops/linspace_cpu_dispatch.h>
+ #include <ATen/ops/log_cpu_dispatch.h>
+ #include <ATen/ops/log10_cpu_dispatch.h>
+ #include <ATen/ops/log1p_cpu_dispatch.h>
+ #include <ATen/ops/log2_cpu_dispatch.h>
+ #include <ATen/ops/log_normal_cpu_dispatch.h>
+ #include <ATen/ops/log_sigmoid_backward_cpu_dispatch.h>
+ #include <ATen/ops/log_sigmoid_forward_cpu_dispatch.h>
+ #include <ATen/ops/logaddexp_cpu_dispatch.h>
+ #include <ATen/ops/logaddexp2_cpu_dispatch.h>
+ #include <ATen/ops/logical_and_cpu_dispatch.h>
+ #include <ATen/ops/logical_not_cpu_dispatch.h>
+ #include <ATen/ops/logical_or_cpu_dispatch.h>
+ #include <ATen/ops/logical_xor_cpu_dispatch.h>
+ #include <ATen/ops/logit_cpu_dispatch.h>
+ #include <ATen/ops/logit_backward_cpu_dispatch.h>
+ #include <ATen/ops/logspace_cpu_dispatch.h>
+ #include <ATen/ops/lshift_cpu_dispatch.h>
+ #include <ATen/ops/lt_cpu_dispatch.h>
+ #include <ATen/ops/lu_unpack_cpu_dispatch.h>
+ #include <ATen/ops/masked_fill_cpu_dispatch.h>
+ #include <ATen/ops/masked_scatter_cpu_dispatch.h>
+ #include <ATen/ops/masked_select_cpu_dispatch.h>
+ #include <ATen/ops/max_cpu_dispatch.h>
+ #include <ATen/ops/max_pool2d_with_indices_cpu_dispatch.h>
+ #include <ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h>
+ #include <ATen/ops/max_pool3d_with_indices_cpu_dispatch.h>
+ #include <ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h>
+ #include <ATen/ops/max_unpool2d_cpu_dispatch.h>
+ #include <ATen/ops/max_unpool3d_cpu_dispatch.h>
+ #include <ATen/ops/maximum_cpu_dispatch.h>
+ #include <ATen/ops/mean_cpu_dispatch.h>
+ #include <ATen/ops/median_cpu_dispatch.h>
+ #include <ATen/ops/min_cpu_dispatch.h>
+ #include <ATen/ops/minimum_cpu_dispatch.h>
+ #include <ATen/ops/mish_cpu_dispatch.h>
+ #include <ATen/ops/mish_backward_cpu_dispatch.h>
+ #include <ATen/ops/mkldnn_rnn_layer_cpu_dispatch.h>
+ #include <ATen/ops/mkldnn_rnn_layer_backward_cpu_dispatch.h>
+ #include <ATen/ops/mm_cpu_dispatch.h>
+ #include <ATen/ops/mode_cpu_dispatch.h>
+ #include <ATen/ops/mse_loss_cpu_dispatch.h>
+ #include <ATen/ops/mse_loss_backward_cpu_dispatch.h>
+ #include <ATen/ops/mul_cpu_dispatch.h>
+ #include <ATen/ops/multi_margin_loss_cpu_dispatch.h>
+ #include <ATen/ops/multi_margin_loss_backward_cpu_dispatch.h>
+ #include <ATen/ops/multilabel_margin_loss_backward_cpu_dispatch.h>
+ #include <ATen/ops/multilabel_margin_loss_forward_cpu_dispatch.h>
+ #include <ATen/ops/multinomial_cpu_dispatch.h>
+ #include <ATen/ops/mvlgamma_cpu_dispatch.h>
+ #include <ATen/ops/nan_to_num_cpu_dispatch.h>
+ #include <ATen/ops/nanmedian_cpu_dispatch.h>
+ #include <ATen/ops/nansum_cpu_dispatch.h>
+ #include <ATen/ops/narrow_copy_cpu_dispatch.h>
+ #include <ATen/ops/native_batch_norm_cpu_dispatch.h>
+ #include <ATen/ops/native_batch_norm_backward_cpu_dispatch.h>
+ #include <ATen/ops/native_channel_shuffle_cpu_dispatch.h>
+ #include <ATen/ops/native_dropout_cpu_dispatch.h>
+ #include <ATen/ops/native_dropout_backward_cpu_dispatch.h>
+ #include <ATen/ops/native_group_norm_cpu_dispatch.h>
+ #include <ATen/ops/native_group_norm_backward_cpu_dispatch.h>
+ #include <ATen/ops/native_layer_norm_cpu_dispatch.h>
+ #include <ATen/ops/native_layer_norm_backward_cpu_dispatch.h>
+ #include <ATen/ops/ne_cpu_dispatch.h>
+ #include <ATen/ops/neg_cpu_dispatch.h>
+ #include <ATen/ops/nextafter_cpu_dispatch.h>
+ #include <ATen/ops/nll_loss2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/nll_loss2d_forward_cpu_dispatch.h>
+ #include <ATen/ops/nll_loss_backward_cpu_dispatch.h>
+ #include <ATen/ops/nll_loss_forward_cpu_dispatch.h>
+ #include <ATen/ops/nonzero_cpu_dispatch.h>
+ #include <ATen/ops/nonzero_static_cpu_dispatch.h>
+ #include <ATen/ops/norm_cpu_dispatch.h>
+ #include <ATen/ops/normal_cpu_dispatch.h>
+ #include <ATen/ops/ormqr_cpu_dispatch.h>
+ #include <ATen/ops/pixel_shuffle_cpu_dispatch.h>
+ #include <ATen/ops/pixel_unshuffle_cpu_dispatch.h>
+ #include <ATen/ops/poisson_cpu_dispatch.h>
+ #include <ATen/ops/polar_cpu_dispatch.h>
+ #include <ATen/ops/polygamma_cpu_dispatch.h>
+ #include <ATen/ops/pow_cpu_dispatch.h>
+ #include <ATen/ops/prod_cpu_dispatch.h>
+ #include <ATen/ops/put_cpu_dispatch.h>
+ #include <ATen/ops/quantize_per_channel_cpu_dispatch.h>
+ #include <ATen/ops/quantize_per_tensor_cpu_dispatch.h>
+ #include <ATen/ops/quantize_per_tensor_dynamic_cpu_dispatch.h>
+ #include <ATen/ops/random_cpu_dispatch.h>
+ #include <ATen/ops/randperm_cpu_dispatch.h>
+ #include <ATen/ops/range_cpu_dispatch.h>
+ #include <ATen/ops/reciprocal_cpu_dispatch.h>
+ #include <ATen/ops/reflection_pad1d_cpu_dispatch.h>
+ #include <ATen/ops/reflection_pad1d_backward_cpu_dispatch.h>
+ #include <ATen/ops/reflection_pad2d_cpu_dispatch.h>
+ #include <ATen/ops/reflection_pad2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/reflection_pad3d_cpu_dispatch.h>
+ #include <ATen/ops/reflection_pad3d_backward_cpu_dispatch.h>
+ #include <ATen/ops/relu_cpu_dispatch.h>
+ #include <ATen/ops/remainder_cpu_dispatch.h>
+ #include <ATen/ops/renorm_cpu_dispatch.h>
+ #include <ATen/ops/repeat_interleave_cpu_dispatch.h>
+ #include <ATen/ops/replication_pad1d_cpu_dispatch.h>
+ #include <ATen/ops/replication_pad1d_backward_cpu_dispatch.h>
+ #include <ATen/ops/replication_pad2d_cpu_dispatch.h>
+ #include <ATen/ops/replication_pad2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/replication_pad3d_cpu_dispatch.h>
+ #include <ATen/ops/replication_pad3d_backward_cpu_dispatch.h>
+ #include <ATen/ops/resize_cpu_dispatch.h>
+ #include <ATen/ops/roll_cpu_dispatch.h>
+ #include <ATen/ops/round_cpu_dispatch.h>
+ #include <ATen/ops/rrelu_with_noise_cpu_dispatch.h>
+ #include <ATen/ops/rshift_cpu_dispatch.h>
+ #include <ATen/ops/rsqrt_cpu_dispatch.h>
+ #include <ATen/ops/rsub_cpu_dispatch.h>
+ #include <ATen/ops/scatter_cpu_dispatch.h>
+ #include <ATen/ops/scatter_add_cpu_dispatch.h>
+ #include <ATen/ops/scatter_reduce_cpu_dispatch.h>
+ #include <ATen/ops/searchsorted_cpu_dispatch.h>
+ #include <ATen/ops/segment_reduce_cpu_dispatch.h>
+ #include <ATen/ops/set_cpu_dispatch.h>
+ #include <ATen/ops/sgn_cpu_dispatch.h>
+ #include <ATen/ops/sigmoid_cpu_dispatch.h>
+ #include <ATen/ops/sigmoid_backward_cpu_dispatch.h>
+ #include <ATen/ops/sign_cpu_dispatch.h>
+ #include <ATen/ops/signbit_cpu_dispatch.h>
+ #include <ATen/ops/silu_cpu_dispatch.h>
+ #include <ATen/ops/silu_backward_cpu_dispatch.h>
+ #include <ATen/ops/sin_cpu_dispatch.h>
+ #include <ATen/ops/sinc_cpu_dispatch.h>
+ #include <ATen/ops/sinh_cpu_dispatch.h>
+ #include <ATen/ops/slow_conv3d_forward_cpu_dispatch.h>
+ #include <ATen/ops/slow_conv_dilated2d_cpu_dispatch.h>
+ #include <ATen/ops/slow_conv_dilated3d_cpu_dispatch.h>
+ #include <ATen/ops/slow_conv_transpose2d_cpu_dispatch.h>
+ #include <ATen/ops/slow_conv_transpose3d_cpu_dispatch.h>
+ #include <ATen/ops/smooth_l1_loss_cpu_dispatch.h>
+ #include <ATen/ops/smooth_l1_loss_backward_cpu_dispatch.h>
+ #include <ATen/ops/softplus_cpu_dispatch.h>
+ #include <ATen/ops/softplus_backward_cpu_dispatch.h>
+ #include <ATen/ops/softshrink_cpu_dispatch.h>
+ #include <ATen/ops/softshrink_backward_cpu_dispatch.h>
+ #include <ATen/ops/sort_cpu_dispatch.h>
+ #include <ATen/ops/sparse_dim_cpu_dispatch.h>
+ #include <ATen/ops/special_airy_ai_cpu_dispatch.h>
+ #include <ATen/ops/special_bessel_j0_cpu_dispatch.h>
+ #include <ATen/ops/special_bessel_j1_cpu_dispatch.h>
+ #include <ATen/ops/special_bessel_y0_cpu_dispatch.h>
+ #include <ATen/ops/special_bessel_y1_cpu_dispatch.h>
+ #include <ATen/ops/special_chebyshev_polynomial_t_cpu_dispatch.h>
+ #include <ATen/ops/special_chebyshev_polynomial_u_cpu_dispatch.h>
+ #include <ATen/ops/special_chebyshev_polynomial_v_cpu_dispatch.h>
+ #include <ATen/ops/special_chebyshev_polynomial_w_cpu_dispatch.h>
+ #include <ATen/ops/special_entr_cpu_dispatch.h>
+ #include <ATen/ops/special_erfcx_cpu_dispatch.h>
+ #include <ATen/ops/special_hermite_polynomial_h_cpu_dispatch.h>
+ #include <ATen/ops/special_hermite_polynomial_he_cpu_dispatch.h>
+ #include <ATen/ops/special_i0e_cpu_dispatch.h>
+ #include <ATen/ops/special_i1_cpu_dispatch.h>
+ #include <ATen/ops/special_i1e_cpu_dispatch.h>
+ #include <ATen/ops/special_laguerre_polynomial_l_cpu_dispatch.h>
+ #include <ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h>
+ #include <ATen/ops/special_log_ndtr_cpu_dispatch.h>
+ #include <ATen/ops/special_modified_bessel_i0_cpu_dispatch.h>
+ #include <ATen/ops/special_modified_bessel_i1_cpu_dispatch.h>
+ #include <ATen/ops/special_modified_bessel_k0_cpu_dispatch.h>
+ #include <ATen/ops/special_modified_bessel_k1_cpu_dispatch.h>
+ #include <ATen/ops/special_ndtri_cpu_dispatch.h>
+ #include <ATen/ops/special_scaled_modified_bessel_k0_cpu_dispatch.h>
+ #include <ATen/ops/special_scaled_modified_bessel_k1_cpu_dispatch.h>
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t_cpu_dispatch.h>
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u_cpu_dispatch.h>
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v_cpu_dispatch.h>
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h>
+ #include <ATen/ops/special_spherical_bessel_j0_cpu_dispatch.h>
+ #include <ATen/ops/special_xlog1py_cpu_dispatch.h>
+ #include <ATen/ops/special_zeta_cpu_dispatch.h>
+ #include <ATen/ops/sqrt_cpu_dispatch.h>
+ #include <ATen/ops/sspaddmm_cpu_dispatch.h>
+ #include <ATen/ops/std_cpu_dispatch.h>
+ #include <ATen/ops/std_mean_cpu_dispatch.h>
+ #include <ATen/ops/sub_cpu_dispatch.h>
+ #include <ATen/ops/sum_cpu_dispatch.h>
+ #include <ATen/ops/take_cpu_dispatch.h>
+ #include <ATen/ops/tan_cpu_dispatch.h>
+ #include <ATen/ops/tanh_cpu_dispatch.h>
+ #include <ATen/ops/tanh_backward_cpu_dispatch.h>
+ #include <ATen/ops/threshold_cpu_dispatch.h>
+ #include <ATen/ops/threshold_backward_cpu_dispatch.h>
+ #include <ATen/ops/to_mkldnn_cpu_dispatch.h>
+ #include <ATen/ops/topk_cpu_dispatch.h>
+ #include <ATen/ops/trace_cpu_dispatch.h>
+ #include <ATen/ops/triangular_solve_cpu_dispatch.h>
+ #include <ATen/ops/tril_cpu_dispatch.h>
+ #include <ATen/ops/tril_indices_cpu_dispatch.h>
+ #include <ATen/ops/triu_cpu_dispatch.h>
+ #include <ATen/ops/triu_indices_cpu_dispatch.h>
+ #include <ATen/ops/trunc_cpu_dispatch.h>
+ #include <ATen/ops/unfold_cpu_dispatch.h>
+ #include <ATen/ops/unfold_backward_cpu_dispatch.h>
+ #include <ATen/ops/uniform_cpu_dispatch.h>
+ #include <ATen/ops/unique_consecutive_cpu_dispatch.h>
+ #include <ATen/ops/unique_dim_cpu_dispatch.h>
+ #include <ATen/ops/unique_dim_consecutive_cpu_dispatch.h>
+ #include <ATen/ops/upsample_bicubic2d_cpu_dispatch.h>
+ #include <ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/upsample_bilinear2d_cpu_dispatch.h>
+ #include <ATen/ops/upsample_bilinear2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/upsample_linear1d_cpu_dispatch.h>
+ #include <ATen/ops/upsample_linear1d_backward_cpu_dispatch.h>
+ #include <ATen/ops/upsample_nearest1d_cpu_dispatch.h>
+ #include <ATen/ops/upsample_nearest1d_backward_cpu_dispatch.h>
+ #include <ATen/ops/upsample_nearest2d_cpu_dispatch.h>
+ #include <ATen/ops/upsample_nearest2d_backward_cpu_dispatch.h>
+ #include <ATen/ops/upsample_nearest3d_cpu_dispatch.h>
+ #include <ATen/ops/upsample_nearest3d_backward_cpu_dispatch.h>
+ #include <ATen/ops/upsample_trilinear3d_cpu_dispatch.h>
+ #include <ATen/ops/upsample_trilinear3d_backward_cpu_dispatch.h>
+ #include <ATen/ops/var_cpu_dispatch.h>
+ #include <ATen/ops/var_mean_cpu_dispatch.h>
+ #include <ATen/ops/vdot_cpu_dispatch.h>
+ #include <ATen/ops/view_cpu_dispatch.h>
+ #include <ATen/ops/view_as_complex_cpu_dispatch.h>
+ #include <ATen/ops/view_as_real_cpu_dispatch.h>
+ #include <ATen/ops/where_cpu_dispatch.h>
+ #include <ATen/ops/xlogy_cpu_dispatch.h>
+ #include <ATen/ops/zero_cpu_dispatch.h>
+
+
+
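Note: the #error guard near the top of this header is worth illustrating. When TORCH_ASSERT_ONLY_METHOD_OPERATORS is in force, include just the operator you call rather than this whole generated header. A hedged sketch (assumes a linked libtorch build; at::cpu:: is the CPU fastpath namespace these per-operator headers declare):

    #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
    #include <ATen/core/Tensor.h>
    #include <ATen/ops/add_cpu_dispatch.h>  // one operator, not all of them

    at::Tensor fused_add(const at::Tensor& a, const at::Tensor& b) {
      // Calls the CPU kernel directly, skipping dispatcher overhead,
      // and recompiles only when this one operator changes.
      return at::cpu::add(a, b, /*alpha=*/1);
    }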
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions_inl.h ADDED
@@ -0,0 +1,608 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+ #error This change adds a dependency on all pytorch operators, meaning the \
+ file will need to be re-compiled every time an operator is changed or added. \
+ Consider including a specific operator from \
+ <ATen/ops/{my_operator}_cuda_dispatch.h>. \
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+ #endif
+
+ #include <ATen/ops/_adaptive_avg_pool2d_cuda_dispatch.h>
+ #include <ATen/ops/_adaptive_avg_pool2d_backward_cuda_dispatch.h>
+ #include <ATen/ops/_adaptive_avg_pool3d_cuda_dispatch.h>
+ #include <ATen/ops/_adaptive_avg_pool3d_backward_cuda_dispatch.h>
+ #include <ATen/ops/_addmm_activation_cuda_dispatch.h>
+ #include <ATen/ops/_aminmax_cuda_dispatch.h>
+ #include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_cuda_dispatch.h>
+ #include <ATen/ops/_amp_update_scale_cuda_dispatch.h>
+ #include <ATen/ops/_assert_async_cuda_dispatch.h>
+ #include <ATen/ops/_cdist_backward_cuda_dispatch.h>
+ #include <ATen/ops/_cdist_forward_cuda_dispatch.h>
+ #include <ATen/ops/_cholesky_solve_helper_cuda_dispatch.h>
+ #include <ATen/ops/_compute_linear_combination_cuda_dispatch.h>
+ #include <ATen/ops/_conv_depthwise2d_cuda_dispatch.h>
+ #include <ATen/ops/_convert_indices_from_coo_to_csr_cuda_dispatch.h>
+ #include <ATen/ops/_convert_indices_from_csr_to_coo_cuda_dispatch.h>
+ #include <ATen/ops/_convert_weight_to_int4pack_cuda_dispatch.h>
+ #include <ATen/ops/_cslt_compress_cuda_dispatch.h>
+ #include <ATen/ops/_cslt_sparse_mm_cuda_dispatch.h>
+ #include <ATen/ops/_ctc_loss_cuda_dispatch.h>
+ #include <ATen/ops/_ctc_loss_backward_cuda_dispatch.h>
+ #include <ATen/ops/_cudnn_ctc_loss_cuda_dispatch.h>
+ #include <ATen/ops/_cudnn_init_dropout_state_cuda_dispatch.h>
+ #include <ATen/ops/_cudnn_rnn_cuda_dispatch.h>
+ #include <ATen/ops/_cudnn_rnn_backward_cuda_dispatch.h>
+ #include <ATen/ops/_cudnn_rnn_flatten_weight_cuda_dispatch.h>
+ #include <ATen/ops/_cummax_helper_cuda_dispatch.h>
+ #include <ATen/ops/_cummin_helper_cuda_dispatch.h>
+ #include <ATen/ops/_dirichlet_grad_cuda_dispatch.h>
+ #include <ATen/ops/_efficient_attention_backward_cuda_dispatch.h>
+ #include <ATen/ops/_efficient_attention_forward_cuda_dispatch.h>
+ #include <ATen/ops/_efficientzerotensor_cuda_dispatch.h>
+ #include <ATen/ops/_embedding_bag_cuda_dispatch.h>
+ #include <ATen/ops/_embedding_bag_dense_backward_cuda_dispatch.h>
+ #include <ATen/ops/_embedding_bag_forward_only_cuda_dispatch.h>
+ #include <ATen/ops/_embedding_bag_per_sample_weights_backward_cuda_dispatch.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_cuda_dispatch.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_cuda_dispatch.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_cuda_dispatch.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cuda_dispatch.h>
+ #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cuda_dispatch.h>
+ #include <ATen/ops/_fft_c2c_cuda_dispatch.h>
+ #include <ATen/ops/_fft_c2r_cuda_dispatch.h>
+ #include <ATen/ops/_fft_r2c_cuda_dispatch.h>
+ #include <ATen/ops/_fill_mem_eff_dropout_mask_cuda_dispatch.h>
+ #include <ATen/ops/_flash_attention_backward_cuda_dispatch.h>
+ #include <ATen/ops/_flash_attention_forward_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_abs_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_acos_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_add_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_addcdiv_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_addcmul_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_asin_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_atan_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_ceil_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_clamp_max_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_clamp_min_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_copy_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_cos_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_cosh_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_div_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_erf_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_erfc_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_exp_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_expm1_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_floor_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_frac_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_lerp_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_lgamma_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_log_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_log10_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_log1p_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_log2_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_maximum_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_minimum_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_mul_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_neg_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_norm_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_pow_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_reciprocal_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_round_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_sigmoid_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_sign_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_sin_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_sinh_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_sqrt_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_sub_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_tan_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_tanh_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_trunc_cuda_dispatch.h>
+ #include <ATen/ops/_foreach_zero_cuda_dispatch.h>
+ #include <ATen/ops/_fused_adam_cuda_dispatch.h>
+ #include <ATen/ops/_fused_adamw_cuda_dispatch.h>
+ #include <ATen/ops/_fused_dropout_cuda_dispatch.h>
+ #include <ATen/ops/_fused_moving_avg_obs_fq_helper_cuda_dispatch.h>
+ #include <ATen/ops/_fused_sdp_choice_cuda_dispatch.h>
+ #include <ATen/ops/_index_put_impl_cuda_dispatch.h>
+ #include <ATen/ops/_int_mm_cuda_dispatch.h>
+ #include <ATen/ops/_linalg_det_cuda_dispatch.h>
+ #include <ATen/ops/_linalg_eigh_cuda_dispatch.h>
+ #include <ATen/ops/_linalg_slogdet_cuda_dispatch.h>
+ #include <ATen/ops/_linalg_solve_ex_cuda_dispatch.h>
+ #include <ATen/ops/_linalg_svd_cuda_dispatch.h>
+ #include <ATen/ops/_local_scalar_dense_cuda_dispatch.h>
+ #include <ATen/ops/_log_softmax_cuda_dispatch.h>
+ #include <ATen/ops/_log_softmax_backward_data_cuda_dispatch.h>
+ #include <ATen/ops/_logcumsumexp_cuda_dispatch.h>
+ #include <ATen/ops/_make_per_channel_quantized_tensor_cuda_dispatch.h>
+ #include <ATen/ops/_make_per_tensor_quantized_tensor_cuda_dispatch.h>
+ #include <ATen/ops/_masked_scale_cuda_dispatch.h>
+ #include <ATen/ops/_masked_softmax_cuda_dispatch.h>
+ #include <ATen/ops/_masked_softmax_backward_cuda_dispatch.h>
+ #include <ATen/ops/_mixed_dtypes_linear_cuda_dispatch.h>
+ #include <ATen/ops/_native_batch_norm_legit_cuda_dispatch.h>
+ #include <ATen/ops/_native_multi_head_attention_cuda_dispatch.h>
+ #include <ATen/ops/_nested_from_padded_cuda_dispatch.h>
+ #include <ATen/ops/_nested_tensor_from_mask_cuda_dispatch.h>
+ #include <ATen/ops/_nested_tensor_from_mask_left_aligned_cuda_dispatch.h>
+ #include <ATen/ops/_nested_view_from_buffer_cuda_dispatch.h>
+ #include <ATen/ops/_pdist_backward_cuda_dispatch.h>
+ #include <ATen/ops/_pdist_forward_cuda_dispatch.h>
+ #include <ATen/ops/_pin_memory_cuda_dispatch.h>
+ #include <ATen/ops/_prelu_kernel_cuda_dispatch.h>
+ #include <ATen/ops/_prelu_kernel_backward_cuda_dispatch.h>
+ #include <ATen/ops/_reshape_alias_cuda_dispatch.h>
+ #include <ATen/ops/_sample_dirichlet_cuda_dispatch.h>
+ #include <ATen/ops/_scaled_dot_product_efficient_attention_cuda_dispatch.h>
+ #include <ATen/ops/_scaled_dot_product_efficient_attention_backward_cuda_dispatch.h>
+ #include <ATen/ops/_scaled_dot_product_flash_attention_cuda_dispatch.h>
+ #include <ATen/ops/_scaled_dot_product_flash_attention_backward_cuda_dispatch.h>
+ #include <ATen/ops/_scaled_mm_cuda_dispatch.h>
+ #include <ATen/ops/_segment_reduce_backward_cuda_dispatch.h>
+ #include <ATen/ops/_slow_conv2d_backward_cuda_dispatch.h>
+ #include <ATen/ops/_slow_conv2d_forward_cuda_dispatch.h>
+ #include <ATen/ops/_softmax_cuda_dispatch.h>
+ #include <ATen/ops/_softmax_backward_data_cuda_dispatch.h>
+ #include <ATen/ops/_sparse_semi_structured_linear_cuda_dispatch.h>
+ #include <ATen/ops/_standard_gamma_cuda_dispatch.h>
+ #include <ATen/ops/_standard_gamma_grad_cuda_dispatch.h>
+ #include <ATen/ops/_thnn_fused_gru_cell_cuda_dispatch.h>
+ #include <ATen/ops/_thnn_fused_gru_cell_backward_cuda_dispatch.h>
+ #include <ATen/ops/_thnn_fused_lstm_cell_cuda_dispatch.h>
+ #include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_cuda_dispatch.h>
+ #include <ATen/ops/_to_sparse_cuda_dispatch.h>
+ #include <ATen/ops/_to_sparse_bsc_cuda_dispatch.h>
+ #include <ATen/ops/_to_sparse_bsr_cuda_dispatch.h>
+ #include <ATen/ops/_to_sparse_csc_cuda_dispatch.h>
+ #include <ATen/ops/_to_sparse_csr_cuda_dispatch.h>
+ #include <ATen/ops/_to_sparse_semi_structured_cuda_dispatch.h>
+ #include <ATen/ops/_transform_bias_rescale_qkv_cuda_dispatch.h>
+ #include <ATen/ops/_transformer_encoder_layer_fwd_cuda_dispatch.h>
+ #include <ATen/ops/_triton_multi_head_attention_cuda_dispatch.h>
+ #include <ATen/ops/_triton_scaled_dot_attention_cuda_dispatch.h>
+ #include <ATen/ops/_unique_cuda_dispatch.h>
+ #include <ATen/ops/_unique2_cuda_dispatch.h>
+ #include <ATen/ops/_upsample_bicubic2d_aa_cuda_dispatch.h>
+ #include <ATen/ops/_upsample_bicubic2d_aa_backward_cuda_dispatch.h>
+ #include <ATen/ops/_upsample_bilinear2d_aa_cuda_dispatch.h>
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward_cuda_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact1d_cuda_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact1d_backward_cuda_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact2d_cuda_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact2d_backward_cuda_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact3d_cuda_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact3d_backward_cuda_dispatch.h>
+ #include <ATen/ops/_use_cudnn_ctc_loss_cuda_dispatch.h>
+ #include <ATen/ops/_validate_compressed_sparse_indices_cuda_dispatch.h>
+ #include <ATen/ops/_weight_int4pack_mm_cuda_dispatch.h>
+ #include <ATen/ops/_weight_norm_interface_cuda_dispatch.h>
+ #include <ATen/ops/_weight_norm_interface_backward_cuda_dispatch.h>
+ #include <ATen/ops/abs_cuda_dispatch.h>
+ #include <ATen/ops/acos_cuda_dispatch.h>
+ #include <ATen/ops/acosh_cuda_dispatch.h>
+ #include <ATen/ops/adaptive_avg_pool2d_cuda_dispatch.h>
+ #include <ATen/ops/adaptive_avg_pool3d_cuda_dispatch.h>
+ #include <ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool2d_cuda_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool2d_backward_cuda_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool3d_cuda_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool3d_backward_cuda_dispatch.h>
+ #include <ATen/ops/add_cuda_dispatch.h>
+ #include <ATen/ops/addbmm_cuda_dispatch.h>
+ #include <ATen/ops/addcdiv_cuda_dispatch.h>
+ #include <ATen/ops/addcmul_cuda_dispatch.h>
+ #include <ATen/ops/addmm_cuda_dispatch.h>
+ #include <ATen/ops/addmv_cuda_dispatch.h>
+ #include <ATen/ops/addr_cuda_dispatch.h>
+ #include <ATen/ops/all_cuda_dispatch.h>
+ #include <ATen/ops/amax_cuda_dispatch.h>
+ #include <ATen/ops/amin_cuda_dispatch.h>
+ #include <ATen/ops/aminmax_cuda_dispatch.h>
+ #include <ATen/ops/angle_cuda_dispatch.h>
+ #include <ATen/ops/any_cuda_dispatch.h>
+ #include <ATen/ops/arange_cuda_dispatch.h>
+ #include <ATen/ops/argmax_cuda_dispatch.h>
+ #include <ATen/ops/argmin_cuda_dispatch.h>
+ #include <ATen/ops/argsort_cuda_dispatch.h>
+ #include <ATen/ops/as_strided_cuda_dispatch.h>
+ #include <ATen/ops/asin_cuda_dispatch.h>
+ #include <ATen/ops/asinh_cuda_dispatch.h>
+ #include <ATen/ops/atan_cuda_dispatch.h>
+ #include <ATen/ops/atan2_cuda_dispatch.h>
+ #include <ATen/ops/atanh_cuda_dispatch.h>
+ #include <ATen/ops/avg_pool2d_cuda_dispatch.h>
+ #include <ATen/ops/avg_pool2d_backward_cuda_dispatch.h>
+ #include <ATen/ops/avg_pool3d_cuda_dispatch.h>
+ #include <ATen/ops/avg_pool3d_backward_cuda_dispatch.h>
+ #include <ATen/ops/baddbmm_cuda_dispatch.h>
+ #include <ATen/ops/batch_norm_backward_elemt_cuda_dispatch.h>
+ #include <ATen/ops/batch_norm_backward_reduce_cuda_dispatch.h>
+ #include <ATen/ops/batch_norm_elemt_cuda_dispatch.h>
+ #include <ATen/ops/batch_norm_gather_stats_cuda_dispatch.h>
+ #include <ATen/ops/batch_norm_gather_stats_with_counts_cuda_dispatch.h>
+ #include <ATen/ops/batch_norm_stats_cuda_dispatch.h>
+ #include <ATen/ops/batch_norm_update_stats_cuda_dispatch.h>
+ #include <ATen/ops/bernoulli_cuda_dispatch.h>
+ #include <ATen/ops/binary_cross_entropy_cuda_dispatch.h>
+ #include <ATen/ops/binary_cross_entropy_backward_cuda_dispatch.h>
+ #include <ATen/ops/bincount_cuda_dispatch.h>
+ #include <ATen/ops/binomial_cuda_dispatch.h>
+ #include <ATen/ops/bitwise_and_cuda_dispatch.h>
+ #include <ATen/ops/bitwise_left_shift_cuda_dispatch.h>
+ #include <ATen/ops/bitwise_not_cuda_dispatch.h>
+ #include <ATen/ops/bitwise_or_cuda_dispatch.h>
+ #include <ATen/ops/bitwise_right_shift_cuda_dispatch.h>
+ #include <ATen/ops/bitwise_xor_cuda_dispatch.h>
+ #include <ATen/ops/bmm_cuda_dispatch.h>
+ #include <ATen/ops/bucketize_cuda_dispatch.h>
+ #include <ATen/ops/cat_cuda_dispatch.h>
+ #include <ATen/ops/cauchy_cuda_dispatch.h>
+ #include <ATen/ops/ceil_cuda_dispatch.h>
+ #include <ATen/ops/channel_shuffle_cuda_dispatch.h>
+ #include <ATen/ops/cholesky_cuda_dispatch.h>
+ #include <ATen/ops/cholesky_inverse_cuda_dispatch.h>
+ #include <ATen/ops/clamp_cuda_dispatch.h>
+ #include <ATen/ops/clamp_max_cuda_dispatch.h>
+ #include <ATen/ops/clamp_min_cuda_dispatch.h>
+ #include <ATen/ops/col2im_cuda_dispatch.h>
+ #include <ATen/ops/complex_cuda_dispatch.h>
+ #include <ATen/ops/conj_physical_cuda_dispatch.h>
+ #include <ATen/ops/conv_depthwise3d_cuda_dispatch.h>
+ #include <ATen/ops/convolution_backward_cuda_dispatch.h>
+ #include <ATen/ops/copysign_cuda_dispatch.h>
+ #include <ATen/ops/cos_cuda_dispatch.h>
+ #include <ATen/ops/cosh_cuda_dispatch.h>
264
+ #include <ATen/ops/count_nonzero_cuda_dispatch.h>
265
+ #include <ATen/ops/cudnn_affine_grid_generator_cuda_dispatch.h>
266
+ #include <ATen/ops/cudnn_affine_grid_generator_backward_cuda_dispatch.h>
267
+ #include <ATen/ops/cudnn_batch_norm_cuda_dispatch.h>
268
+ #include <ATen/ops/cudnn_batch_norm_backward_cuda_dispatch.h>
269
+ #include <ATen/ops/cudnn_convolution_cuda_dispatch.h>
270
+ #include <ATen/ops/cudnn_convolution_add_relu_cuda_dispatch.h>
271
+ #include <ATen/ops/cudnn_convolution_relu_cuda_dispatch.h>
272
+ #include <ATen/ops/cudnn_convolution_transpose_cuda_dispatch.h>
273
+ #include <ATen/ops/cudnn_grid_sampler_cuda_dispatch.h>
274
+ #include <ATen/ops/cudnn_grid_sampler_backward_cuda_dispatch.h>
275
+ #include <ATen/ops/cumprod_cuda_dispatch.h>
276
+ #include <ATen/ops/cumsum_cuda_dispatch.h>
277
+ #include <ATen/ops/dense_dim_cuda_dispatch.h>
278
+ #include <ATen/ops/dequantize_cuda_dispatch.h>
279
+ #include <ATen/ops/digamma_cuda_dispatch.h>
280
+ #include <ATen/ops/div_cuda_dispatch.h>
281
+ #include <ATen/ops/dot_cuda_dispatch.h>
282
+ #include <ATen/ops/elu_cuda_dispatch.h>
283
+ #include <ATen/ops/elu_backward_cuda_dispatch.h>
284
+ #include <ATen/ops/embedding_dense_backward_cuda_dispatch.h>
285
+ #include <ATen/ops/embedding_renorm_cuda_dispatch.h>
286
+ #include <ATen/ops/empty_cuda_dispatch.h>
287
+ #include <ATen/ops/empty_strided_cuda_dispatch.h>
288
+ #include <ATen/ops/eq_cuda_dispatch.h>
289
+ #include <ATen/ops/equal_cuda_dispatch.h>
290
+ #include <ATen/ops/erf_cuda_dispatch.h>
291
+ #include <ATen/ops/erfc_cuda_dispatch.h>
292
+ #include <ATen/ops/erfinv_cuda_dispatch.h>
293
+ #include <ATen/ops/exp_cuda_dispatch.h>
294
+ #include <ATen/ops/exp2_cuda_dispatch.h>
295
+ #include <ATen/ops/expm1_cuda_dispatch.h>
296
+ #include <ATen/ops/exponential_cuda_dispatch.h>
297
+ #include <ATen/ops/eye_cuda_dispatch.h>
298
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_cuda_dispatch.h>
299
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_cuda_dispatch.h>
300
+ #include <ATen/ops/fill_cuda_dispatch.h>
301
+ #include <ATen/ops/flip_cuda_dispatch.h>
302
+ #include <ATen/ops/floor_cuda_dispatch.h>
303
+ #include <ATen/ops/floor_divide_cuda_dispatch.h>
304
+ #include <ATen/ops/fmax_cuda_dispatch.h>
305
+ #include <ATen/ops/fmin_cuda_dispatch.h>
306
+ #include <ATen/ops/fmod_cuda_dispatch.h>
307
+ #include <ATen/ops/frac_cuda_dispatch.h>
308
+ #include <ATen/ops/fractional_max_pool2d_cuda_dispatch.h>
309
+ #include <ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h>
310
+ #include <ATen/ops/fractional_max_pool3d_cuda_dispatch.h>
311
+ #include <ATen/ops/fractional_max_pool3d_backward_cuda_dispatch.h>
312
+ #include <ATen/ops/frexp_cuda_dispatch.h>
313
+ #include <ATen/ops/gather_cuda_dispatch.h>
314
+ #include <ATen/ops/gcd_cuda_dispatch.h>
315
+ #include <ATen/ops/ge_cuda_dispatch.h>
316
+ #include <ATen/ops/gelu_cuda_dispatch.h>
317
+ #include <ATen/ops/gelu_backward_cuda_dispatch.h>
318
+ #include <ATen/ops/geometric_cuda_dispatch.h>
319
+ #include <ATen/ops/geqrf_cuda_dispatch.h>
320
+ #include <ATen/ops/glu_cuda_dispatch.h>
321
+ #include <ATen/ops/glu_backward_cuda_dispatch.h>
322
+ #include <ATen/ops/glu_backward_jvp_cuda_dispatch.h>
323
+ #include <ATen/ops/glu_jvp_cuda_dispatch.h>
324
+ #include <ATen/ops/grid_sampler_2d_cuda_dispatch.h>
325
+ #include <ATen/ops/grid_sampler_2d_backward_cuda_dispatch.h>
326
+ #include <ATen/ops/grid_sampler_3d_cuda_dispatch.h>
327
+ #include <ATen/ops/grid_sampler_3d_backward_cuda_dispatch.h>
328
+ #include <ATen/ops/gt_cuda_dispatch.h>
329
+ #include <ATen/ops/hardshrink_cuda_dispatch.h>
330
+ #include <ATen/ops/hardshrink_backward_cuda_dispatch.h>
331
+ #include <ATen/ops/hardsigmoid_cuda_dispatch.h>
332
+ #include <ATen/ops/hardsigmoid_backward_cuda_dispatch.h>
333
+ #include <ATen/ops/hardswish_cuda_dispatch.h>
334
+ #include <ATen/ops/hardswish_backward_cuda_dispatch.h>
335
+ #include <ATen/ops/hardtanh_cuda_dispatch.h>
336
+ #include <ATen/ops/hardtanh_backward_cuda_dispatch.h>
337
+ #include <ATen/ops/heaviside_cuda_dispatch.h>
338
+ #include <ATen/ops/histc_cuda_dispatch.h>
339
+ #include <ATen/ops/huber_loss_cuda_dispatch.h>
340
+ #include <ATen/ops/huber_loss_backward_cuda_dispatch.h>
341
+ #include <ATen/ops/hypot_cuda_dispatch.h>
342
+ #include <ATen/ops/i0_cuda_dispatch.h>
343
+ #include <ATen/ops/igamma_cuda_dispatch.h>
344
+ #include <ATen/ops/igammac_cuda_dispatch.h>
345
+ #include <ATen/ops/im2col_cuda_dispatch.h>
346
+ #include <ATen/ops/index_cuda_dispatch.h>
347
+ #include <ATen/ops/index_add_cuda_dispatch.h>
348
+ #include <ATen/ops/index_copy_cuda_dispatch.h>
349
+ #include <ATen/ops/index_fill_cuda_dispatch.h>
350
+ #include <ATen/ops/index_reduce_cuda_dispatch.h>
351
+ #include <ATen/ops/index_select_cuda_dispatch.h>
352
+ #include <ATen/ops/is_pinned_cuda_dispatch.h>
353
+ #include <ATen/ops/is_set_to_cuda_dispatch.h>
354
+ #include <ATen/ops/isin_cuda_dispatch.h>
355
+ #include <ATen/ops/isnan_cuda_dispatch.h>
356
+ #include <ATen/ops/isneginf_cuda_dispatch.h>
357
+ #include <ATen/ops/isposinf_cuda_dispatch.h>
358
+ #include <ATen/ops/kthvalue_cuda_dispatch.h>
359
+ #include <ATen/ops/lcm_cuda_dispatch.h>
360
+ #include <ATen/ops/le_cuda_dispatch.h>
361
+ #include <ATen/ops/leaky_relu_cuda_dispatch.h>
362
+ #include <ATen/ops/leaky_relu_backward_cuda_dispatch.h>
363
+ #include <ATen/ops/lerp_cuda_dispatch.h>
364
+ #include <ATen/ops/lgamma_cuda_dispatch.h>
365
+ #include <ATen/ops/linalg_cholesky_ex_cuda_dispatch.h>
366
+ #include <ATen/ops/linalg_cross_cuda_dispatch.h>
367
+ #include <ATen/ops/linalg_eig_cuda_dispatch.h>
368
+ #include <ATen/ops/linalg_householder_product_cuda_dispatch.h>
369
+ #include <ATen/ops/linalg_inv_ex_cuda_dispatch.h>
370
+ #include <ATen/ops/linalg_ldl_factor_ex_cuda_dispatch.h>
371
+ #include <ATen/ops/linalg_ldl_solve_cuda_dispatch.h>
372
+ #include <ATen/ops/linalg_lstsq_cuda_dispatch.h>
373
+ #include <ATen/ops/linalg_lu_cuda_dispatch.h>
374
+ #include <ATen/ops/linalg_lu_factor_ex_cuda_dispatch.h>
375
+ #include <ATen/ops/linalg_lu_solve_cuda_dispatch.h>
376
+ #include <ATen/ops/linalg_matrix_exp_cuda_dispatch.h>
377
+ #include <ATen/ops/linalg_qr_cuda_dispatch.h>
378
+ #include <ATen/ops/linalg_solve_triangular_cuda_dispatch.h>
379
+ #include <ATen/ops/linalg_vector_norm_cuda_dispatch.h>
380
+ #include <ATen/ops/linspace_cuda_dispatch.h>
381
+ #include <ATen/ops/log_cuda_dispatch.h>
382
+ #include <ATen/ops/log10_cuda_dispatch.h>
383
+ #include <ATen/ops/log1p_cuda_dispatch.h>
384
+ #include <ATen/ops/log2_cuda_dispatch.h>
385
+ #include <ATen/ops/log_normal_cuda_dispatch.h>
386
+ #include <ATen/ops/log_sigmoid_backward_cuda_dispatch.h>
387
+ #include <ATen/ops/log_sigmoid_forward_cuda_dispatch.h>
388
+ #include <ATen/ops/logaddexp_cuda_dispatch.h>
389
+ #include <ATen/ops/logaddexp2_cuda_dispatch.h>
390
+ #include <ATen/ops/logical_and_cuda_dispatch.h>
391
+ #include <ATen/ops/logical_not_cuda_dispatch.h>
392
+ #include <ATen/ops/logical_or_cuda_dispatch.h>
393
+ #include <ATen/ops/logical_xor_cuda_dispatch.h>
394
+ #include <ATen/ops/logit_cuda_dispatch.h>
395
+ #include <ATen/ops/logit_backward_cuda_dispatch.h>
396
+ #include <ATen/ops/logspace_cuda_dispatch.h>
397
+ #include <ATen/ops/lshift_cuda_dispatch.h>
398
+ #include <ATen/ops/lt_cuda_dispatch.h>
399
+ #include <ATen/ops/lu_unpack_cuda_dispatch.h>
400
+ #include <ATen/ops/masked_fill_cuda_dispatch.h>
401
+ #include <ATen/ops/masked_scatter_cuda_dispatch.h>
402
+ #include <ATen/ops/masked_select_cuda_dispatch.h>
403
+ #include <ATen/ops/max_cuda_dispatch.h>
404
+ #include <ATen/ops/max_pool2d_with_indices_cuda_dispatch.h>
405
+ #include <ATen/ops/max_pool2d_with_indices_backward_cuda_dispatch.h>
406
+ #include <ATen/ops/max_pool3d_with_indices_cuda_dispatch.h>
407
+ #include <ATen/ops/max_pool3d_with_indices_backward_cuda_dispatch.h>
408
+ #include <ATen/ops/max_unpool2d_cuda_dispatch.h>
409
+ #include <ATen/ops/max_unpool3d_cuda_dispatch.h>
410
+ #include <ATen/ops/maximum_cuda_dispatch.h>
411
+ #include <ATen/ops/mean_cuda_dispatch.h>
412
+ #include <ATen/ops/median_cuda_dispatch.h>
413
+ #include <ATen/ops/min_cuda_dispatch.h>
414
+ #include <ATen/ops/minimum_cuda_dispatch.h>
415
+ #include <ATen/ops/miopen_batch_norm_cuda_dispatch.h>
416
+ #include <ATen/ops/miopen_batch_norm_backward_cuda_dispatch.h>
417
+ #include <ATen/ops/miopen_convolution_cuda_dispatch.h>
418
+ #include <ATen/ops/miopen_convolution_add_relu_cuda_dispatch.h>
419
+ #include <ATen/ops/miopen_convolution_relu_cuda_dispatch.h>
420
+ #include <ATen/ops/miopen_convolution_transpose_cuda_dispatch.h>
421
+ #include <ATen/ops/miopen_depthwise_convolution_cuda_dispatch.h>
422
+ #include <ATen/ops/miopen_rnn_cuda_dispatch.h>
423
+ #include <ATen/ops/miopen_rnn_backward_cuda_dispatch.h>
424
+ #include <ATen/ops/mish_cuda_dispatch.h>
425
+ #include <ATen/ops/mish_backward_cuda_dispatch.h>
426
+ #include <ATen/ops/mm_cuda_dispatch.h>
427
+ #include <ATen/ops/mode_cuda_dispatch.h>
428
+ #include <ATen/ops/mse_loss_cuda_dispatch.h>
429
+ #include <ATen/ops/mse_loss_backward_cuda_dispatch.h>
430
+ #include <ATen/ops/mul_cuda_dispatch.h>
431
+ #include <ATen/ops/multi_margin_loss_cuda_dispatch.h>
432
+ #include <ATen/ops/multi_margin_loss_backward_cuda_dispatch.h>
433
+ #include <ATen/ops/multilabel_margin_loss_backward_cuda_dispatch.h>
434
+ #include <ATen/ops/multilabel_margin_loss_forward_cuda_dispatch.h>
435
+ #include <ATen/ops/multinomial_cuda_dispatch.h>
436
+ #include <ATen/ops/mvlgamma_cuda_dispatch.h>
437
+ #include <ATen/ops/nan_to_num_cuda_dispatch.h>
438
+ #include <ATen/ops/nanmedian_cuda_dispatch.h>
439
+ #include <ATen/ops/nansum_cuda_dispatch.h>
440
+ #include <ATen/ops/native_batch_norm_cuda_dispatch.h>
441
+ #include <ATen/ops/native_batch_norm_backward_cuda_dispatch.h>
442
+ #include <ATen/ops/native_dropout_cuda_dispatch.h>
443
+ #include <ATen/ops/native_dropout_backward_cuda_dispatch.h>
444
+ #include <ATen/ops/native_group_norm_cuda_dispatch.h>
445
+ #include <ATen/ops/native_group_norm_backward_cuda_dispatch.h>
446
+ #include <ATen/ops/native_layer_norm_cuda_dispatch.h>
447
+ #include <ATen/ops/native_layer_norm_backward_cuda_dispatch.h>
448
+ #include <ATen/ops/ne_cuda_dispatch.h>
449
+ #include <ATen/ops/neg_cuda_dispatch.h>
450
+ #include <ATen/ops/nextafter_cuda_dispatch.h>
451
+ #include <ATen/ops/nll_loss2d_backward_cuda_dispatch.h>
452
+ #include <ATen/ops/nll_loss2d_forward_cuda_dispatch.h>
453
+ #include <ATen/ops/nll_loss_backward_cuda_dispatch.h>
454
+ #include <ATen/ops/nll_loss_forward_cuda_dispatch.h>
455
+ #include <ATen/ops/nonzero_cuda_dispatch.h>
456
+ #include <ATen/ops/norm_cuda_dispatch.h>
457
+ #include <ATen/ops/normal_cuda_dispatch.h>
458
+ #include <ATen/ops/ormqr_cuda_dispatch.h>
459
+ #include <ATen/ops/poisson_cuda_dispatch.h>
460
+ #include <ATen/ops/polar_cuda_dispatch.h>
461
+ #include <ATen/ops/polygamma_cuda_dispatch.h>
462
+ #include <ATen/ops/pow_cuda_dispatch.h>
463
+ #include <ATen/ops/prod_cuda_dispatch.h>
464
+ #include <ATen/ops/put_cuda_dispatch.h>
465
+ #include <ATen/ops/quantize_per_channel_cuda_dispatch.h>
466
+ #include <ATen/ops/quantize_per_tensor_cuda_dispatch.h>
467
+ #include <ATen/ops/quantize_per_tensor_dynamic_cuda_dispatch.h>
468
+ #include <ATen/ops/random_cuda_dispatch.h>
469
+ #include <ATen/ops/randperm_cuda_dispatch.h>
470
+ #include <ATen/ops/range_cuda_dispatch.h>
471
+ #include <ATen/ops/reciprocal_cuda_dispatch.h>
472
+ #include <ATen/ops/record_stream_cuda_dispatch.h>
473
+ #include <ATen/ops/reflection_pad1d_cuda_dispatch.h>
474
+ #include <ATen/ops/reflection_pad1d_backward_cuda_dispatch.h>
475
+ #include <ATen/ops/reflection_pad2d_cuda_dispatch.h>
476
+ #include <ATen/ops/reflection_pad2d_backward_cuda_dispatch.h>
477
+ #include <ATen/ops/reflection_pad3d_cuda_dispatch.h>
478
+ #include <ATen/ops/reflection_pad3d_backward_cuda_dispatch.h>
479
+ #include <ATen/ops/relu_cuda_dispatch.h>
480
+ #include <ATen/ops/remainder_cuda_dispatch.h>
481
+ #include <ATen/ops/renorm_cuda_dispatch.h>
482
+ #include <ATen/ops/repeat_interleave_cuda_dispatch.h>
483
+ #include <ATen/ops/replication_pad1d_cuda_dispatch.h>
484
+ #include <ATen/ops/replication_pad1d_backward_cuda_dispatch.h>
485
+ #include <ATen/ops/replication_pad2d_cuda_dispatch.h>
486
+ #include <ATen/ops/replication_pad2d_backward_cuda_dispatch.h>
487
+ #include <ATen/ops/replication_pad3d_cuda_dispatch.h>
488
+ #include <ATen/ops/replication_pad3d_backward_cuda_dispatch.h>
489
+ #include <ATen/ops/resize_cuda_dispatch.h>
490
+ #include <ATen/ops/roll_cuda_dispatch.h>
491
+ #include <ATen/ops/round_cuda_dispatch.h>
492
+ #include <ATen/ops/rrelu_with_noise_cuda_dispatch.h>
493
+ #include <ATen/ops/rshift_cuda_dispatch.h>
494
+ #include <ATen/ops/rsqrt_cuda_dispatch.h>
495
+ #include <ATen/ops/rsub_cuda_dispatch.h>
496
+ #include <ATen/ops/scatter_cuda_dispatch.h>
497
+ #include <ATen/ops/scatter_add_cuda_dispatch.h>
498
+ #include <ATen/ops/scatter_reduce_cuda_dispatch.h>
499
+ #include <ATen/ops/searchsorted_cuda_dispatch.h>
500
+ #include <ATen/ops/segment_reduce_cuda_dispatch.h>
501
+ #include <ATen/ops/set_cuda_dispatch.h>
502
+ #include <ATen/ops/sgn_cuda_dispatch.h>
503
+ #include <ATen/ops/sigmoid_cuda_dispatch.h>
504
+ #include <ATen/ops/sigmoid_backward_cuda_dispatch.h>
505
+ #include <ATen/ops/sign_cuda_dispatch.h>
506
+ #include <ATen/ops/signbit_cuda_dispatch.h>
507
+ #include <ATen/ops/silu_cuda_dispatch.h>
508
+ #include <ATen/ops/silu_backward_cuda_dispatch.h>
509
+ #include <ATen/ops/sin_cuda_dispatch.h>
510
+ #include <ATen/ops/sinc_cuda_dispatch.h>
511
+ #include <ATen/ops/sinh_cuda_dispatch.h>
512
+ #include <ATen/ops/slow_conv_dilated2d_cuda_dispatch.h>
513
+ #include <ATen/ops/slow_conv_dilated3d_cuda_dispatch.h>
514
+ #include <ATen/ops/slow_conv_transpose2d_cuda_dispatch.h>
515
+ #include <ATen/ops/slow_conv_transpose3d_cuda_dispatch.h>
516
+ #include <ATen/ops/smooth_l1_loss_cuda_dispatch.h>
517
+ #include <ATen/ops/smooth_l1_loss_backward_cuda_dispatch.h>
518
+ #include <ATen/ops/softplus_cuda_dispatch.h>
519
+ #include <ATen/ops/softplus_backward_cuda_dispatch.h>
520
+ #include <ATen/ops/softshrink_cuda_dispatch.h>
521
+ #include <ATen/ops/softshrink_backward_cuda_dispatch.h>
522
+ #include <ATen/ops/sort_cuda_dispatch.h>
523
+ #include <ATen/ops/sparse_dim_cuda_dispatch.h>
524
+ #include <ATen/ops/special_airy_ai_cuda_dispatch.h>
525
+ #include <ATen/ops/special_bessel_j0_cuda_dispatch.h>
526
+ #include <ATen/ops/special_bessel_j1_cuda_dispatch.h>
527
+ #include <ATen/ops/special_bessel_y0_cuda_dispatch.h>
528
+ #include <ATen/ops/special_bessel_y1_cuda_dispatch.h>
529
+ #include <ATen/ops/special_chebyshev_polynomial_t_cuda_dispatch.h>
530
+ #include <ATen/ops/special_chebyshev_polynomial_u_cuda_dispatch.h>
531
+ #include <ATen/ops/special_chebyshev_polynomial_v_cuda_dispatch.h>
532
+ #include <ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h>
533
+ #include <ATen/ops/special_entr_cuda_dispatch.h>
534
+ #include <ATen/ops/special_erfcx_cuda_dispatch.h>
535
+ #include <ATen/ops/special_hermite_polynomial_h_cuda_dispatch.h>
536
+ #include <ATen/ops/special_hermite_polynomial_he_cuda_dispatch.h>
537
+ #include <ATen/ops/special_i0e_cuda_dispatch.h>
538
+ #include <ATen/ops/special_i1_cuda_dispatch.h>
539
+ #include <ATen/ops/special_i1e_cuda_dispatch.h>
540
+ #include <ATen/ops/special_laguerre_polynomial_l_cuda_dispatch.h>
541
+ #include <ATen/ops/special_legendre_polynomial_p_cuda_dispatch.h>
542
+ #include <ATen/ops/special_log_ndtr_cuda_dispatch.h>
543
+ #include <ATen/ops/special_modified_bessel_i0_cuda_dispatch.h>
544
+ #include <ATen/ops/special_modified_bessel_i1_cuda_dispatch.h>
545
+ #include <ATen/ops/special_modified_bessel_k0_cuda_dispatch.h>
546
+ #include <ATen/ops/special_modified_bessel_k1_cuda_dispatch.h>
547
+ #include <ATen/ops/special_ndtri_cuda_dispatch.h>
548
+ #include <ATen/ops/special_scaled_modified_bessel_k0_cuda_dispatch.h>
549
+ #include <ATen/ops/special_scaled_modified_bessel_k1_cuda_dispatch.h>
550
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t_cuda_dispatch.h>
551
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u_cuda_dispatch.h>
552
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v_cuda_dispatch.h>
553
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w_cuda_dispatch.h>
554
+ #include <ATen/ops/special_spherical_bessel_j0_cuda_dispatch.h>
555
+ #include <ATen/ops/special_xlog1py_cuda_dispatch.h>
556
+ #include <ATen/ops/special_zeta_cuda_dispatch.h>
557
+ #include <ATen/ops/sqrt_cuda_dispatch.h>
558
+ #include <ATen/ops/sspaddmm_cuda_dispatch.h>
559
+ #include <ATen/ops/std_cuda_dispatch.h>
560
+ #include <ATen/ops/std_mean_cuda_dispatch.h>
561
+ #include <ATen/ops/sub_cuda_dispatch.h>
562
+ #include <ATen/ops/sum_cuda_dispatch.h>
563
+ #include <ATen/ops/take_cuda_dispatch.h>
564
+ #include <ATen/ops/tan_cuda_dispatch.h>
565
+ #include <ATen/ops/tanh_cuda_dispatch.h>
566
+ #include <ATen/ops/tanh_backward_cuda_dispatch.h>
567
+ #include <ATen/ops/threshold_cuda_dispatch.h>
568
+ #include <ATen/ops/threshold_backward_cuda_dispatch.h>
569
+ #include <ATen/ops/topk_cuda_dispatch.h>
570
+ #include <ATen/ops/trace_cuda_dispatch.h>
571
+ #include <ATen/ops/triangular_solve_cuda_dispatch.h>
572
+ #include <ATen/ops/tril_cuda_dispatch.h>
573
+ #include <ATen/ops/tril_indices_cuda_dispatch.h>
574
+ #include <ATen/ops/triu_cuda_dispatch.h>
575
+ #include <ATen/ops/triu_indices_cuda_dispatch.h>
576
+ #include <ATen/ops/trunc_cuda_dispatch.h>
577
+ #include <ATen/ops/unfold_cuda_dispatch.h>
578
+ #include <ATen/ops/unfold_backward_cuda_dispatch.h>
579
+ #include <ATen/ops/uniform_cuda_dispatch.h>
580
+ #include <ATen/ops/unique_consecutive_cuda_dispatch.h>
581
+ #include <ATen/ops/unique_dim_cuda_dispatch.h>
582
+ #include <ATen/ops/unique_dim_consecutive_cuda_dispatch.h>
583
+ #include <ATen/ops/upsample_bicubic2d_cuda_dispatch.h>
584
+ #include <ATen/ops/upsample_bicubic2d_backward_cuda_dispatch.h>
585
+ #include <ATen/ops/upsample_bilinear2d_cuda_dispatch.h>
586
+ #include <ATen/ops/upsample_bilinear2d_backward_cuda_dispatch.h>
587
+ #include <ATen/ops/upsample_linear1d_cuda_dispatch.h>
588
+ #include <ATen/ops/upsample_linear1d_backward_cuda_dispatch.h>
589
+ #include <ATen/ops/upsample_nearest1d_cuda_dispatch.h>
590
+ #include <ATen/ops/upsample_nearest1d_backward_cuda_dispatch.h>
591
+ #include <ATen/ops/upsample_nearest2d_cuda_dispatch.h>
592
+ #include <ATen/ops/upsample_nearest2d_backward_cuda_dispatch.h>
593
+ #include <ATen/ops/upsample_nearest3d_cuda_dispatch.h>
594
+ #include <ATen/ops/upsample_nearest3d_backward_cuda_dispatch.h>
595
+ #include <ATen/ops/upsample_trilinear3d_cuda_dispatch.h>
596
+ #include <ATen/ops/upsample_trilinear3d_backward_cuda_dispatch.h>
597
+ #include <ATen/ops/var_cuda_dispatch.h>
598
+ #include <ATen/ops/var_mean_cuda_dispatch.h>
599
+ #include <ATen/ops/vdot_cuda_dispatch.h>
600
+ #include <ATen/ops/view_cuda_dispatch.h>
601
+ #include <ATen/ops/view_as_complex_cuda_dispatch.h>
602
+ #include <ATen/ops/view_as_real_cuda_dispatch.h>
603
+ #include <ATen/ops/where_cuda_dispatch.h>
604
+ #include <ATen/ops/xlogy_cuda_dispatch.h>
605
+ #include <ATen/ops/zero_cuda_dispatch.h>
606
+
607
+
608
+
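The headers above are generated one per operator so that a translation unit can depend on exactly the CUDA kernels it calls rather than on the whole ATen operator surface. A minimal sketch of how such a header might be consumed follows; it assumes the generated add_cuda_dispatch.h declares the op as at::cuda::add(self, other, alpha), mirroring the per-backend CPU headers, so treat the exact namespace and signature as an assumption rather than something this commit shows.

// Sketch only: calling one CUDA kernel through its per-operator header.
// Assumption: the generated header declares at::cuda::add(self, other, alpha).
#include <ATen/core/Tensor.h>
#include <ATen/ops/add_cuda_dispatch.h>

at::Tensor add_on_gpu(const at::Tensor& a, const at::Tensor& b) {
  // Invokes the CUDA kernel directly, bypassing dispatcher overhead;
  // both tensors must already reside on a CUDA device.
  return at::cuda::add(a, b, /*alpha=*/1);
}

Including the narrow header keeps rebuilds cheap: only files that name the operator recompile when its schema changes, which is the point of the per-operator split.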
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CollapseDims.h ADDED
@@ -0,0 +1,94 @@
+ #include <c10/util/Exception.h>
+ #include <utility>
+
+ namespace at {
+
+ /*
+ [collapse dims] Updates sizes and strides to reflect a "collapse" of
+ the info, possibly excluding the optional excludeDim. A "collapsed" version
+ of the info is the fewest dims that order the tensor's elements in the same
+ way as the original info. If excludeDim is specified, the collapse is the
+ fewest dims that order the tensor's elements as the original and preserve the
+ excluded dimension, unless the tensor collapses to a point.
+
+ This function returns a pair of values.
+
+ 1) The (new) index of the preserved dimension if excludeDim is
+ specified. 0 if the tensor is collapsed to a point. -1
+ otherwise.
+
+ 2) The new number of dimensions.
+ */
+ template <typename T>
+ inline std::pair<int64_t, int64_t> collapse_dims(
+     T* sizes,
+     T* strides,
+     int64_t dims,
+     const int excludeDim = -1) {
+   TORCH_CHECK(
+       excludeDim >= -1 && excludeDim < dims,
+       "expected excluded dim between -1 and dims - 1");
+
+   int64_t stopDim = (excludeDim == -1) ? dims : excludeDim;
+   int64_t newIndex = -1;
+   int64_t oldIndex = 0;
+   int64_t remappedExcludedDim = -1;
+
+   while (oldIndex < dims) {
+     // Finds a dimension to collapse into
+     for (; oldIndex < stopDim; ++oldIndex) {
+       if (sizes[oldIndex] == 1) {
+         continue;
+       }
+
+       ++newIndex;
+       sizes[newIndex] = sizes[oldIndex];
+       strides[newIndex] = strides[oldIndex];
+       ++oldIndex;
+       break;
+     }
+
+     // Collapses dims
+     for (; oldIndex < stopDim; ++oldIndex) {
+       if (sizes[oldIndex] == 1) {
+         continue;
+       }
+
+       if (strides[newIndex] == sizes[oldIndex] * strides[oldIndex]) {
+         sizes[newIndex] *= sizes[oldIndex];
+         strides[newIndex] = strides[oldIndex];
+       } else {
+         ++newIndex;
+         sizes[newIndex] = sizes[oldIndex];
+         strides[newIndex] = strides[oldIndex];
+       }
+     }
+
+     // Handles excludeDim being set (oldIndex == excludeDim)
+     if (oldIndex != dims) {
+       // Preserves excluded dimension
+       ++newIndex;
+       sizes[newIndex] = sizes[oldIndex];
+       strides[newIndex] = strides[oldIndex];
+       remappedExcludedDim = newIndex;
+
+       // Restarts iteration after excludeDim
+       ++oldIndex;
+       stopDim = dims;
+     }
+   }
+
+   // Handles special case of all dims size 1
+   if (newIndex == -1 || (newIndex == 0 && sizes[0] == 1)) {
+     dims = 1;
+     sizes[0] = 1;
+     strides[0] = 1;
+
+     return std::pair<int64_t, int64_t>(0, 1);
+   }
+
+   dims = newIndex + 1;
+   return std::pair<int64_t, int64_t>(remappedExcludedDim, dims);
+ }
+
+ } // namespace at
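To make the collapse rule concrete: scanning left to right, dim oldIndex folds into the current output dim whenever strides[newIndex] == sizes[oldIndex] * strides[oldIndex], so a fully contiguous tensor collapses to a single dimension. A minimal sketch (illustrative, not part of the commit; it assumes only that this header is on the include path):

#include <ATen/CollapseDims.h>
#include <cstdint>
#include <iostream>

int main() {
  // Contiguous 2x3x4 layout: stride[i] == size[i+1] * stride[i+1]
  // holds for every adjacent pair, so all three dims merge into one.
  int64_t sizes[] = {2, 3, 4};
  int64_t strides[] = {12, 4, 1};
  auto result = at::collapse_dims(sizes, strides, /*dims=*/3);
  // Now sizes[0] == 24 and strides[0] == 1; result.second == 1 is the
  // new dim count, and result.first == -1 because no excludeDim was given.
  std::cout << result.first << " " << result.second << "\n";
}

Passing an excludeDim instead keeps that dimension un-merged and reports its remapped position in result.first.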
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h ADDED
@@ -0,0 +1,533 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+ #error This change adds a dependency on all pytorch operators, meaning the \
+ file will need to be re-compiled every time an operator is changed or added. \
+ Consider including a specific operator from \
+ <ATen/ops/{my_operator}_compositeexplicitautograd_dispatch.h>. \
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+ #endif
+
+ #include <ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_adaptive_avg_pool2d_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_adaptive_avg_pool3d_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_adaptive_avg_pool3d_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_add_relu_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_aminmax_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_amp_update_scale_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_cdist_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_cdist_forward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_cholesky_solve_helper_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_coalesce_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_coalesced_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_conj_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_conj_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_convolution_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_copy_from_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_copy_from_and_resize_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_ctc_loss_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_ctc_loss_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_cudnn_ctc_loss_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_cudnn_init_dropout_state_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_cudnn_rnn_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_cudnn_rnn_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_cudnn_rnn_flatten_weight_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_efficientzerotensor_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_embedding_bag_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_embedding_bag_dense_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_embedding_bag_forward_only_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_embedding_bag_per_sample_weights_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_empty_affine_quantized_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_empty_per_channel_affine_quantized_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_euclidean_dist_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foobar_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_abs_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_acos_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_add_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_addcdiv_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_addcmul_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_asin_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_atan_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_ceil_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_clamp_max_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_clamp_min_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_cos_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_cosh_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_div_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_erf_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_erfc_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_expm1_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_floor_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_frac_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_lerp_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_lgamma_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_log_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_log10_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_log1p_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_log2_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_maximum_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_minimum_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_mul_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_neg_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_norm_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_pow_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_reciprocal_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_round_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_sigmoid_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_sign_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_sin_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_sinh_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_sqrt_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_sub_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_tan_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_tanh_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_trunc_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_foreach_zero_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_functional_sym_constrain_range_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_functional_sym_constrain_range_for_size_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_fused_adam_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_fused_adamw_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_fused_dropout_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_fused_moving_avg_obs_fq_helper_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_fw_primal_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_grid_sampler_2d_cpu_fallback_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_has_same_storage_numel_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_histogramdd_bin_edges_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_histogramdd_from_bin_cts_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_histogramdd_from_bin_tensors_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_index_put_impl_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_indices_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_is_all_true_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_is_any_true_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_linalg_check_errors_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_lstm_mps_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_make_dual_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_make_dual_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_make_per_channel_quantized_tensor_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_make_per_tensor_quantized_tensor_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_masked_scale_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_masked_softmax_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_masked_softmax_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_mkldnn_reshape_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_mkldnn_transpose_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_mps_convolution_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_mps_convolution_transpose_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_native_batch_norm_legit_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_native_batch_norm_legit_no_training_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_native_multi_head_attention_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_neg_view_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_neg_view_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_nested_from_padded_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_nested_from_padded_and_nested_example_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_nested_tensor_from_mask_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_nested_tensor_from_tensor_list_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_nested_tensor_size_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_nested_tensor_storage_offsets_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_nested_tensor_strides_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_new_zeros_with_same_feature_meta_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_nnpack_spatial_convolution_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_pack_padded_sequence_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_pdist_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_pdist_forward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_pin_memory_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_reshape_alias_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_reshape_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_resize_output_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sample_dirichlet_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_segment_reduce_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_slow_conv2d_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_addmm_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_csr_prod_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_csr_sum_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_log_softmax_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_mask_projection_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_softmax_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_softmax_backward_data_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_sparse_matmul_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_sum_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_sum_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_spdiags_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_stack_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_standard_gamma_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_standard_gamma_grad_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_autograd_multiple_dispatch_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_functorch_fallback_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_optional_filled_intlist_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_optional_floatlist_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_optional_intlist_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_warn_in_autograd_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_thnn_fused_gru_cell_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_thnn_fused_gru_cell_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_thnn_fused_lstm_cell_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_to_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_to_dense_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_to_sparse_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_to_sparse_bsc_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_to_sparse_bsr_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_to_sparse_csc_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_to_sparse_csr_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_transform_bias_rescale_qkv_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_transformer_encoder_layer_fwd_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_trilinear_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_triton_multi_head_attention_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_triton_scaled_dot_attention_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_unique_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_unique2_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_unsafe_index_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_unsafe_index_put_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_unsafe_view_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_values_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_weight_norm_interface_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/_weight_norm_interface_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/abs_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/add_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/addr_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/affine_grid_generator_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/alias_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/alias_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/all_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/allclose_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/any_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/arange_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/argsort_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/as_strided_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/as_strided_scatter_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/bartlett_window_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/batch_norm_backward_elemt_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/batch_norm_backward_reduce_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/batch_norm_gather_stats_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/batch_norm_gather_stats_with_counts_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/batch_norm_stats_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/batch_norm_update_stats_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/bernoulli_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/binary_cross_entropy_with_logits_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/bincount_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/binomial_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/bitwise_and_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/bitwise_left_shift_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/bitwise_or_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/bitwise_right_shift_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/bitwise_xor_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/blackman_window_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/block_diag_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/bucketize_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cauchy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/ccol_indices_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/ccol_indices_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/celu_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/channel_shuffle_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cholesky_solve_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/clone_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/col_indices_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/col_indices_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/complex_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/conj_physical_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/constant_pad_nd_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/conv_depthwise3d_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/conv_tbc_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/convolution_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/convolution_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/convolution_backward_overrideable_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/convolution_overrideable_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/copy_sparse_to_sparse_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/copysign_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/count_nonzero_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/crow_indices_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/crow_indices_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cudnn_affine_grid_generator_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cudnn_batch_norm_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cudnn_batch_norm_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cudnn_convolution_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cudnn_convolution_add_relu_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cudnn_convolution_relu_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cudnn_convolution_transpose_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cudnn_grid_sampler_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cudnn_grid_sampler_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cummax_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/cummin_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/deg2rad_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/dequantize_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/detach_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/detach_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/diag_embed_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/diagonal_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/diagonal_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/diagonal_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/diagonal_scatter_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/dist_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/div_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/dot_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/embedding_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/embedding_dense_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/embedding_renorm_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/empty_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/empty_like_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/empty_permuted_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/empty_quantized_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/empty_strided_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/expand_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/expand_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/exponential_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/eye_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_fftfreq_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_rfftfreq_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/fill_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/flip_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/floor_divide_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/fmod_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/frexp_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/from_file_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/full_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/full_like_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/geometric_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/glu_backward_jvp_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/glu_jvp_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/grid_sampler_2d_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/grid_sampler_2d_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/grid_sampler_3d_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/grid_sampler_3d_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/hann_window_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/hardswish_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/huber_loss_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/index_fill_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/index_put_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/indices_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/indices_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/int_repr_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/is_coalesced_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/is_pinned_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/isinf_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/isnan_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/kaiser_window_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/kthvalue_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/lift_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/lift_fresh_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/lift_fresh_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/linalg_lstsq_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/linalg_pinv_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/linear_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/linear_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/linspace_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/log_normal_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/log_softmax_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/logical_and_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/logical_not_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/logical_or_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/logspace_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/logsumexp_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/lshift_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/lstm_mps_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/masked_fill_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/masked_scatter_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/matmul_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/max_pool2d_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mean_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/median_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/miopen_batch_norm_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/miopen_convolution_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/miopen_convolution_transpose_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/miopen_depthwise_convolution_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/miopen_rnn_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/miopen_rnn_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_convolution_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_linear_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_linear_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_linear_backward_input_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_max_pool2d_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_max_pool2d_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_max_pool3d_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_max_pool3d_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_reorder_conv2d_weight_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_reorder_conv3d_weight_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_rnn_layer_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mkldnn_rnn_layer_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mode_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mps_convolution_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mps_convolution_transpose_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mul_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mv_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/mvlgamma_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/nan_to_num_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/nanmedian_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/native_batch_norm_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/native_dropout_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/native_dropout_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/native_group_norm_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/native_group_norm_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/native_layer_norm_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/native_layer_norm_backward_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/native_norm_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/new_empty_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/new_empty_strided_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/new_full_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/new_ones_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/new_zeros_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/norm_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/normal_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/ones_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/ones_like_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/permute_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/pixel_shuffle_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/pixel_unshuffle_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/poisson_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/polar_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/polygamma_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/prod_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/put_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/q_per_channel_scales_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/q_per_channel_zero_points_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/quantize_per_channel_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/quantize_per_tensor_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/quantize_per_tensor_dynamic_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/quantized_batch_norm_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/quantized_max_pool1d_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/quantized_max_pool3d_compositeexplicitautograd_dispatch.h>
+ #include <ATen/ops/rad2deg_compositeexplicitautograd_dispatch.h>
426
+ #include <ATen/ops/rand_compositeexplicitautograd_dispatch.h>
427
+ #include <ATen/ops/rand_like_compositeexplicitautograd_dispatch.h>
428
+ #include <ATen/ops/randint_compositeexplicitautograd_dispatch.h>
429
+ #include <ATen/ops/randint_like_compositeexplicitautograd_dispatch.h>
430
+ #include <ATen/ops/randn_compositeexplicitautograd_dispatch.h>
431
+ #include <ATen/ops/randn_like_compositeexplicitautograd_dispatch.h>
432
+ #include <ATen/ops/random_compositeexplicitautograd_dispatch.h>
433
+ #include <ATen/ops/randperm_compositeexplicitautograd_dispatch.h>
434
+ #include <ATen/ops/range_compositeexplicitautograd_dispatch.h>
435
+ #include <ATen/ops/relu_compositeexplicitautograd_dispatch.h>
436
+ #include <ATen/ops/remainder_compositeexplicitautograd_dispatch.h>
437
+ #include <ATen/ops/repeat_compositeexplicitautograd_dispatch.h>
438
+ #include <ATen/ops/repeat_interleave_compositeexplicitautograd_dispatch.h>
439
+ #include <ATen/ops/resize_compositeexplicitautograd_dispatch.h>
440
+ #include <ATen/ops/resize_as_compositeexplicitautograd_dispatch.h>
441
+ #include <ATen/ops/resize_as_sparse_compositeexplicitautograd_dispatch.h>
442
+ #include <ATen/ops/roll_compositeexplicitautograd_dispatch.h>
443
+ #include <ATen/ops/rot90_compositeexplicitautograd_dispatch.h>
444
+ #include <ATen/ops/row_indices_compositeexplicitautograd_dispatch.h>
445
+ #include <ATen/ops/row_indices_copy_compositeexplicitautograd_dispatch.h>
446
+ #include <ATen/ops/rrelu_with_noise_backward_compositeexplicitautograd_dispatch.h>
447
+ #include <ATen/ops/rshift_compositeexplicitautograd_dispatch.h>
448
+ #include <ATen/ops/rsub_compositeexplicitautograd_dispatch.h>
449
+ #include <ATen/ops/scalar_tensor_compositeexplicitautograd_dispatch.h>
450
+ #include <ATen/ops/segment_reduce_compositeexplicitautograd_dispatch.h>
451
+ #include <ATen/ops/select_compositeexplicitautograd_dispatch.h>
452
+ #include <ATen/ops/select_backward_compositeexplicitautograd_dispatch.h>
453
+ #include <ATen/ops/select_copy_compositeexplicitautograd_dispatch.h>
454
+ #include <ATen/ops/select_scatter_compositeexplicitautograd_dispatch.h>
455
+ #include <ATen/ops/set_compositeexplicitautograd_dispatch.h>
456
+ #include <ATen/ops/slice_compositeexplicitautograd_dispatch.h>
457
+ #include <ATen/ops/slice_backward_compositeexplicitautograd_dispatch.h>
458
+ #include <ATen/ops/slice_copy_compositeexplicitautograd_dispatch.h>
459
+ #include <ATen/ops/slice_scatter_compositeexplicitautograd_dispatch.h>
460
+ #include <ATen/ops/slow_conv_dilated2d_compositeexplicitautograd_dispatch.h>
461
+ #include <ATen/ops/slow_conv_dilated3d_compositeexplicitautograd_dispatch.h>
462
+ #include <ATen/ops/smooth_l1_loss_backward_compositeexplicitautograd_dispatch.h>
463
+ #include <ATen/ops/soft_margin_loss_compositeexplicitautograd_dispatch.h>
464
+ #include <ATen/ops/soft_margin_loss_backward_compositeexplicitautograd_dispatch.h>
465
+ #include <ATen/ops/softmax_compositeexplicitautograd_dispatch.h>
466
+ #include <ATen/ops/sort_compositeexplicitautograd_dispatch.h>
467
+ #include <ATen/ops/sparse_compressed_tensor_compositeexplicitautograd_dispatch.h>
468
+ #include <ATen/ops/sparse_coo_tensor_compositeexplicitautograd_dispatch.h>
469
+ #include <ATen/ops/sparse_mask_compositeexplicitautograd_dispatch.h>
470
+ #include <ATen/ops/sparse_resize_compositeexplicitautograd_dispatch.h>
471
+ #include <ATen/ops/sparse_resize_and_clear_compositeexplicitautograd_dispatch.h>
472
+ #include <ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h>
473
+ #include <ATen/ops/special_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h>
474
+ #include <ATen/ops/special_chebyshev_polynomial_v_compositeexplicitautograd_dispatch.h>
475
+ #include <ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautograd_dispatch.h>
476
+ #include <ATen/ops/special_hermite_polynomial_h_compositeexplicitautograd_dispatch.h>
477
+ #include <ATen/ops/special_hermite_polynomial_he_compositeexplicitautograd_dispatch.h>
478
+ #include <ATen/ops/special_laguerre_polynomial_l_compositeexplicitautograd_dispatch.h>
479
+ #include <ATen/ops/special_legendre_polynomial_p_compositeexplicitautograd_dispatch.h>
480
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h>
481
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h>
482
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautograd_dispatch.h>
483
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w_compositeexplicitautograd_dispatch.h>
484
+ #include <ATen/ops/special_xlog1py_compositeexplicitautograd_dispatch.h>
485
+ #include <ATen/ops/special_zeta_compositeexplicitautograd_dispatch.h>
486
+ #include <ATen/ops/split_compositeexplicitautograd_dispatch.h>
487
+ #include <ATen/ops/split_copy_compositeexplicitautograd_dispatch.h>
488
+ #include <ATen/ops/split_with_sizes_compositeexplicitautograd_dispatch.h>
489
+ #include <ATen/ops/split_with_sizes_copy_compositeexplicitautograd_dispatch.h>
490
+ #include <ATen/ops/squeeze_compositeexplicitautograd_dispatch.h>
491
+ #include <ATen/ops/squeeze_copy_compositeexplicitautograd_dispatch.h>
492
+ #include <ATen/ops/stack_compositeexplicitautograd_dispatch.h>
493
+ #include <ATen/ops/std_mean_compositeexplicitautograd_dispatch.h>
494
+ #include <ATen/ops/sub_compositeexplicitautograd_dispatch.h>
495
+ #include <ATen/ops/sum_compositeexplicitautograd_dispatch.h>
496
+ #include <ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h>
497
+ #include <ATen/ops/sym_constrain_range_for_size_compositeexplicitautograd_dispatch.h>
498
+ #include <ATen/ops/t_compositeexplicitautograd_dispatch.h>
499
+ #include <ATen/ops/t_copy_compositeexplicitautograd_dispatch.h>
500
+ #include <ATen/ops/to_mkldnn_compositeexplicitautograd_dispatch.h>
501
+ #include <ATen/ops/to_padded_tensor_compositeexplicitautograd_dispatch.h>
502
+ #include <ATen/ops/trace_compositeexplicitautograd_dispatch.h>
503
+ #include <ATen/ops/transpose_compositeexplicitautograd_dispatch.h>
504
+ #include <ATen/ops/transpose_copy_compositeexplicitautograd_dispatch.h>
505
+ #include <ATen/ops/tril_indices_compositeexplicitautograd_dispatch.h>
506
+ #include <ATen/ops/triu_indices_compositeexplicitautograd_dispatch.h>
507
+ #include <ATen/ops/unbind_compositeexplicitautograd_dispatch.h>
508
+ #include <ATen/ops/unbind_copy_compositeexplicitautograd_dispatch.h>
509
+ #include <ATen/ops/unfold_backward_compositeexplicitautograd_dispatch.h>
510
+ #include <ATen/ops/unfold_copy_compositeexplicitautograd_dispatch.h>
511
+ #include <ATen/ops/uniform_compositeexplicitautograd_dispatch.h>
512
+ #include <ATen/ops/unique_consecutive_compositeexplicitautograd_dispatch.h>
513
+ #include <ATen/ops/unique_dim_compositeexplicitautograd_dispatch.h>
514
+ #include <ATen/ops/unique_dim_consecutive_compositeexplicitautograd_dispatch.h>
515
+ #include <ATen/ops/unsafe_split_compositeexplicitautograd_dispatch.h>
516
+ #include <ATen/ops/unsafe_split_with_sizes_compositeexplicitautograd_dispatch.h>
517
+ #include <ATen/ops/unsqueeze_compositeexplicitautograd_dispatch.h>
518
+ #include <ATen/ops/unsqueeze_copy_compositeexplicitautograd_dispatch.h>
519
+ #include <ATen/ops/values_compositeexplicitautograd_dispatch.h>
520
+ #include <ATen/ops/values_copy_compositeexplicitautograd_dispatch.h>
521
+ #include <ATen/ops/var_mean_compositeexplicitautograd_dispatch.h>
522
+ #include <ATen/ops/vdot_compositeexplicitautograd_dispatch.h>
523
+ #include <ATen/ops/view_compositeexplicitautograd_dispatch.h>
524
+ #include <ATen/ops/view_as_complex_copy_compositeexplicitautograd_dispatch.h>
525
+ #include <ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h>
526
+ #include <ATen/ops/view_copy_compositeexplicitautograd_dispatch.h>
527
+ #include <ATen/ops/xlogy_compositeexplicitautograd_dispatch.h>
528
+ #include <ATen/ops/zero_compositeexplicitautograd_dispatch.h>
529
+ #include <ATen/ops/zeros_compositeexplicitautograd_dispatch.h>
530
+ #include <ATen/ops/zeros_like_compositeexplicitautograd_dispatch.h>
531
+
532
+
533
+
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h ADDED
@@ -0,0 +1,29 @@
+ #include <ATen/core/TensorBody.h>
+
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
+ //
+ // Note [Avoiding Include Cycles In Static Dispatch]
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+ //
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+ // directly inlined into TensorBody.h.
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+ // which include functions that have defaultable optional<Tensor> arguments.
+ // That requires knowing the full Tensor class definition.
+ //
+ // We break the cycle by doing the following:
+ // - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+ // - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h.
+ // - CPUFunctions_inl.h includes everything else.
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+ // and then it includes CPUFunctions_inl.h.
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+ // - This also means that in the static dispatch build, CPUFunctions.h only needs to
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+ #include <ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h>
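The dummy-header trick described in the note above is a general C++ layering pattern: finish defining the class first, then pull in declarations whose defaulted arguments need the complete type. A minimal, self-contained sketch of that pattern (Tensor and add here are hypothetical stand-ins, not the real ATen declarations):

#include <optional>

// Stand-in for TensorBody.h: the complete class definition comes first.
struct Tensor {
  int v = 0;
};

// Stand-in for CPUFunctions_inl.h: the defaulted std::optional<Tensor>
// argument needs Tensor to be a complete type at this point, which is
// why the _inl.h half must be included only after the class body.
inline Tensor add(const Tensor& a, std::optional<Tensor> b = std::nullopt) {
  return Tensor{a.v + (b ? b->v : 0)};
}

int main() {
  Tensor t{1};
  return add(t).v == 1 ? 0 : 1;  // sanity check: the defaulted optional works
}

The generated headers arrange their includes so Tensor is always complete before such declarations are parsed; that ordering is exactly what the dummy file pins down.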
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h ADDED
@@ -0,0 +1,321 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+ #error This change adds a dependency on all pytorch operators, meaning the \
+ file will need to be re-compiled every time an operator is changed or added. \
+ Consider including a specific operator from \
+ <ATen/ops/{my_operator}_compositeexplicitautogradnonfunctional_dispatch.h>. \
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+ #endif
+
+ #include <ATen/ops/_addmm_activation_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_conj_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_convert_indices_from_coo_to_csr_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_convert_indices_from_csr_to_coo_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_fw_primal_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_linalg_det_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_linalg_eigh_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_linalg_slogdet_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_linalg_solve_ex_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_linalg_svd_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_log_softmax_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_log_softmax_backward_data_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_make_dual_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_neg_view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_reshape_alias_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_softmax_backward_data_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_trilinear_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_upsample_bicubic2d_aa_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_upsample_bicubic2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_upsample_bilinear2d_aa_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact1d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact3d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/_values_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/acos_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/add_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/addcmul_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/addmm_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/addmv_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/alias_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/all_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/amax_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/amin_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/aminmax_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/any_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/argmax_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/argmin_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/as_strided_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/as_strided_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/as_strided_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/asin_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/asinh_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/atan_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/atan2_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/atanh_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/avg_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/avg_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/avg_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/avg_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/baddbmm_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/bernoulli_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/bitwise_and_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/bitwise_left_shift_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/bitwise_not_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/bitwise_or_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/bitwise_right_shift_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/bitwise_xor_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/bmm_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/cat_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/ccol_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/ceil_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/clamp_max_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/clamp_min_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/copysign_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/cos_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/cosh_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/crow_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/cumprod_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/cumsum_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/detach_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/diag_embed_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/diagonal_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/diagonal_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/digamma_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/div_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/elu_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/elu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/eq_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/erf_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/erfc_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/erfinv_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/exp_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/exp2_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/expand_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/expm1_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/floor_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/fmax_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/fmin_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/frac_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/fractional_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/fractional_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/gcd_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/ge_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/gelu_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/glu_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/hardshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/heaviside_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/igamma_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/index_reduce_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/isin_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/isneginf_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/isposinf_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/lcm_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/leaky_relu_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/leaky_relu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/lerp_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/lgamma_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/linalg_cross_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/linalg_inv_ex_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/linalg_ldl_solve_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/linalg_pinv_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/linalg_vector_norm_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/log_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/log10_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/log2_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/logaddexp_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/logaddexp2_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/logsumexp_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/lt_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/lu_unpack_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/max_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/max_pool2d_with_indices_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/maximum_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/mean_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/min_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/minimum_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/mish_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/mm_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/mse_loss_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/mul_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/narrow_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/ne_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/neg_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/new_empty_strided_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/nextafter_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/nll_loss_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/nll_loss_forward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/norm_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/permute_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/pixel_shuffle_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/pixel_unshuffle_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/polygamma_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/pow_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/prod_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/reciprocal_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/reflection_pad1d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/reflection_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/reflection_pad3d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/reflection_pad3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/remainder_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/renorm_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/replication_pad1d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/replication_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/replication_pad2d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/replication_pad3d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/round_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/row_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/rsqrt_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/scatter_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/scatter_add_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/scatter_reduce_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/select_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/select_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/select_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/sgn_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/sigmoid_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/sigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/sign_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/signbit_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/silu_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/silu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/sin_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/sinc_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/sinh_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/slice_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/slice_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/slow_conv_transpose2d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/smooth_l1_loss_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/softplus_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/softplus_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/softshrink_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/softshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/sort_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_airy_ai_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_bessel_j1_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_bessel_y0_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_bessel_y1_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_entr_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_erfcx_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_hermite_polynomial_h_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_hermite_polynomial_he_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_i0e_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_i1e_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_laguerre_polynomial_l_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_legendre_polynomial_p_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_modified_bessel_i0_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_modified_bessel_i1_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_scaled_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_scaled_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_spherical_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_xlog1py_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/special_zeta_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/split_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/split_with_sizes_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/sqrt_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/squeeze_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/sub_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/sum_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/t_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/tan_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/tanh_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/tanh_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/threshold_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/threshold_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/topk_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/transpose_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/triangular_solve_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/tril_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/triu_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/trunc_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/unbind_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/unfold_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/unsqueeze_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_bicubic2d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_bicubic2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_bilinear2d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_bilinear2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_linear1d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_linear1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_nearest1d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_nearest1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_nearest2d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_nearest2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_nearest3d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_nearest3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_trilinear3d_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/upsample_trilinear3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/values_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/view_as_complex_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/view_as_real_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
+ #include <ATen/ops/xlogy_compositeexplicitautogradnonfunctional_dispatch.h>
+
+
+
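The #error guard at the top of this generated header enforces the workflow its message describes: a translation unit that defines TORCH_ASSERT_ONLY_METHOD_OPERATORS is expected to include individual per-operator headers rather than this umbrella file, so it only recompiles when the operators it actually uses change. A hedged sketch of such a consumer (add_direct is a hypothetical helper; the call assumes the per-operator header declares the conventional dispatch-key namespace function at::compositeexplicitautogradnonfunctional::add, as these generated headers normally do):

#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/ops/add_compositeexplicitautogradnonfunctional_dispatch.h>

// Hypothetical helper: calls the kernel registered for this dispatch key
// through the generated namespaced fastpath function.
at::Tensor add_direct(const at::Tensor& a, const at::Tensor& b) {
  return at::compositeexplicitautogradnonfunctional::add(a, b);
}

Including the umbrella _inl.h header from the same file would trip the #if guard and fail the build, which is the intended nudge toward per-operator includes.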
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions_inl.h ADDED
@@ -0,0 +1,500 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+ #error This change adds a dependency on all pytorch operators, meaning the \
+ file will need to be re-compiled every time an operator is changed or added. \
+ Consider including a specific operator from \
+ <ATen/ops/{my_operator}_compositeimplicitautograd_dispatch.h>. \
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+ #endif
+
+ #include <ATen/ops/_add_batch_dim_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_assert_tensor_metadata_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_autocast_to_full_precision_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_autocast_to_reduced_precision_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_batch_norm_impl_index_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_batch_norm_impl_index_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_cast_Byte_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_cast_Char_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_cast_Double_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_cast_Half_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_cast_Int_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_cast_Long_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_cast_Short_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_choose_qparams_per_tensor_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_convolution_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_convolution_double_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_convolution_mode_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_cufft_clear_plan_cache_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_cufft_get_plan_cache_max_size_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_cufft_get_plan_cache_size_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_cufft_set_plan_cache_max_size_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_debug_has_internal_overlap_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_dim_arange_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_embedding_bag_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_embedding_bag_sparse_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_gather_sparse_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_has_compatible_shallow_copy_type_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_is_zerotensor_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_lu_with_info_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_nnpack_available_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_pack_padded_sequence_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_pad_circular_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_pad_enum_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_pad_packed_sequence_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_propagate_xla_data_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_remove_batch_dim_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_reshape_from_tensor_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_rowwise_prune_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_saturate_weight_to_fp16_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_shape_as_tensor_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sobol_engine_draw_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sobol_engine_ff_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sobol_engine_initialize_state_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sobol_engine_scramble_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_bsc_tensor_unsafe_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_bsr_tensor_unsafe_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_compressed_tensor_unsafe_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_coo_tensor_unsafe_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_csc_tensor_unsafe_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_csr_tensor_unsafe_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_log_softmax_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_mm_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_softmax_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_sparse_sum_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_ambiguous_defaults_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_autograd_multiple_dispatch_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_check_tensor_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_serialization_subcmul_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_test_string_default_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_thnn_differentiable_gru_cell_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_thnn_differentiable_lstm_cell_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_thnn_fused_lstm_cell_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_to_cpu_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_unpack_dual_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_upsample_bicubic2d_aa_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_upsample_bilinear2d_aa_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact1d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact2d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact3d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_use_cudnn_rnn_flatten_weight_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_validate_sparse_bsc_tensor_args_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_validate_sparse_bsr_tensor_args_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_validate_sparse_compressed_tensor_args_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_validate_sparse_coo_tensor_args_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_validate_sparse_csc_tensor_args_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_validate_sparse_csr_tensor_args_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_version_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_weight_norm_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/_weight_norm_differentiable_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/absolute_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/adaptive_avg_pool1d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/adaptive_avg_pool2d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/adaptive_avg_pool3d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool1d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/adjoint_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/affine_grid_generator_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/align_as_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/align_tensors_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/align_to_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/all_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/alpha_dropout_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/and_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/any_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/arccos_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/arccosh_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/arcsin_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/arcsinh_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/arctan_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/arctan2_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/arctanh_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/argsort_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/argwhere_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/atleast_1d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/atleast_2d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/atleast_3d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/avg_pool1d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/batch_norm_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/bilinear_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/broadcast_tensors_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/broadcast_to_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/can_cast_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cartesian_prod_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cat_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cdist_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/chain_matmul_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/chalf_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/chunk_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/clip_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/coalesce_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/column_stack_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/combinations_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/concat_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/concatenate_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/conj_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/conj_physical_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/contiguous_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/conv1d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/conv2d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/conv3d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/conv_tbc_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/conv_transpose2d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/conv_transpose3d_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/corrcoef_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cosine_embedding_loss_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cosine_similarity_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cov_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cross_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cross_entropy_loss_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/ctc_loss_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cudnn_is_acceptable_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cummax_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cummaxmin_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cummin_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cumprod_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cumprod_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cumsum_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/cumulative_trapezoid_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/data_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/det_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/diag_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/diagflat_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/diagonal_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/diff_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/divide_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/dropout_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/dsplit_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/dstack_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/einsum_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/embedding_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/embedding_bag_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/embedding_sparse_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/empty_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/expand_as_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fake_quantize_per_channel_affine_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fake_quantize_per_tensor_affine_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fbgemm_linear_fp16_weight_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fbgemm_linear_int8_weight_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fbgemm_linear_quantize_weight_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fbgemm_pack_quantized_matrix_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/feature_alpha_dropout_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/feature_dropout_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_fft_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_fft2_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_fftshift_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_hfft2_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_hfftn_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_ifft_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_ifft2_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_ifftn_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_ifftshift_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_ihfft_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_ihfft2_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_ihfftn_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_irfft_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_irfft2_compositeimplicitautograd_dispatch.h>
+ #include <ATen/ops/fft_irfftn_compositeimplicitautograd_dispatch.h>
218
+ #include <ATen/ops/fft_rfft_compositeimplicitautograd_dispatch.h>
219
+ #include <ATen/ops/fft_rfft2_compositeimplicitautograd_dispatch.h>
220
+ #include <ATen/ops/fft_rfftn_compositeimplicitautograd_dispatch.h>
221
+ #include <ATen/ops/fill_diagonal_compositeimplicitautograd_dispatch.h>
222
+ #include <ATen/ops/fix_compositeimplicitautograd_dispatch.h>
223
+ #include <ATen/ops/flatten_compositeimplicitautograd_dispatch.h>
224
+ #include <ATen/ops/flatten_dense_tensors_compositeimplicitautograd_dispatch.h>
225
+ #include <ATen/ops/fliplr_compositeimplicitautograd_dispatch.h>
226
+ #include <ATen/ops/flipud_compositeimplicitautograd_dispatch.h>
227
+ #include <ATen/ops/float_power_compositeimplicitautograd_dispatch.h>
228
+ #include <ATen/ops/frobenius_norm_compositeimplicitautograd_dispatch.h>
229
+ #include <ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h>
230
+ #include <ATen/ops/gather_compositeimplicitautograd_dispatch.h>
231
+ #include <ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h>
232
+ #include <ATen/ops/ger_compositeimplicitautograd_dispatch.h>
233
+ #include <ATen/ops/gradient_compositeimplicitautograd_dispatch.h>
234
+ #include <ATen/ops/greater_compositeimplicitautograd_dispatch.h>
235
+ #include <ATen/ops/greater_equal_compositeimplicitautograd_dispatch.h>
236
+ #include <ATen/ops/grid_sampler_compositeimplicitautograd_dispatch.h>
237
+ #include <ATen/ops/group_norm_compositeimplicitautograd_dispatch.h>
238
+ #include <ATen/ops/gru_compositeimplicitautograd_dispatch.h>
239
+ #include <ATen/ops/gru_cell_compositeimplicitautograd_dispatch.h>
240
+ #include <ATen/ops/hinge_embedding_loss_compositeimplicitautograd_dispatch.h>
241
+ #include <ATen/ops/histogramdd_compositeimplicitautograd_dispatch.h>
242
+ #include <ATen/ops/hsplit_compositeimplicitautograd_dispatch.h>
243
+ #include <ATen/ops/hstack_compositeimplicitautograd_dispatch.h>
244
+ #include <ATen/ops/imag_compositeimplicitautograd_dispatch.h>
245
+ #include <ATen/ops/index_add_compositeimplicitautograd_dispatch.h>
246
+ #include <ATen/ops/index_copy_compositeimplicitautograd_dispatch.h>
247
+ #include <ATen/ops/index_fill_compositeimplicitautograd_dispatch.h>
248
+ #include <ATen/ops/index_select_compositeimplicitautograd_dispatch.h>
249
+ #include <ATen/ops/index_select_backward_compositeimplicitautograd_dispatch.h>
250
+ #include <ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h>
251
+ #include <ATen/ops/inner_compositeimplicitautograd_dispatch.h>
252
+ #include <ATen/ops/instance_norm_compositeimplicitautograd_dispatch.h>
253
+ #include <ATen/ops/inverse_compositeimplicitautograd_dispatch.h>
254
+ #include <ATen/ops/is_complex_compositeimplicitautograd_dispatch.h>
255
+ #include <ATen/ops/is_conj_compositeimplicitautograd_dispatch.h>
256
+ #include <ATen/ops/is_distributed_compositeimplicitautograd_dispatch.h>
257
+ #include <ATen/ops/is_floating_point_compositeimplicitautograd_dispatch.h>
258
+ #include <ATen/ops/is_inference_compositeimplicitautograd_dispatch.h>
259
+ #include <ATen/ops/is_leaf_compositeimplicitautograd_dispatch.h>
260
+ #include <ATen/ops/is_neg_compositeimplicitautograd_dispatch.h>
261
+ #include <ATen/ops/is_nonzero_compositeimplicitautograd_dispatch.h>
262
+ #include <ATen/ops/is_signed_compositeimplicitautograd_dispatch.h>
263
+ #include <ATen/ops/is_vulkan_available_compositeimplicitautograd_dispatch.h>
264
+ #include <ATen/ops/isclose_compositeimplicitautograd_dispatch.h>
265
+ #include <ATen/ops/isfinite_compositeimplicitautograd_dispatch.h>
266
+ #include <ATen/ops/isreal_compositeimplicitautograd_dispatch.h>
267
+ #include <ATen/ops/istft_compositeimplicitautograd_dispatch.h>
268
+ #include <ATen/ops/item_compositeimplicitautograd_dispatch.h>
269
+ #include <ATen/ops/kl_div_compositeimplicitautograd_dispatch.h>
270
+ #include <ATen/ops/kron_compositeimplicitautograd_dispatch.h>
271
+ #include <ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h>
272
+ #include <ATen/ops/l1_loss_compositeimplicitautograd_dispatch.h>
273
+ #include <ATen/ops/layer_norm_compositeimplicitautograd_dispatch.h>
274
+ #include <ATen/ops/ldexp_compositeimplicitautograd_dispatch.h>
275
+ #include <ATen/ops/less_compositeimplicitautograd_dispatch.h>
276
+ #include <ATen/ops/less_equal_compositeimplicitautograd_dispatch.h>
277
+ #include <ATen/ops/linalg_cholesky_compositeimplicitautograd_dispatch.h>
278
+ #include <ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h>
279
+ #include <ATen/ops/linalg_det_compositeimplicitautograd_dispatch.h>
280
+ #include <ATen/ops/linalg_diagonal_compositeimplicitautograd_dispatch.h>
281
+ #include <ATen/ops/linalg_eigh_compositeimplicitautograd_dispatch.h>
282
+ #include <ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h>
283
+ #include <ATen/ops/linalg_eigvalsh_compositeimplicitautograd_dispatch.h>
284
+ #include <ATen/ops/linalg_inv_compositeimplicitautograd_dispatch.h>
285
+ #include <ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h>
286
+ #include <ATen/ops/linalg_lu_factor_compositeimplicitautograd_dispatch.h>
287
+ #include <ATen/ops/linalg_matmul_compositeimplicitautograd_dispatch.h>
288
+ #include <ATen/ops/linalg_matrix_norm_compositeimplicitautograd_dispatch.h>
289
+ #include <ATen/ops/linalg_matrix_power_compositeimplicitautograd_dispatch.h>
290
+ #include <ATen/ops/linalg_matrix_rank_compositeimplicitautograd_dispatch.h>
291
+ #include <ATen/ops/linalg_multi_dot_compositeimplicitautograd_dispatch.h>
292
+ #include <ATen/ops/linalg_norm_compositeimplicitautograd_dispatch.h>
293
+ #include <ATen/ops/linalg_pinv_compositeimplicitautograd_dispatch.h>
294
+ #include <ATen/ops/linalg_slogdet_compositeimplicitautograd_dispatch.h>
295
+ #include <ATen/ops/linalg_solve_compositeimplicitautograd_dispatch.h>
296
+ #include <ATen/ops/linalg_solve_ex_compositeimplicitautograd_dispatch.h>
297
+ #include <ATen/ops/linalg_svd_compositeimplicitautograd_dispatch.h>
298
+ #include <ATen/ops/linalg_svdvals_compositeimplicitautograd_dispatch.h>
299
+ #include <ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h>
300
+ #include <ATen/ops/linalg_tensorsolve_compositeimplicitautograd_dispatch.h>
301
+ #include <ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h>
302
+ #include <ATen/ops/linalg_vecdot_compositeimplicitautograd_dispatch.h>
303
+ #include <ATen/ops/linear_compositeimplicitautograd_dispatch.h>
304
+ #include <ATen/ops/log_sigmoid_compositeimplicitautograd_dispatch.h>
305
+ #include <ATen/ops/log_softmax_compositeimplicitautograd_dispatch.h>
306
+ #include <ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h>
307
+ #include <ATen/ops/logdet_compositeimplicitautograd_dispatch.h>
308
+ #include <ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h>
309
+ #include <ATen/ops/lstm_compositeimplicitautograd_dispatch.h>
310
+ #include <ATen/ops/lstm_cell_compositeimplicitautograd_dispatch.h>
311
+ #include <ATen/ops/lu_solve_compositeimplicitautograd_dispatch.h>
312
+ #include <ATen/ops/mH_compositeimplicitautograd_dispatch.h>
313
+ #include <ATen/ops/mT_compositeimplicitautograd_dispatch.h>
314
+ #include <ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h>
315
+ #include <ATen/ops/masked_select_backward_compositeimplicitautograd_dispatch.h>
316
+ #include <ATen/ops/matmul_compositeimplicitautograd_dispatch.h>
317
+ #include <ATen/ops/matrix_H_compositeimplicitautograd_dispatch.h>
318
+ #include <ATen/ops/matrix_exp_compositeimplicitautograd_dispatch.h>
319
+ #include <ATen/ops/matrix_exp_backward_compositeimplicitautograd_dispatch.h>
320
+ #include <ATen/ops/matrix_power_compositeimplicitautograd_dispatch.h>
321
+ #include <ATen/ops/max_compositeimplicitautograd_dispatch.h>
322
+ #include <ATen/ops/max_pool1d_compositeimplicitautograd_dispatch.h>
323
+ #include <ATen/ops/max_pool1d_with_indices_compositeimplicitautograd_dispatch.h>
324
+ #include <ATen/ops/max_pool2d_compositeimplicitautograd_dispatch.h>
325
+ #include <ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h>
326
+ #include <ATen/ops/mean_compositeimplicitautograd_dispatch.h>
327
+ #include <ATen/ops/median_compositeimplicitautograd_dispatch.h>
328
+ #include <ATen/ops/meshgrid_compositeimplicitautograd_dispatch.h>
329
+ #include <ATen/ops/min_compositeimplicitautograd_dispatch.h>
330
+ #include <ATen/ops/mish_backward_compositeimplicitautograd_dispatch.h>
331
+ #include <ATen/ops/mode_compositeimplicitautograd_dispatch.h>
332
+ #include <ATen/ops/moveaxis_compositeimplicitautograd_dispatch.h>
333
+ #include <ATen/ops/movedim_compositeimplicitautograd_dispatch.h>
334
+ #include <ATen/ops/msort_compositeimplicitautograd_dispatch.h>
335
+ #include <ATen/ops/multilabel_margin_loss_compositeimplicitautograd_dispatch.h>
336
+ #include <ATen/ops/multiply_compositeimplicitautograd_dispatch.h>
337
+ #include <ATen/ops/nanmean_compositeimplicitautograd_dispatch.h>
338
+ #include <ATen/ops/nanmedian_compositeimplicitautograd_dispatch.h>
339
+ #include <ATen/ops/nanquantile_compositeimplicitautograd_dispatch.h>
340
+ #include <ATen/ops/narrow_compositeimplicitautograd_dispatch.h>
341
+ #include <ATen/ops/native_channel_shuffle_compositeimplicitautograd_dispatch.h>
342
+ #include <ATen/ops/negative_compositeimplicitautograd_dispatch.h>
343
+ #include <ATen/ops/nested_to_padded_tensor_compositeimplicitautograd_dispatch.h>
344
+ #include <ATen/ops/nll_loss_compositeimplicitautograd_dispatch.h>
345
+ #include <ATen/ops/nll_loss2d_compositeimplicitautograd_dispatch.h>
346
+ #include <ATen/ops/nll_loss_nd_compositeimplicitautograd_dispatch.h>
347
+ #include <ATen/ops/nonzero_numpy_compositeimplicitautograd_dispatch.h>
348
+ #include <ATen/ops/norm_compositeimplicitautograd_dispatch.h>
349
+ #include <ATen/ops/norm_except_dim_compositeimplicitautograd_dispatch.h>
350
+ #include <ATen/ops/not_equal_compositeimplicitautograd_dispatch.h>
351
+ #include <ATen/ops/nuclear_norm_compositeimplicitautograd_dispatch.h>
352
+ #include <ATen/ops/numpy_T_compositeimplicitautograd_dispatch.h>
353
+ #include <ATen/ops/one_hot_compositeimplicitautograd_dispatch.h>
354
+ #include <ATen/ops/or_compositeimplicitautograd_dispatch.h>
355
+ #include <ATen/ops/orgqr_compositeimplicitautograd_dispatch.h>
356
+ #include <ATen/ops/outer_compositeimplicitautograd_dispatch.h>
357
+ #include <ATen/ops/output_nr_compositeimplicitautograd_dispatch.h>
358
+ #include <ATen/ops/pad_compositeimplicitautograd_dispatch.h>
359
+ #include <ATen/ops/pad_sequence_compositeimplicitautograd_dispatch.h>
360
+ #include <ATen/ops/pairwise_distance_compositeimplicitautograd_dispatch.h>
361
+ #include <ATen/ops/pdist_compositeimplicitautograd_dispatch.h>
362
+ #include <ATen/ops/pin_memory_compositeimplicitautograd_dispatch.h>
363
+ #include <ATen/ops/pinverse_compositeimplicitautograd_dispatch.h>
364
+ #include <ATen/ops/poisson_nll_loss_compositeimplicitautograd_dispatch.h>
365
+ #include <ATen/ops/positive_compositeimplicitautograd_dispatch.h>
366
+ #include <ATen/ops/prelu_compositeimplicitautograd_dispatch.h>
367
+ #include <ATen/ops/prod_compositeimplicitautograd_dispatch.h>
368
+ #include <ATen/ops/promote_types_compositeimplicitautograd_dispatch.h>
369
+ #include <ATen/ops/qr_compositeimplicitautograd_dispatch.h>
370
+ #include <ATen/ops/quantile_compositeimplicitautograd_dispatch.h>
371
+ #include <ATen/ops/quantized_gru_cell_compositeimplicitautograd_dispatch.h>
372
+ #include <ATen/ops/quantized_lstm_cell_compositeimplicitautograd_dispatch.h>
373
+ #include <ATen/ops/quantized_rnn_relu_cell_compositeimplicitautograd_dispatch.h>
374
+ #include <ATen/ops/quantized_rnn_tanh_cell_compositeimplicitautograd_dispatch.h>
375
+ #include <ATen/ops/rand_compositeimplicitautograd_dispatch.h>
376
+ #include <ATen/ops/randn_compositeimplicitautograd_dispatch.h>
377
+ #include <ATen/ops/ravel_compositeimplicitautograd_dispatch.h>
378
+ #include <ATen/ops/real_compositeimplicitautograd_dispatch.h>
379
+ #include <ATen/ops/refine_names_compositeimplicitautograd_dispatch.h>
380
+ #include <ATen/ops/relu6_compositeimplicitautograd_dispatch.h>
381
+ #include <ATen/ops/rename_compositeimplicitautograd_dispatch.h>
382
+ #include <ATen/ops/repeat_interleave_compositeimplicitautograd_dispatch.h>
383
+ #include <ATen/ops/requires_grad_compositeimplicitautograd_dispatch.h>
384
+ #include <ATen/ops/reshape_compositeimplicitautograd_dispatch.h>
385
+ #include <ATen/ops/reshape_as_compositeimplicitautograd_dispatch.h>
386
+ #include <ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h>
387
+ #include <ATen/ops/resolve_neg_compositeimplicitautograd_dispatch.h>
388
+ #include <ATen/ops/result_type_compositeimplicitautograd_dispatch.h>
389
+ #include <ATen/ops/retain_grad_compositeimplicitautograd_dispatch.h>
390
+ #include <ATen/ops/retains_grad_compositeimplicitautograd_dispatch.h>
391
+ #include <ATen/ops/rnn_relu_compositeimplicitautograd_dispatch.h>
392
+ #include <ATen/ops/rnn_relu_cell_compositeimplicitautograd_dispatch.h>
393
+ #include <ATen/ops/rnn_tanh_compositeimplicitautograd_dispatch.h>
394
+ #include <ATen/ops/rnn_tanh_cell_compositeimplicitautograd_dispatch.h>
395
+ #include <ATen/ops/row_stack_compositeimplicitautograd_dispatch.h>
396
+ #include <ATen/ops/rrelu_compositeimplicitautograd_dispatch.h>
397
+ #include <ATen/ops/scaled_dot_product_attention_compositeimplicitautograd_dispatch.h>
398
+ #include <ATen/ops/scatter_compositeimplicitautograd_dispatch.h>
399
+ #include <ATen/ops/scatter_add_compositeimplicitautograd_dispatch.h>
400
+ #include <ATen/ops/select_compositeimplicitautograd_dispatch.h>
401
+ #include <ATen/ops/selu_compositeimplicitautograd_dispatch.h>
402
+ #include <ATen/ops/set_compositeimplicitautograd_dispatch.h>
403
+ #include <ATen/ops/set_data_compositeimplicitautograd_dispatch.h>
404
+ #include <ATen/ops/silu_backward_compositeimplicitautograd_dispatch.h>
405
+ #include <ATen/ops/size_compositeimplicitautograd_dispatch.h>
406
+ #include <ATen/ops/slogdet_compositeimplicitautograd_dispatch.h>
407
+ #include <ATen/ops/slow_conv3d_compositeimplicitautograd_dispatch.h>
408
+ #include <ATen/ops/smm_compositeimplicitautograd_dispatch.h>
409
+ #include <ATen/ops/softmax_compositeimplicitautograd_dispatch.h>
410
+ #include <ATen/ops/sort_compositeimplicitautograd_dispatch.h>
411
+ #include <ATen/ops/sparse_bsc_tensor_compositeimplicitautograd_dispatch.h>
412
+ #include <ATen/ops/sparse_bsr_tensor_compositeimplicitautograd_dispatch.h>
413
+ #include <ATen/ops/sparse_coo_tensor_compositeimplicitautograd_dispatch.h>
414
+ #include <ATen/ops/sparse_csc_tensor_compositeimplicitautograd_dispatch.h>
415
+ #include <ATen/ops/sparse_csr_tensor_compositeimplicitautograd_dispatch.h>
416
+ #include <ATen/ops/special_digamma_compositeimplicitautograd_dispatch.h>
417
+ #include <ATen/ops/special_erf_compositeimplicitautograd_dispatch.h>
418
+ #include <ATen/ops/special_erfc_compositeimplicitautograd_dispatch.h>
419
+ #include <ATen/ops/special_erfinv_compositeimplicitautograd_dispatch.h>
420
+ #include <ATen/ops/special_exp2_compositeimplicitautograd_dispatch.h>
421
+ #include <ATen/ops/special_expit_compositeimplicitautograd_dispatch.h>
422
+ #include <ATen/ops/special_expm1_compositeimplicitautograd_dispatch.h>
423
+ #include <ATen/ops/special_gammainc_compositeimplicitautograd_dispatch.h>
424
+ #include <ATen/ops/special_gammaincc_compositeimplicitautograd_dispatch.h>
425
+ #include <ATen/ops/special_gammaln_compositeimplicitautograd_dispatch.h>
426
+ #include <ATen/ops/special_i0_compositeimplicitautograd_dispatch.h>
427
+ #include <ATen/ops/special_log1p_compositeimplicitautograd_dispatch.h>
428
+ #include <ATen/ops/special_log_softmax_compositeimplicitautograd_dispatch.h>
429
+ #include <ATen/ops/special_logit_compositeimplicitautograd_dispatch.h>
430
+ #include <ATen/ops/special_logsumexp_compositeimplicitautograd_dispatch.h>
431
+ #include <ATen/ops/special_multigammaln_compositeimplicitautograd_dispatch.h>
432
+ #include <ATen/ops/special_ndtr_compositeimplicitautograd_dispatch.h>
433
+ #include <ATen/ops/special_polygamma_compositeimplicitautograd_dispatch.h>
434
+ #include <ATen/ops/special_psi_compositeimplicitautograd_dispatch.h>
435
+ #include <ATen/ops/special_round_compositeimplicitautograd_dispatch.h>
436
+ #include <ATen/ops/special_sinc_compositeimplicitautograd_dispatch.h>
437
+ #include <ATen/ops/special_softmax_compositeimplicitautograd_dispatch.h>
438
+ #include <ATen/ops/special_xlogy_compositeimplicitautograd_dispatch.h>
439
+ #include <ATen/ops/split_compositeimplicitautograd_dispatch.h>
440
+ #include <ATen/ops/square_compositeimplicitautograd_dispatch.h>
441
+ #include <ATen/ops/squeeze_compositeimplicitautograd_dispatch.h>
442
+ #include <ATen/ops/sspaddmm_compositeimplicitautograd_dispatch.h>
443
+ #include <ATen/ops/std_compositeimplicitautograd_dispatch.h>
444
+ #include <ATen/ops/std_mean_compositeimplicitautograd_dispatch.h>
445
+ #include <ATen/ops/stft_compositeimplicitautograd_dispatch.h>
446
+ #include <ATen/ops/stride_compositeimplicitautograd_dispatch.h>
447
+ #include <ATen/ops/subtract_compositeimplicitautograd_dispatch.h>
448
+ #include <ATen/ops/sum_compositeimplicitautograd_dispatch.h>
449
+ #include <ATen/ops/sum_to_size_compositeimplicitautograd_dispatch.h>
450
+ #include <ATen/ops/svd_compositeimplicitautograd_dispatch.h>
451
+ #include <ATen/ops/swapaxes_compositeimplicitautograd_dispatch.h>
452
+ #include <ATen/ops/swapdims_compositeimplicitautograd_dispatch.h>
453
+ #include <ATen/ops/sym_numel_compositeimplicitautograd_dispatch.h>
454
+ #include <ATen/ops/sym_size_compositeimplicitautograd_dispatch.h>
455
+ #include <ATen/ops/sym_storage_offset_compositeimplicitautograd_dispatch.h>
456
+ #include <ATen/ops/sym_stride_compositeimplicitautograd_dispatch.h>
457
+ #include <ATen/ops/take_along_dim_compositeimplicitautograd_dispatch.h>
458
+ #include <ATen/ops/tensor_split_compositeimplicitautograd_dispatch.h>
459
+ #include <ATen/ops/tensordot_compositeimplicitautograd_dispatch.h>
460
+ #include <ATen/ops/thnn_conv2d_compositeimplicitautograd_dispatch.h>
461
+ #include <ATen/ops/tile_compositeimplicitautograd_dispatch.h>
462
+ #include <ATen/ops/to_compositeimplicitautograd_dispatch.h>
463
+ #include <ATen/ops/to_dense_compositeimplicitautograd_dispatch.h>
464
+ #include <ATen/ops/to_dense_backward_compositeimplicitautograd_dispatch.h>
465
+ #include <ATen/ops/to_mkldnn_backward_compositeimplicitautograd_dispatch.h>
466
+ #include <ATen/ops/to_sparse_compositeimplicitautograd_dispatch.h>
467
+ #include <ATen/ops/to_sparse_bsc_compositeimplicitautograd_dispatch.h>
468
+ #include <ATen/ops/to_sparse_bsr_compositeimplicitautograd_dispatch.h>
469
+ #include <ATen/ops/to_sparse_csc_compositeimplicitautograd_dispatch.h>
470
+ #include <ATen/ops/to_sparse_csr_compositeimplicitautograd_dispatch.h>
471
+ #include <ATen/ops/trace_backward_compositeimplicitautograd_dispatch.h>
472
+ #include <ATen/ops/transpose_compositeimplicitautograd_dispatch.h>
473
+ #include <ATen/ops/trapezoid_compositeimplicitautograd_dispatch.h>
474
+ #include <ATen/ops/trapz_compositeimplicitautograd_dispatch.h>
475
+ #include <ATen/ops/triplet_margin_loss_compositeimplicitautograd_dispatch.h>
476
+ #include <ATen/ops/true_divide_compositeimplicitautograd_dispatch.h>
477
+ #include <ATen/ops/type_as_compositeimplicitautograd_dispatch.h>
478
+ #include <ATen/ops/unbind_compositeimplicitautograd_dispatch.h>
479
+ #include <ATen/ops/unflatten_compositeimplicitautograd_dispatch.h>
480
+ #include <ATen/ops/unflatten_dense_tensors_compositeimplicitautograd_dispatch.h>
481
+ #include <ATen/ops/unsafe_chunk_compositeimplicitautograd_dispatch.h>
482
+ #include <ATen/ops/upsample_bicubic2d_compositeimplicitautograd_dispatch.h>
483
+ #include <ATen/ops/upsample_bilinear2d_compositeimplicitautograd_dispatch.h>
484
+ #include <ATen/ops/upsample_linear1d_compositeimplicitautograd_dispatch.h>
485
+ #include <ATen/ops/upsample_nearest1d_compositeimplicitautograd_dispatch.h>
486
+ #include <ATen/ops/upsample_nearest2d_compositeimplicitautograd_dispatch.h>
487
+ #include <ATen/ops/upsample_nearest3d_compositeimplicitautograd_dispatch.h>
488
+ #include <ATen/ops/upsample_trilinear3d_compositeimplicitautograd_dispatch.h>
489
+ #include <ATen/ops/value_selecting_reduction_backward_compositeimplicitautograd_dispatch.h>
490
+ #include <ATen/ops/vander_compositeimplicitautograd_dispatch.h>
491
+ #include <ATen/ops/var_compositeimplicitautograd_dispatch.h>
492
+ #include <ATen/ops/var_mean_compositeimplicitautograd_dispatch.h>
493
+ #include <ATen/ops/view_as_compositeimplicitautograd_dispatch.h>
494
+ #include <ATen/ops/vsplit_compositeimplicitautograd_dispatch.h>
495
+ #include <ATen/ops/vstack_compositeimplicitautograd_dispatch.h>
496
+ #include <ATen/ops/where_compositeimplicitautograd_dispatch.h>
497
+ #include <ATen/ops/xor_compositeimplicitautograd_dispatch.h>
498
+
499
+
500
+
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+ #error This change adds a dependency on all pytorch operators, meaning the \
+   file will need to be re-compiled every time an operator is changed or added. \
+   Consider including a specific operator from \
+   <ATen/ops/{my_operator}_compositeimplicitautogradnestedtensor_dispatch.h>. \
+   See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+ #endif
+
+ #include <ATen/ops/randn_like_compositeimplicitautogradnestedtensor_dispatch.h>
+ #include <ATen/ops/reshape_compositeimplicitautogradnestedtensor_dispatch.h>
+ #include <ATen/ops/reshape_as_compositeimplicitautogradnestedtensor_dispatch.h>
+ #include <ATen/ops/zeros_like_compositeimplicitautogradnestedtensor_dispatch.h>
+
+
+
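Note: a minimal sketch of the per-operator include pattern that the #error above recommends; the translation unit below is hypothetical, and only the include path follows the generated naming scheme.

    // Hypothetical consumer TU: opt into per-operator headers, then include
    // only the one generated dispatch header that is actually needed,
    // instead of the umbrella header that pulls in every operator.
    #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
    #include <ATen/core/Tensor.h>
    #include <ATen/ops/reshape_compositeimplicitautogradnestedtensor_dispatch.h>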
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/Tensor.h>
+ #include <ATen/dlpack.h>
+
+ // This converter will:
+ // 1) take a Tensor object and wrap it in a DLPack tensor
+ // 2) take a DLPack tensor and convert it to an ATen Tensor
+
+ namespace at {
+
+ TORCH_API ScalarType toScalarType(const DLDataType& dtype);
+ TORCH_API DLManagedTensor* toDLPack(const Tensor& src);
+ TORCH_API Tensor fromDLPack(const DLManagedTensor* src);
+ TORCH_API Tensor
+ fromDLPack(const DLManagedTensor* src, std::function<void(void*)> deleter);
+ TORCH_API DLDataType getDLDataType(const Tensor& t);
+ TORCH_API DLDevice getDLContext(const Tensor& tensor, const int64_t& device_id);
+
+ } // namespace at
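Note: a minimal usage sketch for the declarations above, assuming standard DLPack ownership semantics; the tensor values are illustrative only.

    #include <ATen/DLConvertor.h>

    void dlpack_round_trip() {
      at::Tensor t = at::ones({2, 3});
      // Wrap `t` in a DLPack tensor; the storage is shared, not copied.
      DLManagedTensor* managed = at::toDLPack(t);
      // Convert back; fromDLPack is expected to take ownership of `managed`
      // and invoke its deleter once the returned tensor is destroyed.
      at::Tensor back = at::fromDLPack(managed);
    }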
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Device.h ADDED
@@ -0,0 +1,2 @@
+ #pragma once
+ #include <c10/core/Device.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Dimname.h ADDED
@@ -0,0 +1 @@
+ #include <ATen/core/Dimname.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Dispatch.h ADDED
@@ -0,0 +1,808 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/DeprecatedTypeProperties.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Half.h>
7
+ #include <c10/util/Metaprogramming.h>
8
+ #include <c10/util/complex.h>
9
+ #include <c10/util/string_view.h>
10
+
11
+ #ifdef __CUDACC__
12
+ #include <cuda.h> // For CUDA_VERSION
13
+ #endif
14
+
15
+ #ifdef TEMPLATE_SELECTIVE_BUILD
16
+ #include <ATen/selected_mobile_ops.h>
17
+ #else
18
+ namespace at {
19
+ /**
20
+ * The method should_include_kernel_dtype() returns true/false
21
+ * based on whether the switching code for a specific dtype should be
22
+ * included based on build time constants generated from tracing model
23
+ * execution. This method will be implmeneted via code-generation and
24
+ * included in this file when code-gen is ready.
25
+ */
26
+ inline constexpr bool should_include_kernel_dtype(
27
+ const char* /*kernel_tag_str*/,
28
+ at::ScalarType /*scalar_type*/
29
+ ) {
30
+ return true;
31
+ }
32
+ } // namespace at
33
+ #endif
34
+
35
+ /**
36
+ * In the Facebook internal build (using BUCK), this macro is enabled by
37
+ * passing in -c pt.enable_record_kernel_dtype=1 when building the tracer
38
+ * binary.
39
+ */
40
+ #if defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE
41
+ namespace at {
42
+ namespace detail {
43
+ TORCH_API void record_kernel_function_dtype(std::string name);
44
+ }
45
+ } // namespace at
46
+
47
+ #define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type) \
48
+ at::detail::record_kernel_function_dtype( \
49
+ std::string(NAME) + "$" + toString(enum_type));
50
+ #else
51
+ #define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type)
52
+ #endif
53
+
54
+ #define AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type) \
55
+ do { \
56
+ if constexpr (!at::should_include_kernel_dtype( \
57
+ at_dispatch_name, enum_type)) { \
58
+ AT_ERROR( \
59
+ "dtype '", \
60
+ toString(enum_type), \
61
+ "' not selected for kernel tag ", \
62
+ at_dispatch_name); \
63
+ } \
64
+ } while (0)
65
+
66
+ #define AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, HINT, ...) \
67
+ case enum_type: { \
68
+ AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
69
+ using HINT C10_UNUSED = c10::impl::ScalarTypeToCPPTypeT<enum_type>; \
70
+ return __VA_ARGS__(); \
71
+ }
72
+
73
+ #define AT_DISPATCH_CASE(enum_type, ...) \
74
+ AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
75
+
76
+ #define AT_DISPATCH_CASE_QINT(enum_type, scalar_type, ...) \
77
+ case enum_type: { \
78
+ AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
79
+ using scalar_t = scalar_type; \
80
+ using underlying_t C10_UNUSED = typename scalar_t::underlying; \
81
+ const auto& SCALAR_TYPE C10_UNUSED = enum_type; \
82
+ const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); \
83
+ return __VA_ARGS__(); \
84
+ }
85
+
86
+ #define AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
87
+ enum_type, scalar_type, bitwidth, qmin, qmax, ...) \
88
+ case enum_type: { \
89
+ AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
90
+ using scalar_t = scalar_type; \
91
+ using underlying_t C10_UNUSED = typename scalar_t::underlying; \
92
+ const auto& SCALAR_TYPE C10_UNUSED = enum_type; \
93
+ const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); \
94
+ C10_UNUSED int bit_width = bitwidth; \
95
+ C10_UNUSED int64_t quant_min = qmin; \
96
+ C10_UNUSED int64_t quant_max = qmax; \
97
+ return __VA_ARGS__(); \
98
+ }
99
+
100
+ namespace detail {
101
+
102
+ inline at::ScalarType scalar_type(at::ScalarType s) {
103
+ return s;
104
+ }
105
+
106
+ C10_DEPRECATED_MESSAGE(
107
+ "passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, "
108
+ "pass an at::ScalarType instead")
109
+ inline at::ScalarType scalar_type(const at::DeprecatedTypeProperties& t) {
110
+ return t.scalarType();
111
+ }
112
+
113
+ C10_DEPRECATED_MESSAGE(
114
+ "AT_DISPATCH_ALL_TYPES_AND_HALF is deprecated, "
115
+ "use AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, ...) instead")
116
+ inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF() {}
117
+
118
+ C10_DEPRECATED_MESSAGE(
119
+ "AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX is deprecated, "
120
+ "use AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(at::ScalarType::Half, ...) "
121
+ "instead")
122
+ inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {}
123
+
124
+ } // namespace detail
125
+
126
+ // The AT_DISPATCH_* family of macros provides the ability to
127
+ // conveniently generate specializations of a kernel over all of the
128
+ // dtypes we care about in PyTorch. We call it "dispatch" because
129
+ // we are "dispatching" to the correct, dtype-specific kernel.
130
+ //
131
+ // A standard usage looks like:
132
+ //
133
+ // AT_DISPATCH_ALL_TYPES(self.scalar_type(), "op_name", [&] {
134
+ // // Your code here, with 'scalar_t' now defined to
135
+ // // be the dtype in question
136
+ // });
137
+ //
138
+ // There are many variations of this macro, so it's important to
139
+ // understand exactly /which/ dtypes you want to get instantiated, as
140
+ // well as what the "default" set is.
141
+ //
142
+ // The default set of dtypes that are instantiated (e.g., by
143
+ // AT_DISPATCH_ALL_TYPES) are floating point types (float, double),
144
+ // and integral types (int32_t, int64_t, int16_t, int8_t, uint8_t),
145
+ // but NOT booleans (bool), half-precision floats (Half) or
146
+ // complex number (c10::complex<float>, c10::complex<double>).
147
+ // This "cut" is somewhat historical (the default types are the
148
+ // ones that TH historically supported), but it also reflects the
149
+ // fact that the non-default types are "poorly" behaved (booleans
150
+ // are NOT integers mod 2, half precision operations ~essentially
151
+ // don't exist on CPU, complex numbers are an experimental application).
152
+ //
153
+ // Here are the questions you should generally ask to decide which
154
+ // dispatch you want:
155
+ //
156
+ // 1. Is this an integral or floating point specific operation?
157
+ // (If so, you'll want one of the FLOATING or INTEGRAL macros.)
158
+ //
159
+ // 2. Should half be supported? (If you're on CPU, the answer is almost
160
+ // definitely no. If you do want support, use one of the AND_HALF
161
+ // macros)
162
+ //
163
+ // Much rarer situations:
164
+ //
165
+ // 3. Should bool be supported? (You often have to write your kernel
166
+ // differently if arithmetic operations are involved.) If so,
167
+ // Use AT_DISPATCH_ALL_TYPES_AND along with ScalarType::Bool
168
+ //
169
+ // 4. Should complex be supported? The answer is almost always no,
170
+ // unless you are working on "generic" code that should work on
171
+ // all dtypes.
172
+ //
173
+ // Parameters:
174
+ // -----------
175
+ //
176
+ // 1. The NAME argument is a "tag" that is used to trace and then
177
+ // conditionally compile fragments of the case statements such
178
+ // that the kernel functions are specialized only for the dtypes
179
+ // that are needed. The NAME parameter *must* be a build time
180
+ // const char* (can't be std::string, etc...)
181
+ //
182
+ // Please ensure that the NAME is unique for every implementation
183
+ // or you run the risk of over-including code for the kernel
184
+ // functions. There is no risk of missing out on any code, so
185
+ // it's mostly a risk of a Type-2 error, and not a Type-1 error.
186
+ //
187
+ // Switch-like syntax:
188
+ // -------------------
189
+ // There is also a switch-case like syntax which is useful if a kernel
190
+ // needs to be specialized for particular scalar types
191
+ //
192
+ // AT_DISPATCH_SWITCH(self.scalar_type(), "op_name",
193
+ // AT_DISPATCH_CASE_INTEGRAL_TYPES([&] {
194
+ // op_integral<scalar_t>(iter);
195
+ // })
196
+ // AT_DISPATCH_CASE_FLOATING_TYPES([&] {
197
+ // op_floating<scalar_t>(iter);
198
+ // })
199
+ // AT_DISPATCH_CASE(kBool, [&] {
200
+ // op_bool(iter);
201
+ // })
202
+ // );
203
+ //
204
+ // For each AT_DISPATCH_FOO macro, there is a corresponding
205
+ // AT_DISPATCH_CASE_FOO macro which can be used inside of an
206
+ // AT_DISPATCH_SWITCH block.
207
+
208
+ // NB: the the_type variable is not used, but we have kept it for
209
+ // backwards compatibility. It's probably not used by anyone though;
210
+ // but we're just being safe (and it doesn't hurt.) Note we must
211
+ // use it to shut up warnings about unused store.
212
+
213
+ #define AT_DISPATCH_SWITCH(TYPE, NAME, ...) \
214
+ [&] { \
215
+ const auto& the_type = TYPE; \
216
+ constexpr const char* at_dispatch_name = NAME; \
217
+ /* don't use TYPE again in case it is an expensive or side-effect op */ \
218
+ at::ScalarType _st = ::detail::scalar_type(the_type); \
219
+ RECORD_KERNEL_FUNCTION_DTYPE(at_dispatch_name, _st); \
220
+ switch (_st) { \
221
+ __VA_ARGS__ \
222
+ default: \
223
+ AT_ERROR( \
224
+ '"', \
225
+ at_dispatch_name, \
226
+ "\" not implemented for '", \
227
+ toString(_st), \
228
+ "'"); \
229
+ } \
230
+ }()
231
+
232
+ #define AT_DISPATCH_CASE_FLOATING_TYPES(...) \
233
+ AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
234
+ AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__)
235
+
236
+ #define AT_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \
237
+ AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
238
+
239
+ #define AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(...) \
240
+ AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
241
+ AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
242
+ AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__)
243
+
244
+ #define AT_DISPATCH_FLOATING_TYPES_AND_HALF(TYPE, NAME, ...) \
245
+ AT_DISPATCH_SWITCH( \
246
+ TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(__VA_ARGS__))
247
+
248
+ #define AT_DISPATCH_CASE_REDUCED_FLOATING_TYPES(...) \
249
+ AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
250
+ AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__)
251
+
252
+ #define AT_DISPATCH_REDUCED_FLOATING_TYPES(TYPE, NAME, ...) \
253
+ AT_DISPATCH_SWITCH( \
254
+ TYPE, NAME, AT_DISPATCH_CASE_REDUCED_FLOATING_TYPES(__VA_ARGS__))
255
+
256
+ #define AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, ...) \
257
+ AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
258
+ AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
259
+
260
+ #define AT_DISPATCH_FLOATING_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
261
+ AT_DISPATCH_SWITCH( \
262
+ TYPE, \
263
+ NAME, \
264
+ AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, __VA_ARGS__))
265
+
266
+ #define AT_DISPATCH_CASE_FLOATING_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) \
267
+ AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
268
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
269
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)
270
+
271
+ #define AT_DISPATCH_FLOATING_TYPES_AND2( \
272
+ SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
273
+ AT_DISPATCH_SWITCH( \
274
+ TYPE, \
275
+ NAME, \
276
+ AT_DISPATCH_CASE_FLOATING_TYPES_AND2( \
277
+ SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))
278
+
279
+ #define AT_DISPATCH_CASE_FLOATING_TYPES_AND3( \
280
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
281
+ AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
282
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
283
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
284
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)
285
+
286
+ #define AT_DISPATCH_FLOATING_TYPES_AND3( \
287
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
288
+ AT_DISPATCH_SWITCH( \
289
+ TYPE, \
290
+ NAME, \
291
+ AT_DISPATCH_CASE_FLOATING_TYPES_AND3( \
292
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))
293
+
294
+ #define AT_DISPATCH_CASE_FLOATING_TYPES_AND4( \
295
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \
296
+ AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
297
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
298
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
299
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
300
+ AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__)
301
+
302
+ #define AT_DISPATCH_FLOATING_TYPES_AND4( \
303
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \
304
+ AT_DISPATCH_SWITCH( \
305
+ TYPE, \
306
+ NAME, \
307
+ AT_DISPATCH_CASE_FLOATING_TYPES_AND4( \
308
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__))
309
+
310
+ #define AT_DISPATCH_CASE_COMPLEX_TYPES(...) \
311
+ AT_DISPATCH_CASE(at::ScalarType::ComplexDouble, __VA_ARGS__) \
312
+ AT_DISPATCH_CASE(at::ScalarType::ComplexFloat, __VA_ARGS__)
313
+
314
+ #define AT_DISPATCH_COMPLEX_TYPES(TYPE, NAME, ...) \
315
+ AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__))
316
+
317
+ #define AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, ...) \
318
+ AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) \
319
+ AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
320
+
321
+ #define AT_DISPATCH_COMPLEX_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
322
+ AT_DISPATCH_SWITCH( \
323
+ TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, __VA_ARGS__))
324
+
325
+ #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(...) \
326
+ AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \
327
+ AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__)
328
+
329
+ #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(TYPE, NAME, ...) \
330
+ AT_DISPATCH_SWITCH( \
331
+ TYPE, NAME, AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__))
332
+
333
+ #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1(SCALARTYPE, ...) \
334
+ AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
335
+ AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
336
+
337
+ #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1( \
338
+ SCALARTYPE, TYPE, NAME, ...) \
339
+ AT_DISPATCH_SWITCH( \
340
+ TYPE, \
341
+ NAME, \
342
+ AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1( \
343
+ SCALARTYPE, __VA_ARGS__))
344
+
345
+ #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( \
346
+ SCALARTYPE1, SCALARTYPE2, ...) \
347
+ AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
348
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
349
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)
350
+
351
+ #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( \
352
+ SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
353
+ AT_DISPATCH_SWITCH( \
354
+ TYPE, \
355
+ NAME, \
356
+ AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( \
357
+ SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))
358
+
359
+ #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( \
360
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
361
+ AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
362
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
363
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
364
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)
365
+
366
+ #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3( \
367
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
368
+ AT_DISPATCH_SWITCH( \
369
+ TYPE, \
370
+ NAME, \
371
+ AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( \
372
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))
373
+
374
+ #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND4( \
375
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \
376
+ AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
377
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
378
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
379
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
380
+ AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__)
381
+
382
+ #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND4( \
383
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \
384
+ AT_DISPATCH_SWITCH( \
385
+ TYPE, \
386
+ NAME, \
387
+ AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND4( \
388
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__))
389
+
390
+ #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND5( \
391
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, ...) \
392
+ AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
393
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
394
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
395
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
396
+ AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
397
+ AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__)
398
+
399
+ #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND5( \
400
+ SCALARTYPE1, \
401
+ SCALARTYPE2, \
402
+ SCALARTYPE3, \
403
+ SCALARTYPE4, \
404
+ SCALARTYPE5, \
405
+ TYPE, \
406
+ NAME, \
407
+ ...) \
408
+ AT_DISPATCH_SWITCH( \
409
+ TYPE, \
410
+ NAME, \
411
+ AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND5( \
412
+ SCALARTYPE1, \
413
+ SCALARTYPE2, \
414
+ SCALARTYPE3, \
415
+ SCALARTYPE4, \
416
+ SCALARTYPE5, \
417
+ __VA_ARGS__))
418
+
419
+ #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND6( \
420
+ SCALARTYPE1, \
421
+ SCALARTYPE2, \
422
+ SCALARTYPE3, \
423
+ SCALARTYPE4, \
424
+ SCALARTYPE5, \
425
+ SCALARTYPE6, \
426
+ ...) \
427
+ AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \
428
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
429
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
430
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
431
+ AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
432
+ AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
433
+ AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__)
434
+
435
+ #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND6( \
436
+ SCALARTYPE1, \
437
+ SCALARTYPE2, \
438
+ SCALARTYPE3, \
439
+ SCALARTYPE4, \
440
+ SCALARTYPE5, \
441
+ SCALARTYPE6, \
442
+ TYPE, \
443
+ NAME, \
444
+ ...) \
445
+ AT_DISPATCH_SWITCH( \
446
+ TYPE, \
447
+ NAME, \
448
+ AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND6( \
449
+ SCALARTYPE1, \
450
+ SCALARTYPE2, \
451
+ SCALARTYPE3, \
452
+ SCALARTYPE4, \
453
+ SCALARTYPE5, \
454
+ SCALARTYPE6, \
455
+ __VA_ARGS__))
456
+
457
+ #define AT_DISPATCH_CASE_INTEGRAL_TYPES(...) \
458
+ AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) \
459
+ AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \
460
+ AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \
461
+ AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) \
462
+ AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__)
463
+
464
+ #define AT_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \
465
+ AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__))
466
+
467
+ #define AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, ...) \
468
+ AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) \
469
+ AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
470
+
471
+ #define AT_DISPATCH_INTEGRAL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
472
+ AT_DISPATCH_SWITCH( \
473
+ TYPE, \
474
+ NAME, \
475
+ AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, __VA_ARGS__))
476
+
477
+ #define AT_DISPATCH_CASE_ALL_TYPES(...) \
478
+ AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) \
479
+ AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__)
480
+
481
+ #define AT_DISPATCH_ALL_TYPES(TYPE, NAME, ...) \
482
+ AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__))
483
+
484
+ #define AT_DISPATCH_CASE_QINT_TYPES(...) \
485
+ AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) \
486
+ AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__) \
487
+ AT_DISPATCH_CASE_QINT(at::kQInt32, at::qint32, __VA_ARGS__)
488
+
489
+ #define AT_DISPATCH_QINT_TYPES(TYPE, NAME, ...) \
490
+ AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__))
491
+
492
+ #define AT_DISPATCH_CASE_QINT_TYPES_AND(SCALARTYPE, ...) \
493
+ AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__) \
494
+ AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
495
+
496
+ #define AT_DISPATCH_QINT_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
497
+ AT_DISPATCH_SWITCH( \
498
+ TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES_AND(SCALARTYPE, __VA_ARGS__))
499
+
500
+ #define AT_DISPATCH_CASE_QINT_BYTE_TYPES(...) \
501
+ AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) \
502
+ AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__)
503
+
504
+ #define AT_DISPATCH_QINT_BYTE_TYPES(TYPE, NAME, ...) \
505
+ AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_BYTE_TYPES(__VA_ARGS__))
506
+
507
+ #define AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(...) \
508
+ AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
509
+ at::kQInt8, at::qint8, CHAR_BIT, SCHAR_MIN, SCHAR_MAX, __VA_ARGS__) \
510
+ AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
511
+ at::kQUInt8, at::quint8, CHAR_BIT, 0, UCHAR_MAX, __VA_ARGS__) \
512
+ AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
513
+ at::kQInt32, \
514
+ at::qint32, \
515
+ CHAR_BIT * sizeof(int), \
516
+ INT_MIN, \
517
+ INT_MAX, \
518
+ __VA_ARGS__) \
519
+ AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
520
+ at::kQUInt4x2, at::quint4x2, 4, 0, 15, __VA_ARGS__) \
521
+ AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \
522
+ at::kQUInt2x4, at::quint2x4, 2, 0, 3, __VA_ARGS__)
523
+
524
+ #define AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(TYPE, NAME, ...) \
525
+ AT_DISPATCH_SWITCH( \
526
+ TYPE, NAME, AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(__VA_ARGS__))
527
+
528
+ #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(...) \
529
+ AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
530
+ AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__)
531
+
532
+ #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX(TYPE, NAME, ...) \
533
+ AT_DISPATCH_SWITCH( \
534
+ TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__))
535
+
536
+ #define AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, ...) \
537
+ AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
538
+ AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
539
+
540
+ #define AT_DISPATCH_ALL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \
541
+ AT_DISPATCH_SWITCH( \
542
+ TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, __VA_ARGS__))
543
+
544
+ #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, ...) \
545
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
546
+ AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__)
547
+
548
+ #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, TYPE, NAME, ...) \
549
+ AT_DISPATCH_SWITCH( \
550
+ TYPE, \
551
+ NAME, \
552
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, __VA_ARGS__))
553
+
554
+ #define AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) \
555
+ AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
556
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
557
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)
558
+
559
+ #define AT_DISPATCH_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
560
+ AT_DISPATCH_SWITCH( \
561
+ TYPE, \
562
+ NAME, \
563
+ AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))
564
+
565
+ #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( \
566
+ SCALARTYPE1, SCALARTYPE2, ...) \
567
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
568
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
569
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__)
570
+
571
+ #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( \
572
+ SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \
573
+ AT_DISPATCH_SWITCH( \
574
+ TYPE, \
575
+ NAME, \
576
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( \
577
+ SCALARTYPE1, SCALARTYPE2, __VA_ARGS__))
578
+
579
+ #define AT_DISPATCH_CASE_ALL_TYPES_AND3( \
580
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
581
+ AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \
582
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
583
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
584
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)
585
+
586
+ #define AT_DISPATCH_ALL_TYPES_AND3( \
587
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
588
+ AT_DISPATCH_SWITCH( \
589
+ TYPE, \
590
+ NAME, \
591
+ AT_DISPATCH_CASE_ALL_TYPES_AND3( \
592
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))
593
+
594
+ #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( \
595
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \
596
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
597
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
598
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
599
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__)
600
+
601
+ #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( \
602
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \
603
+ AT_DISPATCH_SWITCH( \
604
+ TYPE, \
605
+ NAME, \
606
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( \
607
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__))
608
+
609
+ #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \
610
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \
611
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
612
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
613
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
614
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
615
+ AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__)
616
+
617
+ #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( \
618
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH( \
+ TYPE, \
+ NAME, \
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__))
+
+ #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND5( \
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, ...) \
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__)
+
+ #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND5( \
+ SCALARTYPE1, \
+ SCALARTYPE2, \
+ SCALARTYPE3, \
+ SCALARTYPE4, \
+ SCALARTYPE5, \
+ TYPE, \
+ NAME, \
+ ...) \
+ AT_DISPATCH_SWITCH( \
+ TYPE, \
+ NAME, \
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND5( \
+ SCALARTYPE1, \
+ SCALARTYPE2, \
+ SCALARTYPE3, \
+ SCALARTYPE4, \
+ SCALARTYPE5, \
+ __VA_ARGS__))
+
+ #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND6( \
+ SCALARTYPE1, \
+ SCALARTYPE2, \
+ SCALARTYPE3, \
+ SCALARTYPE4, \
+ SCALARTYPE5, \
+ SCALARTYPE6, \
+ ...) \
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__)
+
+ #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND6( \
+ SCALARTYPE1, \
+ SCALARTYPE2, \
+ SCALARTYPE3, \
+ SCALARTYPE4, \
+ SCALARTYPE5, \
+ SCALARTYPE6, \
+ TYPE, \
+ NAME, \
+ ...) \
+ AT_DISPATCH_SWITCH( \
+ TYPE, \
+ NAME, \
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND6( \
+ SCALARTYPE1, \
+ SCALARTYPE2, \
+ SCALARTYPE3, \
+ SCALARTYPE4, \
+ SCALARTYPE5, \
+ SCALARTYPE6, \
+ __VA_ARGS__))
+
+ #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND7( \
+ SCALARTYPE1, \
+ SCALARTYPE2, \
+ SCALARTYPE3, \
+ SCALARTYPE4, \
+ SCALARTYPE5, \
+ SCALARTYPE6, \
+ SCALARTYPE7, \
+ ...) \
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE7, __VA_ARGS__)
+
+ #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND7( \
+ SCALARTYPE1, \
+ SCALARTYPE2, \
+ SCALARTYPE3, \
+ SCALARTYPE4, \
+ SCALARTYPE5, \
+ SCALARTYPE6, \
+ SCALARTYPE7, \
+ TYPE, \
+ NAME, \
+ ...) \
+ AT_DISPATCH_SWITCH( \
+ TYPE, \
+ NAME, \
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND7( \
+ SCALARTYPE1, \
+ SCALARTYPE2, \
+ SCALARTYPE3, \
+ SCALARTYPE4, \
+ SCALARTYPE5, \
+ SCALARTYPE6, \
+ SCALARTYPE7, \
+ __VA_ARGS__))
+
+ #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND8( \
+ SCALARTYPE1, \
+ SCALARTYPE2, \
+ SCALARTYPE3, \
+ SCALARTYPE4, \
+ SCALARTYPE5, \
+ SCALARTYPE6, \
+ SCALARTYPE7, \
+ SCALARTYPE8, \
+ ...) \
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE7, __VA_ARGS__) \
+ AT_DISPATCH_CASE(SCALARTYPE8, __VA_ARGS__)
+
+ #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND8( \
+ SCALARTYPE1, \
+ SCALARTYPE2, \
+ SCALARTYPE3, \
+ SCALARTYPE4, \
+ SCALARTYPE5, \
+ SCALARTYPE6, \
+ SCALARTYPE7, \
+ SCALARTYPE8, \
+ TYPE, \
+ NAME, \
+ ...) \
+ AT_DISPATCH_SWITCH( \
+ TYPE, \
+ NAME, \
+ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND8( \
+ SCALARTYPE1, \
+ SCALARTYPE2, \
+ SCALARTYPE3, \
+ SCALARTYPE4, \
+ SCALARTYPE5, \
+ SCALARTYPE6, \
+ SCALARTYPE7, \
+ SCALARTYPE8, \
+ __VA_ARGS__))
+
+ #define AT_DISPATCH_CASE_BIT_TYPES(...) \
+ AT_DISPATCH_CASE(at::ScalarType::Bits1x8, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Bits2x4, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Bits4x2, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Bits8, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Bits16, __VA_ARGS__)
+
+ #define AT_DISPATCH_BIT_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_BIT_TYPES(__VA_ARGS__))
+
+ #define AT_DISPATCH_INDEX_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH( \
+ TYPE, \
+ NAME, \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::Int, index_t, __VA_ARGS__) \
+ AT_PRIVATE_CASE_TYPE_USING_HINT( \
+ at::ScalarType::Long, index_t, __VA_ARGS__))
+
+ // ----------------------------------------------------------------------------
+ // DEPRECATED MACROS, DON'T USE THESE
+ // ----------------------------------------------------------------------------
+
+ #define AT_DISPATCH_ALL_TYPES_AND_HALF(TYPE, NAME, ...) \
+ detail::deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF(); \
+ AT_DISPATCH_SWITCH( \
+ TYPE, \
+ NAME, \
+ AT_DISPATCH_CASE_ALL_TYPES_AND(at::ScalarType::Half, __VA_ARGS__))
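Each of the dispatch macros above expands to an AT_DISPATCH_SWITCH over a tensor's ScalarType, and each AT_DISPATCH_CASE instantiates the trailing lambda once per matching C++ type; AT_DISPATCH_INDEX_TYPES binds the alias index_t (rather than scalar_t) inside the lambda. A minimal usage sketch, where fill_indices is a hypothetical kernel and not part of this header:

    #include <ATen/Dispatch.h>
    #include <ATen/core/Tensor.h>

    // Hypothetical helper: fill a 1-D Int/Long tensor with 0..n-1.
    void fill_indices(at::Tensor& t) {
      AT_DISPATCH_INDEX_TYPES(t.scalar_type(), "fill_indices", [&] {
        // index_t is bound to int32_t or int64_t by the macro.
        index_t* data = t.data_ptr<index_t>();
        for (int64_t i = 0; i < t.numel(); ++i) {
          data[i] = static_cast<index_t>(i);
        }
      });
    }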
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/EmptyTensor.h ADDED
@@ -0,0 +1,160 @@
+ #pragma once
+ #include <ATen/core/TensorBase.h>
+
+ namespace at::detail {
+
+ inline void check_size_nonnegative(ArrayRef<int64_t> size) {
+ for (const auto& x : size) {
+ TORCH_CHECK(
+ x >= 0,
+ "Trying to create tensor with negative dimension ",
+ x,
+ ": ",
+ size);
+ }
+ }
+
+ inline void check_size_nonnegative(ArrayRef<c10::SymInt> size) {
+ for (const auto& x : size) {
+ TORCH_CHECK(
+ x.expect_size(__FILE__, __LINE__),
+ "Trying to create tensor with negative dimension ",
+ x,
+ ": ",
+ size);
+ }
+ }
+
+ TORCH_API size_t computeStorageNbytesContiguous(
+ IntArrayRef sizes,
+ size_t itemsize,
+ size_t storage_offset = 0);
+ TORCH_API SymInt computeStorageNbytesContiguous(
+ SymIntArrayRef sizes,
+ const SymInt& itemsize,
+ const SymInt& storage_offset = 0);
+ TORCH_API size_t computeStorageNbytes(
+ IntArrayRef sizes,
+ IntArrayRef strides,
+ size_t itemsize,
+ size_t storage_offset = 0);
+ TORCH_API SymInt computeStorageNbytes(
+ SymIntArrayRef sizes,
+ SymIntArrayRef strides,
+ const SymInt& itemsize,
+ const SymInt& storage_offset = 0);
+
+ TORCH_API TensorBase empty_generic(
+ IntArrayRef size,
+ c10::Allocator* allocator,
+ c10::DispatchKeySet ks,
+ ScalarType scalar_type,
+ c10::optional<c10::MemoryFormat> memory_format_opt);
+
+ TORCH_API TensorBase empty_strided_generic(
+ IntArrayRef size,
+ IntArrayRef stride,
+ c10::Allocator* allocator,
+ c10::DispatchKeySet ks,
+ ScalarType scalar_type);
+
+ TORCH_API TensorBase empty_strided_symint_generic(
+ SymIntArrayRef size,
+ SymIntArrayRef stride,
+ c10::Allocator* allocator,
+ c10::DispatchKeySet ks,
+ ScalarType scalar_type);
+
+ TORCH_API TensorBase empty_cpu(
+ IntArrayRef size,
+ ScalarType dtype,
+ bool pin_memory = false,
+ c10::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);
+
+ TORCH_API TensorBase empty_cpu(
+ IntArrayRef size,
+ c10::optional<ScalarType> dtype_opt,
+ c10::optional<Layout> layout_opt,
+ c10::optional<Device> device_opt,
+ c10::optional<bool> pin_memory_opt,
+ c10::optional<c10::MemoryFormat> memory_format_opt);
+
+ TORCH_API TensorBase empty_cpu(IntArrayRef size, const TensorOptions& options);
+
+ TORCH_API TensorBase empty_strided_cpu(
+ IntArrayRef size,
+ IntArrayRef stride,
+ ScalarType dtype,
+ bool pin_memory = false);
+
+ TORCH_API TensorBase empty_strided_cpu(
+ IntArrayRef size,
+ IntArrayRef stride,
+ c10::optional<ScalarType> dtype_opt,
+ c10::optional<Layout> layout_opt,
+ c10::optional<Device> device_opt,
+ c10::optional<bool> pin_memory_opt);
+
+ TORCH_API TensorBase empty_strided_cpu(
+ IntArrayRef size,
+ IntArrayRef stride,
+ const TensorOptions& options);
+
+ TORCH_API TensorBase empty_meta(
+ IntArrayRef size,
+ ScalarType dtype,
+ c10::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);
+
+ TORCH_API TensorBase empty_meta(
+ IntArrayRef size,
+ c10::optional<ScalarType> dtype_opt,
+ c10::optional<Layout> layout_opt,
+ c10::optional<Device> device_opt,
+ c10::optional<bool> pin_memory_opt,
+ c10::optional<c10::MemoryFormat> memory_format_opt);
+
+ TORCH_API TensorBase empty_symint_meta(
+ SymIntArrayRef size,
+ c10::optional<ScalarType> dtype_opt,
+ c10::optional<Layout> layout_opt,
+ c10::optional<Device> device_opt,
+ c10::optional<bool> pin_memory_opt,
+ c10::optional<c10::MemoryFormat> memory_format_opt);
+
+ TORCH_API TensorBase empty_meta(IntArrayRef size, const TensorOptions& options);
+
+ TORCH_API TensorBase
+ empty_strided_meta(IntArrayRef size, IntArrayRef stride, ScalarType dtype);
+
+ TORCH_API TensorBase empty_strided_meta(
+ IntArrayRef size,
+ IntArrayRef stride,
+ c10::optional<ScalarType> dtype_opt,
+ c10::optional<Layout> layout_opt,
+ c10::optional<Device> device_opt,
+ c10::optional<bool> pin_memory_opt);
+
+ TORCH_API TensorBase empty_strided_meta(
+ IntArrayRef size,
+ IntArrayRef stride,
+ const TensorOptions& options);
+
+ TORCH_API TensorBase empty_strided_symint_meta(
+ SymIntArrayRef size,
+ SymIntArrayRef stride,
+ ScalarType dtype);
+
+ TORCH_API TensorBase empty_strided_symint_meta(
+ SymIntArrayRef size,
+ SymIntArrayRef stride,
+ c10::optional<ScalarType> dtype_opt,
+ c10::optional<Layout> layout_opt,
+ c10::optional<Device> device_opt,
+ c10::optional<bool> pin_memory_opt);
+
+ TORCH_API TensorBase empty_strided_symint_meta(
+ SymIntArrayRef size,
+ SymIntArrayRef stride,
+ const TensorOptions& options);
+
+ } // namespace at::detail
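computeStorageNbytes has to size the underlying buffer so that the largest strided offset any index can reach still fits. A simplified model of that rule (illustrative only: it assumes non-negative strides and ignores the overflow checks and SymInt handling of the real implementation):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Model of the strided storage-size computation.
    size_t storage_nbytes_model(const std::vector<int64_t>& sizes,
                                const std::vector<int64_t>& strides,
                                size_t itemsize,
                                size_t storage_offset) {
      int64_t max_index = storage_offset;
      for (size_t d = 0; d < sizes.size(); ++d) {
        if (sizes[d] == 0) {
          return 0; // a tensor with an empty dim touches no storage
        }
        max_index += (sizes[d] - 1) * strides[d];
      }
      return static_cast<size_t>(max_index + 1) * itemsize;
    }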
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ExpandBase.h ADDED
@@ -0,0 +1,30 @@
+ #include <ATen/core/TensorBase.h>
+
+ // Broadcasting utilities for working with TensorBase
+ namespace at {
+ namespace internal {
+ TORCH_API TensorBase expand_slow_path(const TensorBase& self, IntArrayRef size);
+ } // namespace internal
+
+ inline c10::MaybeOwned<TensorBase> expand_size(
+ const TensorBase& self,
+ IntArrayRef size) {
+ if (size.equals(self.sizes())) {
+ return c10::MaybeOwned<TensorBase>::borrowed(self);
+ }
+ return c10::MaybeOwned<TensorBase>::owned(
+ at::internal::expand_slow_path(self, size));
+ }
+ c10::MaybeOwned<TensorBase> expand_size(TensorBase&& self, IntArrayRef size) =
+ delete;
+
+ inline c10::MaybeOwned<TensorBase> expand_inplace(
+ const TensorBase& tensor,
+ const TensorBase& to_expand) {
+ return expand_size(to_expand, tensor.sizes());
+ }
+ c10::MaybeOwned<TensorBase> expand_inplace(
+ const TensorBase& tensor,
+ TensorBase&& to_expand) = delete;
+
+ } // namespace at
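expand_size only materializes an expanded tensor on the slow path; when the shapes already match it hands back a borrowed reference, so the common case pays no refcount bump. A sketch of a typical call site (add_impl is illustrative, not part of this header):

    #include <ATen/ExpandBase.h>

    // Broadcast `other` to `self`'s shape before running a kernel.
    void add_impl(const at::TensorBase& self, const at::TensorBase& other) {
      c10::MaybeOwned<at::TensorBase> other_expanded =
          at::expand_inplace(self, other);
      // *other_expanded now has self.sizes(); it is borrowed when no
      // expansion was needed, owned otherwise.
      // ... a kernel would consume *other_expanded here ...
    }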
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Formatting.h ADDED
@@ -0,0 +1 @@
+ #include <ATen/core/Formatting.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/FuncTorchTLS.h ADDED
@@ -0,0 +1,46 @@
+ #pragma once
+
+ #include <c10/macros/Macros.h>
+ #include <memory>
+
+ namespace at::functorch {
+
+ // NOTE [functorch TLS in pytorch/pytorch]
+ //
+ // functorch lives out-of-tree. However, it has some TLS that needs to be
+ // propagated. The solution for that is we store a pointer to the TLS
+ // inside pytorch/pytorch and extend FuncTorchTLSBase inside functorch to
+ // include whatever functorch needs.
+ //
+ // We need to store a pointer due to the indirection:
+ // inside functorch, we will create a subclass of FuncTorchTLSBase called
+ // FuncTorchTLSImpl that actually contains metadata, like the DynamicLayerStack.
+ // FuncTorchTLSBase doesn't have any metadata because it hasn't been defined
+ // yet.
+ //
+ // Here in pytorch/pytorch, we will pass around FuncTorchTLSBase*, but inside
+ // functorch, we will assign a FuncTorchTLSImpl* to the FuncTorchTLSBase*.
+ // We can't directly pass around FuncTorchTLSBase (without a pointer) because
+ // FuncTorchTLSImpl does not fit inside a FuncTorchTLSBase by virtue of having
+ // more elements.
+ struct TORCH_API FuncTorchTLSBase {
+ virtual ~FuncTorchTLSBase() = default;
+ virtual std::unique_ptr<FuncTorchTLSBase> deepcopy() const = 0;
+
+ virtual int64_t checkSupportsSingleLevelAutogradFunction() const = 0;
+ virtual void checkSupportsCppAutogradFunction() const = 0;
+ virtual void checkSupportsInplaceRequiresGrad() const = 0;
+ virtual void checkSupportsRetainGrad() const = 0;
+ };
+
+ // returns deepcopy of the functorch tls
+ TORCH_API std::unique_ptr<FuncTorchTLSBase> getCopyOfFuncTorchTLS();
+
+ // sets the functorch tls. always does a deep copy.
+ TORCH_API void setFuncTorchTLS(
+ const std::shared_ptr<const FuncTorchTLSBase>& state);
+
+ // get a mutable reference to the functorch tls
+ TORCH_API std::unique_ptr<FuncTorchTLSBase>& functorchTLSAccessor();
+
+ } // namespace at::functorch
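A subclass living in functorch fills in the actual state. A minimal sketch of what such a subclass could look like (the type name and its payload here are illustrative, not the real functorch implementation):

    #include <ATen/FuncTorchTLS.h>
    #include <memory>

    struct MyFuncTorchTLS : public at::functorch::FuncTorchTLSBase {
      // Illustrative payload standing in for e.g. the DynamicLayerStack.
      int64_t dynamic_layer_depth = 0;

      std::unique_ptr<FuncTorchTLSBase> deepcopy() const override {
        return std::make_unique<MyFuncTorchTLS>(*this);
      }
      int64_t checkSupportsSingleLevelAutogradFunction() const override {
        return 0;
      }
      void checkSupportsCppAutogradFunction() const override {}
      void checkSupportsInplaceRequiresGrad() const override {}
      void checkSupportsRetainGrad() const override {}
    };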
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/InferSize.h ADDED
@@ -0,0 +1,87 @@
+ #pragma once
+
+ #include <ATen/DimVector.h>
+ #include <c10/core/ScalarType.h>
+ #include <c10/core/SymIntArrayRef.h>
+ #include <c10/util/DimVector.h>
+ #include <c10/util/Optional.h>
+ #include <sstream>
+ #include <vector>
+
+ namespace at {
+
+ // Infers the size of a dim with size -1, if it exists. Also checks that new
+ // shape is compatible with the number of elements.
+ //
+ // templated to handle std::vector<int64_t> and DimVector use cases, see
+ // below
+ //
+ template <typename InputArrayRef, typename NumelType, typename ResultVec>
+ inline void infer_size_impl(
+ InputArrayRef shape,
+ NumelType numel,
+ ResultVec& res) {
+ NumelType newsize = 1;
+ // N.B. this is an index, not a sym dim!
+ auto infer_dim = c10::optional<int64_t>();
+ for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) {
+ if (shape[dim] == -1) {
+ if (infer_dim) {
+ throw std::runtime_error("only one dimension can be inferred");
+ }
+ infer_dim = dim;
+ } else if (shape[dim] >= 0) {
+ newsize *= shape[dim];
+ } else {
+ AT_ERROR("invalid shape dimension ", shape[dim]);
+ }
+ }
+
+ if (numel == newsize || (infer_dim && newsize > 0 && numel % newsize == 0)) {
+ if (infer_dim) {
+ // We have a degree of freedom here to select the dimension size; follow
+ // NumPy semantics and just bail. However, a nice error message is needed
+ // because users often use `view` as a way to flatten & unflatten
+ // dimensions and will otherwise be confused why
+ // empty_tensor.view(0, 0)
+ // works yet
+ // empty_tensor.view(-1, 0)
+ // doesn't.
+ TORCH_CHECK(
+ newsize != 0,
+ "cannot reshape tensor of 0 elements into shape ",
+ shape,
+ " because the unspecified dimension size -1 can be any "
+ "value and is ambiguous");
+ res[*infer_dim] = numel / newsize;
+ }
+ return;
+ }
+
+ std::ostringstream ss;
+ ss << "shape '" << shape << "' is invalid for input of size " << numel;
+ throw std::runtime_error(ss.str());
+ }
+
+ inline std::vector<int64_t> infer_size(IntArrayRef shape, int64_t numel) {
+ auto res = shape.vec();
+ infer_size_impl(shape, numel, res);
+ return res;
+ }
+
+ inline at::DimVector infer_size_dv(IntArrayRef shape, int64_t numel) {
+ auto res = at::DimVector(shape);
+ infer_size_impl(shape, numel, res);
+ return res;
+ }
+
+ inline at::SymDimVector infer_size_dv(
+ c10::SymIntArrayRef shape,
+ c10::SymInt numel) {
+ auto res = at::SymDimVector(shape);
+ infer_size_impl<c10::SymIntArrayRef, c10::SymInt, at::SymDimVector>(
+ shape, std::move(numel), res);
+ return res;
+ }
+
+ } // namespace at
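Concretely, a single -1 entry is replaced by numel divided by the product of the known dimensions, and anything that does not divide evenly throws. For example:

    #include <ATen/InferSize.h>

    void infer_size_examples() {
      // 12 elements into (-1, 4) -> (3, 4)
      std::vector<int64_t> shape = at::infer_size({-1, 4}, /*numel=*/12);
      // shape == {3, 4}

      // 12 elements into (5, -1) would throw:
      //   shape '[5, -1]' is invalid for input of size 12
    }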
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <c10/core/TensorOptions.h>
+
+ namespace at {
+
+ // Represents the initial TensorOptions, before the "defaults" are ever changed.
+ // This is designed to be used in library code, where the explicit devices,
+ // dtypes, etc. are known. NOTE: this is not a stable API.
+ inline TensorOptions initialTensorOptions() {
+ return TensorOptions(kCPU).dtype(kFloat).layout(kStrided).requires_grad(
+ false);
+ }
+
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Layout.h ADDED
@@ -0,0 +1,2 @@
+ #pragma once
+ #include <c10/core/Layout.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/LegacyBatchedFallback.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ #include <ATen/ATen.h>
+ #include <ATen/core/op_registration/op_registration.h>
+ #include <torch/library.h>
+
+ namespace at {
+
+ // If an operator doesn't have a batching rule implemented then we fallback
+ // to this implementation. The fallback only works on out-of-place operators
+ // that return only tensors with new memory. (e.g., no in-place operators, no
+ // view operations).
+ //
+ // The fallback effectively takes all of the BatchedTensors in `stack`, slices
+ // them, and runs `op` on all of the corresponding slices to produce slices
+ // of the outputs. The output slices then get `torch.stack`ed to create the
+ // final returns.
+ //
+ // The performance of the fallback is not very good because it introduces an
+ // extra copy from stacking the sliced outputs. Because of this, we prefer to
+ // write batching rules for operators whenever possible.
+ void batchedTensorForLoopFallback(
+ const c10::OperatorHandle& op,
+ torch::jit::Stack* stack);
+
+ } // namespace at
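Conceptually, for one batched input the fallback behaves like the per-example loop below (written against the public tensor API purely for illustration; the real fallback manipulates the dispatcher stack and handles multiple batched arguments):

    #include <ATen/ATen.h>

    // Model of the for-loop fallback for a unary op with no batching rule:
    // slice along the batch dim, run the op per slice, stack the results.
    at::Tensor for_loop_fallback_model(const at::Tensor& batched,
                                       at::Tensor (*op)(const at::Tensor&)) {
      std::vector<at::Tensor> results;
      results.reserve(batched.size(0));
      for (int64_t i = 0; i < batched.size(0); ++i) {
        results.push_back(op(batched.select(0, i)));
      }
      return at::stack(results); // the extra copy mentioned above
    }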
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapMode.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+
+ #include <c10/core/impl/LocalDispatchKeySet.h>
+
+ namespace at::impl {
+
+ // VmapMode contains a thread local count of how many nested vmaps
+ // we are currently inside. That number is known as the `vmap level`.
+ // VmapMode is used in the implementation of the Python `torch.vmap` API.
+ //
+ // NOTE: this is NOT the c++ api for torch.vmap. That doesn't exist yet.
+
+ struct TORCH_API VmapMode {
+ // Returns the vmap level, aka the count of how many nested vmaps we're in.
+ static int64_t current_vmap_level();
+
+ // Increment the count of nested vmaps. If this causes the vmap level to be
+ // greater than 0, then it enables DispatchKey::VmapMode on all tensors.
+ static int64_t increment_nesting();
+
+ // Decrements the count of nested vmaps. If this causes the vmap level to be
+ // equal to 0, then it disables DispatchKey::VmapMode on all tensors.
+ static int64_t decrement_nesting();
+ };
+
+ } // namespace at::impl
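increment_nesting and decrement_nesting are meant to be called in matched pairs; a natural way to keep them balanced is an RAII guard. A sketch (this guard is hypothetical, not a class provided by the header):

    #include <ATen/LegacyVmapMode.h>

    // Enters a vmap level on construction, leaves it on scope exit.
    struct VmapLevelGuard {
      VmapLevelGuard() { at::impl::VmapMode::increment_nesting(); }
      ~VmapLevelGuard() { at::impl::VmapMode::decrement_nesting(); }
      VmapLevelGuard(const VmapLevelGuard&) = delete;
      VmapLevelGuard& operator=(const VmapLevelGuard&) = delete;
    };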
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/MatrixRef.h ADDED
@@ -0,0 +1,109 @@
+ #pragma once
+ #include <ATen/Utils.h>
+ #include <c10/util/ArrayRef.h>
+
+ #include <vector>
+
+ namespace at {
+ /// MatrixRef - Like an ArrayRef, but with an extra recorded stride so that
+ /// we can easily view it as a multidimensional array.
+ ///
+ /// Like ArrayRef, this class does not own the underlying data, it is expected
+ /// to be used in situations where the data resides in some other buffer.
+ ///
+ /// This is intended to be trivially copyable, so it should be passed by
+ /// value.
+ ///
+ /// For now, 2D only (so the copies are actually cheap, without having
+ /// to write a SmallVector class) and contiguous only (so we can
+ /// return non-strided ArrayRef on index).
+ ///
+ /// P.S. dimension 0 indexes rows, dimension 1 indexes columns
+ template <typename T>
+ class MatrixRef {
+ public:
+ typedef size_t size_type;
+
+ private:
+ /// Underlying ArrayRef
+ ArrayRef<T> arr;
+
+ /// Stride of dim 0 (outer dimension)
+ size_type stride0;
+
+ // Stride of dim 1 is assumed to be 1
+
+ public:
+ /// Construct an empty MatrixRef.
+ /*implicit*/ MatrixRef() : arr(nullptr), stride0(0) {}
+
+ /// Construct a MatrixRef from an ArrayRef and outer stride.
+ /*implicit*/ MatrixRef(ArrayRef<T> arr, size_type stride0)
+ : arr(arr), stride0(stride0) {
+ TORCH_CHECK(
+ arr.size() % stride0 == 0,
+ "MatrixRef: ArrayRef size ",
+ arr.size(),
+ " not divisible by stride ",
+ stride0)
+ }
+
+ /// @}
+ /// @name Simple Operations
+ /// @{
+
+ /// empty - Check if the matrix is empty.
+ bool empty() const {
+ return arr.empty();
+ }
+
+ const T* data() const {
+ return arr.data();
+ }
+
+ /// size - Get the size of a dimension
+ size_t size(size_t dim) const {
+ if (dim == 0) {
+ return arr.size() / stride0;
+ } else if (dim == 1) {
+ return stride0;
+ } else {
+ TORCH_CHECK(
+ 0, "MatrixRef: out of bounds dimension ", dim, "; expected 0 or 1");
+ }
+ }
+
+ size_t numel() const {
+ return arr.size();
+ }
+
+ /// equals - Check for element-wise equality.
+ bool equals(MatrixRef RHS) const {
+ return stride0 == RHS.stride0 && arr.equals(RHS.arr);
+ }
+
+ /// @}
+ /// @name Operator Overloads
+ /// @{
+ ArrayRef<T> operator[](size_t Index) const {
+ return arr.slice(Index * stride0, stride0);
+ }
+
+ /// Disallow accidental assignment from a temporary.
+ ///
+ /// The declaration here is extra complicated so that "arrayRef = {}"
+ /// continues to select the move assignment operator.
+ template <typename U>
+ typename std::enable_if<std::is_same<U, T>::value, MatrixRef<T>>::type&
+ operator=(U&& Temporary) = delete;
+
+ /// Disallow accidental assignment from a temporary.
+ ///
+ /// The declaration here is extra complicated so that "arrayRef = {}"
+ /// continues to select the move assignment operator.
+ template <typename U>
+ typename std::enable_if<std::is_same<U, T>::value, MatrixRef<T>>::type&
+ operator=(std::initializer_list<U>) = delete;
+ };
+
+ } // end namespace at
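Because dim 1 is assumed contiguous with stride 1, indexing a row is just a slice of the flat buffer. For example, viewing a flat 6-element buffer as a 2x3 matrix:

    #include <ATen/MatrixRef.h>
    #include <vector>

    void matrix_ref_example() {
      std::vector<float> buf = {1, 2, 3, 4, 5, 6};
      // 2 rows x 3 cols; stride0 (the row stride) is 3.
      at::MatrixRef<float> m(buf, /*stride0=*/3);
      // m.size(0) == 2, m.size(1) == 3
      // m[1] is an ArrayRef over {4, 5, 6}
      float x = m[1][2]; // 6
      (void)x;
    }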
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/MemoryOverlap.h ADDED
@@ -0,0 +1,42 @@
+ #pragma once
+
+ #include <c10/macros/Export.h>
+
+ namespace c10 {
+ struct TensorImpl;
+ }
+
+ namespace at {
+ class TensorBase;
+
+ // MemOverlap: Whether or not there is memory overlap
+ //
+ // No: Absolutely no memory overlap
+ // Yes: Absolutely yes memory overlap
+ // TooHard: There might be memory overlap, but it was too expensive to compute.
+ //
+ // NB: Please update the python test for these if you renumber them.
+ enum class MemOverlap { No, Yes, TooHard };
+
+ enum class MemOverlapStatus { Full, Partial, No, TooHard };
+
+ TORCH_API MemOverlap has_internal_overlap(const TensorBase& t);
+ TORCH_API MemOverlap has_internal_overlap(c10::TensorImpl* t);
+
+ TORCH_API void assert_no_internal_overlap(const TensorBase& t);
+ TORCH_API void assert_no_internal_overlap(c10::TensorImpl* t);
+
+ TORCH_API MemOverlapStatus
+ get_overlap_status(const TensorBase& a, const TensorBase& b);
+ TORCH_API MemOverlapStatus
+ get_overlap_status(const c10::TensorImpl* a, const c10::TensorImpl* b);
+
+ TORCH_API void assert_no_partial_overlap(
+ const TensorBase& a,
+ const TensorBase& b);
+ void assert_no_partial_overlap(c10::TensorImpl* a, c10::TensorImpl* b);
+
+ TORCH_API void assert_no_overlap(const TensorBase& a, const TensorBase& b);
+ TORCH_API void assert_no_overlap(c10::TensorImpl* a, c10::TensorImpl* b);
+
+ } // namespace at
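A tensor whose strides make distinct indices hit the same memory (an expand, for instance, uses stride 0) reports internal overlap, and the assert helpers throw on it. A small sketch:

    #include <ATen/ATen.h>
    #include <ATen/MemoryOverlap.h>

    void overlap_example() {
      at::Tensor base = at::zeros({3});
      at::Tensor expanded = base.expand({4, 3}); // stride 0 along dim 0
      // has_internal_overlap(expanded) == at::MemOverlap::Yes
      // assert_no_internal_overlap(expanded) would throw.
      at::assert_no_internal_overlap(base); // fine: base is contiguous
    }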
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions.h ADDED
@@ -0,0 +1,29 @@
+ #include <ATen/core/TensorBody.h>
+
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
+ //
+ // Note [Avoiding Include Cycles In Static Dispatch]
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+ //
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+ // directly inlined into TensorBody.h.
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+ // which include functions that have defaultable optional<Tensor> arguments.
+ // That requires knowing the full Tensor class definition.
+ //
+ // We break the cycle by doing the following:
+ // - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+ // - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+ // - CPUFunctions_inl.h includes everything else
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+ // and then it includes CPUFunctions_inl.h.
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+ // - This also means that in the static dispatch build, CPUFunctions.h only needs to
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+ #include <ATen/MetaFunctions_inl.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions_inl.h ADDED
@@ -0,0 +1,324 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+ #error This change adds a dependency on all pytorch operators, meaning the \
+ file will need to be re-compiled every time an operator is changed or added. \
+ Consider including a specific operator from \
+ <ATen/ops/{my_operator}_meta_dispatch.h>. \
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+ #endif
+
+ #include <ATen/ops/_add_relu_meta_dispatch.h>
+ #include <ATen/ops/_addmm_activation_meta_dispatch.h>
+ #include <ATen/ops/_amp_update_scale_meta_dispatch.h>
+ #include <ATen/ops/_coalesced_meta_dispatch.h>
+ #include <ATen/ops/_convert_indices_from_coo_to_csr_meta_dispatch.h>
+ #include <ATen/ops/_convert_indices_from_csr_to_coo_meta_dispatch.h>
+ #include <ATen/ops/_ctc_loss_meta_dispatch.h>
+ #include <ATen/ops/_efficientzerotensor_meta_dispatch.h>
+ #include <ATen/ops/_fill_mem_eff_dropout_mask_meta_dispatch.h>
+ #include <ATen/ops/_fused_sdp_choice_meta_dispatch.h>
+ #include <ATen/ops/_index_put_impl_meta_dispatch.h>
+ #include <ATen/ops/_linalg_det_meta_dispatch.h>
+ #include <ATen/ops/_linalg_eigh_meta_dispatch.h>
+ #include <ATen/ops/_linalg_slogdet_meta_dispatch.h>
+ #include <ATen/ops/_linalg_solve_ex_meta_dispatch.h>
+ #include <ATen/ops/_linalg_svd_meta_dispatch.h>
+ #include <ATen/ops/_log_softmax_meta_dispatch.h>
+ #include <ATen/ops/_log_softmax_backward_data_meta_dispatch.h>
+ #include <ATen/ops/_mkldnn_transpose_meta_dispatch.h>
+ #include <ATen/ops/_reshape_alias_meta_dispatch.h>
+ #include <ATen/ops/_resize_output_meta_dispatch.h>
+ #include <ATen/ops/_softmax_meta_dispatch.h>
+ #include <ATen/ops/_softmax_backward_data_meta_dispatch.h>
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_meta_dispatch.h>
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_meta_dispatch.h>
+ #include <ATen/ops/_upsample_bicubic2d_aa_meta_dispatch.h>
+ #include <ATen/ops/_upsample_bicubic2d_aa_backward_meta_dispatch.h>
+ #include <ATen/ops/_upsample_bilinear2d_aa_meta_dispatch.h>
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward_meta_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact1d_meta_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact1d_backward_meta_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact2d_meta_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact2d_backward_meta_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact3d_meta_dispatch.h>
+ #include <ATen/ops/_upsample_nearest_exact3d_backward_meta_dispatch.h>
+ #include <ATen/ops/acos_meta_dispatch.h>
+ #include <ATen/ops/acosh_meta_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool2d_meta_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool2d_backward_meta_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool3d_meta_dispatch.h>
+ #include <ATen/ops/adaptive_max_pool3d_backward_meta_dispatch.h>
+ #include <ATen/ops/add_meta_dispatch.h>
+ #include <ATen/ops/addbmm_meta_dispatch.h>
+ #include <ATen/ops/addcdiv_meta_dispatch.h>
+ #include <ATen/ops/addcmul_meta_dispatch.h>
+ #include <ATen/ops/addmm_meta_dispatch.h>
+ #include <ATen/ops/addmv_meta_dispatch.h>
+ #include <ATen/ops/all_meta_dispatch.h>
+ #include <ATen/ops/amax_meta_dispatch.h>
+ #include <ATen/ops/amin_meta_dispatch.h>
+ #include <ATen/ops/aminmax_meta_dispatch.h>
+ #include <ATen/ops/any_meta_dispatch.h>
+ #include <ATen/ops/arange_meta_dispatch.h>
+ #include <ATen/ops/argmax_meta_dispatch.h>
+ #include <ATen/ops/argmin_meta_dispatch.h>
+ #include <ATen/ops/as_strided_meta_dispatch.h>
+ #include <ATen/ops/asin_meta_dispatch.h>
+ #include <ATen/ops/asinh_meta_dispatch.h>
+ #include <ATen/ops/atan_meta_dispatch.h>
+ #include <ATen/ops/atan2_meta_dispatch.h>
+ #include <ATen/ops/atanh_meta_dispatch.h>
+ #include <ATen/ops/avg_pool2d_meta_dispatch.h>
+ #include <ATen/ops/avg_pool2d_backward_meta_dispatch.h>
+ #include <ATen/ops/avg_pool3d_meta_dispatch.h>
+ #include <ATen/ops/avg_pool3d_backward_meta_dispatch.h>
+ #include <ATen/ops/baddbmm_meta_dispatch.h>
+ #include <ATen/ops/bernoulli_meta_dispatch.h>
+ #include <ATen/ops/bitwise_and_meta_dispatch.h>
+ #include <ATen/ops/bitwise_left_shift_meta_dispatch.h>
+ #include <ATen/ops/bitwise_not_meta_dispatch.h>
+ #include <ATen/ops/bitwise_or_meta_dispatch.h>
+ #include <ATen/ops/bitwise_right_shift_meta_dispatch.h>
+ #include <ATen/ops/bitwise_xor_meta_dispatch.h>
+ #include <ATen/ops/bmm_meta_dispatch.h>
+ #include <ATen/ops/cat_meta_dispatch.h>
+ #include <ATen/ops/cauchy_meta_dispatch.h>
+ #include <ATen/ops/ceil_meta_dispatch.h>
+ #include <ATen/ops/clamp_meta_dispatch.h>
+ #include <ATen/ops/clamp_max_meta_dispatch.h>
+ #include <ATen/ops/clamp_min_meta_dispatch.h>
+ #include <ATen/ops/copy_sparse_to_sparse_meta_dispatch.h>
+ #include <ATen/ops/copysign_meta_dispatch.h>
+ #include <ATen/ops/cos_meta_dispatch.h>
+ #include <ATen/ops/cosh_meta_dispatch.h>
+ #include <ATen/ops/cumprod_meta_dispatch.h>
+ #include <ATen/ops/cumsum_meta_dispatch.h>
+ #include <ATen/ops/digamma_meta_dispatch.h>
+ #include <ATen/ops/div_meta_dispatch.h>
+ #include <ATen/ops/elu_meta_dispatch.h>
+ #include <ATen/ops/elu_backward_meta_dispatch.h>
+ #include <ATen/ops/embedding_renorm_meta_dispatch.h>
+ #include <ATen/ops/empty_meta_dispatch.h>
+ #include <ATen/ops/empty_strided_meta_dispatch.h>
+ #include <ATen/ops/eq_meta_dispatch.h>
+ #include <ATen/ops/erf_meta_dispatch.h>
+ #include <ATen/ops/erfc_meta_dispatch.h>
+ #include <ATen/ops/erfinv_meta_dispatch.h>
+ #include <ATen/ops/exp_meta_dispatch.h>
+ #include <ATen/ops/exp2_meta_dispatch.h>
+ #include <ATen/ops/expm1_meta_dispatch.h>
+ #include <ATen/ops/exponential_meta_dispatch.h>
+ #include <ATen/ops/eye_meta_dispatch.h>
+ #include <ATen/ops/fill_meta_dispatch.h>
+ #include <ATen/ops/floor_meta_dispatch.h>
+ #include <ATen/ops/floor_divide_meta_dispatch.h>
+ #include <ATen/ops/fmax_meta_dispatch.h>
+ #include <ATen/ops/fmin_meta_dispatch.h>
+ #include <ATen/ops/fmod_meta_dispatch.h>
+ #include <ATen/ops/frac_meta_dispatch.h>
+ #include <ATen/ops/fractional_max_pool2d_meta_dispatch.h>
+ #include <ATen/ops/fractional_max_pool2d_backward_meta_dispatch.h>
+ #include <ATen/ops/fractional_max_pool3d_meta_dispatch.h>
+ #include <ATen/ops/gather_meta_dispatch.h>
+ #include <ATen/ops/gcd_meta_dispatch.h>
+ #include <ATen/ops/ge_meta_dispatch.h>
+ #include <ATen/ops/gelu_meta_dispatch.h>
+ #include <ATen/ops/gelu_backward_meta_dispatch.h>
+ #include <ATen/ops/geometric_meta_dispatch.h>
+ #include <ATen/ops/glu_meta_dispatch.h>
+ #include <ATen/ops/gt_meta_dispatch.h>
+ #include <ATen/ops/hardshrink_meta_dispatch.h>
+ #include <ATen/ops/hardshrink_backward_meta_dispatch.h>
+ #include <ATen/ops/hardsigmoid_meta_dispatch.h>
+ #include <ATen/ops/hardsigmoid_backward_meta_dispatch.h>
+ #include <ATen/ops/hardswish_meta_dispatch.h>
+ #include <ATen/ops/hardtanh_meta_dispatch.h>
+ #include <ATen/ops/heaviside_meta_dispatch.h>
+ #include <ATen/ops/hypot_meta_dispatch.h>
+ #include <ATen/ops/i0_meta_dispatch.h>
+ #include <ATen/ops/igamma_meta_dispatch.h>
+ #include <ATen/ops/igammac_meta_dispatch.h>
+ #include <ATen/ops/index_meta_dispatch.h>
+ #include <ATen/ops/index_add_meta_dispatch.h>
+ #include <ATen/ops/index_copy_meta_dispatch.h>
+ #include <ATen/ops/index_fill_meta_dispatch.h>
+ #include <ATen/ops/index_reduce_meta_dispatch.h>
+ #include <ATen/ops/isin_meta_dispatch.h>
+ #include <ATen/ops/isneginf_meta_dispatch.h>
+ #include <ATen/ops/isposinf_meta_dispatch.h>
+ #include <ATen/ops/lcm_meta_dispatch.h>
+ #include <ATen/ops/le_meta_dispatch.h>
+ #include <ATen/ops/leaky_relu_meta_dispatch.h>
+ #include <ATen/ops/leaky_relu_backward_meta_dispatch.h>
+ #include <ATen/ops/lerp_meta_dispatch.h>
+ #include <ATen/ops/lgamma_meta_dispatch.h>
+ #include <ATen/ops/linalg_cholesky_ex_meta_dispatch.h>
+ #include <ATen/ops/linalg_cross_meta_dispatch.h>
+ #include <ATen/ops/linalg_inv_ex_meta_dispatch.h>
+ #include <ATen/ops/linalg_ldl_factor_ex_meta_dispatch.h>
+ #include <ATen/ops/linalg_ldl_solve_meta_dispatch.h>
+ #include <ATen/ops/linalg_lu_meta_dispatch.h>
+ #include <ATen/ops/linalg_lu_factor_ex_meta_dispatch.h>
+ #include <ATen/ops/linalg_lu_solve_meta_dispatch.h>
+ #include <ATen/ops/linalg_qr_meta_dispatch.h>
+ #include <ATen/ops/linalg_vector_norm_meta_dispatch.h>
+ #include <ATen/ops/linspace_meta_dispatch.h>
+ #include <ATen/ops/log_meta_dispatch.h>
+ #include <ATen/ops/log10_meta_dispatch.h>
+ #include <ATen/ops/log1p_meta_dispatch.h>
+ #include <ATen/ops/log2_meta_dispatch.h>
+ #include <ATen/ops/log_normal_meta_dispatch.h>
+ #include <ATen/ops/logaddexp_meta_dispatch.h>
+ #include <ATen/ops/logaddexp2_meta_dispatch.h>
+ #include <ATen/ops/logit_meta_dispatch.h>
+ #include <ATen/ops/logit_backward_meta_dispatch.h>
+ #include <ATen/ops/logspace_meta_dispatch.h>
+ #include <ATen/ops/lshift_meta_dispatch.h>
+ #include <ATen/ops/lt_meta_dispatch.h>
+ #include <ATen/ops/lu_unpack_meta_dispatch.h>
+ #include <ATen/ops/masked_fill_meta_dispatch.h>
+ #include <ATen/ops/masked_scatter_meta_dispatch.h>
+ #include <ATen/ops/max_meta_dispatch.h>
+ #include <ATen/ops/max_pool2d_with_indices_meta_dispatch.h>
+ #include <ATen/ops/max_pool2d_with_indices_backward_meta_dispatch.h>
+ #include <ATen/ops/maximum_meta_dispatch.h>
+ #include <ATen/ops/mean_meta_dispatch.h>
+ #include <ATen/ops/min_meta_dispatch.h>
+ #include <ATen/ops/minimum_meta_dispatch.h>
+ #include <ATen/ops/mish_meta_dispatch.h>
+ #include <ATen/ops/mm_meta_dispatch.h>
+ #include <ATen/ops/mse_loss_meta_dispatch.h>
+ #include <ATen/ops/mul_meta_dispatch.h>
+ #include <ATen/ops/ne_meta_dispatch.h>
+ #include <ATen/ops/neg_meta_dispatch.h>
+ #include <ATen/ops/nextafter_meta_dispatch.h>
+ #include <ATen/ops/nll_loss_backward_meta_dispatch.h>
+ #include <ATen/ops/nll_loss_forward_meta_dispatch.h>
+ #include <ATen/ops/norm_meta_dispatch.h>
+ #include <ATen/ops/normal_meta_dispatch.h>
+ #include <ATen/ops/polygamma_meta_dispatch.h>
+ #include <ATen/ops/pow_meta_dispatch.h>
+ #include <ATen/ops/prod_meta_dispatch.h>
+ #include <ATen/ops/put_meta_dispatch.h>
+ #include <ATen/ops/random_meta_dispatch.h>
+ #include <ATen/ops/range_meta_dispatch.h>
+ #include <ATen/ops/reciprocal_meta_dispatch.h>
+ #include <ATen/ops/reflection_pad1d_meta_dispatch.h>
+ #include <ATen/ops/reflection_pad1d_backward_meta_dispatch.h>
+ #include <ATen/ops/reflection_pad3d_meta_dispatch.h>
+ #include <ATen/ops/reflection_pad3d_backward_meta_dispatch.h>
+ #include <ATen/ops/relu_meta_dispatch.h>
+ #include <ATen/ops/remainder_meta_dispatch.h>
+ #include <ATen/ops/renorm_meta_dispatch.h>
+ #include <ATen/ops/replication_pad1d_meta_dispatch.h>
+ #include <ATen/ops/replication_pad1d_backward_meta_dispatch.h>
+ #include <ATen/ops/replication_pad2d_meta_dispatch.h>
+ #include <ATen/ops/replication_pad3d_meta_dispatch.h>
+ #include <ATen/ops/resize_meta_dispatch.h>
+ #include <ATen/ops/resize_as_sparse_meta_dispatch.h>
+ #include <ATen/ops/round_meta_dispatch.h>
+ #include <ATen/ops/rrelu_with_noise_meta_dispatch.h>
+ #include <ATen/ops/rshift_meta_dispatch.h>
+ #include <ATen/ops/rsqrt_meta_dispatch.h>
+ #include <ATen/ops/scatter_meta_dispatch.h>
+ #include <ATen/ops/scatter_add_meta_dispatch.h>
+ #include <ATen/ops/scatter_reduce_meta_dispatch.h>
+ #include <ATen/ops/set_meta_dispatch.h>
+ #include <ATen/ops/sgn_meta_dispatch.h>
+ #include <ATen/ops/sigmoid_meta_dispatch.h>
+ #include <ATen/ops/sigmoid_backward_meta_dispatch.h>
+ #include <ATen/ops/sign_meta_dispatch.h>
+ #include <ATen/ops/signbit_meta_dispatch.h>
+ #include <ATen/ops/silu_meta_dispatch.h>
+ #include <ATen/ops/silu_backward_meta_dispatch.h>
+ #include <ATen/ops/sin_meta_dispatch.h>
+ #include <ATen/ops/sinc_meta_dispatch.h>
+ #include <ATen/ops/sinh_meta_dispatch.h>
+ #include <ATen/ops/slow_conv_transpose2d_meta_dispatch.h>
+ #include <ATen/ops/smooth_l1_loss_meta_dispatch.h>
+ #include <ATen/ops/softplus_meta_dispatch.h>
+ #include <ATen/ops/softplus_backward_meta_dispatch.h>
+ #include <ATen/ops/softshrink_meta_dispatch.h>
+ #include <ATen/ops/softshrink_backward_meta_dispatch.h>
+ #include <ATen/ops/sort_meta_dispatch.h>
+ #include <ATen/ops/sparse_resize_meta_dispatch.h>
+ #include <ATen/ops/sparse_resize_and_clear_meta_dispatch.h>
+ #include <ATen/ops/special_airy_ai_meta_dispatch.h>
+ #include <ATen/ops/special_bessel_j0_meta_dispatch.h>
+ #include <ATen/ops/special_bessel_j1_meta_dispatch.h>
+ #include <ATen/ops/special_bessel_y0_meta_dispatch.h>
+ #include <ATen/ops/special_bessel_y1_meta_dispatch.h>
+ #include <ATen/ops/special_chebyshev_polynomial_t_meta_dispatch.h>
+ #include <ATen/ops/special_chebyshev_polynomial_u_meta_dispatch.h>
+ #include <ATen/ops/special_chebyshev_polynomial_v_meta_dispatch.h>
+ #include <ATen/ops/special_chebyshev_polynomial_w_meta_dispatch.h>
+ #include <ATen/ops/special_entr_meta_dispatch.h>
+ #include <ATen/ops/special_erfcx_meta_dispatch.h>
+ #include <ATen/ops/special_hermite_polynomial_h_meta_dispatch.h>
+ #include <ATen/ops/special_hermite_polynomial_he_meta_dispatch.h>
+ #include <ATen/ops/special_i0e_meta_dispatch.h>
+ #include <ATen/ops/special_i1_meta_dispatch.h>
+ #include <ATen/ops/special_i1e_meta_dispatch.h>
+ #include <ATen/ops/special_laguerre_polynomial_l_meta_dispatch.h>
+ #include <ATen/ops/special_legendre_polynomial_p_meta_dispatch.h>
+ #include <ATen/ops/special_log_ndtr_meta_dispatch.h>
+ #include <ATen/ops/special_modified_bessel_i0_meta_dispatch.h>
+ #include <ATen/ops/special_modified_bessel_i1_meta_dispatch.h>
+ #include <ATen/ops/special_modified_bessel_k0_meta_dispatch.h>
+ #include <ATen/ops/special_modified_bessel_k1_meta_dispatch.h>
+ #include <ATen/ops/special_ndtri_meta_dispatch.h>
+ #include <ATen/ops/special_scaled_modified_bessel_k0_meta_dispatch.h>
+ #include <ATen/ops/special_scaled_modified_bessel_k1_meta_dispatch.h>
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t_meta_dispatch.h>
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u_meta_dispatch.h>
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v_meta_dispatch.h>
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w_meta_dispatch.h>
+ #include <ATen/ops/special_spherical_bessel_j0_meta_dispatch.h>
+ #include <ATen/ops/special_xlog1py_meta_dispatch.h>
+ #include <ATen/ops/special_zeta_meta_dispatch.h>
+ #include <ATen/ops/sqrt_meta_dispatch.h>
+ #include <ATen/ops/sub_meta_dispatch.h>
+ #include <ATen/ops/sum_meta_dispatch.h>
+ #include <ATen/ops/tan_meta_dispatch.h>
+ #include <ATen/ops/tanh_meta_dispatch.h>
+ #include <ATen/ops/tanh_backward_meta_dispatch.h>
+ #include <ATen/ops/threshold_meta_dispatch.h>
+ #include <ATen/ops/threshold_backward_meta_dispatch.h>
+ #include <ATen/ops/topk_meta_dispatch.h>
+ #include <ATen/ops/triangular_solve_meta_dispatch.h>
+ #include <ATen/ops/tril_meta_dispatch.h>
+ #include <ATen/ops/triu_meta_dispatch.h>
+ #include <ATen/ops/trunc_meta_dispatch.h>
+ #include <ATen/ops/unfold_meta_dispatch.h>
+ #include <ATen/ops/uniform_meta_dispatch.h>
+ #include <ATen/ops/upsample_bicubic2d_meta_dispatch.h>
+ #include <ATen/ops/upsample_bicubic2d_backward_meta_dispatch.h>
+ #include <ATen/ops/upsample_bilinear2d_meta_dispatch.h>
+ #include <ATen/ops/upsample_bilinear2d_backward_meta_dispatch.h>
+ #include <ATen/ops/upsample_linear1d_meta_dispatch.h>
+ #include <ATen/ops/upsample_linear1d_backward_meta_dispatch.h>
+ #include <ATen/ops/upsample_nearest1d_meta_dispatch.h>
+ #include <ATen/ops/upsample_nearest1d_backward_meta_dispatch.h>
+ #include <ATen/ops/upsample_nearest2d_meta_dispatch.h>
+ #include <ATen/ops/upsample_nearest2d_backward_meta_dispatch.h>
+ #include <ATen/ops/upsample_nearest3d_meta_dispatch.h>
+ #include <ATen/ops/upsample_nearest3d_backward_meta_dispatch.h>
+ #include <ATen/ops/upsample_trilinear3d_meta_dispatch.h>
+ #include <ATen/ops/upsample_trilinear3d_backward_meta_dispatch.h>
+ #include <ATen/ops/view_meta_dispatch.h>
+ #include <ATen/ops/view_as_complex_meta_dispatch.h>
+ #include <ATen/ops/view_as_real_meta_dispatch.h>
+ #include <ATen/ops/xlogy_meta_dispatch.h>
+ #include <ATen/ops/zero_meta_dispatch.h>
+
+
+
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h ADDED
@@ -0,0 +1,215 @@
+ #pragma once
+ #include <ATen/NamedTensor.h>
+ #include <ATen/TensorNames.h>
+ #include <ATen/WrapDimUtilsMulti.h>
+
+ #include <ATen/core/DimVector.h>
+ #include <ATen/core/Tensor.h>
+ #include <functional>
+
+ namespace at {
+
+ using NameVector = SmallVector<Dimname, kDimVectorStaticSize>;
+
+ inline bool has_names(const ITensorListRef& tensors) {
+ return std::any_of(tensors.begin(), tensors.end(), [](const Tensor& t) {
+ return t.has_names();
+ });
+ }
+
+ // Converts dim to a positional index. Errors if `dim` cannot be used to
+ // refer to any dimension of tensor.
+ TORCH_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim);
+ TORCH_API std::vector<int64_t> dimnames_to_positions(
+ const Tensor& tensor,
+ DimnameList dims);
+
+ // Unifies two DimnameList to produce a third. This is useful for implementing
+ // the named inference rule for binary broadcasting operations like add.
+ //
+ // There are three main constraints:
+ // 1) Check matching: Names must match positionally from the right.
+ // 2) Check misaligned: If a name `n` is in `names`, then it must appear at
+ // the same index from the right in other.
+ // 3) The output names are obtained by unifying the names individually from the
+ // right.
+ TORCH_API std::vector<Dimname> unify_from_right(
+ DimnameList names,
+ DimnameList other,
+ const char* action = "broadcast");
+
+ [[noreturn]] inline void reportNYIDimnameOverload(const char* op_name) {
+ TORCH_CHECK(
+ false,
+ op_name,
+ ": You passed a dimname (string) to this op in place of a dimension "
+ "index but it does not yet support this behavior. Please pass a dimension "
+ "index to work around this.");
+ }
+
+ // [NOTE] Writing name inference rules
+ //
+ // Operators that support named tensors are either composed of operations that
+ // support named tensors or implement some name inference rule. An op that
+ // implements its own name inference rule generally looks like the following:
+ //
+ // Tensor op(...) {
+ // perform_shape_checks(...);
+ // # (1)
+ // auto maybe_outnames = compute_outnames(...);
+ // auto result = [&]() {
+ // NoNamesGuard guard;
+ // return op_impl(...);
+ // }();
+ // # (2)
+ // propagate_names_if_nonempty(result, maybe_outnames);
+ //
+ // Each op has (1) a compute outnames step and (2) a propagate names step.
+ //
+ // compute_outnames is responsible for checking that input names match and
+ // determining what the output names should be. It returns either:
+ // - {} (if the input tensors are all unnamed)
+ // - non-empty outnames.
+ //
+ // propagate_names_if_nonempty propagates the outnames if they exist to the
+ // result tensors.
+ //
+ // The {} case is an optimization; if the user does not use named tensors they
+ // pay no perf cost for it.
+
+ namespace namedinference {
+
+ const Tensor& propagate_names_if_present_and_nonempty(
+ const Tensor& result,
+ c10::optional<DimnameList> maybe_names,
+ bool validate_names = false);
+ // Propagates `names` to `result` if `names` is not empty.
+ // `names` can be empty; see [NOTE] Writing name inference rules
+ // If `names` is not empty, `names.size()` should equal `result.dim()`.
+ // When in doubt, use this overload instead of the others.
+ TORCH_API const Tensor& propagate_names_if_nonempty(
+ const Tensor& result,
+ DimnameList maybe_names,
+ bool validate_names = false);
+
+ // Propagates `names` to `result`. Only use this if we are certain that there
+ // are names to propagate (that names is not empty).
+ TORCH_API const Tensor& propagate_names(
+ const Tensor& result,
+ DimnameList names,
+ bool validate_names = false);
+
+ // Propagates all names from src to result.
+ TORCH_API void propagate_names(const Tensor& result, const Tensor& src);
+
+ // Propagates all names except for those at the excluded_idxs.
+ TORCH_API void propagate_names_except(
+ const Tensor& result,
+ const Tensor& src,
+ IntArrayRef excluded_idxs);
+
+ // Used for reduction ops that have a `keepdim` arg.
+ TORCH_API void propagate_names_for_reduction(
+ const Tensor& result,
+ const Tensor& src,
+ IntArrayRef excluded_idxs,
+ bool keepdim);
+
+ TORCH_API void propagate_names_for_expand(
+ const Tensor& result,
+ const Tensor& self);
+
+ TORCH_API std::vector<Dimname> compute_cat_outnames(
+ const MaterializedITensorListRef& tensors);
+
+ TORCH_API std::vector<Dimname> compute_broadcast_outnames(
+ const Tensor& self,
+ const Tensor& other);
+
+ TORCH_API std::vector<Dimname> broadcast_to_outnames(
+ const Tensor& tensor,
+ const Tensor& reference_tensor,
+ const char* op_name);
+
+ TORCH_API std::vector<Dimname> compute_matmul_outnames(
+ const Tensor& self,
+ const Tensor& other);
+
+ TORCH_API std::vector<Dimname> compute_cdist_outnames(
+ const Tensor& self,
+ const Tensor& other);
+
+ TORCH_API std::vector<Dimname> compute_bmm_outnames(
+ const Tensor& result,
+ const Tensor& self,
+ const Tensor& other);
+
+ TORCH_API std::vector<Dimname> compute_squeeze_outnames(const Tensor& tensor);
+ TORCH_API std::vector<Dimname> compute_squeeze_outnames(
+ const Tensor& tensor,
+ std::bitset<dim_bitset_size> dims);
+
+ std::vector<Dimname> compute_diagonal_outnames(
+ const Tensor& tensor,
+ int64_t dim1,
+ int64_t dim2);
+
+ // TensorImpl* overloads for Legacy TH/THC code. Use these sparingly.
+
+ TORCH_API TensorImpl* propagate_names_if_nonempty(
+ TensorImpl* result,
+ DimnameList maybe_names,
+ bool validate_names = false);
+
+ TORCH_API TensorImpl* propagate_names(
+ TensorImpl* result,
+ DimnameList names,
+ bool validate_names = false);
+
+ TORCH_API void propagate_names(TensorImpl* result, /*const */ TensorImpl* src);
+
+ TORCH_API inline void propagate_names(
+ const TensorBase& result,
+ DimnameList names,
+ bool validate_names = false) {
+ propagate_names(result.unsafeGetTensorImpl(), names, validate_names);
+ }
+
+ TORCH_API inline void propagate_names_if_nonempty(
+ const TensorBase& result,
+ DimnameList names,
+ bool validate_names = false) {
+ propagate_names_if_nonempty(
+ result.unsafeGetTensorImpl(), names, validate_names);
+ }
+
+ TORCH_API inline void propagate_names(
+ const TensorBase& result,
+ const TensorBase& src) {
+ propagate_names(result.unsafeGetTensorImpl(), src.unsafeGetTensorImpl());
+ }
+
+ // result = m1 @ m2 + bias
+ TORCH_API std::vector<Dimname> propagate_names_for_addmm(
+ const Tensor& m1,
+ const Tensor& m2,
+ const Tensor& bias);
+
+ TORCH_API std::vector<Dimname> propagate_names_for_addmv(
+ const Tensor& mat,
+ const Tensor& vec,
+ const Tensor& bias);
+
+ TORCH_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2);
+
+ TORCH_API std::vector<Dimname> compute_baddbmm_outnames(
+ const Tensor& result,
+ const Tensor& self,
+ const Tensor& other,
+ const Tensor& bias);
+
+ TORCH_API bool are_names_equal(TensorImpl* self, TensorImpl* other);
+
+ } // namespace namedinference
+
+ } // namespace at
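A sketch of the two-step pattern from [NOTE] Writing name inference rules above, applied to a hypothetical binary op (my_add is illustrative; real ops follow the same shape):

    #include <ATen/NamedTensorUtils.h>

    at::Tensor my_add(const at::Tensor& a, const at::Tensor& b) {
      // (1) compute the output names before names are stripped
      auto outnames = at::namedinference::compute_broadcast_outnames(a, b);
      at::Tensor result;
      {
        at::NoNamesGuard guard; // run the unnamed implementation
        result = a.add(b);
      }
      // (2) propagate the computed names onto the result
      at::namedinference::propagate_names_if_nonempty(result, outnames);
      return result;
    }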
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/NativeMetaFunctions.h ADDED
@@ -0,0 +1,1281 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeMetaFunctions.h
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/core/IListRef.h>
+ #include <ATen/TensorMeta.h>
+ #include <ATen/TensorIterator.h>
+
+ #include <ATen/ops/_adaptive_avg_pool2d_meta.h>
+ #include <ATen/ops/_adaptive_avg_pool2d_backward_meta.h>
+ #include <ATen/ops/_adaptive_avg_pool3d_meta.h>
+ #include <ATen/ops/_adaptive_avg_pool3d_backward_meta.h>
+ #include <ATen/ops/_add_batch_dim_meta.h>
+ #include <ATen/ops/_add_relu_meta.h>
+ #include <ATen/ops/_addmm_activation_meta.h>
+ #include <ATen/ops/_aminmax_meta.h>
+ #include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_meta.h>
+ #include <ATen/ops/_amp_update_scale_meta.h>
+ #include <ATen/ops/_assert_async_meta.h>
+ #include <ATen/ops/_assert_tensor_metadata_meta.h>
+ #include <ATen/ops/_autocast_to_full_precision_meta.h>
+ #include <ATen/ops/_autocast_to_reduced_precision_meta.h>
+ #include <ATen/ops/_backward_meta.h>
+ #include <ATen/ops/_batch_norm_impl_index_meta.h>
+ #include <ATen/ops/_batch_norm_impl_index_backward_meta.h>
+ #include <ATen/ops/_cast_Byte_meta.h>
+ #include <ATen/ops/_cast_Char_meta.h>
+ #include <ATen/ops/_cast_Double_meta.h>
+ #include <ATen/ops/_cast_Float_meta.h>
+ #include <ATen/ops/_cast_Half_meta.h>
+ #include <ATen/ops/_cast_Int_meta.h>
+ #include <ATen/ops/_cast_Long_meta.h>
+ #include <ATen/ops/_cast_Short_meta.h>
+ #include <ATen/ops/_cdist_backward_meta.h>
+ #include <ATen/ops/_cdist_forward_meta.h>
+ #include <ATen/ops/_cholesky_solve_helper_meta.h>
+ #include <ATen/ops/_choose_qparams_per_tensor_meta.h>
+ #include <ATen/ops/_coalesce_meta.h>
+ #include <ATen/ops/_coalesced_meta.h>
+ #include <ATen/ops/_compute_linear_combination_meta.h>
+ #include <ATen/ops/_conj_meta.h>
+ #include <ATen/ops/_conj_copy_meta.h>
+ #include <ATen/ops/_conj_physical_meta.h>
+ #include <ATen/ops/_conv_depthwise2d_meta.h>
+ #include <ATen/ops/_convert_indices_from_coo_to_csr_meta.h>
+ #include <ATen/ops/_convert_indices_from_csr_to_coo_meta.h>
+ #include <ATen/ops/_convert_weight_to_int4pack_meta.h>
+ #include <ATen/ops/_convolution_meta.h>
+ #include <ATen/ops/_convolution_double_backward_meta.h>
+ #include <ATen/ops/_convolution_mode_meta.h>
+ #include <ATen/ops/_copy_from_meta.h>
+ #include <ATen/ops/_copy_from_and_resize_meta.h>
+ #include <ATen/ops/_cslt_compress_meta.h>
+ #include <ATen/ops/_cslt_sparse_mm_meta.h>
+ #include <ATen/ops/_ctc_loss_meta.h>
+ #include <ATen/ops/_ctc_loss_backward_meta.h>
+ #include <ATen/ops/_cudnn_ctc_loss_meta.h>
+ #include <ATen/ops/_cudnn_init_dropout_state_meta.h>
+ #include <ATen/ops/_cudnn_rnn_meta.h>
+ #include <ATen/ops/_cudnn_rnn_backward_meta.h>
+ #include <ATen/ops/_cudnn_rnn_flatten_weight_meta.h>
+ #include <ATen/ops/_cufft_clear_plan_cache_meta.h>
+ #include <ATen/ops/_cufft_get_plan_cache_max_size_meta.h>
+ #include <ATen/ops/_cufft_get_plan_cache_size_meta.h>
+ #include <ATen/ops/_cufft_set_plan_cache_max_size_meta.h>
+ #include <ATen/ops/_cummax_helper_meta.h>
+ #include <ATen/ops/_cummin_helper_meta.h>
+ #include <ATen/ops/_debug_has_internal_overlap_meta.h>
+ #include <ATen/ops/_dimI_meta.h>
+ #include <ATen/ops/_dimV_meta.h>
+ #include <ATen/ops/_dim_arange_meta.h>
+ #include <ATen/ops/_dirichlet_grad_meta.h>
+ #include <ATen/ops/_efficient_attention_backward_meta.h>
+ #include <ATen/ops/_efficient_attention_forward_meta.h>
+ #include <ATen/ops/_efficientzerotensor_meta.h>
+ #include <ATen/ops/_embedding_bag_meta.h>
+ #include <ATen/ops/_embedding_bag_backward_meta.h>
+ #include <ATen/ops/_embedding_bag_dense_backward_meta.h>
+ #include <ATen/ops/_embedding_bag_forward_only_meta.h>
+ #include <ATen/ops/_embedding_bag_per_sample_weights_backward_meta.h>
+ #include <ATen/ops/_embedding_bag_sparse_backward_meta.h>
+ #include <ATen/ops/_empty_affine_quantized_meta.h>
+ #include <ATen/ops/_empty_per_channel_affine_quantized_meta.h>
+ #include <ATen/ops/_euclidean_dist_meta.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_meta.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_meta.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_meta.h>
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_meta.h>
+ #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_meta.h>
+ #include <ATen/ops/_fft_c2c_meta.h>
+ #include <ATen/ops/_fft_c2r_meta.h>
+ #include <ATen/ops/_fft_r2c_meta.h>
+ #include <ATen/ops/_fill_mem_eff_dropout_mask_meta.h>
+ #include <ATen/ops/_flash_attention_backward_meta.h>
+ #include <ATen/ops/_flash_attention_forward_meta.h>
+ #include <ATen/ops/_foobar_meta.h>
+ #include <ATen/ops/_foreach_abs_meta.h>
+ #include <ATen/ops/_foreach_acos_meta.h>
+ #include <ATen/ops/_foreach_add_meta.h>
+ #include <ATen/ops/_foreach_addcdiv_meta.h>
+ #include <ATen/ops/_foreach_addcmul_meta.h>
+ #include <ATen/ops/_foreach_asin_meta.h>
+ #include <ATen/ops/_foreach_atan_meta.h>
+ #include <ATen/ops/_foreach_ceil_meta.h>
+ #include <ATen/ops/_foreach_clamp_max_meta.h>
+ #include <ATen/ops/_foreach_clamp_min_meta.h>
+ #include <ATen/ops/_foreach_copy_meta.h>
+ #include <ATen/ops/_foreach_cos_meta.h>
+ #include <ATen/ops/_foreach_cosh_meta.h>
+ #include <ATen/ops/_foreach_div_meta.h>
+ #include <ATen/ops/_foreach_erf_meta.h>
+ #include <ATen/ops/_foreach_erfc_meta.h>
+ #include <ATen/ops/_foreach_exp_meta.h>
+ #include <ATen/ops/_foreach_expm1_meta.h>
+ #include <ATen/ops/_foreach_floor_meta.h>
+ #include <ATen/ops/_foreach_frac_meta.h>
+ #include <ATen/ops/_foreach_lerp_meta.h>
+ #include <ATen/ops/_foreach_lgamma_meta.h>
+ #include <ATen/ops/_foreach_log_meta.h>
+ #include <ATen/ops/_foreach_log10_meta.h>
+ #include <ATen/ops/_foreach_log1p_meta.h>
+ #include <ATen/ops/_foreach_log2_meta.h>
+ #include <ATen/ops/_foreach_maximum_meta.h>
+ #include <ATen/ops/_foreach_minimum_meta.h>
+ #include <ATen/ops/_foreach_mul_meta.h>
+ #include <ATen/ops/_foreach_neg_meta.h>
+ #include <ATen/ops/_foreach_norm_meta.h>
129
+ #include <ATen/ops/_foreach_pow_meta.h>
130
+ #include <ATen/ops/_foreach_reciprocal_meta.h>
131
+ #include <ATen/ops/_foreach_round_meta.h>
132
+ #include <ATen/ops/_foreach_sigmoid_meta.h>
133
+ #include <ATen/ops/_foreach_sign_meta.h>
134
+ #include <ATen/ops/_foreach_sin_meta.h>
135
+ #include <ATen/ops/_foreach_sinh_meta.h>
136
+ #include <ATen/ops/_foreach_sqrt_meta.h>
137
+ #include <ATen/ops/_foreach_sub_meta.h>
138
+ #include <ATen/ops/_foreach_tan_meta.h>
139
+ #include <ATen/ops/_foreach_tanh_meta.h>
140
+ #include <ATen/ops/_foreach_trunc_meta.h>
141
+ #include <ATen/ops/_foreach_zero_meta.h>
142
+ #include <ATen/ops/_functional_assert_async_meta.h>
143
+ #include <ATen/ops/_functional_sym_constrain_range_meta.h>
144
+ #include <ATen/ops/_functional_sym_constrain_range_for_size_meta.h>
145
+ #include <ATen/ops/_fused_adam_meta.h>
146
+ #include <ATen/ops/_fused_adamw_meta.h>
147
+ #include <ATen/ops/_fused_dropout_meta.h>
148
+ #include <ATen/ops/_fused_moving_avg_obs_fq_helper_meta.h>
149
+ #include <ATen/ops/_fused_sdp_choice_meta.h>
150
+ #include <ATen/ops/_fw_primal_meta.h>
151
+ #include <ATen/ops/_fw_primal_copy_meta.h>
152
+ #include <ATen/ops/_gather_sparse_backward_meta.h>
153
+ #include <ATen/ops/_grid_sampler_2d_cpu_fallback_meta.h>
154
+ #include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_meta.h>
155
+ #include <ATen/ops/_has_compatible_shallow_copy_type_meta.h>
156
+ #include <ATen/ops/_has_same_storage_numel_meta.h>
157
+ #include <ATen/ops/_histogramdd_bin_edges_meta.h>
158
+ #include <ATen/ops/_histogramdd_from_bin_cts_meta.h>
159
+ #include <ATen/ops/_histogramdd_from_bin_tensors_meta.h>
160
+ #include <ATen/ops/_index_put_impl_meta.h>
161
+ #include <ATen/ops/_indices_meta.h>
162
+ #include <ATen/ops/_indices_copy_meta.h>
163
+ #include <ATen/ops/_int_mm_meta.h>
164
+ #include <ATen/ops/_is_all_true_meta.h>
165
+ #include <ATen/ops/_is_any_true_meta.h>
166
+ #include <ATen/ops/_is_zerotensor_meta.h>
167
+ #include <ATen/ops/_linalg_check_errors_meta.h>
168
+ #include <ATen/ops/_linalg_det_meta.h>
169
+ #include <ATen/ops/_linalg_eigh_meta.h>
170
+ #include <ATen/ops/_linalg_slogdet_meta.h>
171
+ #include <ATen/ops/_linalg_solve_ex_meta.h>
172
+ #include <ATen/ops/_linalg_svd_meta.h>
173
+ #include <ATen/ops/_local_scalar_dense_meta.h>
174
+ #include <ATen/ops/_log_softmax_meta.h>
175
+ #include <ATen/ops/_log_softmax_backward_data_meta.h>
176
+ #include <ATen/ops/_logcumsumexp_meta.h>
177
+ #include <ATen/ops/_lstm_mps_meta.h>
178
+ #include <ATen/ops/_lu_with_info_meta.h>
179
+ #include <ATen/ops/_make_dep_token_meta.h>
180
+ #include <ATen/ops/_make_dual_meta.h>
181
+ #include <ATen/ops/_make_dual_copy_meta.h>
182
+ #include <ATen/ops/_make_per_channel_quantized_tensor_meta.h>
183
+ #include <ATen/ops/_make_per_tensor_quantized_tensor_meta.h>
184
+ #include <ATen/ops/_masked_scale_meta.h>
185
+ #include <ATen/ops/_masked_softmax_meta.h>
186
+ #include <ATen/ops/_masked_softmax_backward_meta.h>
187
+ #include <ATen/ops/_mixed_dtypes_linear_meta.h>
188
+ #include <ATen/ops/_mkldnn_reshape_meta.h>
189
+ #include <ATen/ops/_mkldnn_transpose_meta.h>
190
+ #include <ATen/ops/_mps_convolution_meta.h>
191
+ #include <ATen/ops/_mps_convolution_transpose_meta.h>
192
+ #include <ATen/ops/_native_batch_norm_legit_meta.h>
193
+ #include <ATen/ops/_native_batch_norm_legit_no_training_meta.h>
194
+ #include <ATen/ops/_native_multi_head_attention_meta.h>
195
+ #include <ATen/ops/_neg_view_meta.h>
196
+ #include <ATen/ops/_neg_view_copy_meta.h>
197
+ #include <ATen/ops/_nested_from_padded_meta.h>
198
+ #include <ATen/ops/_nested_from_padded_and_nested_example_meta.h>
199
+ #include <ATen/ops/_nested_select_backward_meta.h>
200
+ #include <ATen/ops/_nested_sum_backward_meta.h>
201
+ #include <ATen/ops/_nested_tensor_from_mask_meta.h>
202
+ #include <ATen/ops/_nested_tensor_from_mask_left_aligned_meta.h>
203
+ #include <ATen/ops/_nested_tensor_from_tensor_list_meta.h>
204
+ #include <ATen/ops/_nested_tensor_size_meta.h>
205
+ #include <ATen/ops/_nested_tensor_softmax_with_shape_meta.h>
206
+ #include <ATen/ops/_nested_tensor_storage_offsets_meta.h>
207
+ #include <ATen/ops/_nested_tensor_strides_meta.h>
208
+ #include <ATen/ops/_nested_view_from_buffer_meta.h>
209
+ #include <ATen/ops/_nested_view_from_buffer_copy_meta.h>
210
+ #include <ATen/ops/_new_zeros_with_same_feature_meta_meta.h>
211
+ #include <ATen/ops/_nnpack_available_meta.h>
212
+ #include <ATen/ops/_nnpack_spatial_convolution_meta.h>
213
+ #include <ATen/ops/_nnz_meta.h>
214
+ #include <ATen/ops/_pack_padded_sequence_meta.h>
215
+ #include <ATen/ops/_pack_padded_sequence_backward_meta.h>
216
+ #include <ATen/ops/_pad_circular_meta.h>
217
+ #include <ATen/ops/_pad_enum_meta.h>
218
+ #include <ATen/ops/_pad_packed_sequence_meta.h>
219
+ #include <ATen/ops/_pdist_backward_meta.h>
220
+ #include <ATen/ops/_pdist_forward_meta.h>
221
+ #include <ATen/ops/_pin_memory_meta.h>
222
+ #include <ATen/ops/_prelu_kernel_meta.h>
223
+ #include <ATen/ops/_prelu_kernel_backward_meta.h>
224
+ #include <ATen/ops/_propagate_xla_data_meta.h>
225
+ #include <ATen/ops/_remove_batch_dim_meta.h>
226
+ #include <ATen/ops/_reshape_alias_meta.h>
227
+ #include <ATen/ops/_reshape_alias_copy_meta.h>
228
+ #include <ATen/ops/_reshape_copy_meta.h>
229
+ #include <ATen/ops/_reshape_from_tensor_meta.h>
230
+ #include <ATen/ops/_resize_output_meta.h>
231
+ #include <ATen/ops/_rowwise_prune_meta.h>
232
+ #include <ATen/ops/_sample_dirichlet_meta.h>
233
+ #include <ATen/ops/_saturate_weight_to_fp16_meta.h>
234
+ #include <ATen/ops/_scaled_dot_product_attention_math_meta.h>
235
+ #include <ATen/ops/_scaled_dot_product_efficient_attention_meta.h>
236
+ #include <ATen/ops/_scaled_dot_product_efficient_attention_backward_meta.h>
237
+ #include <ATen/ops/_scaled_dot_product_flash_attention_meta.h>
238
+ #include <ATen/ops/_scaled_dot_product_flash_attention_backward_meta.h>
239
+ #include <ATen/ops/_scaled_mm_meta.h>
240
+ #include <ATen/ops/_segment_reduce_backward_meta.h>
241
+ #include <ATen/ops/_shape_as_tensor_meta.h>
242
+ #include <ATen/ops/_slow_conv2d_backward_meta.h>
243
+ #include <ATen/ops/_slow_conv2d_forward_meta.h>
244
+ #include <ATen/ops/_sobol_engine_draw_meta.h>
245
+ #include <ATen/ops/_sobol_engine_ff_meta.h>
246
+ #include <ATen/ops/_sobol_engine_initialize_state_meta.h>
247
+ #include <ATen/ops/_sobol_engine_scramble_meta.h>
248
+ #include <ATen/ops/_softmax_meta.h>
249
+ #include <ATen/ops/_softmax_backward_data_meta.h>
250
+ #include <ATen/ops/_sparse_addmm_meta.h>
251
+ #include <ATen/ops/_sparse_broadcast_to_meta.h>
252
+ #include <ATen/ops/_sparse_broadcast_to_copy_meta.h>
253
+ #include <ATen/ops/_sparse_bsc_tensor_unsafe_meta.h>
254
+ #include <ATen/ops/_sparse_bsr_tensor_unsafe_meta.h>
255
+ #include <ATen/ops/_sparse_compressed_tensor_unsafe_meta.h>
256
+ #include <ATen/ops/_sparse_coo_tensor_unsafe_meta.h>
257
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_meta.h>
258
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_meta.h>
259
+ #include <ATen/ops/_sparse_csc_tensor_unsafe_meta.h>
260
+ #include <ATen/ops/_sparse_csr_prod_meta.h>
261
+ #include <ATen/ops/_sparse_csr_sum_meta.h>
262
+ #include <ATen/ops/_sparse_csr_tensor_unsafe_meta.h>
263
+ #include <ATen/ops/_sparse_log_softmax_meta.h>
264
+ #include <ATen/ops/_sparse_log_softmax_backward_data_meta.h>
265
+ #include <ATen/ops/_sparse_mask_projection_meta.h>
266
+ #include <ATen/ops/_sparse_mm_meta.h>
267
+ #include <ATen/ops/_sparse_mm_reduce_impl_meta.h>
268
+ #include <ATen/ops/_sparse_mm_reduce_impl_backward_meta.h>
269
+ #include <ATen/ops/_sparse_semi_structured_linear_meta.h>
270
+ #include <ATen/ops/_sparse_softmax_meta.h>
271
+ #include <ATen/ops/_sparse_softmax_backward_data_meta.h>
272
+ #include <ATen/ops/_sparse_sparse_matmul_meta.h>
273
+ #include <ATen/ops/_sparse_sum_meta.h>
274
+ #include <ATen/ops/_sparse_sum_backward_meta.h>
275
+ #include <ATen/ops/_spdiags_meta.h>
276
+ #include <ATen/ops/_stack_meta.h>
277
+ #include <ATen/ops/_standard_gamma_meta.h>
278
+ #include <ATen/ops/_standard_gamma_grad_meta.h>
279
+ #include <ATen/ops/_test_ambiguous_defaults_meta.h>
280
+ #include <ATen/ops/_test_autograd_multiple_dispatch_meta.h>
281
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_meta.h>
282
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_meta.h>
283
+ #include <ATen/ops/_test_check_tensor_meta.h>
284
+ #include <ATen/ops/_test_functorch_fallback_meta.h>
285
+ #include <ATen/ops/_test_optional_filled_intlist_meta.h>
286
+ #include <ATen/ops/_test_optional_floatlist_meta.h>
287
+ #include <ATen/ops/_test_optional_intlist_meta.h>
288
+ #include <ATen/ops/_test_serialization_subcmul_meta.h>
289
+ #include <ATen/ops/_test_string_default_meta.h>
290
+ #include <ATen/ops/_test_warn_in_autograd_meta.h>
291
+ #include <ATen/ops/_thnn_differentiable_gru_cell_backward_meta.h>
292
+ #include <ATen/ops/_thnn_differentiable_lstm_cell_backward_meta.h>
293
+ #include <ATen/ops/_thnn_fused_gru_cell_meta.h>
294
+ #include <ATen/ops/_thnn_fused_gru_cell_backward_meta.h>
295
+ #include <ATen/ops/_thnn_fused_lstm_cell_meta.h>
296
+ #include <ATen/ops/_thnn_fused_lstm_cell_backward_meta.h>
297
+ #include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_meta.h>
298
+ #include <ATen/ops/_to_copy_meta.h>
299
+ #include <ATen/ops/_to_cpu_meta.h>
300
+ #include <ATen/ops/_to_dense_meta.h>
301
+ #include <ATen/ops/_to_sparse_meta.h>
302
+ #include <ATen/ops/_to_sparse_bsc_meta.h>
303
+ #include <ATen/ops/_to_sparse_bsr_meta.h>
304
+ #include <ATen/ops/_to_sparse_csc_meta.h>
305
+ #include <ATen/ops/_to_sparse_csr_meta.h>
306
+ #include <ATen/ops/_to_sparse_semi_structured_meta.h>
307
+ #include <ATen/ops/_transform_bias_rescale_qkv_meta.h>
308
+ #include <ATen/ops/_transformer_encoder_layer_fwd_meta.h>
309
+ #include <ATen/ops/_trilinear_meta.h>
310
+ #include <ATen/ops/_triton_multi_head_attention_meta.h>
311
+ #include <ATen/ops/_triton_scaled_dot_attention_meta.h>
312
+ #include <ATen/ops/_unique_meta.h>
313
+ #include <ATen/ops/_unique2_meta.h>
314
+ #include <ATen/ops/_unpack_dual_meta.h>
315
+ #include <ATen/ops/_unsafe_index_meta.h>
316
+ #include <ATen/ops/_unsafe_index_put_meta.h>
317
+ #include <ATen/ops/_unsafe_view_meta.h>
318
+ #include <ATen/ops/_upsample_bicubic2d_aa_meta.h>
319
+ #include <ATen/ops/_upsample_bicubic2d_aa_backward_meta.h>
320
+ #include <ATen/ops/_upsample_bilinear2d_aa_meta.h>
321
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward_meta.h>
322
+ #include <ATen/ops/_upsample_nearest_exact1d_meta.h>
323
+ #include <ATen/ops/_upsample_nearest_exact1d_backward_meta.h>
324
+ #include <ATen/ops/_upsample_nearest_exact2d_meta.h>
325
+ #include <ATen/ops/_upsample_nearest_exact2d_backward_meta.h>
326
+ #include <ATen/ops/_upsample_nearest_exact3d_meta.h>
327
+ #include <ATen/ops/_upsample_nearest_exact3d_backward_meta.h>
328
+ #include <ATen/ops/_use_cudnn_ctc_loss_meta.h>
329
+ #include <ATen/ops/_use_cudnn_rnn_flatten_weight_meta.h>
330
+ #include <ATen/ops/_validate_compressed_sparse_indices_meta.h>
331
+ #include <ATen/ops/_validate_sparse_bsc_tensor_args_meta.h>
332
+ #include <ATen/ops/_validate_sparse_bsr_tensor_args_meta.h>
333
+ #include <ATen/ops/_validate_sparse_compressed_tensor_args_meta.h>
334
+ #include <ATen/ops/_validate_sparse_coo_tensor_args_meta.h>
335
+ #include <ATen/ops/_validate_sparse_csc_tensor_args_meta.h>
336
+ #include <ATen/ops/_validate_sparse_csr_tensor_args_meta.h>
337
+ #include <ATen/ops/_values_meta.h>
338
+ #include <ATen/ops/_values_copy_meta.h>
339
+ #include <ATen/ops/_version_meta.h>
340
+ #include <ATen/ops/_weight_int4pack_mm_meta.h>
341
+ #include <ATen/ops/_weight_norm_meta.h>
342
+ #include <ATen/ops/_weight_norm_differentiable_backward_meta.h>
343
+ #include <ATen/ops/_weight_norm_interface_meta.h>
344
+ #include <ATen/ops/_weight_norm_interface_backward_meta.h>
345
+ #include <ATen/ops/abs_meta.h>
346
+ #include <ATen/ops/absolute_meta.h>
347
+ #include <ATen/ops/acos_meta.h>
348
+ #include <ATen/ops/acosh_meta.h>
349
+ #include <ATen/ops/adaptive_avg_pool1d_meta.h>
350
+ #include <ATen/ops/adaptive_avg_pool2d_meta.h>
351
+ #include <ATen/ops/adaptive_avg_pool3d_meta.h>
352
+ #include <ATen/ops/adaptive_avg_pool3d_backward_meta.h>
353
+ #include <ATen/ops/adaptive_max_pool1d_meta.h>
354
+ #include <ATen/ops/adaptive_max_pool2d_meta.h>
355
+ #include <ATen/ops/adaptive_max_pool2d_backward_meta.h>
356
+ #include <ATen/ops/adaptive_max_pool3d_meta.h>
357
+ #include <ATen/ops/adaptive_max_pool3d_backward_meta.h>
358
+ #include <ATen/ops/add_meta.h>
359
+ #include <ATen/ops/addbmm_meta.h>
360
+ #include <ATen/ops/addcdiv_meta.h>
361
+ #include <ATen/ops/addcmul_meta.h>
362
+ #include <ATen/ops/addmm_meta.h>
363
+ #include <ATen/ops/addmv_meta.h>
364
+ #include <ATen/ops/addr_meta.h>
365
+ #include <ATen/ops/adjoint_meta.h>
366
+ #include <ATen/ops/affine_grid_generator_meta.h>
367
+ #include <ATen/ops/affine_grid_generator_backward_meta.h>
368
+ #include <ATen/ops/alias_meta.h>
369
+ #include <ATen/ops/alias_copy_meta.h>
370
+ #include <ATen/ops/align_as_meta.h>
371
+ #include <ATen/ops/align_tensors_meta.h>
372
+ #include <ATen/ops/align_to_meta.h>
373
+ #include <ATen/ops/all_meta.h>
374
+ #include <ATen/ops/allclose_meta.h>
375
+ #include <ATen/ops/alpha_dropout_meta.h>
376
+ #include <ATen/ops/amax_meta.h>
377
+ #include <ATen/ops/amin_meta.h>
378
+ #include <ATen/ops/aminmax_meta.h>
379
+ #include <ATen/ops/and_meta.h>
380
+ #include <ATen/ops/angle_meta.h>
381
+ #include <ATen/ops/any_meta.h>
382
+ #include <ATen/ops/arange_meta.h>
383
+ #include <ATen/ops/arccos_meta.h>
384
+ #include <ATen/ops/arccosh_meta.h>
385
+ #include <ATen/ops/arcsin_meta.h>
386
+ #include <ATen/ops/arcsinh_meta.h>
387
+ #include <ATen/ops/arctan_meta.h>
388
+ #include <ATen/ops/arctan2_meta.h>
389
+ #include <ATen/ops/arctanh_meta.h>
390
+ #include <ATen/ops/argmax_meta.h>
391
+ #include <ATen/ops/argmin_meta.h>
392
+ #include <ATen/ops/argsort_meta.h>
393
+ #include <ATen/ops/argwhere_meta.h>
394
+ #include <ATen/ops/as_strided_meta.h>
395
+ #include <ATen/ops/as_strided_copy_meta.h>
396
+ #include <ATen/ops/as_strided_scatter_meta.h>
397
+ #include <ATen/ops/asin_meta.h>
398
+ #include <ATen/ops/asinh_meta.h>
399
+ #include <ATen/ops/atan_meta.h>
400
+ #include <ATen/ops/atan2_meta.h>
401
+ #include <ATen/ops/atanh_meta.h>
402
+ #include <ATen/ops/atleast_1d_meta.h>
403
+ #include <ATen/ops/atleast_2d_meta.h>
404
+ #include <ATen/ops/atleast_3d_meta.h>
405
+ #include <ATen/ops/avg_pool1d_meta.h>
406
+ #include <ATen/ops/avg_pool2d_meta.h>
407
+ #include <ATen/ops/avg_pool2d_backward_meta.h>
408
+ #include <ATen/ops/avg_pool3d_meta.h>
409
+ #include <ATen/ops/avg_pool3d_backward_meta.h>
410
+ #include <ATen/ops/baddbmm_meta.h>
411
+ #include <ATen/ops/bartlett_window_meta.h>
412
+ #include <ATen/ops/batch_norm_meta.h>
413
+ #include <ATen/ops/batch_norm_backward_elemt_meta.h>
414
+ #include <ATen/ops/batch_norm_backward_reduce_meta.h>
415
+ #include <ATen/ops/batch_norm_elemt_meta.h>
416
+ #include <ATen/ops/batch_norm_gather_stats_meta.h>
417
+ #include <ATen/ops/batch_norm_gather_stats_with_counts_meta.h>
418
+ #include <ATen/ops/batch_norm_stats_meta.h>
419
+ #include <ATen/ops/batch_norm_update_stats_meta.h>
420
+ #include <ATen/ops/bernoulli_meta.h>
421
+ #include <ATen/ops/bilinear_meta.h>
422
+ #include <ATen/ops/binary_cross_entropy_meta.h>
423
+ #include <ATen/ops/binary_cross_entropy_backward_meta.h>
424
+ #include <ATen/ops/binary_cross_entropy_with_logits_meta.h>
425
+ #include <ATen/ops/bincount_meta.h>
426
+ #include <ATen/ops/binomial_meta.h>
427
+ #include <ATen/ops/bitwise_and_meta.h>
428
+ #include <ATen/ops/bitwise_left_shift_meta.h>
429
+ #include <ATen/ops/bitwise_not_meta.h>
430
+ #include <ATen/ops/bitwise_or_meta.h>
431
+ #include <ATen/ops/bitwise_right_shift_meta.h>
432
+ #include <ATen/ops/bitwise_xor_meta.h>
433
+ #include <ATen/ops/blackman_window_meta.h>
434
+ #include <ATen/ops/block_diag_meta.h>
435
+ #include <ATen/ops/bmm_meta.h>
436
+ #include <ATen/ops/broadcast_tensors_meta.h>
437
+ #include <ATen/ops/broadcast_to_meta.h>
438
+ #include <ATen/ops/bucketize_meta.h>
439
+ #include <ATen/ops/can_cast_meta.h>
440
+ #include <ATen/ops/cartesian_prod_meta.h>
441
+ #include <ATen/ops/cat_meta.h>
442
+ #include <ATen/ops/cauchy_meta.h>
443
+ #include <ATen/ops/ccol_indices_meta.h>
444
+ #include <ATen/ops/ccol_indices_copy_meta.h>
445
+ #include <ATen/ops/cdist_meta.h>
446
+ #include <ATen/ops/ceil_meta.h>
447
+ #include <ATen/ops/celu_meta.h>
448
+ #include <ATen/ops/chain_matmul_meta.h>
449
+ #include <ATen/ops/chalf_meta.h>
450
+ #include <ATen/ops/channel_shuffle_meta.h>
451
+ #include <ATen/ops/cholesky_meta.h>
452
+ #include <ATen/ops/cholesky_inverse_meta.h>
453
+ #include <ATen/ops/cholesky_solve_meta.h>
454
+ #include <ATen/ops/choose_qparams_optimized_meta.h>
455
+ #include <ATen/ops/chunk_meta.h>
456
+ #include <ATen/ops/clamp_meta.h>
457
+ #include <ATen/ops/clamp_max_meta.h>
458
+ #include <ATen/ops/clamp_min_meta.h>
459
+ #include <ATen/ops/clip_meta.h>
460
+ #include <ATen/ops/clone_meta.h>
461
+ #include <ATen/ops/coalesce_meta.h>
462
+ #include <ATen/ops/col2im_meta.h>
463
+ #include <ATen/ops/col_indices_meta.h>
464
+ #include <ATen/ops/col_indices_copy_meta.h>
465
+ #include <ATen/ops/column_stack_meta.h>
466
+ #include <ATen/ops/combinations_meta.h>
467
+ #include <ATen/ops/complex_meta.h>
468
+ #include <ATen/ops/concat_meta.h>
469
+ #include <ATen/ops/concatenate_meta.h>
470
+ #include <ATen/ops/conj_meta.h>
471
+ #include <ATen/ops/conj_physical_meta.h>
472
+ #include <ATen/ops/constant_pad_nd_meta.h>
473
+ #include <ATen/ops/contiguous_meta.h>
474
+ #include <ATen/ops/conv1d_meta.h>
475
+ #include <ATen/ops/conv2d_meta.h>
476
+ #include <ATen/ops/conv3d_meta.h>
477
+ #include <ATen/ops/conv_depthwise3d_meta.h>
478
+ #include <ATen/ops/conv_tbc_meta.h>
479
+ #include <ATen/ops/conv_tbc_backward_meta.h>
480
+ #include <ATen/ops/conv_transpose1d_meta.h>
481
+ #include <ATen/ops/conv_transpose2d_meta.h>
482
+ #include <ATen/ops/conv_transpose3d_meta.h>
483
+ #include <ATen/ops/convolution_meta.h>
484
+ #include <ATen/ops/convolution_backward_meta.h>
485
+ #include <ATen/ops/convolution_backward_overrideable_meta.h>
486
+ #include <ATen/ops/convolution_overrideable_meta.h>
487
+ #include <ATen/ops/copy_meta.h>
488
+ #include <ATen/ops/copy_sparse_to_sparse_meta.h>
489
+ #include <ATen/ops/copysign_meta.h>
490
+ #include <ATen/ops/corrcoef_meta.h>
491
+ #include <ATen/ops/cos_meta.h>
492
+ #include <ATen/ops/cosh_meta.h>
493
+ #include <ATen/ops/cosine_embedding_loss_meta.h>
494
+ #include <ATen/ops/cosine_similarity_meta.h>
495
+ #include <ATen/ops/count_nonzero_meta.h>
496
+ #include <ATen/ops/cov_meta.h>
497
+ #include <ATen/ops/cross_meta.h>
498
+ #include <ATen/ops/cross_entropy_loss_meta.h>
499
+ #include <ATen/ops/crow_indices_meta.h>
500
+ #include <ATen/ops/crow_indices_copy_meta.h>
501
+ #include <ATen/ops/ctc_loss_meta.h>
502
+ #include <ATen/ops/cudnn_affine_grid_generator_meta.h>
503
+ #include <ATen/ops/cudnn_affine_grid_generator_backward_meta.h>
504
+ #include <ATen/ops/cudnn_batch_norm_meta.h>
505
+ #include <ATen/ops/cudnn_batch_norm_backward_meta.h>
506
+ #include <ATen/ops/cudnn_convolution_meta.h>
507
+ #include <ATen/ops/cudnn_convolution_add_relu_meta.h>
508
+ #include <ATen/ops/cudnn_convolution_relu_meta.h>
509
+ #include <ATen/ops/cudnn_convolution_transpose_meta.h>
510
+ #include <ATen/ops/cudnn_grid_sampler_meta.h>
511
+ #include <ATen/ops/cudnn_grid_sampler_backward_meta.h>
512
+ #include <ATen/ops/cudnn_is_acceptable_meta.h>
513
+ #include <ATen/ops/cummax_meta.h>
514
+ #include <ATen/ops/cummaxmin_backward_meta.h>
515
+ #include <ATen/ops/cummin_meta.h>
516
+ #include <ATen/ops/cumprod_meta.h>
517
+ #include <ATen/ops/cumprod_backward_meta.h>
518
+ #include <ATen/ops/cumsum_meta.h>
519
+ #include <ATen/ops/cumulative_trapezoid_meta.h>
520
+ #include <ATen/ops/data_meta.h>
521
+ #include <ATen/ops/deg2rad_meta.h>
522
+ #include <ATen/ops/dense_dim_meta.h>
523
+ #include <ATen/ops/dequantize_meta.h>
524
+ #include <ATen/ops/det_meta.h>
525
+ #include <ATen/ops/detach_meta.h>
526
+ #include <ATen/ops/detach_copy_meta.h>
527
+ #include <ATen/ops/diag_meta.h>
528
+ #include <ATen/ops/diag_embed_meta.h>
529
+ #include <ATen/ops/diagflat_meta.h>
530
+ #include <ATen/ops/diagonal_meta.h>
531
+ #include <ATen/ops/diagonal_backward_meta.h>
532
+ #include <ATen/ops/diagonal_copy_meta.h>
533
+ #include <ATen/ops/diagonal_scatter_meta.h>
534
+ #include <ATen/ops/diff_meta.h>
535
+ #include <ATen/ops/digamma_meta.h>
536
+ #include <ATen/ops/dist_meta.h>
537
+ #include <ATen/ops/div_meta.h>
538
+ #include <ATen/ops/divide_meta.h>
539
+ #include <ATen/ops/dot_meta.h>
540
+ #include <ATen/ops/dropout_meta.h>
541
+ #include <ATen/ops/dsplit_meta.h>
542
+ #include <ATen/ops/dstack_meta.h>
543
+ #include <ATen/ops/einsum_meta.h>
544
+ #include <ATen/ops/elu_meta.h>
545
+ #include <ATen/ops/elu_backward_meta.h>
546
+ #include <ATen/ops/embedding_meta.h>
547
+ #include <ATen/ops/embedding_backward_meta.h>
548
+ #include <ATen/ops/embedding_bag_meta.h>
549
+ #include <ATen/ops/embedding_dense_backward_meta.h>
550
+ #include <ATen/ops/embedding_renorm_meta.h>
551
+ #include <ATen/ops/embedding_sparse_backward_meta.h>
552
+ #include <ATen/ops/empty_meta.h>
553
+ #include <ATen/ops/empty_like_meta.h>
554
+ #include <ATen/ops/empty_permuted_meta.h>
555
+ #include <ATen/ops/empty_quantized_meta.h>
556
+ #include <ATen/ops/empty_strided_meta.h>
557
+ #include <ATen/ops/eq_meta.h>
558
+ #include <ATen/ops/equal_meta.h>
559
+ #include <ATen/ops/erf_meta.h>
560
+ #include <ATen/ops/erfc_meta.h>
561
+ #include <ATen/ops/erfinv_meta.h>
562
+ #include <ATen/ops/exp_meta.h>
563
+ #include <ATen/ops/exp2_meta.h>
564
+ #include <ATen/ops/expand_meta.h>
565
+ #include <ATen/ops/expand_as_meta.h>
566
+ #include <ATen/ops/expand_copy_meta.h>
567
+ #include <ATen/ops/expm1_meta.h>
568
+ #include <ATen/ops/exponential_meta.h>
569
+ #include <ATen/ops/eye_meta.h>
570
+ #include <ATen/ops/fake_quantize_per_channel_affine_meta.h>
571
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_meta.h>
572
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_meta.h>
573
+ #include <ATen/ops/fake_quantize_per_tensor_affine_meta.h>
574
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_meta.h>
575
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_meta.h>
576
+ #include <ATen/ops/fbgemm_linear_fp16_weight_meta.h>
577
+ #include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_meta.h>
578
+ #include <ATen/ops/fbgemm_linear_int8_weight_meta.h>
579
+ #include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_meta.h>
580
+ #include <ATen/ops/fbgemm_linear_quantize_weight_meta.h>
581
+ #include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_meta.h>
582
+ #include <ATen/ops/fbgemm_pack_quantized_matrix_meta.h>
583
+ #include <ATen/ops/feature_alpha_dropout_meta.h>
584
+ #include <ATen/ops/feature_dropout_meta.h>
585
+ #include <ATen/ops/fft_fft_meta.h>
586
+ #include <ATen/ops/fft_fft2_meta.h>
587
+ #include <ATen/ops/fft_fftfreq_meta.h>
588
+ #include <ATen/ops/fft_fftn_meta.h>
589
+ #include <ATen/ops/fft_fftshift_meta.h>
590
+ #include <ATen/ops/fft_hfft_meta.h>
591
+ #include <ATen/ops/fft_hfft2_meta.h>
592
+ #include <ATen/ops/fft_hfftn_meta.h>
593
+ #include <ATen/ops/fft_ifft_meta.h>
594
+ #include <ATen/ops/fft_ifft2_meta.h>
595
+ #include <ATen/ops/fft_ifftn_meta.h>
596
+ #include <ATen/ops/fft_ifftshift_meta.h>
597
+ #include <ATen/ops/fft_ihfft_meta.h>
598
+ #include <ATen/ops/fft_ihfft2_meta.h>
599
+ #include <ATen/ops/fft_ihfftn_meta.h>
600
+ #include <ATen/ops/fft_irfft_meta.h>
601
+ #include <ATen/ops/fft_irfft2_meta.h>
602
+ #include <ATen/ops/fft_irfftn_meta.h>
603
+ #include <ATen/ops/fft_rfft_meta.h>
604
+ #include <ATen/ops/fft_rfft2_meta.h>
605
+ #include <ATen/ops/fft_rfftfreq_meta.h>
606
+ #include <ATen/ops/fft_rfftn_meta.h>
607
+ #include <ATen/ops/fill_meta.h>
608
+ #include <ATen/ops/fill_diagonal_meta.h>
609
+ #include <ATen/ops/fix_meta.h>
610
+ #include <ATen/ops/flatten_meta.h>
611
+ #include <ATen/ops/flatten_dense_tensors_meta.h>
612
+ #include <ATen/ops/flip_meta.h>
613
+ #include <ATen/ops/fliplr_meta.h>
614
+ #include <ATen/ops/flipud_meta.h>
615
+ #include <ATen/ops/float_power_meta.h>
616
+ #include <ATen/ops/floor_meta.h>
617
+ #include <ATen/ops/floor_divide_meta.h>
618
+ #include <ATen/ops/fmax_meta.h>
619
+ #include <ATen/ops/fmin_meta.h>
620
+ #include <ATen/ops/fmod_meta.h>
621
+ #include <ATen/ops/frac_meta.h>
622
+ #include <ATen/ops/fractional_max_pool2d_meta.h>
623
+ #include <ATen/ops/fractional_max_pool2d_backward_meta.h>
624
+ #include <ATen/ops/fractional_max_pool3d_meta.h>
625
+ #include <ATen/ops/fractional_max_pool3d_backward_meta.h>
626
+ #include <ATen/ops/frexp_meta.h>
627
+ #include <ATen/ops/frobenius_norm_meta.h>
628
+ #include <ATen/ops/from_file_meta.h>
629
+ #include <ATen/ops/full_meta.h>
630
+ #include <ATen/ops/full_like_meta.h>
631
+ #include <ATen/ops/fused_moving_avg_obs_fake_quant_meta.h>
632
+ #include <ATen/ops/gather_meta.h>
633
+ #include <ATen/ops/gather_backward_meta.h>
634
+ #include <ATen/ops/gcd_meta.h>
635
+ #include <ATen/ops/ge_meta.h>
636
+ #include <ATen/ops/gelu_meta.h>
637
+ #include <ATen/ops/gelu_backward_meta.h>
638
+ #include <ATen/ops/geometric_meta.h>
639
+ #include <ATen/ops/geqrf_meta.h>
640
+ #include <ATen/ops/ger_meta.h>
641
+ #include <ATen/ops/glu_meta.h>
642
+ #include <ATen/ops/glu_backward_meta.h>
643
+ #include <ATen/ops/glu_backward_jvp_meta.h>
644
+ #include <ATen/ops/glu_jvp_meta.h>
645
+ #include <ATen/ops/gradient_meta.h>
646
+ #include <ATen/ops/greater_meta.h>
647
+ #include <ATen/ops/greater_equal_meta.h>
648
+ #include <ATen/ops/grid_sampler_meta.h>
649
+ #include <ATen/ops/grid_sampler_2d_meta.h>
650
+ #include <ATen/ops/grid_sampler_2d_backward_meta.h>
651
+ #include <ATen/ops/grid_sampler_3d_meta.h>
652
+ #include <ATen/ops/grid_sampler_3d_backward_meta.h>
653
+ #include <ATen/ops/group_norm_meta.h>
654
+ #include <ATen/ops/gru_meta.h>
655
+ #include <ATen/ops/gru_cell_meta.h>
656
+ #include <ATen/ops/gt_meta.h>
657
+ #include <ATen/ops/hamming_window_meta.h>
658
+ #include <ATen/ops/hann_window_meta.h>
659
+ #include <ATen/ops/hardshrink_meta.h>
660
+ #include <ATen/ops/hardshrink_backward_meta.h>
661
+ #include <ATen/ops/hardsigmoid_meta.h>
662
+ #include <ATen/ops/hardsigmoid_backward_meta.h>
663
+ #include <ATen/ops/hardswish_meta.h>
664
+ #include <ATen/ops/hardswish_backward_meta.h>
665
+ #include <ATen/ops/hardtanh_meta.h>
666
+ #include <ATen/ops/hardtanh_backward_meta.h>
667
+ #include <ATen/ops/heaviside_meta.h>
668
+ #include <ATen/ops/hinge_embedding_loss_meta.h>
669
+ #include <ATen/ops/histc_meta.h>
670
+ #include <ATen/ops/histogram_meta.h>
671
+ #include <ATen/ops/histogramdd_meta.h>
672
+ #include <ATen/ops/hsplit_meta.h>
673
+ #include <ATen/ops/hspmm_meta.h>
674
+ #include <ATen/ops/hstack_meta.h>
675
+ #include <ATen/ops/huber_loss_meta.h>
676
+ #include <ATen/ops/huber_loss_backward_meta.h>
677
+ #include <ATen/ops/hypot_meta.h>
678
+ #include <ATen/ops/i0_meta.h>
679
+ #include <ATen/ops/igamma_meta.h>
680
+ #include <ATen/ops/igammac_meta.h>
681
+ #include <ATen/ops/im2col_meta.h>
682
+ #include <ATen/ops/imag_meta.h>
683
+ #include <ATen/ops/index_meta.h>
684
+ #include <ATen/ops/index_add_meta.h>
685
+ #include <ATen/ops/index_copy_meta.h>
686
+ #include <ATen/ops/index_fill_meta.h>
687
+ #include <ATen/ops/index_put_meta.h>
688
+ #include <ATen/ops/index_reduce_meta.h>
689
+ #include <ATen/ops/index_select_meta.h>
690
+ #include <ATen/ops/index_select_backward_meta.h>
691
+ #include <ATen/ops/indices_meta.h>
692
+ #include <ATen/ops/indices_copy_meta.h>
693
+ #include <ATen/ops/infinitely_differentiable_gelu_backward_meta.h>
694
+ #include <ATen/ops/inner_meta.h>
695
+ #include <ATen/ops/instance_norm_meta.h>
696
+ #include <ATen/ops/int_repr_meta.h>
697
+ #include <ATen/ops/inverse_meta.h>
698
+ #include <ATen/ops/is_coalesced_meta.h>
699
+ #include <ATen/ops/is_complex_meta.h>
700
+ #include <ATen/ops/is_conj_meta.h>
701
+ #include <ATen/ops/is_distributed_meta.h>
702
+ #include <ATen/ops/is_floating_point_meta.h>
703
+ #include <ATen/ops/is_inference_meta.h>
704
+ #include <ATen/ops/is_leaf_meta.h>
705
+ #include <ATen/ops/is_neg_meta.h>
706
+ #include <ATen/ops/is_nonzero_meta.h>
707
+ #include <ATen/ops/is_pinned_meta.h>
708
+ #include <ATen/ops/is_same_size_meta.h>
709
+ #include <ATen/ops/is_set_to_meta.h>
710
+ #include <ATen/ops/is_signed_meta.h>
711
+ #include <ATen/ops/is_vulkan_available_meta.h>
712
+ #include <ATen/ops/isclose_meta.h>
713
+ #include <ATen/ops/isfinite_meta.h>
714
+ #include <ATen/ops/isin_meta.h>
715
+ #include <ATen/ops/isinf_meta.h>
716
+ #include <ATen/ops/isnan_meta.h>
717
+ #include <ATen/ops/isneginf_meta.h>
718
+ #include <ATen/ops/isposinf_meta.h>
719
+ #include <ATen/ops/isreal_meta.h>
720
+ #include <ATen/ops/istft_meta.h>
721
+ #include <ATen/ops/item_meta.h>
722
+ #include <ATen/ops/kaiser_window_meta.h>
723
+ #include <ATen/ops/kl_div_meta.h>
724
+ #include <ATen/ops/kron_meta.h>
725
+ #include <ATen/ops/kthvalue_meta.h>
726
+ #include <ATen/ops/l1_loss_meta.h>
727
+ #include <ATen/ops/layer_norm_meta.h>
728
+ #include <ATen/ops/lcm_meta.h>
729
+ #include <ATen/ops/ldexp_meta.h>
730
+ #include <ATen/ops/le_meta.h>
731
+ #include <ATen/ops/leaky_relu_meta.h>
732
+ #include <ATen/ops/leaky_relu_backward_meta.h>
733
+ #include <ATen/ops/lerp_meta.h>
734
+ #include <ATen/ops/less_meta.h>
735
+ #include <ATen/ops/less_equal_meta.h>
736
+ #include <ATen/ops/lgamma_meta.h>
737
+ #include <ATen/ops/lift_meta.h>
738
+ #include <ATen/ops/lift_fresh_meta.h>
739
+ #include <ATen/ops/lift_fresh_copy_meta.h>
740
+ #include <ATen/ops/linalg_cholesky_meta.h>
741
+ #include <ATen/ops/linalg_cholesky_ex_meta.h>
742
+ #include <ATen/ops/linalg_cond_meta.h>
743
+ #include <ATen/ops/linalg_cross_meta.h>
744
+ #include <ATen/ops/linalg_det_meta.h>
745
+ #include <ATen/ops/linalg_diagonal_meta.h>
746
+ #include <ATen/ops/linalg_eig_meta.h>
747
+ #include <ATen/ops/linalg_eigh_meta.h>
748
+ #include <ATen/ops/linalg_eigvals_meta.h>
749
+ #include <ATen/ops/linalg_eigvalsh_meta.h>
750
+ #include <ATen/ops/linalg_householder_product_meta.h>
751
+ #include <ATen/ops/linalg_inv_meta.h>
752
+ #include <ATen/ops/linalg_inv_ex_meta.h>
753
+ #include <ATen/ops/linalg_ldl_factor_meta.h>
754
+ #include <ATen/ops/linalg_ldl_factor_ex_meta.h>
755
+ #include <ATen/ops/linalg_ldl_solve_meta.h>
756
+ #include <ATen/ops/linalg_lstsq_meta.h>
757
+ #include <ATen/ops/linalg_lu_meta.h>
758
+ #include <ATen/ops/linalg_lu_factor_meta.h>
759
+ #include <ATen/ops/linalg_lu_factor_ex_meta.h>
760
+ #include <ATen/ops/linalg_lu_solve_meta.h>
761
+ #include <ATen/ops/linalg_matmul_meta.h>
762
+ #include <ATen/ops/linalg_matrix_exp_meta.h>
763
+ #include <ATen/ops/linalg_matrix_norm_meta.h>
764
+ #include <ATen/ops/linalg_matrix_power_meta.h>
765
+ #include <ATen/ops/linalg_matrix_rank_meta.h>
766
+ #include <ATen/ops/linalg_multi_dot_meta.h>
767
+ #include <ATen/ops/linalg_norm_meta.h>
768
+ #include <ATen/ops/linalg_pinv_meta.h>
769
+ #include <ATen/ops/linalg_qr_meta.h>
770
+ #include <ATen/ops/linalg_slogdet_meta.h>
771
+ #include <ATen/ops/linalg_solve_meta.h>
772
+ #include <ATen/ops/linalg_solve_ex_meta.h>
773
+ #include <ATen/ops/linalg_solve_triangular_meta.h>
774
+ #include <ATen/ops/linalg_svd_meta.h>
775
+ #include <ATen/ops/linalg_svdvals_meta.h>
776
+ #include <ATen/ops/linalg_tensorinv_meta.h>
777
+ #include <ATen/ops/linalg_tensorsolve_meta.h>
778
+ #include <ATen/ops/linalg_vander_meta.h>
779
+ #include <ATen/ops/linalg_vecdot_meta.h>
780
+ #include <ATen/ops/linalg_vector_norm_meta.h>
781
+ #include <ATen/ops/linear_meta.h>
782
+ #include <ATen/ops/linear_backward_meta.h>
783
+ #include <ATen/ops/linspace_meta.h>
784
+ #include <ATen/ops/log_meta.h>
785
+ #include <ATen/ops/log10_meta.h>
786
+ #include <ATen/ops/log1p_meta.h>
787
+ #include <ATen/ops/log2_meta.h>
788
+ #include <ATen/ops/log_normal_meta.h>
789
+ #include <ATen/ops/log_sigmoid_meta.h>
790
+ #include <ATen/ops/log_sigmoid_backward_meta.h>
791
+ #include <ATen/ops/log_sigmoid_forward_meta.h>
792
+ #include <ATen/ops/log_softmax_meta.h>
793
+ #include <ATen/ops/logaddexp_meta.h>
794
+ #include <ATen/ops/logaddexp2_meta.h>
795
+ #include <ATen/ops/logcumsumexp_meta.h>
796
+ #include <ATen/ops/logdet_meta.h>
797
+ #include <ATen/ops/logical_and_meta.h>
798
+ #include <ATen/ops/logical_not_meta.h>
799
+ #include <ATen/ops/logical_or_meta.h>
800
+ #include <ATen/ops/logical_xor_meta.h>
801
+ #include <ATen/ops/logit_meta.h>
802
+ #include <ATen/ops/logit_backward_meta.h>
803
+ #include <ATen/ops/logspace_meta.h>
804
+ #include <ATen/ops/logsumexp_meta.h>
805
+ #include <ATen/ops/lshift_meta.h>
806
+ #include <ATen/ops/lstm_meta.h>
807
+ #include <ATen/ops/lstm_cell_meta.h>
808
+ #include <ATen/ops/lstm_mps_backward_meta.h>
809
+ #include <ATen/ops/lt_meta.h>
810
+ #include <ATen/ops/lu_solve_meta.h>
811
+ #include <ATen/ops/lu_unpack_meta.h>
812
+ #include <ATen/ops/mH_meta.h>
813
+ #include <ATen/ops/mT_meta.h>
814
+ #include <ATen/ops/margin_ranking_loss_meta.h>
815
+ #include <ATen/ops/masked_fill_meta.h>
816
+ #include <ATen/ops/masked_scatter_meta.h>
817
+ #include <ATen/ops/masked_scatter_backward_meta.h>
818
+ #include <ATen/ops/masked_select_meta.h>
819
+ #include <ATen/ops/masked_select_backward_meta.h>
820
+ #include <ATen/ops/matmul_meta.h>
821
+ #include <ATen/ops/matmul_backward_meta.h>
822
+ #include <ATen/ops/matrix_H_meta.h>
823
+ #include <ATen/ops/matrix_exp_meta.h>
824
+ #include <ATen/ops/matrix_exp_backward_meta.h>
825
+ #include <ATen/ops/matrix_power_meta.h>
826
+ #include <ATen/ops/max_meta.h>
827
+ #include <ATen/ops/max_pool1d_meta.h>
828
+ #include <ATen/ops/max_pool1d_with_indices_meta.h>
829
+ #include <ATen/ops/max_pool2d_meta.h>
830
+ #include <ATen/ops/max_pool2d_backward_meta.h>
831
+ #include <ATen/ops/max_pool2d_with_indices_meta.h>
832
+ #include <ATen/ops/max_pool2d_with_indices_backward_meta.h>
833
+ #include <ATen/ops/max_pool3d_meta.h>
834
+ #include <ATen/ops/max_pool3d_with_indices_meta.h>
835
+ #include <ATen/ops/max_pool3d_with_indices_backward_meta.h>
836
+ #include <ATen/ops/max_unpool2d_meta.h>
837
+ #include <ATen/ops/max_unpool3d_meta.h>
838
+ #include <ATen/ops/maximum_meta.h>
839
+ #include <ATen/ops/mean_meta.h>
840
+ #include <ATen/ops/median_meta.h>
841
+ #include <ATen/ops/meshgrid_meta.h>
842
+ #include <ATen/ops/min_meta.h>
843
+ #include <ATen/ops/minimum_meta.h>
844
+ #include <ATen/ops/miopen_batch_norm_meta.h>
845
+ #include <ATen/ops/miopen_batch_norm_backward_meta.h>
846
+ #include <ATen/ops/miopen_convolution_meta.h>
847
+ #include <ATen/ops/miopen_convolution_add_relu_meta.h>
848
+ #include <ATen/ops/miopen_convolution_relu_meta.h>
849
+ #include <ATen/ops/miopen_convolution_transpose_meta.h>
850
+ #include <ATen/ops/miopen_depthwise_convolution_meta.h>
851
+ #include <ATen/ops/miopen_rnn_meta.h>
852
+ #include <ATen/ops/miopen_rnn_backward_meta.h>
853
+ #include <ATen/ops/mish_meta.h>
854
+ #include <ATen/ops/mish_backward_meta.h>
855
+ #include <ATen/ops/mkldnn_adaptive_avg_pool2d_meta.h>
856
+ #include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_meta.h>
857
+ #include <ATen/ops/mkldnn_convolution_meta.h>
858
+ #include <ATen/ops/mkldnn_linear_meta.h>
859
+ #include <ATen/ops/mkldnn_linear_backward_meta.h>
860
+ #include <ATen/ops/mkldnn_linear_backward_input_meta.h>
861
+ #include <ATen/ops/mkldnn_linear_backward_weights_meta.h>
862
+ #include <ATen/ops/mkldnn_max_pool2d_meta.h>
863
+ #include <ATen/ops/mkldnn_max_pool2d_backward_meta.h>
864
+ #include <ATen/ops/mkldnn_max_pool3d_meta.h>
865
+ #include <ATen/ops/mkldnn_max_pool3d_backward_meta.h>
866
+ #include <ATen/ops/mkldnn_reorder_conv2d_weight_meta.h>
867
+ #include <ATen/ops/mkldnn_reorder_conv3d_weight_meta.h>
868
+ #include <ATen/ops/mkldnn_rnn_layer_meta.h>
869
+ #include <ATen/ops/mkldnn_rnn_layer_backward_meta.h>
870
+ #include <ATen/ops/mm_meta.h>
871
+ #include <ATen/ops/mode_meta.h>
872
+ #include <ATen/ops/moveaxis_meta.h>
873
+ #include <ATen/ops/movedim_meta.h>
874
+ #include <ATen/ops/mps_convolution_backward_meta.h>
875
+ #include <ATen/ops/mps_convolution_transpose_backward_meta.h>
876
+ #include <ATen/ops/mse_loss_meta.h>
877
+ #include <ATen/ops/mse_loss_backward_meta.h>
878
+ #include <ATen/ops/msort_meta.h>
879
+ #include <ATen/ops/mul_meta.h>
880
+ #include <ATen/ops/multi_margin_loss_meta.h>
881
+ #include <ATen/ops/multi_margin_loss_backward_meta.h>
882
+ #include <ATen/ops/multilabel_margin_loss_meta.h>
883
+ #include <ATen/ops/multilabel_margin_loss_backward_meta.h>
884
+ #include <ATen/ops/multilabel_margin_loss_forward_meta.h>
885
+ #include <ATen/ops/multinomial_meta.h>
886
+ #include <ATen/ops/multiply_meta.h>
887
+ #include <ATen/ops/mv_meta.h>
888
+ #include <ATen/ops/mvlgamma_meta.h>
889
+ #include <ATen/ops/nan_to_num_meta.h>
890
+ #include <ATen/ops/nanmean_meta.h>
891
+ #include <ATen/ops/nanmedian_meta.h>
892
+ #include <ATen/ops/nanquantile_meta.h>
893
+ #include <ATen/ops/nansum_meta.h>
894
+ #include <ATen/ops/narrow_meta.h>
895
+ #include <ATen/ops/narrow_copy_meta.h>
896
+ #include <ATen/ops/native_batch_norm_meta.h>
897
+ #include <ATen/ops/native_batch_norm_backward_meta.h>
898
+ #include <ATen/ops/native_channel_shuffle_meta.h>
899
+ #include <ATen/ops/native_dropout_meta.h>
900
+ #include <ATen/ops/native_dropout_backward_meta.h>
901
+ #include <ATen/ops/native_group_norm_meta.h>
902
+ #include <ATen/ops/native_group_norm_backward_meta.h>
903
+ #include <ATen/ops/native_layer_norm_meta.h>
904
+ #include <ATen/ops/native_layer_norm_backward_meta.h>
905
+ #include <ATen/ops/native_norm_meta.h>
906
+ #include <ATen/ops/ne_meta.h>
907
+ #include <ATen/ops/neg_meta.h>
908
+ #include <ATen/ops/negative_meta.h>
909
+ #include <ATen/ops/nested_to_padded_tensor_meta.h>
910
+ #include <ATen/ops/new_empty_meta.h>
911
+ #include <ATen/ops/new_empty_strided_meta.h>
912
+ #include <ATen/ops/new_full_meta.h>
913
+ #include <ATen/ops/new_ones_meta.h>
914
+ #include <ATen/ops/new_zeros_meta.h>
915
+ #include <ATen/ops/nextafter_meta.h>
916
+ #include <ATen/ops/nll_loss_meta.h>
917
+ #include <ATen/ops/nll_loss2d_meta.h>
918
+ #include <ATen/ops/nll_loss2d_backward_meta.h>
919
+ #include <ATen/ops/nll_loss2d_forward_meta.h>
920
+ #include <ATen/ops/nll_loss_backward_meta.h>
921
+ #include <ATen/ops/nll_loss_forward_meta.h>
922
+ #include <ATen/ops/nll_loss_nd_meta.h>
923
+ #include <ATen/ops/nonzero_meta.h>
924
+ #include <ATen/ops/nonzero_numpy_meta.h>
925
+ #include <ATen/ops/nonzero_static_meta.h>
926
+ #include <ATen/ops/norm_meta.h>
927
+ #include <ATen/ops/norm_except_dim_meta.h>
928
+ #include <ATen/ops/normal_meta.h>
929
+ #include <ATen/ops/not_equal_meta.h>
930
+ #include <ATen/ops/nuclear_norm_meta.h>
931
+ #include <ATen/ops/numpy_T_meta.h>
932
+ #include <ATen/ops/one_hot_meta.h>
933
+ #include <ATen/ops/ones_meta.h>
934
+ #include <ATen/ops/ones_like_meta.h>
935
+ #include <ATen/ops/or_meta.h>
936
+ #include <ATen/ops/orgqr_meta.h>
937
+ #include <ATen/ops/ormqr_meta.h>
938
+ #include <ATen/ops/outer_meta.h>
939
+ #include <ATen/ops/output_nr_meta.h>
940
+ #include <ATen/ops/pad_meta.h>
941
+ #include <ATen/ops/pad_sequence_meta.h>
942
+ #include <ATen/ops/pairwise_distance_meta.h>
943
+ #include <ATen/ops/pdist_meta.h>
944
+ #include <ATen/ops/permute_meta.h>
945
+ #include <ATen/ops/permute_copy_meta.h>
946
+ #include <ATen/ops/pin_memory_meta.h>
947
+ #include <ATen/ops/pinverse_meta.h>
948
+ #include <ATen/ops/pixel_shuffle_meta.h>
949
+ #include <ATen/ops/pixel_unshuffle_meta.h>
950
+ #include <ATen/ops/poisson_meta.h>
951
+ #include <ATen/ops/poisson_nll_loss_meta.h>
952
+ #include <ATen/ops/polar_meta.h>
953
+ #include <ATen/ops/polygamma_meta.h>
954
+ #include <ATen/ops/positive_meta.h>
955
+ #include <ATen/ops/pow_meta.h>
956
+ #include <ATen/ops/prelu_meta.h>
957
+ #include <ATen/ops/prod_meta.h>
958
+ #include <ATen/ops/promote_types_meta.h>
959
+ #include <ATen/ops/put_meta.h>
960
+ #include <ATen/ops/q_per_channel_axis_meta.h>
961
+ #include <ATen/ops/q_per_channel_scales_meta.h>
962
+ #include <ATen/ops/q_per_channel_zero_points_meta.h>
963
+ #include <ATen/ops/q_scale_meta.h>
964
+ #include <ATen/ops/q_zero_point_meta.h>
965
+ #include <ATen/ops/qr_meta.h>
966
+ #include <ATen/ops/qscheme_meta.h>
967
+ #include <ATen/ops/quantile_meta.h>
968
+ #include <ATen/ops/quantize_per_channel_meta.h>
969
+ #include <ATen/ops/quantize_per_tensor_meta.h>
970
+ #include <ATen/ops/quantize_per_tensor_dynamic_meta.h>
971
+ #include <ATen/ops/quantized_batch_norm_meta.h>
972
+ #include <ATen/ops/quantized_gru_cell_meta.h>
973
+ #include <ATen/ops/quantized_lstm_cell_meta.h>
974
+ #include <ATen/ops/quantized_max_pool1d_meta.h>
975
+ #include <ATen/ops/quantized_max_pool2d_meta.h>
976
+ #include <ATen/ops/quantized_max_pool3d_meta.h>
977
+ #include <ATen/ops/quantized_rnn_relu_cell_meta.h>
978
+ #include <ATen/ops/quantized_rnn_tanh_cell_meta.h>
979
+ #include <ATen/ops/rad2deg_meta.h>
980
+ #include <ATen/ops/rand_meta.h>
981
+ #include <ATen/ops/rand_like_meta.h>
982
+ #include <ATen/ops/randint_meta.h>
983
+ #include <ATen/ops/randint_like_meta.h>
984
+ #include <ATen/ops/randn_meta.h>
985
+ #include <ATen/ops/randn_like_meta.h>
986
+ #include <ATen/ops/random_meta.h>
987
+ #include <ATen/ops/randperm_meta.h>
988
+ #include <ATen/ops/range_meta.h>
989
+ #include <ATen/ops/ravel_meta.h>
990
+ #include <ATen/ops/real_meta.h>
991
+ #include <ATen/ops/reciprocal_meta.h>
992
+ #include <ATen/ops/record_stream_meta.h>
993
+ #include <ATen/ops/refine_names_meta.h>
994
+ #include <ATen/ops/reflection_pad1d_meta.h>
995
+ #include <ATen/ops/reflection_pad1d_backward_meta.h>
996
+ #include <ATen/ops/reflection_pad2d_meta.h>
997
+ #include <ATen/ops/reflection_pad2d_backward_meta.h>
998
+ #include <ATen/ops/reflection_pad3d_meta.h>
999
+ #include <ATen/ops/reflection_pad3d_backward_meta.h>
1000
+ #include <ATen/ops/relu_meta.h>
1001
+ #include <ATen/ops/relu6_meta.h>
1002
+ #include <ATen/ops/remainder_meta.h>
1003
+ #include <ATen/ops/rename_meta.h>
1004
+ #include <ATen/ops/renorm_meta.h>
1005
+ #include <ATen/ops/repeat_meta.h>
1006
+ #include <ATen/ops/repeat_interleave_meta.h>
1007
+ #include <ATen/ops/replication_pad1d_meta.h>
1008
+ #include <ATen/ops/replication_pad1d_backward_meta.h>
1009
+ #include <ATen/ops/replication_pad2d_meta.h>
1010
+ #include <ATen/ops/replication_pad2d_backward_meta.h>
1011
+ #include <ATen/ops/replication_pad3d_meta.h>
1012
+ #include <ATen/ops/replication_pad3d_backward_meta.h>
1013
+ #include <ATen/ops/requires_grad_meta.h>
1014
+ #include <ATen/ops/reshape_meta.h>
1015
+ #include <ATen/ops/reshape_as_meta.h>
1016
+ #include <ATen/ops/resize_meta.h>
1017
+ #include <ATen/ops/resize_as_meta.h>
1018
+ #include <ATen/ops/resize_as_sparse_meta.h>
1019
+ #include <ATen/ops/resolve_conj_meta.h>
1020
+ #include <ATen/ops/resolve_neg_meta.h>
1021
+ #include <ATen/ops/result_type_meta.h>
1022
+ #include <ATen/ops/retain_grad_meta.h>
1023
+ #include <ATen/ops/retains_grad_meta.h>
1024
+ #include <ATen/ops/rnn_relu_meta.h>
1025
+ #include <ATen/ops/rnn_relu_cell_meta.h>
1026
+ #include <ATen/ops/rnn_tanh_meta.h>
1027
+ #include <ATen/ops/rnn_tanh_cell_meta.h>
1028
+ #include <ATen/ops/roll_meta.h>
1029
+ #include <ATen/ops/rot90_meta.h>
1030
+ #include <ATen/ops/round_meta.h>
1031
+ #include <ATen/ops/row_indices_meta.h>
1032
+ #include <ATen/ops/row_indices_copy_meta.h>
1033
+ #include <ATen/ops/row_stack_meta.h>
1034
+ #include <ATen/ops/rrelu_meta.h>
1035
+ #include <ATen/ops/rrelu_with_noise_meta.h>
1036
+ #include <ATen/ops/rrelu_with_noise_backward_meta.h>
1037
+ #include <ATen/ops/rshift_meta.h>
1038
+ #include <ATen/ops/rsqrt_meta.h>
1039
+ #include <ATen/ops/rsub_meta.h>
1040
+ #include <ATen/ops/scalar_tensor_meta.h>
1041
+ #include <ATen/ops/scaled_dot_product_attention_meta.h>
1042
+ #include <ATen/ops/scatter_meta.h>
1043
+ #include <ATen/ops/scatter_add_meta.h>
1044
+ #include <ATen/ops/scatter_reduce_meta.h>
1045
+ #include <ATen/ops/searchsorted_meta.h>
1046
+ #include <ATen/ops/segment_reduce_meta.h>
1047
+ #include <ATen/ops/select_meta.h>
1048
+ #include <ATen/ops/select_backward_meta.h>
1049
+ #include <ATen/ops/select_copy_meta.h>
1050
+ #include <ATen/ops/select_scatter_meta.h>
1051
+ #include <ATen/ops/selu_meta.h>
1052
+ #include <ATen/ops/set_meta.h>
1053
+ #include <ATen/ops/set_data_meta.h>
1054
+ #include <ATen/ops/sgn_meta.h>
1055
+ #include <ATen/ops/sigmoid_meta.h>
1056
+ #include <ATen/ops/sigmoid_backward_meta.h>
1057
+ #include <ATen/ops/sign_meta.h>
1058
+ #include <ATen/ops/signbit_meta.h>
1059
+ #include <ATen/ops/silu_meta.h>
1060
+ #include <ATen/ops/silu_backward_meta.h>
1061
+ #include <ATen/ops/sin_meta.h>
1062
+ #include <ATen/ops/sinc_meta.h>
1063
+ #include <ATen/ops/sinh_meta.h>
1064
+ #include <ATen/ops/size_meta.h>
1065
+ #include <ATen/ops/slice_meta.h>
1066
+ #include <ATen/ops/slice_backward_meta.h>
1067
+ #include <ATen/ops/slice_copy_meta.h>
1068
+ #include <ATen/ops/slice_scatter_meta.h>
1069
+ #include <ATen/ops/slogdet_meta.h>
1070
+ #include <ATen/ops/slow_conv3d_meta.h>
1071
+ #include <ATen/ops/slow_conv3d_forward_meta.h>
1072
+ #include <ATen/ops/slow_conv_dilated2d_meta.h>
1073
+ #include <ATen/ops/slow_conv_dilated3d_meta.h>
1074
+ #include <ATen/ops/slow_conv_transpose2d_meta.h>
1075
+ #include <ATen/ops/slow_conv_transpose3d_meta.h>
1076
+ #include <ATen/ops/smm_meta.h>
1077
+ #include <ATen/ops/smooth_l1_loss_meta.h>
1078
+ #include <ATen/ops/smooth_l1_loss_backward_meta.h>
1079
+ #include <ATen/ops/soft_margin_loss_meta.h>
1080
+ #include <ATen/ops/soft_margin_loss_backward_meta.h>
1081
+ #include <ATen/ops/softmax_meta.h>
1082
+ #include <ATen/ops/softplus_meta.h>
1083
+ #include <ATen/ops/softplus_backward_meta.h>
1084
+ #include <ATen/ops/softshrink_meta.h>
1085
+ #include <ATen/ops/softshrink_backward_meta.h>
1086
+ #include <ATen/ops/sort_meta.h>
1087
+ #include <ATen/ops/sparse_bsc_tensor_meta.h>
1088
+ #include <ATen/ops/sparse_bsr_tensor_meta.h>
1089
+ #include <ATen/ops/sparse_compressed_tensor_meta.h>
1090
+ #include <ATen/ops/sparse_coo_tensor_meta.h>
1091
+ #include <ATen/ops/sparse_csc_tensor_meta.h>
1092
+ #include <ATen/ops/sparse_csr_tensor_meta.h>
1093
+ #include <ATen/ops/sparse_dim_meta.h>
1094
+ #include <ATen/ops/sparse_mask_meta.h>
1095
+ #include <ATen/ops/sparse_resize_meta.h>
1096
+ #include <ATen/ops/sparse_resize_and_clear_meta.h>
1097
+ #include <ATen/ops/sparse_sampled_addmm_meta.h>
1098
+ #include <ATen/ops/special_airy_ai_meta.h>
1099
+ #include <ATen/ops/special_bessel_j0_meta.h>
1100
+ #include <ATen/ops/special_bessel_j1_meta.h>
1101
+ #include <ATen/ops/special_bessel_y0_meta.h>
1102
+ #include <ATen/ops/special_bessel_y1_meta.h>
1103
+ #include <ATen/ops/special_chebyshev_polynomial_t_meta.h>
1104
+ #include <ATen/ops/special_chebyshev_polynomial_u_meta.h>
1105
+ #include <ATen/ops/special_chebyshev_polynomial_v_meta.h>
1106
+ #include <ATen/ops/special_chebyshev_polynomial_w_meta.h>
1107
+ #include <ATen/ops/special_digamma_meta.h>
1108
+ #include <ATen/ops/special_entr_meta.h>
1109
+ #include <ATen/ops/special_erf_meta.h>
1110
+ #include <ATen/ops/special_erfc_meta.h>
1111
+ #include <ATen/ops/special_erfcx_meta.h>
1112
+ #include <ATen/ops/special_erfinv_meta.h>
1113
+ #include <ATen/ops/special_exp2_meta.h>
1114
+ #include <ATen/ops/special_expit_meta.h>
1115
+ #include <ATen/ops/special_expm1_meta.h>
1116
+ #include <ATen/ops/special_gammainc_meta.h>
1117
+ #include <ATen/ops/special_gammaincc_meta.h>
1118
+ #include <ATen/ops/special_gammaln_meta.h>
1119
+ #include <ATen/ops/special_hermite_polynomial_h_meta.h>
1120
+ #include <ATen/ops/special_hermite_polynomial_he_meta.h>
1121
+ #include <ATen/ops/special_i0_meta.h>
1122
+ #include <ATen/ops/special_i0e_meta.h>
1123
+ #include <ATen/ops/special_i1_meta.h>
1124
+ #include <ATen/ops/special_i1e_meta.h>
1125
+ #include <ATen/ops/special_laguerre_polynomial_l_meta.h>
1126
+ #include <ATen/ops/special_legendre_polynomial_p_meta.h>
1127
+ #include <ATen/ops/special_log1p_meta.h>
1128
+ #include <ATen/ops/special_log_ndtr_meta.h>
1129
+ #include <ATen/ops/special_log_softmax_meta.h>
1130
+ #include <ATen/ops/special_logit_meta.h>
1131
+ #include <ATen/ops/special_logsumexp_meta.h>
1132
+ #include <ATen/ops/special_modified_bessel_i0_meta.h>
1133
+ #include <ATen/ops/special_modified_bessel_i1_meta.h>
1134
+ #include <ATen/ops/special_modified_bessel_k0_meta.h>
1135
+ #include <ATen/ops/special_modified_bessel_k1_meta.h>
1136
+ #include <ATen/ops/special_multigammaln_meta.h>
1137
+ #include <ATen/ops/special_ndtr_meta.h>
1138
+ #include <ATen/ops/special_ndtri_meta.h>
1139
+ #include <ATen/ops/special_polygamma_meta.h>
1140
+ #include <ATen/ops/special_psi_meta.h>
1141
+ #include <ATen/ops/special_round_meta.h>
1142
+ #include <ATen/ops/special_scaled_modified_bessel_k0_meta.h>
1143
+ #include <ATen/ops/special_scaled_modified_bessel_k1_meta.h>
1144
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t_meta.h>
1145
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u_meta.h>
1146
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v_meta.h>
1147
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w_meta.h>
1148
+ #include <ATen/ops/special_sinc_meta.h>
1149
+ #include <ATen/ops/special_softmax_meta.h>
1150
+ #include <ATen/ops/special_spherical_bessel_j0_meta.h>
+ #include <ATen/ops/special_xlog1py_meta.h>
+ #include <ATen/ops/special_xlogy_meta.h>
+ #include <ATen/ops/special_zeta_meta.h>
+ #include <ATen/ops/split_meta.h>
+ #include <ATen/ops/split_copy_meta.h>
+ #include <ATen/ops/split_with_sizes_meta.h>
+ #include <ATen/ops/split_with_sizes_copy_meta.h>
+ #include <ATen/ops/sqrt_meta.h>
+ #include <ATen/ops/square_meta.h>
+ #include <ATen/ops/squeeze_meta.h>
+ #include <ATen/ops/squeeze_copy_meta.h>
+ #include <ATen/ops/sspaddmm_meta.h>
+ #include <ATen/ops/stack_meta.h>
+ #include <ATen/ops/std_meta.h>
+ #include <ATen/ops/std_mean_meta.h>
+ #include <ATen/ops/stft_meta.h>
+ #include <ATen/ops/stride_meta.h>
+ #include <ATen/ops/sub_meta.h>
+ #include <ATen/ops/subtract_meta.h>
+ #include <ATen/ops/sum_meta.h>
+ #include <ATen/ops/sum_to_size_meta.h>
+ #include <ATen/ops/svd_meta.h>
+ #include <ATen/ops/swapaxes_meta.h>
+ #include <ATen/ops/swapdims_meta.h>
+ #include <ATen/ops/sym_constrain_range_meta.h>
+ #include <ATen/ops/sym_constrain_range_for_size_meta.h>
+ #include <ATen/ops/sym_numel_meta.h>
+ #include <ATen/ops/sym_size_meta.h>
+ #include <ATen/ops/sym_storage_offset_meta.h>
+ #include <ATen/ops/sym_stride_meta.h>
+ #include <ATen/ops/t_meta.h>
+ #include <ATen/ops/t_copy_meta.h>
+ #include <ATen/ops/take_meta.h>
+ #include <ATen/ops/take_along_dim_meta.h>
+ #include <ATen/ops/tan_meta.h>
+ #include <ATen/ops/tanh_meta.h>
+ #include <ATen/ops/tanh_backward_meta.h>
+ #include <ATen/ops/tensor_split_meta.h>
+ #include <ATen/ops/tensordot_meta.h>
+ #include <ATen/ops/thnn_conv2d_meta.h>
+ #include <ATen/ops/threshold_meta.h>
+ #include <ATen/ops/threshold_backward_meta.h>
+ #include <ATen/ops/tile_meta.h>
+ #include <ATen/ops/to_meta.h>
+ #include <ATen/ops/to_dense_meta.h>
+ #include <ATen/ops/to_dense_backward_meta.h>
+ #include <ATen/ops/to_mkldnn_meta.h>
+ #include <ATen/ops/to_mkldnn_backward_meta.h>
+ #include <ATen/ops/to_padded_tensor_meta.h>
+ #include <ATen/ops/to_sparse_meta.h>
+ #include <ATen/ops/to_sparse_bsc_meta.h>
+ #include <ATen/ops/to_sparse_bsr_meta.h>
+ #include <ATen/ops/to_sparse_csc_meta.h>
+ #include <ATen/ops/to_sparse_csr_meta.h>
+ #include <ATen/ops/topk_meta.h>
+ #include <ATen/ops/trace_meta.h>
+ #include <ATen/ops/trace_backward_meta.h>
+ #include <ATen/ops/transpose_meta.h>
+ #include <ATen/ops/transpose_copy_meta.h>
+ #include <ATen/ops/trapezoid_meta.h>
+ #include <ATen/ops/trapz_meta.h>
+ #include <ATen/ops/triangular_solve_meta.h>
+ #include <ATen/ops/tril_meta.h>
+ #include <ATen/ops/tril_indices_meta.h>
+ #include <ATen/ops/triplet_margin_loss_meta.h>
+ #include <ATen/ops/triu_meta.h>
+ #include <ATen/ops/triu_indices_meta.h>
+ #include <ATen/ops/true_divide_meta.h>
+ #include <ATen/ops/trunc_meta.h>
+ #include <ATen/ops/type_as_meta.h>
+ #include <ATen/ops/unbind_meta.h>
+ #include <ATen/ops/unbind_copy_meta.h>
+ #include <ATen/ops/unflatten_meta.h>
+ #include <ATen/ops/unflatten_dense_tensors_meta.h>
+ #include <ATen/ops/unfold_meta.h>
+ #include <ATen/ops/unfold_backward_meta.h>
+ #include <ATen/ops/unfold_copy_meta.h>
+ #include <ATen/ops/uniform_meta.h>
+ #include <ATen/ops/unique_consecutive_meta.h>
+ #include <ATen/ops/unique_dim_meta.h>
+ #include <ATen/ops/unique_dim_consecutive_meta.h>
+ #include <ATen/ops/unsafe_chunk_meta.h>
+ #include <ATen/ops/unsafe_split_meta.h>
+ #include <ATen/ops/unsafe_split_with_sizes_meta.h>
+ #include <ATen/ops/unsqueeze_meta.h>
+ #include <ATen/ops/unsqueeze_copy_meta.h>
+ #include <ATen/ops/upsample_bicubic2d_meta.h>
+ #include <ATen/ops/upsample_bicubic2d_backward_meta.h>
+ #include <ATen/ops/upsample_bilinear2d_meta.h>
+ #include <ATen/ops/upsample_bilinear2d_backward_meta.h>
+ #include <ATen/ops/upsample_linear1d_meta.h>
+ #include <ATen/ops/upsample_linear1d_backward_meta.h>
+ #include <ATen/ops/upsample_nearest1d_meta.h>
+ #include <ATen/ops/upsample_nearest1d_backward_meta.h>
+ #include <ATen/ops/upsample_nearest2d_meta.h>
+ #include <ATen/ops/upsample_nearest2d_backward_meta.h>
+ #include <ATen/ops/upsample_nearest3d_meta.h>
+ #include <ATen/ops/upsample_nearest3d_backward_meta.h>
+ #include <ATen/ops/upsample_trilinear3d_meta.h>
+ #include <ATen/ops/upsample_trilinear3d_backward_meta.h>
+ #include <ATen/ops/value_selecting_reduction_backward_meta.h>
+ #include <ATen/ops/values_meta.h>
+ #include <ATen/ops/values_copy_meta.h>
+ #include <ATen/ops/vander_meta.h>
+ #include <ATen/ops/var_meta.h>
+ #include <ATen/ops/var_mean_meta.h>
+ #include <ATen/ops/vdot_meta.h>
+ #include <ATen/ops/view_meta.h>
+ #include <ATen/ops/view_as_meta.h>
+ #include <ATen/ops/view_as_complex_meta.h>
+ #include <ATen/ops/view_as_complex_copy_meta.h>
+ #include <ATen/ops/view_as_real_meta.h>
+ #include <ATen/ops/view_as_real_copy_meta.h>
+ #include <ATen/ops/view_copy_meta.h>
+ #include <ATen/ops/vsplit_meta.h>
+ #include <ATen/ops/vstack_meta.h>
+ #include <ATen/ops/where_meta.h>
+ #include <ATen/ops/xlogy_meta.h>
+ #include <ATen/ops/xor_meta.h>
+ #include <ATen/ops/zero_meta.h>
+ #include <ATen/ops/zeros_meta.h>
+ #include <ATen/ops/zeros_like_meta.h>
+
+ namespace at {
+
+ namespace meta {
+
+
+
+ } // namespace meta
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h ADDED
@@ -0,0 +1,281 @@
+ #pragma once
+ #include <ATen/MemoryOverlap.h>
+ #include <ATen/Tensor.h>
+ #include <c10/core/DispatchKey.h>
+ #include <c10/core/DispatchKeySet.h>
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/TensorImpl.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/Metaprogramming.h>
+ #include <c10/util/irange.h>
+
+ namespace at::native {
+ struct NestedTensorImpl;
+ inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt);
+ int64_t get_numel_from_nested_size_tensor(const at::Tensor& tensor);
+
+ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
+   explicit NestedTensorImpl(
+       Storage storage,
+       c10::DispatchKeySet key_set,
+       const caffe2::TypeMeta data_type,
+       at::Tensor nested_sizes,
+       at::Tensor nested_strides,
+       at::Tensor storage_offsets);
+
+   explicit NestedTensorImpl(
+       at::Tensor buffer,
+       at::Tensor nested_sizes,
+       at::Tensor nested_strides,
+       at::Tensor storage_offsets);
+   // Assumes a contiguous buffer; `nested_strides` and `offsets`
+   // can be inferred from `nested_sizes`.
+   explicit NestedTensorImpl(at::Tensor buffer, at::Tensor nested_sizes);
+
+   // This constructor is used when creating view tensors from nested tensors
+   explicit NestedTensorImpl(
+       c10::TensorImpl::ImplType impl_type,
+       const at::Tensor& base_tensor,
+       at::Tensor nested_sizes,
+       at::Tensor nested_strides,
+       at::Tensor storage_offsets);
+
+   // TODO: don't expose private implementation details like this; in
+   // particular, resizing this tensor will mess up our dim() and
+   // callers cannot fix it.
+   const Tensor& get_nested_sizes() const {
+     return nested_sizes_;
+   }
+   // TODO: don't expose private implementation details like this
+   const Tensor& get_nested_strides() const {
+     return nested_strides_;
+   }
+   const Tensor& get_storage_offsets() const {
+     return storage_offsets_;
+   }
+   // Returns nullopt if the ith dimension is irregular. The ith dimension
+   // of a NestedTensor is regular if the unbound tensors match in
+   // size at the (i-1)th dimension.
+   c10::optional<int64_t> opt_size(int64_t d) const;
+
+   int64_t size(int64_t d) const {
+     c10::optional<int64_t> optional_size = this->opt_size(d);
+     TORCH_CHECK(
+         optional_size.has_value(),
+         "Given dimension ",
+         d,
+         " is irregular and does not have a size.");
+     return *optional_size;
+   }
+   /**
+    * Return a view of the nested tensor as a 1 dimensional contiguous tensor.
+    *
+    * The buffer tensor created by this function shares the same storage_impl as
+    * the original nested tensor, and therefore can be seen as a view.
+    *
+    * @return A newly constructed view tensor
+    */
+   at::Tensor get_buffer() const {
+     TORCH_CHECK(
+         nested_tensor_impl_is_contiguous(this),
+         "NestedTensor must be contiguous to get buffer.");
+     return get_unsafe_storage_as_tensor();
+   }
+   /**
+    * If possible use get_buffer() instead. This function returns the storage
+    * as a tensor directly, which is not safe to use in general. If using this
+    * function, the caller must account for nested_sizes,
+    * nested_strides and storage_offsets.
+    *
+    * @return A newly constructed view tensor
+    */
+   at::Tensor get_unsafe_storage_as_tensor() const {
+     auto buffer_key_set_ = generate_buffer_key_set();
+     const auto buffer_size = get_buffer_size();
+     auto buffer_tensor_impl = c10::make_intrusive<TensorImpl>(
+         c10::TensorImpl::VIEW, Storage(storage_), buffer_key_set_, data_type_);
+     buffer_tensor_impl->set_sizes_contiguous(
+         c10::makeArrayRef(static_cast<int64_t>(buffer_size)));
+     return Tensor(buffer_tensor_impl);
+   }
+
+   size_t get_buffer_size() const {
+     return storage_.nbytes() / data_type_.itemsize();
+   }
+
+  protected:
+   const char* tensorimpl_type_name() const override;
+
+   // TODO: numel_custom and is_contiguous_custom can be profitably overridden
+   // with real implementations
+   int64_t numel_custom() const override;
+   c10::SymInt sym_numel_custom() const override;
+   bool is_contiguous_custom(MemoryFormat) const override;
+   int64_t size_custom(int64_t d) const override {
+     return this->size(d);
+   }
+   c10::SymInt sym_size_custom(int64_t d) const override {
+     return c10::SymInt{this->size(d)};
+   }
+   IntArrayRef sizes_custom() const override;
+   c10::SymIntArrayRef sym_sizes_custom() const override;
+   IntArrayRef strides_custom() const override;
+   c10::SymIntArrayRef sym_strides_custom() const override;
+
+   // this one is real
+   int64_t dim_custom() const override;
+
+   c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+       const c10::VariableVersion& version_counter,
+       bool allow_tensor_metadata_change) const override;
+
+   c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+       c10::VariableVersion&& version_counter,
+       bool allow_tensor_metadata_change) const override;
+
+   void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
+     copy_tensor_metadata(
+         /*src_impl=*/impl.get(),
+         /*dest_impl=*/this,
+         /*version_counter=*/version_counter(),
+         /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
+   }
+
+  private:
+   // Must be called after any changes to our dim() to sync the state
+   // to TensorImpl.
+   void refresh_dim();
+
+   // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
+   const at::Tensor nested_sizes_, nested_strides_;
+   // The starting positions of the underlying tensors in the contiguous
+   // buffer, i.e. the buffer memory offsets needed to get the underlying
+   // tensors. The reason to keep this metadata is that, without strong enough
+   // constraints, it cannot be derived from `nested_sizes_`
+   // and `nested_strides_`:
+   // 1. when the buffer has blanks, e.g. [tensor1, blank, tensor2];
+   //    this can happen e.g. after slicing a nested tensor
+   // 2. when multiple tensors share the same memory
+   // 3. when the nesting ordering is changed, e.g. [tensor1, tensor3, tensor2]
+   // Some strong enough constraints are:
+   // 1. every underlying tensor is contiguous in memory
+   //    && nesting in ascending order
+   // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
+   const at::Tensor storage_offsets_;
+   // NOTE: -1 here means the size is missing
+   // Optional to allow it to be computed lazily from nested.
+   // TODO: maybe we can remove this metadata since
+   // we can compute it from `nested_sizes_`
+   mutable c10::optional<std::vector<int64_t>> opt_sizes_;
+
+   template <typename VariableVersion>
+   c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core(
+       VariableVersion&& version_counter,
+       bool allow_tensor_metadata_change) const;
+
+   /**
+    * Generates a non-nested key_set from a nested tensor.
+    *
+    * For many nested tensor kernel implementations a buffer tensor
+    * is generated and redispatched to a non-nested kernel; this function
+    * generates the key set used by that buffer tensor.
+    *
+    * @return Appropriate key set for non-nested tensor
+    */
+   inline c10::DispatchKeySet generate_buffer_key_set() const {
+     auto buffer_key_set = this->key_set();
+     const bool Autograd = buffer_key_set.has_any(c10::autograd_dispatch_keyset);
+     // Remove nested tensor specific keys
+     buffer_key_set = buffer_key_set -
+         c10::DispatchKeySet{
+             c10::DispatchKey::NestedTensor,
+             c10::DispatchKey::AutogradNestedTensor};
+
+     // Add dense tensor specific keys
+     buffer_key_set =
+         buffer_key_set | c10::DispatchKeySet{c10::DispatchKey::Dense};
+     buffer_key_set = Autograd
+         ? c10::DispatchKeySet{c10::DispatchKey::Autograd} | buffer_key_set
+         : buffer_key_set;
+
+     return buffer_key_set;
+   }
+ };
+
+ inline NestedTensorImpl* get_nested_tensor_impl_or_null(
+     const at::Tensor& tensor) {
+   if (tensor.is_nested()) {
+     return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl());
+   }
+   return nullptr;
+ }
+
+ inline NestedTensorImpl* get_nested_tensor_impl(const at::Tensor& tensor) {
+   TORCH_CHECK(
+       tensor.is_nested(), "get_nested_tensor_impl requires a NestedTensor.");
+   return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl());
+ }
+
+ inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt) {
+   int64_t ntensors = nt->size(0);
+   if (ntensors == 0) {
+     return true;
+   }
+   const Tensor &sizemat = nt->get_nested_sizes(),
+                &stridemat = nt->get_nested_strides();
+   int64_t* offsets_ptr = nt->get_storage_offsets().data_ptr<int64_t>();
+   int64_t orig_dim = sizemat.size(1);
+   // nesting scalars
+   if (orig_dim == 0) {
+     // each scalar must be contiguous
+     // if there is blank memory between underlying scalars
+     for (int64_t i = 0; i < ntensors; i++) {
+       if (offsets_ptr[i] != i) {
+         return false;
+       }
+     }
+   }
+   // nesting tensors
+   else {
+     // if any underlying tensor is non-contiguous
+     const int64_t *sizemat_ptr = sizemat.data_ptr<int64_t>(),
+                   *stridemat_ptr = stridemat.data_ptr<int64_t>();
+     for (int64_t i = 0; i < ntensors; i++) {
+       if (stridemat_ptr[orig_dim - 1] != 1) {
+         return false;
+       }
+       int64_t product = sizemat_ptr[orig_dim - 1];
+       for (int64_t j = orig_dim - 2; j >= 0; j--) {
+         if (stridemat_ptr[j] != product) {
+           return false;
+         }
+         product *= sizemat_ptr[j];
+       }
+       sizemat_ptr += orig_dim;
+       stridemat_ptr += orig_dim;
+     }
+     // if there is blank memory between underlying tensors
+     if (offsets_ptr[0] != 0) {
+       return false;
+     }
+     sizemat_ptr = sizemat.data_ptr<int64_t>();
+     stridemat_ptr = stridemat.data_ptr<int64_t>();
+     for (int64_t i = 1; i < ntensors; i++) {
+       if (offsets_ptr[i] !=
+           offsets_ptr[i - 1] + *sizemat_ptr * *stridemat_ptr) {
+         return false;
+       }
+       sizemat_ptr += orig_dim;
+       stridemat_ptr += orig_dim;
+     }
+   }
+   // everything is fine
+   return true;
+ }
+
+ inline const at::Tensor& get_nested_sizes(const at::Tensor& tensor) {
+   return get_nested_tensor_impl(tensor)->get_nested_sizes();
+ }
+
+ } // namespace at::native
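
A minimal usage sketch for the contiguity and size helpers above (the torch::nested::nested_tensor factory from torch/nested.h is an assumption here, as is the example shape):

    #include <ATen/NestedTensorImpl.h>
    #include <torch/torch.h>

    int main() {
      // Two ragged rows (2x3 and 4x3) packed into one nested tensor.
      auto nt = torch::nested::nested_tensor(
          {torch::rand({2, 3}), torch::rand({4, 3})});
      auto* impl = at::native::get_nested_tensor_impl(nt);
      // Dim 1 is ragged (2 vs 4), so opt_size returns nullopt; dim 2 is
      // regular (both components have width 3).
      TORCH_CHECK(impl->opt_size(1) == c10::nullopt);
      TORCH_CHECK(impl->opt_size(2).value() == 3);
      // A freshly packed nested tensor has no blanks between components.
      TORCH_CHECK(at::native::nested_tensor_impl_is_contiguous(impl));
      return 0;
    }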
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h ADDED
@@ -0,0 +1,59 @@
+ #pragma once
+
+ #include <c10/core/ScalarType.h>
+ #include <c10/util/BFloat16.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/Float8_e4m3fn.h>
+ #include <c10/util/Float8_e5m2.h>
+ #include <c10/util/Half.h>
+
+ namespace at {
+
+ // For FP16 or BFloat16 inputs, ops should perform internal math in FP32.
+ template <typename scalar_t>
+ struct OpMathType {
+   using type = scalar_t;
+ };
+ template <>
+ struct OpMathType<at::Half> {
+   using type = float;
+ };
+ template <>
+ struct OpMathType<at::BFloat16> {
+   using type = float;
+ };
+ template <>
+ struct OpMathType<at::Float8_e5m2> {
+   using type = float;
+ };
+ template <>
+ struct OpMathType<at::Float8_e4m3fn> {
+   using type = float;
+ };
+ template <>
+ struct OpMathType<c10::complex<Half>> {
+   using type = c10::complex<float>;
+ };
+
+ template <typename T>
+ using opmath_type = typename OpMathType<T>::type;
+
+ namespace {
+
+ inline c10::ScalarType toOpMathType(const c10::ScalarType type) {
+   switch (type) {
+ #define DEFINE_CASE(scalar_t, TypeNum) \
+   case ScalarType::TypeNum: \
+     return CppTypeToScalarType<at::opmath_type<scalar_t>>::value;
+
+     AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE)
+ #undef DEFINE_CASE
+
+     default:
+       TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type);
+   }
+ }
+
+ } // namespace
+
+ } // namespace at
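
To make the intent concrete, a small sketch using opmath_type to accumulate reduced-precision inputs in float (a standalone illustration, not code from ATen callers):

    #include <ATen/OpMathType.h>
    #include <cstdint>

    // Sums n values; for at::Half or at::BFloat16 inputs the accumulator
    // is float, so precision is only dropped once at the final cast.
    template <typename scalar_t>
    scalar_t sum(const scalar_t* data, int64_t n) {
      using opmath_t = at::opmath_type<scalar_t>;
      opmath_t acc = opmath_t(0);
      for (int64_t i = 0; i < n; ++i) {
        acc += static_cast<opmath_t>(data[i]);
      }
      return static_cast<scalar_t>(acc);
    }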
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/OpaqueTensorImpl.h ADDED
@@ -0,0 +1,186 @@
+ #pragma once
+
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/SymIntArrayRef.h>
+ #include <c10/core/TensorImpl.h>
+ #include <c10/util/Exception.h>
+
+ namespace at {
+
+ // An "Opaque" TensorImpl -- there are no strides and (for now)
+ // even data() is not supported (thus no pointer arithmetic).
+
+ // NOTE: We could allow data() in the future, but would have to ensure pointer
+ // arithmetic code is properly guarded.
+ //
+ // NOTE: This does not support resize_ (and other metadata-changing ops) because
+ // of `shallow_copy_and_detach`. We would need to define an interface to
+ // "shallow copy" in order to add support.
+
+ template <typename OpaqueHandle>
+ struct TORCH_API OpaqueTensorImpl : public TensorImpl {
+   // public constructor for now...
+   OpaqueTensorImpl(
+       at::DispatchKeySet key_set,
+       const caffe2::TypeMeta data_type,
+       c10::Device device,
+       OpaqueHandle opaque_handle,
+       c10::IntArrayRef sizes,
+       bool is_non_overlapping_and_dense = true)
+       : TensorImpl(key_set, data_type, device),
+         opaque_handle_(std::move(opaque_handle)) {
+     set_storage_access_should_throw();
+     set_custom_sizes_strides(SizesStridesPolicy::CustomStrides);
+     sizes_and_strides_.set_sizes(sizes);
+     refresh_numel();
+     is_non_overlapping_and_dense_ = is_non_overlapping_and_dense;
+   }
+
+   // Destructor doesn't call release_resources because it's
+   // unnecessary; don't forget to change that if needed!
+   void release_resources() override {
+     TensorImpl::release_resources();
+     opaque_handle_ = {};
+   }
+
+   void set_size(int64_t dim, int64_t new_size) override {
+     AT_ERROR("opaque tensors do not have set_size");
+   }
+
+   void set_stride(int64_t dim, int64_t new_stride) override {
+     AT_ERROR("opaque tensors do not have set_stride");
+   }
+
+   void set_storage_offset(int64_t storage_offset) override {
+     AT_ERROR("opaque tensors do not have set_storage_offset");
+   }
+
+ #ifdef DEBUG
+   bool has_storage() const override {
+     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+         !storage_, "OpaqueTensorImpl assumes that storage_ is never set");
+     return false;
+   }
+ #endif
+
+   /**
+    * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+    *
+    * For usage of `version_counter` and `allow_tensor_metadata_change`,
+    * see NOTE [ TensorImpl Shallow-Copying ].
+    */
+   c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+       const c10::VariableVersion& version_counter,
+       bool allow_tensor_metadata_change) const override {
+     auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
+         key_set(),
+         dtype(),
+         device(),
+         opaque_handle_,
+         sizes_and_strides_.sizes_arrayref());
+     copy_tensor_metadata(
+         /*src_opaque_impl=*/this,
+         /*dest_opaque_impl=*/impl.get(),
+         /*version_counter=*/version_counter,
+         /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+     impl->refresh_numel();
+     return impl;
+   }
+
+   /**
+    * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+    *
+    * For usage of `version_counter` and `allow_tensor_metadata_change`,
+    * see NOTE [ TensorImpl Shallow-Copying ].
+    */
+   c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+       c10::VariableVersion&& version_counter,
+       bool allow_tensor_metadata_change) const override {
+     auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
+         key_set(),
+         dtype(),
+         device(),
+         opaque_handle_,
+         sizes_and_strides_.sizes_arrayref());
+     copy_tensor_metadata(
+         /*src_opaque_impl=*/this,
+         /*dest_opaque_impl=*/impl.get(),
+         /*version_counter=*/std::move(version_counter),
+         /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+     impl->refresh_numel();
+     return impl;
+   }
+
+   /**
+    * Shallow-copies data from another TensorImpl into this TensorImpl.
+    *
+    * For why this function doesn't check this TensorImpl's
+    * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ].
+    */
+   void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
+     AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set()));
+     auto opaque_impl =
+         static_cast<const OpaqueTensorImpl<OpaqueHandle>*>(impl.get());
+     copy_tensor_metadata(
+         /*src_impl=*/opaque_impl,
+         /*dest_impl=*/this,
+         /*version_counter=*/version_counter(),
+         /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
+     refresh_numel();
+   }
+
+   const OpaqueHandle& opaque_handle() const {
+     return opaque_handle_;
+   }
+
+   OpaqueHandle& unsafe_opaque_handle() {
+     return opaque_handle_;
+   }
+
+  protected:
+   /**
+    * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
+    * storage_offset) from one TensorImpl to another TensorImpl.
+    *
+    * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
+    * [ TensorImpl Shallow-Copying ].
+    */
+   static void copy_tensor_metadata(
+       const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl,
+       OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl,
+       const c10::VariableVersion& version_counter,
+       bool allow_tensor_metadata_change) {
+     TensorImpl::copy_tensor_metadata(
+         src_opaque_impl,
+         dest_opaque_impl,
+         version_counter,
+         allow_tensor_metadata_change);
+
+     // OpaqueTensorImpl-specific fields.
+     dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_;
+   }
+
+   static void copy_tensor_metadata(
+       const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl,
+       OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl,
+       c10::VariableVersion&& version_counter,
+       bool allow_tensor_metadata_change) {
+     TensorImpl::copy_tensor_metadata(
+         src_opaque_impl,
+         dest_opaque_impl,
+         std::move(version_counter),
+         allow_tensor_metadata_change);
+
+     // OpaqueTensorImpl-specific fields.
+     dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_;
+   }
+
+  private:
+   const char* tensorimpl_type_name() const override {
+     return "OpaqueTensorImpl";
+   }
+
+   OpaqueHandle opaque_handle_;
+ };
+
+ } // namespace at
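
A hedged sketch of how a backend might instantiate this template with its own handle type (the MyBackendBuffer type and the PrivateUse1 dispatch key choice are illustrative assumptions, not taken from this header):

    #include <ATen/OpaqueTensorImpl.h>
    #include <ATen/core/Tensor.h>

    struct MyBackendBuffer { /* backend-specific resource, illustrative */ };

    at::Tensor make_opaque_tensor(MyBackendBuffer handle, c10::IntArrayRef sizes) {
      // make_tensor wraps the freshly constructed impl in an at::Tensor.
      return at::detail::make_tensor<at::OpaqueTensorImpl<MyBackendBuffer>>(
          c10::DispatchKeySet(c10::DispatchKey::PrivateUse1),
          caffe2::TypeMeta::Make<float>(),
          c10::Device(c10::DeviceType::PrivateUse1, 0),
          std::move(handle),
          sizes);
    }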
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/PTThreadPool.h ADDED
@@ -0,0 +1,17 @@
+ #pragma once
+
+ #include <ATen/Parallel.h>
+ #include <c10/core/thread_pool.h>
+
+ namespace at {
+
+ class TORCH_API PTThreadPool : public c10::ThreadPool {
+  public:
+   explicit PTThreadPool(int pool_size, int numa_node_id = -1)
+       : c10::ThreadPool(pool_size, numa_node_id, []() {
+           c10::setThreadName("PTThreadPool");
+           at::init_num_threads();
+         }) {}
+ };
+
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/PadNd.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+ #include <c10/util/Exception.h>
+ #include <c10/util/string_view.h>
+
+ namespace at {
+
+ enum class padding_mode {
+   reflect,
+   replicate,
+   circular,
+   constant,
+ };
+
+ static inline c10::string_view padding_mode_string(padding_mode m) {
+   switch (m) {
+     case padding_mode::reflect:
+       return "reflect";
+     case padding_mode::replicate:
+       return "replicate";
+     case padding_mode::circular:
+       return "circular";
+     case padding_mode::constant:
+       return "constant";
+   }
+   TORCH_CHECK(false, "Invalid padding mode (", static_cast<int64_t>(m), ")");
+ }
+
+ } // namespace at
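
For reference, a trivial check of the helper above (illustrative only):

    #include <ATen/PadNd.h>

    void check_padding_mode_strings() {
      // string_view comparison; an out-of-range enum value would instead
      // trip the TORCH_CHECK at the bottom of padding_mode_string.
      TORCH_CHECK(at::padding_mode_string(at::padding_mode::reflect) == "reflect");
      TORCH_CHECK(at::padding_mode_string(at::padding_mode::constant) == "constant");
    }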
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/Parallel.h ADDED
@@ -0,0 +1,160 @@
+ #pragma once
+ #include <ATen/Config.h>
+ #include <c10/macros/Macros.h>
+ #include <functional>
+ #include <string>
+
+ namespace at {
+
+ inline int64_t divup(int64_t x, int64_t y) {
+   return (x + y - 1) / y;
+ }
+
+ // Called during new thread initialization
+ TORCH_API void init_num_threads();
+
+ // Sets the number of threads to be used in parallel region
+ TORCH_API void set_num_threads(int);
+
+ // Returns the maximum number of threads that may be used in a parallel region
+ TORCH_API int get_num_threads();
+
+ // Returns the current thread number (starting from 0)
+ // in the current parallel region, or 0 in the sequential region
+ TORCH_API int get_thread_num();
+
+ // Checks whether the code runs in parallel region
+ TORCH_API bool in_parallel_region();
+
+ namespace internal {
+
+ // Initialise num_threads lazily at first parallel call
+ inline void lazy_init_num_threads() {
+   thread_local bool init = false;
+   if (C10_UNLIKELY(!init)) {
+     at::init_num_threads();
+     init = true;
+   }
+ }
+
+ TORCH_API void set_thread_num(int);
+
+ class TORCH_API ThreadIdGuard {
+  public:
+   ThreadIdGuard(int new_id) : old_id_(at::get_thread_num()) {
+     set_thread_num(new_id);
+   }
+
+   ~ThreadIdGuard() {
+     set_thread_num(old_id_);
+   }
+
+  private:
+   int old_id_;
+ };
+
+ } // namespace internal
+
+ /*
+ parallel_for
+
+ begin: index at which to start applying user function
+
+ end: index at which to stop applying user function
+
+ grain_size: number of elements per chunk. impacts the degree of parallelization
+
+ f: user function applied in parallel to the chunks, signature:
+   void f(int64_t begin, int64_t end)
+
+ Warning: parallel_for does NOT copy thread local
+ states from the current thread to the worker threads.
+ This means for example that Tensor operations CANNOT be used in the
+ body of your function, only data pointers.
+ */
+ template <class F>
+ inline void parallel_for(
+     const int64_t begin,
+     const int64_t end,
+     const int64_t grain_size,
+     const F& f);
+
+ /*
+ parallel_reduce
+
+ begin: index at which to start applying reduction
+
+ end: index at which to stop applying reduction
+
+ grain_size: number of elements per chunk. impacts number of elements in
+ intermediate results tensor and degree of parallelization.
+
+ ident: identity for binary combination function sf. sf(ident, x) needs to return
+ x.
+
+ f: function for reduction over a chunk. f needs to be of signature scalar_t
+ f(int64_t partial_begin, int64_t partial_end, scalar_t identity)
+
+ sf: function to combine two partial results. sf needs to be of signature
+ scalar_t sf(scalar_t x, scalar_t y)
+
+ For example, you might have a tensor of 10000 entries and want to sum together
+ all the elements. parallel_reduce with a grain_size of 2500 will then allocate
+ an intermediate result tensor with 4 elements. Then it will execute the function
+ "f" you provide and pass the beginning and end index of these chunks, so
+ 0-2499, 2500-4999, etc. and the combination identity. It will then write out
+ the result from each of these chunks into the intermediate result tensor. After
+ that it'll reduce the partial results from each chunk into a single number using
+ the combination function sf and the identity ident. For a total summation this
+ would be "+" and 0 respectively. This is similar to tbb's approach [1], where
+ you need to provide a function to accumulate a subrange, a function to combine
+ two partial results and an identity.
+
+ Warning: parallel_reduce does NOT copy thread local
+ states from the current thread to the worker threads.
+ This means for example that Tensor operations CANNOT be used in the
+ body of your function, only data pointers.
+
+ [1] https://software.intel.com/en-us/node/506154
+ */
+ template <class scalar_t, class F, class SF>
+ inline scalar_t parallel_reduce(
+     const int64_t begin,
+     const int64_t end,
+     const int64_t grain_size,
+     const scalar_t ident,
+     const F& f,
+     const SF& sf);
+
+ // Returns a detailed string describing parallelization settings
+ TORCH_API std::string get_parallel_info();
+
+ // Sets number of threads used for inter-op parallelism
+ TORCH_API void set_num_interop_threads(int);
+
+ // Returns the number of threads used for inter-op parallelism
+ TORCH_API int get_num_interop_threads();
+
+ // Launches inter-op parallel task
+ TORCH_API void launch(std::function<void()> func);
+ namespace internal {
+ void launch_no_thread_state(std::function<void()> fn);
+ } // namespace internal
+
+ // Launches intra-op parallel task
+ TORCH_API void intraop_launch(std::function<void()> func);
+
+ // Returns number of intra-op threads used by default
+ TORCH_API int intraop_default_num_threads();
+
+ } // namespace at
+
+ #if AT_PARALLEL_OPENMP
+ #include <ATen/ParallelOpenMP.h> // IWYU pragma: keep
+ #elif AT_PARALLEL_NATIVE
+ #include <ATen/ParallelNative.h> // IWYU pragma: keep
+ #elif AT_PARALLEL_NATIVE_TBB
+ #include <ATen/ParallelNativeTBB.h> // IWYU pragma: keep
+ #endif
+
+ #include <ATen/Parallel-inl.h> // IWYU pragma: keep
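
A minimal sketch matching the parallel_reduce walkthrough above -- summing 10000 floats with a grain size of 2500, i.e. four chunks -- using only raw pointers inside the body, per the thread-local-state warning:

    #include <ATen/Parallel.h>
    #include <vector>

    float sum_all(const std::vector<float>& v) {
      const float* data = v.data();
      return at::parallel_reduce(
          /*begin=*/int64_t(0),
          /*end=*/static_cast<int64_t>(v.size()),
          /*grain_size=*/int64_t(2500),
          /*ident=*/0.0f,
          // f: reduce one chunk [begin, end), starting from the identity.
          [data](int64_t begin, int64_t end, float acc) {
            for (int64_t i = begin; i < end; ++i) {
              acc += data[i];
            }
            return acc;
          },
          // sf: combine two partial results.
          [](float x, float y) { return x + y; });
    }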
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ParallelFuture.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <ATen/core/ivalue.h>
+ #include <c10/macros/Macros.h>
+ #include <functional>
+
+ namespace at {
+
+ // Launches intra-op parallel task, returns a future
+ TORCH_API c10::intrusive_ptr<c10::ivalue::Future> intraop_launch_future(
+     std::function<void()> func);
+
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h ADDED
@@ -0,0 +1,52 @@
+ #pragma once
+
+ #include <atomic>
+ #include <cstddef>
+ #include <exception>
+
+ #include <c10/util/Exception.h>
+
+ #ifdef _WIN32
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #endif
+ #endif
+ #include <tbb/tbb.h>
+
+ #define INTRA_OP_PARALLEL
+
+ namespace at::internal {
+
+ template <typename F>
+ inline void invoke_parallel(
+     const int64_t begin,
+     const int64_t end,
+     const int64_t grain_size,
+     const F& f) {
+   // Choose number of tasks based on grain size and number of threads.
+   int64_t chunk_size = divup((end - begin), get_num_threads());
+   // Make sure each task is at least grain_size size.
+   chunk_size = std::max(grain_size, chunk_size);
+
+   std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
+   std::exception_ptr eptr;
+   tbb::parallel_for(
+       tbb::blocked_range<int64_t>(begin, end, chunk_size),
+       [&eptr, &err_flag, f](const tbb::blocked_range<int64_t>& r) {
+         try {
+           internal::ThreadIdGuard tid_guard(
+               tbb::this_task_arena::current_thread_index());
+           f(r.begin(), r.end());
+         } catch (...) {
+           if (!err_flag.test_and_set()) {
+             eptr = std::current_exception();
+           }
+         }
+       },
+       tbb::static_partitioner{});
+   if (eptr) {
+     std::rethrow_exception(eptr);
+   }
+ }
+
+ } // namespace at::internal
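
To make the chunking arithmetic concrete: with begin = 0, end = 10000 and 8 worker threads, divup gives 1250, and a grain_size of 2000 then wins the std::max, so each TBB blocked_range covers at least 2000 indices. A standalone check of that arithmetic:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Mirrors at::divup and the chunk-size choice in invoke_parallel above.
    int64_t divup(int64_t x, int64_t y) { return (x + y - 1) / y; }

    int main() {
      int64_t chunk = divup(10000 - 0, /*num_threads=*/8); // 1250
      chunk = std::max<int64_t>(/*grain_size=*/2000, chunk);
      assert(chunk == 2000);
      return 0;
    }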
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ScalarOps.h ADDED
@@ -0,0 +1,71 @@
+ #pragma once
+
+ #include <ATen/Tensor.h>
+ #include <c10/core/Scalar.h>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/Functions.h>
+ #else
+ #include <ATen/ops/scalar_tensor.h>
+ #endif
+
+ namespace at::detail {
+ // When filling a number into a 1-element CPU tensor, we want to skip
+ // everything else and manipulate the data ptr directly.
+ // Ideally this fast pass should be implemented in TensorIterator,
+ // but we also want to skip compute_types, which is not avoidable
+ // in TensorIterator for now.
+ Tensor& scalar_fill(Tensor& self, const Scalar& value);
+ TORCH_API Tensor scalar_tensor_static(
+     const Scalar& s,
+     c10::optional<ScalarType> dtype_opt,
+     c10::optional<Device> device_opt);
+ } // namespace at::detail
+
+ // This is in the c10 namespace because we use ADL to find the functions in it.
+ namespace c10 {
+
+ // FIXME: this should be (and was) Scalar::toTensor, but there is currently no
+ // way to implement this without going through Derived Types (which are not part
+ // of core).
+ inline at::Tensor scalar_to_tensor(
+     const Scalar& s,
+     const Device device = at::kCPU) {
+   // This is the fast track we have for CPU scalar tensors.
+   if (device == at::kCPU) {
+     if (s.isFloatingPoint()) {
+       return at::detail::scalar_tensor_static(s, at::kDouble, at::kCPU);
+     } else if (s.isComplex()) {
+       return at::detail::scalar_tensor_static(s, at::kComplexDouble, at::kCPU);
+     } else if (s.isBoolean()) {
+       return at::detail::scalar_tensor_static(s, at::kBool, at::kCPU);
+     } else {
+       AT_ASSERT(s.isIntegral(false));
+       return at::detail::scalar_tensor_static(s, at::kLong, at::kCPU);
+     }
+   }
+   if (s.isFloatingPoint()) {
+     return at::scalar_tensor(s, at::device(device).dtype(at::kDouble));
+   } else if (s.isBoolean()) {
+     return at::scalar_tensor(s, at::device(device).dtype(at::kBool));
+   } else if (s.isComplex()) {
+     return at::scalar_tensor(s, at::device(device).dtype(at::kComplexDouble));
+   } else {
+     AT_ASSERT(s.isIntegral(false));
+     return at::scalar_tensor(s, at::device(device).dtype(at::kLong));
+   }
+ }
+
+ } // namespace c10
+
+ namespace at::native {
+
+ inline Tensor wrapped_scalar_tensor(
+     const Scalar& scalar,
+     const Device device = at::kCPU) {
+   auto tensor = scalar_to_tensor(scalar, device);
+   tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
+   return tensor;
+ }
+
+ } // namespace at::native
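
A short sketch of wrapped_scalar_tensor in use (standalone illustration): wrapping a Scalar so it participates in type promotion as a "wrapped number" rather than as an ordinary 0-dim tensor:

    #include <ATen/ScalarOps.h>

    at::Tensor add_scalar(const at::Tensor& self, const c10::Scalar& other) {
      // 0-dim CPU tensor flagged via set_wrapped_number(true), so the
      // promotion rules for Python-number operands apply to it.
      at::Tensor wrapped = at::native::wrapped_scalar_tensor(other);
      return self.add(wrapped);
    }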
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/SequenceNumber.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <c10/macros/Export.h>
+ #include <cstdint>
+
+ // A simple thread-local enumeration used to link forward and backward pass
+ // ops; it is used by the autograd and observers frameworks.
+ namespace at::sequence_number {
+
+ TORCH_API uint64_t peek();
+ TORCH_API uint64_t get_and_increment();
+
+ } // namespace at::sequence_number
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/SmallVector.h ADDED
@@ -0,0 +1,2 @@
+ #pragma once
+ #include <c10/util/SmallVector.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorImpl.h ADDED
@@ -0,0 +1,182 @@
+ #pragma once
+
+ #include <ATen/Tensor.h>
+ #include <c10/core/TensorImpl.h>
+ #include <c10/util/Exception.h>
+ namespace at {
+
+ // Struct implementing a sparse CSR tensor. It uses three 1-D tensors for
+ // denoting the data: `crow_indices_`, `col_indices_` and `values_`.
+ // The `crow_indices_` tensor is an integer tensor of shape `(size(0) + 1)`
+ // that represents the compressed row indices of the CSR tensor. The
+ // `col_indices_` tensor is an integer tensor of shape `(nnz())`
+ // that explicitly stores the column indices of each value of the sparse
+ // tensor. The `values_` tensor can be of any pytorch-supported data type
+ // and has shape `(nnz())`.
+ //
+ // Since the main advantage of the CSR format over the COO format is speed of
+ // computation, care must be taken to facilitate smooth interfacing of
+ // these data structures with optimized libraries such as MKL and MAGMA.
+ // Since the MKL interface for pytorch currently uses indexing with int32
+ // type, it is important to make sure that the `crow_indices` and `col_indices`
+ // are of type int32 when calling MKL routines such as SPMM or SPMV.
+ //
+ // If not calling MKL, it should be alright to use 64 bit integer tensors
+ // for indexing.
+ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
+   Tensor crow_indices_;
+   Tensor col_indices_;
+   Tensor values_;
+   Layout layout_;
+
+  public:
+   explicit SparseCsrTensorImpl(
+       at::DispatchKeySet,
+       at::Device device,
+       Layout layout,
+       const caffe2::TypeMeta);
+
+   void resize_(int64_t nnz, IntArrayRef size);
+   void resize_and_clear_(
+       int64_t sparse_dim,
+       int64_t dense_dim,
+       IntArrayRef size);
+   void resize_as_sparse_compressed_tensor_(const Tensor& src);
+   void set_member_tensors(
+       const Tensor& crow_indices,
+       const Tensor& col_indices,
+       const Tensor& values,
+       IntArrayRef size);
+
+   const Tensor& compressed_indices() const {
+     return crow_indices_;
+   }
+   const Tensor& plain_indices() const {
+     return col_indices_;
+   }
+   const Tensor& values() const {
+     return values_;
+   }
+   int64_t nnz() {
+     return col_indices_.size(-1);
+   }
+
+   inline int64_t batch_dim() const noexcept {
+     return crow_indices_.dim() - 1;
+   }
+
+   inline int64_t sparse_dim() const noexcept {
+     return 2;
+   }
+
+   inline int64_t dense_dim() const noexcept {
+     return values_.dim() - batch_dim() - block_dim() - 1;
+   }
+
+  private:
+   inline int64_t block_dim() const noexcept {
+     return (layout_ == kSparseBsr || layout_ == kSparseBsc ? 2 : 0);
+   }
+
+  protected:
+   IntArrayRef strides_custom() const override;
+   SymIntArrayRef sym_strides_custom() const override;
+   bool is_contiguous_custom(MemoryFormat) const override;
+
+  public:
+   void set_size(int64_t dim, int64_t new_size) override;
+   void set_stride(int64_t dim, int64_t new_stride) override;
+   void set_storage_offset(int64_t storage_offset) override;
+   Layout layout_impl() const override {
+     return layout_;
+   }
+   void set_layout(Layout layout) {
+     switch (layout) {
+       case kSparseCsr:
+       case kSparseCsc:
+       case kSparseBsr:
+       case kSparseBsc:
+         layout_ = layout;
+         break;
+       default:
+         TORCH_CHECK(false, "unsupported layout ", layout);
+     }
+   }
+
+   /**
+    * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+    *
+    * For usage of `version_counter` and `allow_tensor_metadata_change`,
+    * see NOTE [ TensorImpl Shallow-Copying ].
+    */
+   c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+       const c10::VariableVersion& version_counter,
+       bool allow_tensor_metadata_change) const override {
+     auto impl = c10::make_intrusive<SparseCsrTensorImpl>(
+         key_set(), device(), layout_impl(), dtype());
+     copy_tensor_metadata(
+         /*src_sparse_impl=*/this,
+         /*dest_sparse_impl=*/impl.get(),
+         /*version_counter=*/version_counter,
+         /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+     impl->refresh_numel();
+     return impl;
+   }
+
+   /**
+    * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+    *
+    * For usage of `version_counter` and `allow_tensor_metadata_change`,
+    * see NOTE [ TensorImpl Shallow-Copying ].
+    */
+   c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+       c10::VariableVersion&& version_counter,
+       bool allow_tensor_metadata_change) const override {
+     auto impl = c10::make_intrusive<SparseCsrTensorImpl>(
+         key_set(), device(), layout_impl(), dtype());
+     copy_tensor_metadata(
+         /*src_sparse_impl=*/this,
+         /*dest_sparse_impl=*/impl.get(),
+         /*version_counter=*/version_counter,
+         /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+     impl->refresh_numel();
+     return impl;
+   }
+
+  private:
+   explicit SparseCsrTensorImpl(
+       at::DispatchKeySet key_set,
+       const caffe2::TypeMeta data_type,
+       at::Tensor crow_indices,
+       at::Tensor col_indices,
+       at::Tensor values,
+       at::Layout layout);
+
+   const char* tensorimpl_type_name() const override;
+
+   /**
+    * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
+    * storage_offset) from one TensorImpl to another TensorImpl.
+    *
+    * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
+    * [ TensorImpl Shallow-Copying ].
+    */
+   static void copy_tensor_metadata(
+       const SparseCsrTensorImpl* src_sparse_impl,
+       SparseCsrTensorImpl* dest_sparse_impl,
+       const c10::VariableVersion& version_counter,
+       bool allow_tensor_metadata_change) {
+     TensorImpl::copy_tensor_metadata(
+         src_sparse_impl,
+         dest_sparse_impl,
+         version_counter,
+         allow_tensor_metadata_change);
+
+     // Sparse-specific fields
+     dest_sparse_impl->crow_indices_ = src_sparse_impl->compressed_indices();
+     dest_sparse_impl->col_indices_ = src_sparse_impl->plain_indices();
+     dest_sparse_impl->values_ = src_sparse_impl->values();
+     dest_sparse_impl->layout_ = src_sparse_impl->layout_impl();
+   }
+ };
+ } // namespace at
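
To ground the three-tensor layout described at the top of this header, a worked example: the 3x4 matrix [[0, 0, 1, 0], [2, 0, 0, 3], [0, 4, 0, 0]] has nnz = 4 and is stored as crow_indices = [0, 1, 3, 4], col_indices = [2, 0, 3, 1], values = [1, 2, 3, 4]. A sketch constructing it (the torch::sparse_csr_tensor factory from the public ATen/libtorch API is assumed here):

    #include <torch/torch.h>

    torch::Tensor make_example_csr() {
      auto crow = torch::tensor({0, 1, 3, 4}, torch::kLong); // size(0) + 1 entries
      auto col = torch::tensor({2, 0, 3, 1}, torch::kLong);  // one per nonzero
      auto vals = torch::tensor({1, 2, 3, 4}, torch::kFloat);
      return torch::sparse_csr_tensor(
          crow, col, vals, {3, 4}, torch::dtype(torch::kFloat));
    }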
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorUtils.h ADDED
@@ -0,0 +1,415 @@
+ #pragma once
+
+ #include <ATen/SparseCsrTensorImpl.h>
+ #include <ATen/SparseTensorImpl.h>
+ #include <ATen/core/Tensor.h>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/Functions.h>
+ #include <ATen/NativeFunctions.h>
+ #include <ATen/Operators.h>
+ #else
+ #include <ATen/ops/_sparse_compressed_tensor_unsafe.h>
+ #include <ATen/ops/resize_as_sparse_native.h>
+ #endif
+
+ #define AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS(LAYOUT, NAME, ...) \
+   [&] { \
+     const auto& the_layout = LAYOUT; \
+     switch (the_layout) { \
+       case kSparseCsr: \
+       case kSparseCsc: \
+       case kSparseBsr: \
+       case kSparseBsc: \
+         return __VA_ARGS__(); \
+       default: \
+         AT_ERROR( \
+             NAME, \
+             " expected sparse compressed tensor layout but got ", \
+             the_layout); \
+     } \
+   }()
+
+ #define AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( \
+     LAYOUT, NAME, ROW_DIM_ACTION, COLUMN_DIM_ACTION) \
+   [&]() { \
+     const auto& the_layout = LAYOUT; \
+     switch (the_layout) { \
+       case kSparseCsr: \
+       case kSparseBsr: \
+         return (ROW_DIM_ACTION)(); \
+       case kSparseCsc: \
+       case kSparseBsc: \
+         return (COLUMN_DIM_ACTION)(); \
+       default: \
+         AT_ERROR( \
+             NAME, \
+             " expected sparse compressed tensor layout but got ", \
+             the_layout); \
+     } \
+   }()
+
+ #define AT_DISPATCH_PLAIN_SPARSE_COMPRESSED_LAYOUTS( \
+     LAYOUT, NAME, NO_BLOCK_ACTION, BLOCK_ACTION) \
+   [&]() { \
+     const auto& the_layout = LAYOUT; \
+     switch (the_layout) { \
+       case kSparseCsr: \
+       case kSparseCsc: \
+         return (NO_BLOCK_ACTION)(); \
+       case kSparseBsr: \
+       case kSparseBsc: \
+         return (BLOCK_ACTION)(); \
+       default: \
+         AT_ERROR( \
+             NAME, \
+             " expected sparse compressed tensor layout but got ", \
+             the_layout); \
+     } \
+   }()
+
+ #define AT_DISPATCH_SPARSE_ROW_COMPRESSED_LAYOUTS( \
+     LAYOUT, NAME, ROW_DIM_ACTION) \
+   [&]() { \
+     const auto& the_layout = LAYOUT; \
+     switch (the_layout) { \
+       case kSparseCsr: \
+       case kSparseBsr: \
+         return (ROW_DIM_ACTION)(); \
+       default: \
+         AT_ERROR( \
+             NAME, \
+             " expected sparse row compressed tensor layout but got ", \
+             the_layout); \
+     } \
+   }()
+
+ #define AT_DISPATCH_SPARSE_COL_COMPRESSED_LAYOUTS( \
+     LAYOUT, NAME, COL_DIM_ACTION) \
+   [&]() { \
+     const auto& the_layout = LAYOUT; \
+     switch (the_layout) { \
+       case kSparseCsc: \
+       case kSparseBsc: \
+         return (COL_DIM_ACTION)(); \
+       default: \
+         AT_ERROR( \
+             NAME, \
+             " expected sparse column compressed tensor layout but got ", \
+             the_layout); \
+     } \
+   }()
+
+ #define AT_DISPATCH_SPARSE_COMPRESSED_NONBLOCK_LAYOUTS(LAYOUT, NAME, ACTION) \
+   [&]() { \
+     const auto& the_layout = LAYOUT; \
+     switch (the_layout) { \
+       case kSparseCsr: \
+       case kSparseCsc: \
+         return (ACTION)(); \
+       default: \
+         AT_ERROR( \
+             NAME, \
+             " expected sparse compressed (non-block) tensor layout but got ", \
+             the_layout); \
+     } \
+   }()
+
+ #define AT_DISPATCH_SPARSE_COMPRESSED_BLOCK_LAYOUTS(LAYOUT, NAME, ACTION) \
+   [&]() { \
+     const auto& the_layout = LAYOUT; \
+     switch (the_layout) { \
+       case kSparseBsr: \
+       case kSparseBsc: \
+         return (ACTION)(); \
+       default: \
+         AT_ERROR( \
+             NAME, \
+             " expected sparse compressed block tensor layout but got ", \
+             the_layout); \
+     } \
+   }()
+
+ #define AT_DISPATCH_SPARSE_VALUE_TYPES(TYPE, NAME, ...) \
+   AT_DISPATCH_SWITCH( \
+       TYPE, \
+       NAME, \
+       AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \
+           kComplexHalf, kHalf, kBool, kBFloat16, __VA_ARGS__))
+
+ namespace at {
+ namespace sparse_csr {
+
+ using SparseCsrTensor = Tensor;
+
+ inline bool is_sparse_compressed(const Layout& layout) {
+   switch (layout) {
+     case kSparseCsr:
+     case kSparseCsc:
+     case kSparseBsr:
+     case kSparseBsc:
+       return true;
+     default:;
+   }
+   return false;
+ }
+
+ inline bool is_sparse_compressed(const Tensor& self) {
+   return is_sparse_compressed(self.layout());
+ }
+
+ inline SparseCsrTensorImpl* get_sparse_csr_impl(const SparseCsrTensor& self) {
+   AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS(
+       self.layout(), "get_sparse_csr_impl", [&] {});
+   return static_cast<SparseCsrTensorImpl*>(self.unsafeGetTensorImpl());
+ }
+
+ inline std::string layoutToString(
+     Layout layout,
+     bool upper = false,
+     bool lower = false) {
+   switch (layout) {
+     case kSparseCsr:
+       return (upper ? "CSR" : (lower ? "csr" : "Csr"));
+     case kSparseCsc:
+       return (upper ? "CSC" : (lower ? "csc" : "Csc"));
+     case kSparseBsr:
+       return (upper ? "BSR" : (lower ? "bsr" : "Bsr"));
+     case kSparseBsc:
+       return (upper ? "BSC" : (lower ? "bsc" : "Bsc"));
+     default:
+       TORCH_CHECK(false, "Not a sparse compressed layout:", layout);
+       return "";
+   }
+ }
+
+ inline bool isCompressedRow(Layout layout) {
+   return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
+       layout, "isCompressedRow", [&] { return true; }, [&] { return false; });
+ }
+
+ inline bool isCompressedColumn(Layout layout) {
+   return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
+       layout,
+       "isCompressedColumn",
+       [&] { return false; },
+       [&] { return true; });
+ }
+
+ inline std::string compressedIndicesName(Layout layout) {
+   return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
+       layout,
+       "compressedIndicesName",
+       [&] { return "crow_indices"; },
+       [&] { return "ccol_indices"; });
+ }
+
+ inline std::string plainIndicesName(Layout layout) {
+   return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
+       layout,
+       "plainIndicesName",
+       [&] { return "col_indices"; },
+       [&] { return "row_indices"; });
+ }
+
+ inline std::string compressedDimName(Layout layout) {
+   switch (layout) {
+     case kSparseCsr:
+       return "row";
+     case kSparseCsc:
+       return "column";
+     case kSparseBsr:
+       return "row block";
+     case kSparseBsc:
+       return "column block";
+     default:
+       TORCH_CHECK(false, "Not a sparse compressed layout:", layout);
+       return "";
+   }
+ }
+
+ inline std::string plainDimName(Layout layout) {
+   switch (layout) {
+     case kSparseCsr:
+       return "column";
+     case kSparseCsc:
+       return "row";
+     case kSparseBsr:
+       return "column block";
+     case kSparseBsc:
+       return "row block";
+     default:
+       TORCH_CHECK(false, "Not a sparse compressed layout:", layout);
+       return "";
+   }
+ }
+
+ inline int rowDimension(Layout layout, IntArrayRef size) {
+   return size.size() - (isCompressedRow(layout) ? 2 : 1);
+ }
+
+ inline int columnDimension(Layout layout, IntArrayRef size) {
+   return size.size() - (isCompressedColumn(layout) ? 2 : 1);
+ }
+
+ inline int compressedDimension(
+     Layout layout,
+     IntArrayRef size,
+     size_t dense_ndim = 0) {
+   return size.size() - dense_ndim - (isCompressedRow(layout) ? 2 : 1);
+ }
+
+ inline int plainDimension(
+     Layout layout,
+     IntArrayRef size,
+     size_t dense_ndim = 0) {
+   return size.size() - dense_ndim - (isCompressedRow(layout) ? 1 : 2);
+ }
+
+ inline int64_t numBatchDimensions(Tensor const& self) {
+   return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
+       self.layout(),
+       "numBatchDimensions",
+       [&self] { return self.crow_indices().dim() - 1; },
+       [&self] { return self.ccol_indices().dim() - 1; });
+ }
+
+ inline std::pair<Tensor, Tensor> getCompressedPlainIndices(Tensor const& self) {
+   return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
+       self.layout(),
+       "getCompressedPlainIndices",
+       [&self] {
+         return std::make_pair(self.crow_indices(), self.col_indices());
+       },
+       [&self] {
+         return std::make_pair(self.ccol_indices(), self.row_indices());
+       });
+ }
+
+ inline Layout flip_compressed_layout(Layout layout) {
+   switch (layout) {
+     case kSparseCsr:
+       return kSparseCsc;
+     case kSparseCsc:
+       return kSparseCsr;
+     case kSparseBsr:
+       return kSparseBsc;
+     case kSparseBsc:
+       return kSparseBsr;
+     default:
+       TORCH_CHECK(false, "Not a sparse compressed layout:", layout);
+       return kSparseCsr;
+   }
+ }
+
+ inline DimVector getBlockSize(Tensor const& self) {
+   int64_t n_batch = numBatchDimensions(self);
+   return at::DimVector(self.values().sizes().slice(n_batch + 1, 2));
+ }
+
+ inline at::OptionalArray<at::SymInt> getSymIntBlockSize(Tensor const& self) {
+   if (self.layout() == at::kSparseBsr || self.layout() == at::kSparseBsc) {
+     int64_t n_batch = numBatchDimensions(self);
+     return self.values().sym_sizes().slice(n_batch + 1, 2).vec();
+   } else {
+     return {};
+   }
+ }
+
+ template <typename binary_op_t, typename binary_op_out_t>
+ inline bool only_sparse_compressed_binary_op_trivial_cases(
+     const Tensor& self,
+     const Tensor& other,
+     const Scalar& alpha,
+     Tensor& out,
+     const binary_op_t& binary_op,
+     const binary_op_out_t& binary_op_out) {
+   // Only sparse compressed! Just like the name says :)
+   TORCH_INTERNAL_ASSERT(at::sparse_csr::is_sparse_compressed(self));
+   TORCH_INTERNAL_ASSERT(at::sparse_csr::is_sparse_compressed(other));
+   TORCH_INTERNAL_ASSERT(at::sparse_csr::is_sparse_compressed(out));
+
+   // Bypass BLAS if there are matches in (self, other, out)
+   if (self.is_same(out) && self.is_same(other)) {
+     binary_op_out(self.values(), other.values(), alpha);
+     return true;
+   }
+   if (self.is_same(other)) {
+     Tensor compressed_indices, plain_indices;
+     std::tie(compressed_indices, plain_indices) =
+         at::sparse_csr::getCompressedPlainIndices(self);
+     static_cast<SparseCsrTensorImpl*>(out.unsafeGetTensorImpl())
+         ->set_member_tensors(
+             compressed_indices,
+             plain_indices,
+             binary_op(self.values(), other.values(), alpha),
+             self.sizes());
+     return true;
+   }
+   return false;
+ }
+
+ inline bool only_sparse_compressed_add_trivial_cases(
+     const Tensor& self,
+     const Tensor& other,
+     const Scalar& alpha,
+     Tensor& out) {
+   return only_sparse_compressed_binary_op_trivial_cases(
+       self,
+       other,
+       alpha,
+       out,
+       [](const Tensor& v1, const Tensor& v2, const Scalar& alpha) {
+         return v1.add(v2, alpha);
+       },
+       [](const Tensor& v1, const Tensor& v2, const Scalar& alpha) {
+         return v1.add_(v2, alpha);
+       });
+ }
+
+ inline Tensor to_type(Tensor input, ScalarType dtype) {
+   Tensor compressed_indices, plain_indices;
+   std::tie(compressed_indices, plain_indices) =
+       at::sparse_csr::getCompressedPlainIndices(input);
+   return at::_sparse_compressed_tensor_unsafe(
+       std::move(compressed_indices),
+       std::move(plain_indices),
+       std::move(input.values()).to(dtype),
+       input.sizes(),
+       dtype,
+       input.layout(),
+       input.device(),
+       input.options().pinned_memory_opt());
+ }
+
+ template <typename acc_t, typename scalar_t>
+ inline std::tuple<Tensor, Tensor> create_acc_buffer(
+     TensorOptions option,
+     ScalarType type,
+     int64_t nnz = -1) {
+   Tensor new_values, new_values_acc;
+   constexpr bool need_acc = !std::is_same<scalar_t, acc_t>::value;
+   bool is_integral = at::isIntegralType(type, /*includeBool=*/true);
+   if constexpr (need_acc) {
+     auto acc_dtype = CppTypeToScalarType<acc_t>::value;
+     new_values_acc = at::empty({}, option.dtype(acc_dtype));
+     new_values = is_integral ? new_values_acc : at::empty({}, option);
+   } else {
+     new_values = new_values_acc = at::empty({}, option);
+   }
+   if (nnz != -1) {
+     return std::make_tuple(
+         new_values.resize_(nnz), new_values_acc.resize_(nnz));
+   } else {
+     return std::make_tuple(new_values, new_values_acc);
+   }
+ }
+
+ inline void copy_from_acc_buffer(Tensor& new_values, Tensor& new_values_acc) {
+   if (!new_values_acc.is_same(new_values)) {
+     new_values.copy_(new_values_acc);
+   }
+ }
+
+ } // namespace sparse_csr
+ } // namespace at
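
A brief sketch of how these dispatch macros are consumed (a hypothetical helper, not from this header): pick a per-layout action once and get the "unexpected layout" error handling for free:

    #include <ATen/SparseCsrTensorUtils.h>

    // Length of the compressed dimension for any 2-D sparse compressed
    // tensor, dispatching on row- vs column-compressed layouts.
    inline int64_t compressed_dim_length(const at::Tensor& t) {
      return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
          t.layout(),
          "compressed_dim_length",
          [&] { return t.size(-2); },  // kSparseCsr / kSparseBsr: rows
          [&] { return t.size(-1); }); // kSparseCsc / kSparseBsc: columns
    }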
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/SparseTensorImpl.h ADDED
@@ -0,0 +1,400 @@
+ #pragma once
+
+ #include <ATen/Tensor.h>
+ #include <c10/core/TensorImpl.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/irange.h>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/Functions.h>
+ #else
+ #include <ATen/ops/empty.h>
+ #include <ATen/ops/resize.h>
+ #endif
+
+ namespace at {
+ struct TORCH_API SparseTensorImpl : public TensorImpl {
+   // Stored in COO format, indices + values.
+
+   // INVARIANTS:
+   // sparse_dim: range [0, len(shape)]; sparse_dim + dense_dim = len(shape)
+   // dense_dim : range [0, len(shape)]; sparse_dim + dense_dim = len(shape)
+   // _indices.shape: dimensionality: 2,  shape: (sparse_dim, nnz)
+   // _values.shape:  dimensionality: 1 + dense_dim.  shape: (nnz,
+   // shape[sparse_dim:])
+
26
+ int64_t sparse_dim_ = 0; // number of sparse dimensions
27
+ int64_t dense_dim_ = 0; // number of dense dimensions
28
+
29
+ Tensor indices_; // always a LongTensor
30
+ Tensor values_;
31
+
32
+ // A sparse tensor is 'coalesced' if every index occurs at most once in
33
+ // the indices tensor, and the indices are in sorted order. (This means
34
+ // that it is very easy to convert a coalesced tensor to CSR format: you
35
+ // need only compute CSR format indices.)
36
+ //
37
+ // Most math operations can only be performed on coalesced sparse tensors,
38
+ // because many algorithms proceed by merging two sorted lists (of indices).
39
+ bool coalesced_ = false;
40
+
41
+ // compute_numel with integer multiplication overflow check, see gh-57542
42
+ void refresh_numel() {
43
+ TensorImpl::safe_refresh_numel();
44
+ }
45
+
46
+ public:
47
+ // Public for now...
48
+ explicit SparseTensorImpl(at::DispatchKeySet, const caffe2::TypeMeta);
49
+
50
+ void release_resources() override;
51
+
52
+ int64_t nnz() const {
53
+ return values_.size(0);
54
+ }
55
+
56
+ c10::SymInt sym_nnz() const {
57
+ return values_.sym_size(0);
58
+ }
59
+ int64_t sparse_dim() const {
60
+ return sparse_dim_;
61
+ }
62
+ int64_t dense_dim() const {
63
+ return dense_dim_;
64
+ }
65
+ bool coalesced() const {
66
+ return coalesced_;
67
+ }
68
+ Tensor indices() const {
69
+ return indices_;
70
+ }
71
+ Tensor values() const {
72
+ return values_;
73
+ }
74
+
75
+ void set_size(int64_t dim, int64_t new_size) override;
76
+ void set_stride(int64_t dim, int64_t new_stride) override;
77
+ void set_storage_offset(int64_t storage_offset) override;
78
+
79
+ #ifdef DEBUG
80
+ bool has_storage() const override;
81
+ #endif
82
+
83
+ // WARNING: This function does NOT preserve invariants of sparse_dim/dense_dim
84
+ // with respect to indices and values
85
+ void raw_resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
86
+ TORCH_CHECK(
87
+ allow_tensor_metadata_change(),
88
+ "raw_resize_ ",
89
+ err_msg_tensor_metadata_change_not_allowed);
90
+ TORCH_CHECK(
91
+ !has_symbolic_sizes_strides_,
92
+ "raw_resize_ called on tensor with symbolic shape")
93
+ set_sizes_and_strides(size, std::vector<int64_t>(size.size()));
94
+ sparse_dim_ = sparse_dim;
95
+ dense_dim_ = dense_dim;
96
+ refresh_numel();
97
+ }
98
+
99
+ // NOTE: This function preserves invariants of sparse_dim/dense_dim with
100
+ // respect to indices and values.
101
+ //
102
+ // NOTE: This function supports the following cases:
103
+ // 1. When we keep the number of dense dimensions unchanged, and NOT shrinking
104
+ // the size of any of the dense dimensions.
105
+ // 2. When we keep the number of sparse dimensions unchanged, and NOT
106
+ // shrinking the size of any of the sparse dimensions.
107
+ // 3. When the sparse tensor has zero nnz, in which case we are free to change
108
+ // the shapes of both its sparse and dense dimensions.
109
+ //
110
+ // This function DOESN'T support (and will throw an error) the following
111
+ // cases:
112
+ // 1. When we attempt to change the number of sparse dimensions on a non-empty
113
+ // sparse tensor (such an operation will invalidate the indices stored).
114
+ // 2. When we attempt to change the number of dense dimensions on a non-empty
115
+ // sparse tensor (such an operation will behave differently from an equivalent
116
+ // dense tensor's resize method, and for API consistency we don't support it).
117
+ // 3. When we attempt to shrink the size of any of the dense dimensions on a
118
+ // non-empty sparse tensor (such an operation will behave differently from an
119
+ // equivalent dense tensor's resize method, and for API consistency we don't
120
+ // support it).
121
+ // 4. When we attempt to shrink the size of any of the sparse dimensions on a
122
+ // non-empty sparse tensor (this could make some of the stored indices
123
+ // out-of-bound and thus unsafe).
124
+ template <typename T>
125
+ void _resize_(int64_t sparse_dim, int64_t dense_dim, ArrayRef<T> size) {
126
+ TORCH_CHECK(
127
+ allow_tensor_metadata_change(),
128
+ "resize_ ",
129
+ err_msg_tensor_metadata_change_not_allowed);
130
+ TORCH_CHECK(
131
+ !has_symbolic_sizes_strides_,
132
+ "resize_ called on tensor with symbolic shape")
133
+ TORCH_CHECK(
134
+ sparse_dim + dense_dim == static_cast<int64_t>(size.size()),
135
+ "number of dimensions must be sparse_dim (",
136
+ sparse_dim,
137
+ ") + dense_dim (",
138
+ dense_dim,
139
+ "), but got ",
140
+ size.size());
141
+ if (nnz() > 0) {
142
+ auto alt_options_msg =
143
+ "You could try the following options:\n\
144
+ 1. If you need an empty sparse tensor of this size, call `x = torch.sparse_coo_tensor(size)`.\n\
145
+ 2. If you need to resize this tensor, you have the following options:\n\
146
+ 1. For both sparse and dense dimensions, keep the number of them constant and the size of them non-shrinking, and then try the same call again.\n\
147
+ 2. Or, create a new sparse tensor with the correct indices and values from this sparse tensor.";
148
+
149
+ TORCH_CHECK(
150
+ sparse_dim == sparse_dim_,
151
+ "changing the number of sparse dimensions (from ",
152
+ sparse_dim_,
153
+ " to ",
154
+ sparse_dim,
155
+ ") on a non-empty sparse tensor is not supported.\n",
156
+ alt_options_msg);
157
+
158
+ TORCH_CHECK(
159
+ dense_dim == dense_dim_,
160
+ "changing the number of dense dimensions (from ",
161
+ dense_dim_,
162
+ " to ",
163
+ dense_dim,
164
+ ") on a non-empty sparse tensor is not supported.\n",
165
+ alt_options_msg);
166
+
167
+ bool shrinking_sparse_dims = false;
168
+ bool shrinking_dense_dim = false;
169
+ auto sparse_size_original = generic_sizes<T>().slice(0, sparse_dim);
170
+ auto sparse_size_new = size.slice(0, sparse_dim);
171
+ for (const auto i : c10::irange(sparse_dim)) {
172
+ if (sparse_size_new[i] < sparse_size_original[i]) {
173
+ shrinking_sparse_dims = true;
174
+ break;
175
+ }
176
+ }
177
+ auto dense_size_original = generic_sizes<T>().slice(sparse_dim);
178
+ auto dense_size_new = size.slice(sparse_dim);
179
+ for (const auto i : c10::irange(dense_dim)) {
180
+ if (dense_size_new[i] < dense_size_original[i]) {
181
+ shrinking_dense_dim = true;
182
+ break;
183
+ }
184
+ }
185
+
186
+ TORCH_CHECK(
187
+ !shrinking_sparse_dims,
188
+ "shrinking the size of sparse dimensions (from ",
189
+ sparse_size_original,
190
+ " to ",
191
+ sparse_size_new,
192
+ ") on a non-empty sparse tensor is not supported.\n",
193
+ alt_options_msg);
194
+
195
+ TORCH_CHECK(
196
+ !shrinking_dense_dim,
197
+ "shrinking the size of dense dimensions (from ",
198
+ dense_size_original,
199
+ " to ",
200
+ dense_size_new,
201
+ ") on a non-empty sparse tensor is not supported.\n",
202
+ alt_options_msg);
203
+ }
204
+
205
+ auto sizes_and_strides = generic_sizes<T>();
206
+ const bool size_equals_sizes = std::equal(
207
+ size.begin(),
208
+ size.end(),
209
+ sizes_and_strides.begin(),
210
+ sizes_and_strides.end());
211
+ if ((!size_equals_sizes) || (sparse_dim != sparse_dim_) ||
212
+ (dense_dim != dense_dim_)) {
213
+ auto nnz = at::symint::sizes<T>(values())[0];
214
+ std::vector<T> values_size = {nnz};
215
+ auto dense_size = size.slice(sparse_dim);
216
+ values_size.insert(
217
+ values_size.end(), dense_size.begin(), dense_size.end());
218
+ at::symint::resize_<T>(values_, values_size);
219
+ at::symint::resize_<T>(indices_, {T(sparse_dim), nnz});
220
+ }
221
+
222
+ if (!size_equals_sizes) {
223
+ set_sizes_and_strides(size, std::vector<T>(size.size()));
224
+ }
225
+ sparse_dim_ = sparse_dim;
226
+ dense_dim_ = dense_dim;
227
+ refresh_numel();
228
+ }
229
+
230
+ void resize_(int64_t sparse_dim, int64_t dense_dim, ArrayRef<int64_t> size) {
231
+ return _resize_(sparse_dim, dense_dim, size);
232
+ }
233
+
234
+ void resize_(
235
+ int64_t sparse_dim,
236
+ int64_t dense_dim,
237
+ ArrayRef<c10::SymInt> size) {
238
+ return _resize_(sparse_dim, dense_dim, size);
239
+ }
240
+
241
+ // NOTE: this function will resize the sparse tensor and also set `indices`
242
+ // and `values` to empty.
243
+ void resize_and_clear_(
244
+ int64_t sparse_dim,
245
+ int64_t dense_dim,
246
+ IntArrayRef size) {
247
+ TORCH_CHECK(
248
+ allow_tensor_metadata_change(),
249
+ "resize_and_clear_ ",
250
+ err_msg_tensor_metadata_change_not_allowed);
251
+ TORCH_CHECK(
252
+ !has_symbolic_sizes_strides_,
253
+ "resize_and_clear_ called on tensor with symbolic shape")
254
+ TORCH_CHECK(
255
+ sparse_dim + dense_dim == static_cast<int64_t>(size.size()),
256
+ "number of dimensions must be sparse_dim (",
257
+ sparse_dim,
258
+ ") + dense_dim (",
259
+ dense_dim,
260
+ "), but got ",
261
+ size.size());
262
+
263
+ set_sizes_and_strides(size, std::vector<int64_t>(size.size()));
264
+ sparse_dim_ = sparse_dim;
265
+ dense_dim_ = dense_dim;
266
+
267
+ auto empty_indices = at::empty({sparse_dim, 0}, indices().options());
268
+ std::vector<int64_t> values_size = {0};
269
+ auto dense_size = sizes().slice(sparse_dim);
270
+ values_size.insert(values_size.end(), dense_size.begin(), dense_size.end());
271
+ auto empty_values = at::empty(values_size, values().options());
272
+ set_indices_and_values_unsafe(empty_indices, empty_values);
273
+ refresh_numel();
274
+ }
275
+
276
+ void set_coalesced(bool coalesced) {
277
+ TORCH_CHECK(
278
+ allow_tensor_metadata_change(),
279
+ "set_coalesced ",
280
+ err_msg_tensor_metadata_change_not_allowed);
281
+ coalesced_ = coalesced;
282
+ }
283
+
284
+ // NOTE: this function is only used internally and not exposed to Python
285
+ // frontend
286
+ void set_nnz_and_narrow(int64_t new_nnz) {
287
+ TORCH_CHECK(
288
+ allow_tensor_metadata_change(),
289
+ "set_nnz_and_narrow ",
290
+ err_msg_tensor_metadata_change_not_allowed);
291
+ AT_ASSERT(new_nnz <= nnz());
292
+ indices_ = indices_.narrow(1, 0, new_nnz);
293
+ values_ = values_.narrow(0, 0, new_nnz);
294
+ if (new_nnz < 2) {
295
+ coalesced_ = true;
296
+ }
297
+ }
298
+
299
+ // Takes indices and values and directly puts them into the sparse tensor, no
300
+ // copy. NOTE: this function is unsafe because it doesn't check whether any
301
+ // indices are out of boundaries of `sizes`, so it should ONLY be used where
302
+ // we know that the indices are guaranteed to be within bounds. This used to
303
+ // be called THSTensor_(_move) NB: This used to be able to avoid a refcount
304
+ // bump, but I was too lazy to make it happen
305
+ void set_indices_and_values_unsafe(
306
+ const Tensor& indices,
307
+ const Tensor& values);
308
+
309
+ /**
310
+ * Return a TensorImpl that is a shallow-copy of this TensorImpl.
311
+ *
312
+ * For usage of `version_counter` and `allow_tensor_metadata_change`,
313
+ * see NOTE [ TensorImpl Shallow-Copying ].
314
+ */
315
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
316
+ const c10::VariableVersion& version_counter,
317
+ bool allow_tensor_metadata_change) const override {
318
+ auto impl = c10::make_intrusive<SparseTensorImpl>(key_set(), dtype());
319
+ copy_tensor_metadata(
320
+ /*src_impl=*/this,
321
+ /*dest_impl=*/impl.get(),
322
+ /*version_counter=*/version_counter,
323
+ /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
324
+ impl->refresh_numel();
325
+ return impl;
326
+ }
327
+
328
+ /**
329
+ * Return a TensorImpl that is a shallow-copy of this TensorImpl.
330
+ *
331
+ * For usage of `version_counter` and `allow_tensor_metadata_change`,
332
+ * see NOTE [ TensorImpl Shallow-Copying ].
333
+ */
334
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
335
+ c10::VariableVersion&& version_counter,
336
+ bool allow_tensor_metadata_change) const override {
337
+ auto impl = c10::make_intrusive<SparseTensorImpl>(key_set(), dtype());
338
+ copy_tensor_metadata(
339
+ /*src_impl=*/this,
340
+ /*dest_impl=*/impl.get(),
341
+ /*version_counter=*/std::move(version_counter),
342
+ /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
343
+ impl->refresh_numel();
344
+ return impl;
345
+ }
346
+
347
+ /**
348
+ * Shallow-copies data from another TensorImpl into this TensorImpl.
349
+ *
350
+ * For why this function doesn't check this TensorImpl's
351
+ * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ].
352
+ */
353
+ void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
354
+ AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set()));
355
+ auto sparse_impl = static_cast<const SparseTensorImpl*>(impl.get());
356
+ copy_tensor_metadata(
357
+ /*src_impl=*/sparse_impl,
358
+ /*dest_impl=*/this,
359
+ /*version_counter=*/version_counter(),
360
+ /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
361
+ refresh_numel();
362
+ }
363
+
364
+ private:
365
+ explicit SparseTensorImpl(
366
+ at::DispatchKeySet,
367
+ const caffe2::TypeMeta,
368
+ at::Tensor indices,
369
+ at::Tensor values);
370
+
371
+ /**
372
+ * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
373
+ * storage_offset) from one TensorImpl to another TensorImpl.
374
+ *
375
+ * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
376
+ * [ TensorImpl Shallow-Copying ].
377
+ */
378
+ static void copy_tensor_metadata(
379
+ const SparseTensorImpl* src_sparse_impl,
380
+ SparseTensorImpl* dest_sparse_impl,
381
+ const c10::VariableVersion& version_counter,
382
+ bool allow_tensor_metadata_change) {
383
+ TensorImpl::copy_tensor_metadata(
384
+ src_sparse_impl,
385
+ dest_sparse_impl,
386
+ version_counter,
387
+ allow_tensor_metadata_change);
388
+
389
+ // Sparse-specific fields
390
+ dest_sparse_impl->sparse_dim_ = src_sparse_impl->sparse_dim();
391
+ dest_sparse_impl->dense_dim_ = src_sparse_impl->dense_dim();
392
+ dest_sparse_impl->indices_ = src_sparse_impl->indices();
393
+ dest_sparse_impl->values_ = src_sparse_impl->values();
394
+ dest_sparse_impl->coalesced_ = src_sparse_impl->coalesced();
395
+ }
396
+
397
+ const char* tensorimpl_type_name() const override;
398
+ };
399
+
400
+ } // namespace at
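To see the invariants above in action, here is a small standalone sketch that goes through the public ATen API rather than touching SparseTensorImpl directly (the impl is an internal detail). All calls below are real ATen entry points; the program assumes a libtorch build:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  // indices has shape (sparse_dim, nnz) = (2, 3); values has shape (nnz,),
  // so dense_dim = 0, matching the INVARIANTS comment above.
  auto indices = at::tensor({0, 1, 1, 2, 0, 2}, at::kLong).reshape({2, 3});
  auto values = at::tensor({3.0f, 4.0f, 5.0f});
  auto s = at::sparse_coo_tensor(indices, values, {4, 4});

  // sparse_dim + dense_dim == len(shape)
  std::cout << s.sparse_dim() << " + " << s.dense_dim()
            << " == " << s.dim() << "\n"; // prints "2 + 0 == 2"
  std::cout << "nnz = " << s._nnz() << "\n"; // nnz = 3

  // coalesce() sorts and deduplicates indices, establishing the coalesced
  // invariant described above.
  auto c = s.coalesce();
  std::cout << "coalesced: " << c.is_coalesced() << "\n"; // coalesced: 1
}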
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/StorageUtils.h ADDED
@@ -0,0 +1,49 @@
+ #pragma once
+
+ #include <c10/core/Storage.h>
+ #include <c10/core/StorageImpl.h>
+ #include <c10/util/intrusive_ptr.h>
+
+ namespace at {
+
+ class TensorBase;
+
+ // Here we define a series of utils to create/manipulate ATen-backed
+ // c10 storage implementations.
+
+ /**
+ * Create a new shared memory storage impl managed by a file descriptor.
+ *
+ * @param size size in bytes
+ */
+ C10_EXPORT c10::intrusive_ptr<c10::StorageImpl> new_shm_fd_storage(size_t size);
+
+ /**
+ * Copy src to dst.
+ * The caller must guarantee the validity of the storage objects
+ * during the entire copy process, esp. when it's async.
+ *
+ * This can probably live in the c10 namespace later if needed,
+ * but for now keep it in at to keep the implementation simple.
+ *
+ * @param dst dst tensor
+ * @param src src tensor
+ * @param non_blocking (default false) whether this operation blocks the caller
+ */
+ C10_EXPORT void storage_copy(
+ c10::Storage& dst,
+ const c10::Storage& src,
+ bool non_blocking = false);
+
+ /**
+ * In-place, change the storage to be shm-based.
+ *
+ * This is only applicable to CPU tensors not already shared.
+ * Otherwise, it's a no-op to mirror the THP tensor behavior:
+ * https://pytorch.org/docs/stable/generated/torch.Tensor.share_memory_.html
+ *
+ * @param t a tensor
+ */
+ C10_EXPORT void share_memory_(TensorBase& t);
+
+ } // namespace at
@@ -0,0 +1,144 @@
+ #pragma once
+
+ #include <ATen/core/TensorBase.h>
+ #include <c10/core/WrapDimMinimal.h>
+
+ namespace at {
+
+ // Return whether the tensor geometry represented by `sizes` and `strides` is
+ // contiguous. Although we cache is_contiguous in tensor now, this is still
+ // useful because it allows checking if a particular geometry is contiguous
+ // without explicitly constructing a tensor, e.g., when you want to choose a
+ // kernel strategy based on whether a subgeometry is contiguous.
+ TORCH_API bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides);
+
+ struct TORCH_API TensorGeometry {
+ TensorGeometry() = default;
+
+ explicit TensorGeometry(c10::SymIntArrayRef sizes)
+ : sizes_(sizes.vec()),
+ strides_(sizes.size()),
+ has_symbolic_sizes_strides_(
+ !c10::asIntArrayRefSlowOpt(sizes).has_value()) {
+ int64_t dim = sizes.size();
+ c10::SymInt expected_stride = 1;
+ for (int64_t i = dim - 1; i >= 0; i--) {
+ strides_[i] = expected_stride;
+ expected_stride *= sizes_[i];
+ }
+ numel_ = expected_stride;
+ }
+
+ explicit TensorGeometry(const TensorBase& t)
+ : sizes_(t.sym_sizes().vec()),
+ strides_(t.sym_strides().vec()),
+ storage_offset_(t.sym_storage_offset()),
+ numel_(t.sym_numel()),
+ has_symbolic_sizes_strides_(
+ t.unsafeGetTensorImpl()->has_symbolic_sizes_strides()) {}
+
+ // true if the tensor is contiguous
+ bool is_contiguous() const;
+
+ int64_t dim() const {
+ return sizes_.size();
+ }
+
+ int64_t size(int64_t dim) const {
+ TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
+ dim = c10::maybe_wrap_dim(dim, this->dim());
+ return sizes_.at(static_cast<size_t>(dim)).as_int_unchecked();
+ }
+ c10::IntArrayRef sizes() const {
+ TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
+ return c10::asIntArrayRefUnchecked(sizes_);
+ }
+ int64_t stride(int64_t dim) const {
+ TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
+ dim = c10::maybe_wrap_dim(dim, this->dim());
+ return strides_.at(static_cast<size_t>(dim)).as_int_unchecked();
+ }
+ c10::IntArrayRef strides() const {
+ TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
+ return c10::asIntArrayRefUnchecked(strides_);
+ }
+ int64_t storage_offset() const {
+ TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
+ return storage_offset_.as_int_unchecked();
+ }
+ int64_t numel() const {
+ TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
+ return numel_.as_int_unchecked();
+ }
+
+ c10::SymInt sym_size(int64_t dim) const {
+ dim = c10::maybe_wrap_dim(dim, this->dim());
+ return sizes_.at(static_cast<size_t>(dim));
+ }
+ c10::SymIntArrayRef sym_sizes() const {
+ return sizes_;
+ }
+ c10::SymInt sym_stride(int64_t dim) const {
+ dim = c10::maybe_wrap_dim(dim, this->dim());
+ return strides_.at(static_cast<size_t>(dim));
+ }
+ c10::SymIntArrayRef sym_strides() const {
+ return strides_;
+ }
+ c10::SymInt sym_storage_offset() const {
+ return storage_offset_;
+ }
+ c10::SymInt sym_numel() const {
+ return numel_;
+ }
+
+ TensorGeometry transpose(int64_t dim0, int64_t dim1) {
+ TensorGeometry r = *this; // copy
+ TORCH_CHECK(
+ dim0 < dim(),
+ "transpose: dim0=",
+ dim0,
+ " out of range (dim=",
+ dim(),
+ ")")
+ TORCH_CHECK(
+ dim1 < dim(),
+ "transpose: dim1=",
+ dim1,
+ " out of range (dim=",
+ dim(),
+ ")")
+ std::swap(r.sizes_[dim0], r.sizes_[dim1]);
+ std::swap(r.strides_[dim0], r.strides_[dim1]);
+ return r;
+ }
+
+ std::vector<c10::SymInt>& mutable_sizes() {
+ return sizes_;
+ }
+ std::vector<c10::SymInt>& mutable_strides() {
+ return strides_;
+ }
+ c10::SymInt& mutable_storage_offset() {
+ return storage_offset_;
+ }
+ void recompute() {
+ // recalculate numel after a change
+ c10::SymInt numel = 1;
+ for (const auto& i : sizes_) {
+ numel = numel * i;
+ }
+ numel_ = std::move(numel);
+ has_symbolic_sizes_strides_ =
+ !c10::asIntArrayRefSlowOpt(sizes_).has_value();
+ }
+
+ private:
+ std::vector<c10::SymInt> sizes_;
+ std::vector<c10::SymInt> strides_;
+ c10::SymInt storage_offset_;
+ c10::SymInt numel_;
+ bool has_symbolic_sizes_strides_{false};
+ };
+
+ } // namespace at
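Finally, a sketch of the two entry points in this header: the free function answers the contiguity question for a bare geometry, and TensorGeometry carries the same sizes/strides through metadata-only transforms such as transpose. Assumes a libtorch build; expected outputs are noted in comments:

#include <ATen/ATen.h>
#include <ATen/TensorGeometry.h>
#include <iostream>

int main() {
  // Row-major strides for a 2x3 geometry are {3, 1}: contiguous.
  std::cout << at::geometry_is_contiguous({2, 3}, {3, 1}) << "\n"; // 1
  // Column-major strides {1, 3} for a 3x2 geometry are not.
  std::cout << at::geometry_is_contiguous({3, 2}, {1, 3}) << "\n"; // 0

  // TensorGeometry snapshots a tensor's sizes/strides; transpose() swaps
  // metadata only, so the transposed geometry stops being contiguous.
  at::TensorGeometry g(at::empty({2, 3}));
  std::cout << g.is_contiguous() << "\n";                 // 1
  std::cout << g.transpose(0, 1).is_contiguous() << "\n"; // 0
}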