applied-ai-018 committed
Commit 83d18ca · verified · 1 Parent(s): fe58839

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ATen.h +37 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h +153 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Backend.h +2 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h +33 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions.h +29 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions_inl.h +576 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions.h +29 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h +29 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h +323 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions.h +29 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h +25 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Context.h +560 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h +25 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Device.h +2 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DeviceAccelerator.h +27 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h +41 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Dimname.h +1 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ExpandBase.h +30 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Formatting.h +1 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/FunctionalStorageImpl.h +126 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Functions.h +1427 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/InferSize.h +87 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h +15 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h +31 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h +139 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MatrixRef.h +109 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MemoryOverlap.h +42 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions_inl.h +324 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h +215 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h +283 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NumericUtils.h +203 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h +69 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/PTThreadPool.h +17 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelFuture.h +13 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h +52 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h +54 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/RedispatchFunctions.h +0 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h +0 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Scalar.h +3 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/SmallVector.h +2 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/SparseTensorImpl.h +400 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h +144 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorIndexing.h +735 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h +137 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h +75 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorOperators.h +51 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorOptions.h +2 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorSubclassLikeUtils.h +86 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalPythonObjects.h +21 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Version.h +18 -0
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ATen.h ADDED
@@ -0,0 +1,37 @@
+ #pragma once
+
+ #if !defined(_MSC_VER) && __cplusplus < 201703L
+ #error C++17 or later compatible compiler is required to use ATen.
+ #endif
+
+ #include <ATen/Context.h>
+ #include <ATen/Device.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/DimVector.h>
+ #include <ATen/Dispatch.h>
+ #include <ATen/Formatting.h>
+ #include <ATen/Functions.h>
+ #include <ATen/NamedTensor.h>
+ #include <ATen/ScalarOps.h>
+ #include <ATen/Tensor.h>
+ #include <ATen/TensorGeometry.h>
+ #include <ATen/TensorIndexing.h>
+ #include <ATen/TensorOperators.h>
+ #include <ATen/Version.h>
+ #include <ATen/core/ATenGeneral.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Scalar.h>
+ #include <ATen/core/UnsafeFromTH.h>
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/jit_type.h>
+ #include <c10/core/Allocator.h>
+ #include <c10/core/InferenceMode.h>
+ #include <c10/core/Layout.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Exception.h>
+
+ // TODO: try to remove this
+ // There is some back story, see https://github.com/pytorch/pytorch/issues/48684
+ #include <ATen/NativeFunctions.h>
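
ATen.h is the umbrella header for the ATen C++ tensor API, so a downstream consumer normally needs only this single include. A minimal sketch of such a consumer (not part of the commit; assumes the usual at:: factory functions and linkage against libtorch):

#include <iostream>
#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::ones({2, 2}, at::kFloat);  // 2x2 tensor of ones
  at::Tensor b = at::randn({2, 2});             // 2x2 standard-normal tensor
  at::Tensor c = a.matmul(b) + 1.0;             // ops come in via Functions.h / TensorOperators.h
  std::cout << c << std::endl;                  // printing comes in via Formatting.h
  return 0;
}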
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/AccumulateType.h ADDED
@@ -0,0 +1,153 @@
+ #pragma once
+ #include <ATen/Config.h>
+ #include <c10/core/DeviceType.h>
+ #include <c10/core/ScalarType.h>
+ #include <c10/util/BFloat16.h>
+ #include <c10/util/Float8_e4m3fn.h>
+ #include <c10/util/Float8_e4m3fnuz.h>
+ #include <c10/util/Float8_e5m2.h>
+ #include <c10/util/Float8_e5m2fnuz.h>
+ #include <c10/util/Half.h>
+
+ // Defines the accumulation type for a scalar type.
+ // Example:
+ // using accscalar_t = acc_type<scalar_t, /*is_cuda*/true>;
+ //
+ // Accumulation types are an important concept in numeric computing
+ // because you frequently want to perform intermediate computations
+ // at a higher precision than the input and output precision, to avoid
+ // compounding internal rounding errors. Accumulation is the most
+ // well-known intermediate computation (it is of great importance for
+ // sum reduction and matrix multiply, for example), but in PyTorch
+ // acc_type ends up getting used for all sorts of other intermediate
+ // computations, so it perhaps would be more accurately (ahem) called an
+ // "accurate" type. acc_type is especially important for reduced
+ // precision operations like float16 and bfloat16, where relatively
+ // benign looking inputs can easily end up overflowing/underflowing.
+ //
+ // acc_type is parametrized by whether or not you are running on CUDA
+ // or not, because on CUDA double precision operations are expensive
+ // and so by default, we don't actually want to use double as an
+ // acc_type on CUDA. A lot of things are typed out below, but
+ // basically, the table is generated by a few rules:
+ //
+ // If bool:
+ // Use 'bool' as acc_type.
+ // If floating point:
+ // If CUDA, use 'float' as acc_type (unless scalar_t is double),
+ // otherwise (CPU) use 'double'
+ // If integral:
+ // Use 'int64_t' as acc_type
+ //
+ // You're not forced to use this template; if you happen to know
+ // something specific about your use case, you can specify your own
+ // desired behavior. This template, however, will give you a reasonable
+ // default that will work for all dtypes supported in PyTorch.
+
+ #if defined(__CUDACC__)
+ #include <cuda.h>
+ #include <cuda_fp16.h>
+ #elif defined(__HIPCC__)
+ #include <hip/hip_fp16.h>
+ #include <hip/hip_runtime.h>
+ #endif
+
+ namespace at {
+
+ template <typename T, c10::DeviceType D>
+ struct AccumulateTypeDevice {};
+
+ template <typename T, bool>
+ struct AccumulateType {};
+
+ template <typename T>
+ struct AccumulateType<T, false> {
+ using type = typename AccumulateTypeDevice<T, c10::DeviceType::CPU>::type;
+ };
+
+ template <typename T>
+ struct AccumulateType<T, true> {
+ using type = typename AccumulateTypeDevice<T, c10::DeviceType::CUDA>::type;
+ };
+
+ template <typename T, c10::DeviceType device>
+ using acc_type_device = typename AccumulateTypeDevice<T, device>::type;
+
+ template <typename T, bool is_cuda>
+ using acc_type = typename AccumulateType<T, is_cuda>::type;
+
+ #define ACC_TYPE(t, acc_t, device_type) \
+ template <> \
+ struct AccumulateTypeDevice<t, device_type> { \
+ using type = acc_t; \
+ };
+ #define MPS_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::MPS)
+ #define CUDA_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CUDA)
+ #define CPU_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CPU)
+
+ MPS_ACC_TYPE(BFloat16, float);
+ MPS_ACC_TYPE(Half, float);
+ MPS_ACC_TYPE(Float8_e5m2, float);
+ MPS_ACC_TYPE(Float8_e4m3fn, float);
+ MPS_ACC_TYPE(Float8_e5m2fnuz, float);
+ MPS_ACC_TYPE(Float8_e4m3fnuz, float);
+ MPS_ACC_TYPE(float, float);
+ MPS_ACC_TYPE(double, float);
+ MPS_ACC_TYPE(int8_t, int64_t);
+ MPS_ACC_TYPE(uint8_t, int64_t);
+ MPS_ACC_TYPE(char, int64_t);
+ MPS_ACC_TYPE(int16_t, int64_t);
+ MPS_ACC_TYPE(int32_t, int64_t);
+ MPS_ACC_TYPE(int64_t, int64_t);
+ MPS_ACC_TYPE(bool, bool);
+ MPS_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
+ MPS_ACC_TYPE(c10::complex<float>, c10::complex<float>);
+ MPS_ACC_TYPE(c10::complex<double>, c10::complex<float>);
+
+ #if defined(__CUDACC__) || defined(__HIPCC__)
+ CUDA_ACC_TYPE(half, float);
+ #endif
+ CUDA_ACC_TYPE(BFloat16, float);
+ CUDA_ACC_TYPE(Half, float);
+ CUDA_ACC_TYPE(Float8_e5m2, float);
+ CUDA_ACC_TYPE(Float8_e4m3fn, float);
+ CUDA_ACC_TYPE(Float8_e5m2fnuz, float);
+ CUDA_ACC_TYPE(Float8_e4m3fnuz, float);
+ CUDA_ACC_TYPE(float, float);
+ CUDA_ACC_TYPE(double, double);
+ CUDA_ACC_TYPE(int8_t, int64_t);
+ CUDA_ACC_TYPE(uint8_t, int64_t);
+ CUDA_ACC_TYPE(char, int64_t);
+ CUDA_ACC_TYPE(int16_t, int64_t);
+ CUDA_ACC_TYPE(int32_t, int64_t);
+ CUDA_ACC_TYPE(int64_t, int64_t);
+ CUDA_ACC_TYPE(bool, bool);
+ CUDA_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
+ CUDA_ACC_TYPE(c10::complex<float>, c10::complex<float>);
+ CUDA_ACC_TYPE(c10::complex<double>, c10::complex<double>);
+
+ CPU_ACC_TYPE(BFloat16, float);
+ CPU_ACC_TYPE(Half, float);
+ CPU_ACC_TYPE(Float8_e5m2, float);
+ CPU_ACC_TYPE(Float8_e4m3fn, float);
+ CPU_ACC_TYPE(Float8_e5m2fnuz, float);
+ CPU_ACC_TYPE(Float8_e4m3fnuz, float);
+ CPU_ACC_TYPE(float, double);
+ CPU_ACC_TYPE(double, double);
+ CPU_ACC_TYPE(int8_t, int64_t);
+ CPU_ACC_TYPE(uint8_t, int64_t);
+ CPU_ACC_TYPE(char, int64_t);
+ CPU_ACC_TYPE(int16_t, int64_t);
+ CPU_ACC_TYPE(int32_t, int64_t);
+ CPU_ACC_TYPE(int64_t, int64_t);
+ CPU_ACC_TYPE(bool, bool);
+ CPU_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
+ CPU_ACC_TYPE(c10::complex<float>, c10::complex<double>);
+ CPU_ACC_TYPE(c10::complex<double>, c10::complex<double>);
+
+ TORCH_API c10::ScalarType toAccumulateType(
+ c10::ScalarType type,
+ c10::DeviceType device);
+ TORCH_API c10::ScalarType toAccumulateType(c10::ScalarType type, bool is_cuda);
+
+ } // namespace at
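
The specialization table above fully determines what acc_type resolves to. A few illustrative compile-time checks, based only on the mappings listed in this header (Half accumulates in float on both devices, float widens to double only on CPU, integral types accumulate in int64_t):

#include <type_traits>
#include <ATen/AccumulateType.h>

static_assert(std::is_same_v<at::acc_type<at::Half, /*is_cuda=*/true>, float>, "");
static_assert(std::is_same_v<at::acc_type<at::Half, /*is_cuda=*/false>, float>, "");
static_assert(std::is_same_v<at::acc_type<float, /*is_cuda=*/true>, float>, "");
static_assert(std::is_same_v<at::acc_type<float, /*is_cuda=*/false>, double>, "");
static_assert(std::is_same_v<at::acc_type<int32_t, /*is_cuda=*/false>, int64_t>, "");
// The runtime helper declared above should agree with the table, e.g.
// at::toAccumulateType(c10::ScalarType::Half, c10::DeviceType::CPU) yielding c10::ScalarType::Float.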
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Backend.h ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ #pragma once
2
+ #include <c10/core/Backend.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFixedAllocator.h ADDED
@@ -0,0 +1,33 @@
+ #pragma once
+
+ #include <c10/core/Allocator.h>
+ #include <c10/util/Exception.h>
+
+ // This file creates a fake allocator that just throws exceptions if
+ // it is actually used.
+
+ // state passed to the allocator is the std::function<void(void*)> called
+ // when the blob is release by ATen
+
+ namespace at {
+
+ static cpu_fixed_malloc(void*, ptrdiff_t) {
+ AT_ERROR("attempting to resize a tensor view of an external blob");
+ }
+
+ static cpu_fixed_realloc(void*, void*, ptrdiff_t) {
+ AT_ERROR("attempting to resize a tensor view of an external blob");
+ }
+
+ static cpu_fixed_free(void* state, void* allocation) {
+ auto on_release = static_cast<std::function<void(void*)>*>(state);
+ (*on_release)(allocation);
+ delete on_release;
+ }
+
+ static Allocator CPU_fixed_allocator = {
+ cpu_fixed_malloc,
+ cpu_fixed_realloc,
+ cpu_fixed_free};
+
+ } // namespace at
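
The "state" mentioned in the comment above is a heap-allocated std::function<void(void*)> that the free hook invokes with the blob pointer and then deletes. A standalone sketch of that release-callback pattern (illustrative only, independent of the legacy Allocator struct in this header):

#include <cstdio>
#include <cstdlib>
#include <functional>

int main() {
  void* external_blob = std::malloc(16);
  // Heap-allocated callback of the kind cpu_fixed_free expects as its state argument.
  auto* on_release = new std::function<void(void*)>([](void* p) {
    std::printf("releasing external blob at %p\n", p);
    std::free(p);
  });
  (*on_release)(external_blob);  // mirrors cpu_fixed_free(state, allocation)
  delete on_release;
  return 0;
}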
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions.h ADDED
@@ -0,0 +1,29 @@
+ #include <ATen/core/TensorBody.h>
+
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
+ //
+ // Note [Avoiding Include Cycles In Static Dispatch]
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+ //
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+ // directly inlined into TensorBody.h.
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+ // which include functions that have defaultable optional<Tensor> arguments.
+ // That requires knowing the full Tensor class definition.
+ //
+ // We break the cycle by doing the following:
+ // - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+ // - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
+ // - CPUFunctions_inl.h includes everything else
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+ // and then it includes CPUFunctions_inl.h.
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+ // - This also means that static dispatch build, CPUFunctions.h only needs to
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+ #include <ATen/CPUFunctions_inl.h>
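
As the note explains, consumers only ever include the {DispatchKey}Functions.h entry point and get the _inl header pulled in in the right order. A hedged sketch of that consumption pattern (assumes at::cpu::add is among the fastpath functions declared through the per-operator headers that CPUFunctions_inl.h includes):

#include <ATen/CPUFunctions.h>  // brings in TensorBody.h and then CPUFunctions_inl.h

at::Tensor add_on_cpu(const at::Tensor& a, const at::Tensor& b) {
  // Calls the CPU kernel through the fastpath API, bypassing the dispatcher.
  return at::cpu::add(a, b);
}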
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUFunctions_inl.h ADDED
@@ -0,0 +1,576 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+ #error This change adds a dependency on all pytorch operators, meaning the \
+ file will need to be re-compiled every time an operator is changed or added. \
+ Consider including a specific operator from \
+ <ATen/ops/{my_operator}_cpu_dispatch.h>. \
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+ #endif
+
19
+ #include <ATen/ops/_adaptive_avg_pool2d_cpu_dispatch.h>
20
+ #include <ATen/ops/_adaptive_avg_pool2d_backward_cpu_dispatch.h>
21
+ #include <ATen/ops/_adaptive_avg_pool3d_cpu_dispatch.h>
22
+ #include <ATen/ops/_adaptive_avg_pool3d_backward_cpu_dispatch.h>
23
+ #include <ATen/ops/_add_relu_cpu_dispatch.h>
24
+ #include <ATen/ops/_addmm_activation_cpu_dispatch.h>
25
+ #include <ATen/ops/_aminmax_cpu_dispatch.h>
26
+ #include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_cpu_dispatch.h>
27
+ #include <ATen/ops/_amp_update_scale_cpu_dispatch.h>
28
+ #include <ATen/ops/_assert_async_cpu_dispatch.h>
29
+ #include <ATen/ops/_cdist_backward_cpu_dispatch.h>
30
+ #include <ATen/ops/_cdist_forward_cpu_dispatch.h>
31
+ #include <ATen/ops/_cholesky_solve_helper_cpu_dispatch.h>
32
+ #include <ATen/ops/_compute_linear_combination_cpu_dispatch.h>
33
+ #include <ATen/ops/_convert_indices_from_coo_to_csr_cpu_dispatch.h>
34
+ #include <ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h>
35
+ #include <ATen/ops/_convert_weight_to_int4pack_cpu_dispatch.h>
36
+ #include <ATen/ops/_ctc_loss_cpu_dispatch.h>
37
+ #include <ATen/ops/_ctc_loss_backward_cpu_dispatch.h>
38
+ #include <ATen/ops/_cummax_helper_cpu_dispatch.h>
39
+ #include <ATen/ops/_cummin_helper_cpu_dispatch.h>
40
+ #include <ATen/ops/_dirichlet_grad_cpu_dispatch.h>
41
+ #include <ATen/ops/_efficientzerotensor_cpu_dispatch.h>
42
+ #include <ATen/ops/_embedding_bag_cpu_dispatch.h>
43
+ #include <ATen/ops/_embedding_bag_dense_backward_cpu_dispatch.h>
44
+ #include <ATen/ops/_embedding_bag_forward_only_cpu_dispatch.h>
45
+ #include <ATen/ops/_embedding_bag_per_sample_weights_backward_cpu_dispatch.h>
46
+ #include <ATen/ops/_empty_affine_quantized_cpu_dispatch.h>
47
+ #include <ATen/ops/_empty_per_channel_affine_quantized_cpu_dispatch.h>
48
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_cpu_dispatch.h>
49
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_cpu_dispatch.h>
50
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_cpu_dispatch.h>
51
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_cpu_dispatch.h>
52
+ #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_cpu_dispatch.h>
53
+ #include <ATen/ops/_fft_c2c_cpu_dispatch.h>
54
+ #include <ATen/ops/_fft_c2r_cpu_dispatch.h>
55
+ #include <ATen/ops/_fft_r2c_cpu_dispatch.h>
56
+ #include <ATen/ops/_foobar_cpu_dispatch.h>
57
+ #include <ATen/ops/_foreach_abs_cpu_dispatch.h>
58
+ #include <ATen/ops/_foreach_acos_cpu_dispatch.h>
59
+ #include <ATen/ops/_foreach_add_cpu_dispatch.h>
60
+ #include <ATen/ops/_foreach_addcdiv_cpu_dispatch.h>
61
+ #include <ATen/ops/_foreach_addcmul_cpu_dispatch.h>
62
+ #include <ATen/ops/_foreach_asin_cpu_dispatch.h>
63
+ #include <ATen/ops/_foreach_atan_cpu_dispatch.h>
64
+ #include <ATen/ops/_foreach_ceil_cpu_dispatch.h>
65
+ #include <ATen/ops/_foreach_clamp_max_cpu_dispatch.h>
66
+ #include <ATen/ops/_foreach_clamp_min_cpu_dispatch.h>
67
+ #include <ATen/ops/_foreach_copy_cpu_dispatch.h>
68
+ #include <ATen/ops/_foreach_cos_cpu_dispatch.h>
69
+ #include <ATen/ops/_foreach_cosh_cpu_dispatch.h>
70
+ #include <ATen/ops/_foreach_div_cpu_dispatch.h>
71
+ #include <ATen/ops/_foreach_erf_cpu_dispatch.h>
72
+ #include <ATen/ops/_foreach_erfc_cpu_dispatch.h>
73
+ #include <ATen/ops/_foreach_exp_cpu_dispatch.h>
74
+ #include <ATen/ops/_foreach_expm1_cpu_dispatch.h>
75
+ #include <ATen/ops/_foreach_floor_cpu_dispatch.h>
76
+ #include <ATen/ops/_foreach_frac_cpu_dispatch.h>
77
+ #include <ATen/ops/_foreach_lerp_cpu_dispatch.h>
78
+ #include <ATen/ops/_foreach_lgamma_cpu_dispatch.h>
79
+ #include <ATen/ops/_foreach_log_cpu_dispatch.h>
80
+ #include <ATen/ops/_foreach_log10_cpu_dispatch.h>
81
+ #include <ATen/ops/_foreach_log1p_cpu_dispatch.h>
82
+ #include <ATen/ops/_foreach_log2_cpu_dispatch.h>
83
+ #include <ATen/ops/_foreach_maximum_cpu_dispatch.h>
84
+ #include <ATen/ops/_foreach_minimum_cpu_dispatch.h>
85
+ #include <ATen/ops/_foreach_mul_cpu_dispatch.h>
86
+ #include <ATen/ops/_foreach_neg_cpu_dispatch.h>
87
+ #include <ATen/ops/_foreach_norm_cpu_dispatch.h>
88
+ #include <ATen/ops/_foreach_pow_cpu_dispatch.h>
89
+ #include <ATen/ops/_foreach_reciprocal_cpu_dispatch.h>
90
+ #include <ATen/ops/_foreach_round_cpu_dispatch.h>
91
+ #include <ATen/ops/_foreach_sigmoid_cpu_dispatch.h>
92
+ #include <ATen/ops/_foreach_sign_cpu_dispatch.h>
93
+ #include <ATen/ops/_foreach_sin_cpu_dispatch.h>
94
+ #include <ATen/ops/_foreach_sinh_cpu_dispatch.h>
95
+ #include <ATen/ops/_foreach_sqrt_cpu_dispatch.h>
96
+ #include <ATen/ops/_foreach_sub_cpu_dispatch.h>
97
+ #include <ATen/ops/_foreach_tan_cpu_dispatch.h>
98
+ #include <ATen/ops/_foreach_tanh_cpu_dispatch.h>
99
+ #include <ATen/ops/_foreach_trunc_cpu_dispatch.h>
100
+ #include <ATen/ops/_foreach_zero_cpu_dispatch.h>
101
+ #include <ATen/ops/_functional_assert_async_cpu_dispatch.h>
102
+ #include <ATen/ops/_fused_moving_avg_obs_fq_helper_cpu_dispatch.h>
103
+ #include <ATen/ops/_fused_sdp_choice_cpu_dispatch.h>
104
+ #include <ATen/ops/_histogramdd_bin_edges_cpu_dispatch.h>
105
+ #include <ATen/ops/_histogramdd_from_bin_cts_cpu_dispatch.h>
106
+ #include <ATen/ops/_histogramdd_from_bin_tensors_cpu_dispatch.h>
107
+ #include <ATen/ops/_index_put_impl_cpu_dispatch.h>
108
+ #include <ATen/ops/_linalg_det_cpu_dispatch.h>
109
+ #include <ATen/ops/_linalg_eigh_cpu_dispatch.h>
110
+ #include <ATen/ops/_linalg_eigvals_cpu_dispatch.h>
111
+ #include <ATen/ops/_linalg_slogdet_cpu_dispatch.h>
112
+ #include <ATen/ops/_linalg_solve_ex_cpu_dispatch.h>
113
+ #include <ATen/ops/_linalg_svd_cpu_dispatch.h>
114
+ #include <ATen/ops/_local_scalar_dense_cpu_dispatch.h>
115
+ #include <ATen/ops/_log_softmax_cpu_dispatch.h>
116
+ #include <ATen/ops/_log_softmax_backward_data_cpu_dispatch.h>
117
+ #include <ATen/ops/_logcumsumexp_cpu_dispatch.h>
118
+ #include <ATen/ops/_make_dep_token_cpu_dispatch.h>
119
+ #include <ATen/ops/_make_per_channel_quantized_tensor_cpu_dispatch.h>
120
+ #include <ATen/ops/_make_per_tensor_quantized_tensor_cpu_dispatch.h>
121
+ #include <ATen/ops/_masked_softmax_cpu_dispatch.h>
122
+ #include <ATen/ops/_masked_softmax_backward_cpu_dispatch.h>
123
+ #include <ATen/ops/_native_batch_norm_legit_cpu_dispatch.h>
124
+ #include <ATen/ops/_native_multi_head_attention_cpu_dispatch.h>
125
+ #include <ATen/ops/_nested_from_padded_cpu_dispatch.h>
126
+ #include <ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h>
127
+ #include <ATen/ops/_nested_tensor_from_mask_left_aligned_cpu_dispatch.h>
128
+ #include <ATen/ops/_nested_view_from_buffer_cpu_dispatch.h>
129
+ #include <ATen/ops/_pdist_backward_cpu_dispatch.h>
130
+ #include <ATen/ops/_pdist_forward_cpu_dispatch.h>
131
+ #include <ATen/ops/_prelu_kernel_cpu_dispatch.h>
132
+ #include <ATen/ops/_prelu_kernel_backward_cpu_dispatch.h>
133
+ #include <ATen/ops/_reshape_alias_cpu_dispatch.h>
134
+ #include <ATen/ops/_sample_dirichlet_cpu_dispatch.h>
135
+ #include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_cpu_dispatch.h>
136
+ #include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_cpu_dispatch.h>
137
+ #include <ATen/ops/_segment_reduce_backward_cpu_dispatch.h>
138
+ #include <ATen/ops/_slow_conv2d_backward_cpu_dispatch.h>
139
+ #include <ATen/ops/_slow_conv2d_forward_cpu_dispatch.h>
140
+ #include <ATen/ops/_softmax_cpu_dispatch.h>
141
+ #include <ATen/ops/_softmax_backward_data_cpu_dispatch.h>
142
+ #include <ATen/ops/_spdiags_cpu_dispatch.h>
143
+ #include <ATen/ops/_stack_cpu_dispatch.h>
144
+ #include <ATen/ops/_standard_gamma_cpu_dispatch.h>
145
+ #include <ATen/ops/_standard_gamma_grad_cpu_dispatch.h>
146
+ #include <ATen/ops/_test_functorch_fallback_cpu_dispatch.h>
147
+ #include <ATen/ops/_test_optional_filled_intlist_cpu_dispatch.h>
148
+ #include <ATen/ops/_test_optional_floatlist_cpu_dispatch.h>
149
+ #include <ATen/ops/_test_optional_intlist_cpu_dispatch.h>
150
+ #include <ATen/ops/_to_sparse_cpu_dispatch.h>
151
+ #include <ATen/ops/_to_sparse_bsc_cpu_dispatch.h>
152
+ #include <ATen/ops/_to_sparse_bsr_cpu_dispatch.h>
153
+ #include <ATen/ops/_to_sparse_csc_cpu_dispatch.h>
154
+ #include <ATen/ops/_to_sparse_csr_cpu_dispatch.h>
155
+ #include <ATen/ops/_transform_bias_rescale_qkv_cpu_dispatch.h>
156
+ #include <ATen/ops/_transformer_encoder_layer_fwd_cpu_dispatch.h>
157
+ #include <ATen/ops/_unique_cpu_dispatch.h>
158
+ #include <ATen/ops/_unique2_cpu_dispatch.h>
159
+ #include <ATen/ops/_upsample_bicubic2d_aa_cpu_dispatch.h>
160
+ #include <ATen/ops/_upsample_bicubic2d_aa_backward_cpu_dispatch.h>
161
+ #include <ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h>
162
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward_cpu_dispatch.h>
163
+ #include <ATen/ops/_upsample_nearest_exact1d_cpu_dispatch.h>
164
+ #include <ATen/ops/_upsample_nearest_exact1d_backward_cpu_dispatch.h>
165
+ #include <ATen/ops/_upsample_nearest_exact2d_cpu_dispatch.h>
166
+ #include <ATen/ops/_upsample_nearest_exact2d_backward_cpu_dispatch.h>
167
+ #include <ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h>
168
+ #include <ATen/ops/_upsample_nearest_exact3d_backward_cpu_dispatch.h>
169
+ #include <ATen/ops/_validate_compressed_sparse_indices_cpu_dispatch.h>
170
+ #include <ATen/ops/_weight_int4pack_mm_cpu_dispatch.h>
171
+ #include <ATen/ops/_weight_int8pack_mm_cpu_dispatch.h>
172
+ #include <ATen/ops/_weight_norm_interface_cpu_dispatch.h>
173
+ #include <ATen/ops/_weight_norm_interface_backward_cpu_dispatch.h>
174
+ #include <ATen/ops/abs_cpu_dispatch.h>
175
+ #include <ATen/ops/acos_cpu_dispatch.h>
176
+ #include <ATen/ops/acosh_cpu_dispatch.h>
177
+ #include <ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h>
178
+ #include <ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h>
179
+ #include <ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h>
180
+ #include <ATen/ops/adaptive_max_pool2d_cpu_dispatch.h>
181
+ #include <ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h>
182
+ #include <ATen/ops/adaptive_max_pool3d_cpu_dispatch.h>
183
+ #include <ATen/ops/adaptive_max_pool3d_backward_cpu_dispatch.h>
184
+ #include <ATen/ops/add_cpu_dispatch.h>
185
+ #include <ATen/ops/addbmm_cpu_dispatch.h>
186
+ #include <ATen/ops/addcdiv_cpu_dispatch.h>
187
+ #include <ATen/ops/addcmul_cpu_dispatch.h>
188
+ #include <ATen/ops/addmm_cpu_dispatch.h>
189
+ #include <ATen/ops/addmv_cpu_dispatch.h>
190
+ #include <ATen/ops/addr_cpu_dispatch.h>
191
+ #include <ATen/ops/all_cpu_dispatch.h>
192
+ #include <ATen/ops/amax_cpu_dispatch.h>
193
+ #include <ATen/ops/amin_cpu_dispatch.h>
194
+ #include <ATen/ops/aminmax_cpu_dispatch.h>
195
+ #include <ATen/ops/angle_cpu_dispatch.h>
196
+ #include <ATen/ops/any_cpu_dispatch.h>
197
+ #include <ATen/ops/arange_cpu_dispatch.h>
198
+ #include <ATen/ops/argmax_cpu_dispatch.h>
199
+ #include <ATen/ops/argmin_cpu_dispatch.h>
200
+ #include <ATen/ops/argsort_cpu_dispatch.h>
201
+ #include <ATen/ops/as_strided_cpu_dispatch.h>
202
+ #include <ATen/ops/asin_cpu_dispatch.h>
203
+ #include <ATen/ops/asinh_cpu_dispatch.h>
204
+ #include <ATen/ops/atan_cpu_dispatch.h>
205
+ #include <ATen/ops/atan2_cpu_dispatch.h>
206
+ #include <ATen/ops/atanh_cpu_dispatch.h>
207
+ #include <ATen/ops/avg_pool2d_cpu_dispatch.h>
208
+ #include <ATen/ops/avg_pool2d_backward_cpu_dispatch.h>
209
+ #include <ATen/ops/avg_pool3d_cpu_dispatch.h>
210
+ #include <ATen/ops/avg_pool3d_backward_cpu_dispatch.h>
211
+ #include <ATen/ops/baddbmm_cpu_dispatch.h>
212
+ #include <ATen/ops/batch_norm_update_stats_cpu_dispatch.h>
213
+ #include <ATen/ops/bernoulli_cpu_dispatch.h>
214
+ #include <ATen/ops/binary_cross_entropy_cpu_dispatch.h>
215
+ #include <ATen/ops/binary_cross_entropy_backward_cpu_dispatch.h>
216
+ #include <ATen/ops/bincount_cpu_dispatch.h>
217
+ #include <ATen/ops/binomial_cpu_dispatch.h>
218
+ #include <ATen/ops/bitwise_and_cpu_dispatch.h>
219
+ #include <ATen/ops/bitwise_left_shift_cpu_dispatch.h>
220
+ #include <ATen/ops/bitwise_not_cpu_dispatch.h>
221
+ #include <ATen/ops/bitwise_or_cpu_dispatch.h>
222
+ #include <ATen/ops/bitwise_right_shift_cpu_dispatch.h>
223
+ #include <ATen/ops/bitwise_xor_cpu_dispatch.h>
224
+ #include <ATen/ops/bmm_cpu_dispatch.h>
225
+ #include <ATen/ops/bucketize_cpu_dispatch.h>
226
+ #include <ATen/ops/cat_cpu_dispatch.h>
227
+ #include <ATen/ops/cauchy_cpu_dispatch.h>
228
+ #include <ATen/ops/ceil_cpu_dispatch.h>
229
+ #include <ATen/ops/channel_shuffle_cpu_dispatch.h>
230
+ #include <ATen/ops/cholesky_cpu_dispatch.h>
231
+ #include <ATen/ops/cholesky_inverse_cpu_dispatch.h>
232
+ #include <ATen/ops/clamp_cpu_dispatch.h>
233
+ #include <ATen/ops/clamp_max_cpu_dispatch.h>
234
+ #include <ATen/ops/clamp_min_cpu_dispatch.h>
235
+ #include <ATen/ops/col2im_cpu_dispatch.h>
236
+ #include <ATen/ops/complex_cpu_dispatch.h>
237
+ #include <ATen/ops/conj_physical_cpu_dispatch.h>
238
+ #include <ATen/ops/copysign_cpu_dispatch.h>
239
+ #include <ATen/ops/cos_cpu_dispatch.h>
240
+ #include <ATen/ops/cosh_cpu_dispatch.h>
241
+ #include <ATen/ops/count_nonzero_cpu_dispatch.h>
242
+ #include <ATen/ops/cumprod_cpu_dispatch.h>
243
+ #include <ATen/ops/cumsum_cpu_dispatch.h>
244
+ #include <ATen/ops/dense_dim_cpu_dispatch.h>
245
+ #include <ATen/ops/dequantize_cpu_dispatch.h>
246
+ #include <ATen/ops/digamma_cpu_dispatch.h>
247
+ #include <ATen/ops/div_cpu_dispatch.h>
248
+ #include <ATen/ops/dot_cpu_dispatch.h>
249
+ #include <ATen/ops/elu_cpu_dispatch.h>
250
+ #include <ATen/ops/elu_backward_cpu_dispatch.h>
251
+ #include <ATen/ops/embedding_dense_backward_cpu_dispatch.h>
252
+ #include <ATen/ops/embedding_renorm_cpu_dispatch.h>
253
+ #include <ATen/ops/empty_cpu_dispatch.h>
254
+ #include <ATen/ops/empty_strided_cpu_dispatch.h>
255
+ #include <ATen/ops/eq_cpu_dispatch.h>
256
+ #include <ATen/ops/equal_cpu_dispatch.h>
257
+ #include <ATen/ops/erf_cpu_dispatch.h>
258
+ #include <ATen/ops/erfc_cpu_dispatch.h>
259
+ #include <ATen/ops/erfinv_cpu_dispatch.h>
260
+ #include <ATen/ops/exp_cpu_dispatch.h>
261
+ #include <ATen/ops/exp2_cpu_dispatch.h>
262
+ #include <ATen/ops/expm1_cpu_dispatch.h>
263
+ #include <ATen/ops/exponential_cpu_dispatch.h>
264
+ #include <ATen/ops/eye_cpu_dispatch.h>
265
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_cpu_dispatch.h>
266
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h>
267
+ #include <ATen/ops/fill_cpu_dispatch.h>
268
+ #include <ATen/ops/flip_cpu_dispatch.h>
269
+ #include <ATen/ops/floor_cpu_dispatch.h>
270
+ #include <ATen/ops/floor_divide_cpu_dispatch.h>
271
+ #include <ATen/ops/fmax_cpu_dispatch.h>
272
+ #include <ATen/ops/fmin_cpu_dispatch.h>
273
+ #include <ATen/ops/fmod_cpu_dispatch.h>
274
+ #include <ATen/ops/frac_cpu_dispatch.h>
275
+ #include <ATen/ops/fractional_max_pool2d_cpu_dispatch.h>
276
+ #include <ATen/ops/fractional_max_pool2d_backward_cpu_dispatch.h>
277
+ #include <ATen/ops/fractional_max_pool3d_cpu_dispatch.h>
278
+ #include <ATen/ops/fractional_max_pool3d_backward_cpu_dispatch.h>
279
+ #include <ATen/ops/frexp_cpu_dispatch.h>
280
+ #include <ATen/ops/from_file_cpu_dispatch.h>
281
+ #include <ATen/ops/gather_cpu_dispatch.h>
282
+ #include <ATen/ops/gcd_cpu_dispatch.h>
283
+ #include <ATen/ops/ge_cpu_dispatch.h>
284
+ #include <ATen/ops/gelu_cpu_dispatch.h>
285
+ #include <ATen/ops/gelu_backward_cpu_dispatch.h>
286
+ #include <ATen/ops/geometric_cpu_dispatch.h>
287
+ #include <ATen/ops/geqrf_cpu_dispatch.h>
288
+ #include <ATen/ops/glu_cpu_dispatch.h>
289
+ #include <ATen/ops/glu_backward_cpu_dispatch.h>
290
+ #include <ATen/ops/glu_backward_jvp_cpu_dispatch.h>
291
+ #include <ATen/ops/glu_jvp_cpu_dispatch.h>
292
+ #include <ATen/ops/grid_sampler_2d_cpu_dispatch.h>
293
+ #include <ATen/ops/grid_sampler_2d_backward_cpu_dispatch.h>
294
+ #include <ATen/ops/grid_sampler_3d_cpu_dispatch.h>
295
+ #include <ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h>
296
+ #include <ATen/ops/gt_cpu_dispatch.h>
297
+ #include <ATen/ops/hardshrink_cpu_dispatch.h>
298
+ #include <ATen/ops/hardshrink_backward_cpu_dispatch.h>
299
+ #include <ATen/ops/hardsigmoid_cpu_dispatch.h>
300
+ #include <ATen/ops/hardsigmoid_backward_cpu_dispatch.h>
301
+ #include <ATen/ops/hardswish_cpu_dispatch.h>
302
+ #include <ATen/ops/hardswish_backward_cpu_dispatch.h>
303
+ #include <ATen/ops/hardtanh_cpu_dispatch.h>
304
+ #include <ATen/ops/hardtanh_backward_cpu_dispatch.h>
305
+ #include <ATen/ops/heaviside_cpu_dispatch.h>
306
+ #include <ATen/ops/histc_cpu_dispatch.h>
307
+ #include <ATen/ops/histogram_cpu_dispatch.h>
308
+ #include <ATen/ops/huber_loss_cpu_dispatch.h>
309
+ #include <ATen/ops/huber_loss_backward_cpu_dispatch.h>
310
+ #include <ATen/ops/hypot_cpu_dispatch.h>
311
+ #include <ATen/ops/i0_cpu_dispatch.h>
312
+ #include <ATen/ops/igamma_cpu_dispatch.h>
313
+ #include <ATen/ops/igammac_cpu_dispatch.h>
314
+ #include <ATen/ops/im2col_cpu_dispatch.h>
315
+ #include <ATen/ops/index_cpu_dispatch.h>
316
+ #include <ATen/ops/index_add_cpu_dispatch.h>
317
+ #include <ATen/ops/index_copy_cpu_dispatch.h>
318
+ #include <ATen/ops/index_fill_cpu_dispatch.h>
319
+ #include <ATen/ops/index_reduce_cpu_dispatch.h>
320
+ #include <ATen/ops/index_select_cpu_dispatch.h>
321
+ #include <ATen/ops/is_set_to_cpu_dispatch.h>
322
+ #include <ATen/ops/isin_cpu_dispatch.h>
323
+ #include <ATen/ops/isnan_cpu_dispatch.h>
324
+ #include <ATen/ops/isneginf_cpu_dispatch.h>
325
+ #include <ATen/ops/isposinf_cpu_dispatch.h>
326
+ #include <ATen/ops/kthvalue_cpu_dispatch.h>
327
+ #include <ATen/ops/lcm_cpu_dispatch.h>
328
+ #include <ATen/ops/le_cpu_dispatch.h>
329
+ #include <ATen/ops/leaky_relu_cpu_dispatch.h>
330
+ #include <ATen/ops/leaky_relu_backward_cpu_dispatch.h>
331
+ #include <ATen/ops/lerp_cpu_dispatch.h>
332
+ #include <ATen/ops/lgamma_cpu_dispatch.h>
333
+ #include <ATen/ops/linalg_cholesky_ex_cpu_dispatch.h>
334
+ #include <ATen/ops/linalg_cross_cpu_dispatch.h>
335
+ #include <ATen/ops/linalg_eig_cpu_dispatch.h>
336
+ #include <ATen/ops/linalg_eigvals_cpu_dispatch.h>
337
+ #include <ATen/ops/linalg_householder_product_cpu_dispatch.h>
338
+ #include <ATen/ops/linalg_inv_ex_cpu_dispatch.h>
339
+ #include <ATen/ops/linalg_ldl_factor_ex_cpu_dispatch.h>
340
+ #include <ATen/ops/linalg_ldl_solve_cpu_dispatch.h>
341
+ #include <ATen/ops/linalg_lstsq_cpu_dispatch.h>
342
+ #include <ATen/ops/linalg_lu_cpu_dispatch.h>
343
+ #include <ATen/ops/linalg_lu_factor_ex_cpu_dispatch.h>
344
+ #include <ATen/ops/linalg_lu_solve_cpu_dispatch.h>
345
+ #include <ATen/ops/linalg_matrix_exp_cpu_dispatch.h>
346
+ #include <ATen/ops/linalg_qr_cpu_dispatch.h>
347
+ #include <ATen/ops/linalg_solve_triangular_cpu_dispatch.h>
348
+ #include <ATen/ops/linalg_vector_norm_cpu_dispatch.h>
349
+ #include <ATen/ops/linspace_cpu_dispatch.h>
350
+ #include <ATen/ops/log_cpu_dispatch.h>
351
+ #include <ATen/ops/log10_cpu_dispatch.h>
352
+ #include <ATen/ops/log1p_cpu_dispatch.h>
353
+ #include <ATen/ops/log2_cpu_dispatch.h>
354
+ #include <ATen/ops/log_normal_cpu_dispatch.h>
355
+ #include <ATen/ops/log_sigmoid_backward_cpu_dispatch.h>
356
+ #include <ATen/ops/log_sigmoid_forward_cpu_dispatch.h>
357
+ #include <ATen/ops/logaddexp_cpu_dispatch.h>
358
+ #include <ATen/ops/logaddexp2_cpu_dispatch.h>
359
+ #include <ATen/ops/logical_and_cpu_dispatch.h>
360
+ #include <ATen/ops/logical_not_cpu_dispatch.h>
361
+ #include <ATen/ops/logical_or_cpu_dispatch.h>
362
+ #include <ATen/ops/logical_xor_cpu_dispatch.h>
363
+ #include <ATen/ops/logit_cpu_dispatch.h>
364
+ #include <ATen/ops/logit_backward_cpu_dispatch.h>
365
+ #include <ATen/ops/logspace_cpu_dispatch.h>
366
+ #include <ATen/ops/lshift_cpu_dispatch.h>
367
+ #include <ATen/ops/lt_cpu_dispatch.h>
368
+ #include <ATen/ops/lu_unpack_cpu_dispatch.h>
369
+ #include <ATen/ops/masked_fill_cpu_dispatch.h>
370
+ #include <ATen/ops/masked_scatter_cpu_dispatch.h>
371
+ #include <ATen/ops/masked_select_cpu_dispatch.h>
372
+ #include <ATen/ops/max_cpu_dispatch.h>
373
+ #include <ATen/ops/max_pool2d_with_indices_cpu_dispatch.h>
374
+ #include <ATen/ops/max_pool2d_with_indices_backward_cpu_dispatch.h>
375
+ #include <ATen/ops/max_pool3d_with_indices_cpu_dispatch.h>
376
+ #include <ATen/ops/max_pool3d_with_indices_backward_cpu_dispatch.h>
377
+ #include <ATen/ops/max_unpool2d_cpu_dispatch.h>
378
+ #include <ATen/ops/max_unpool3d_cpu_dispatch.h>
379
+ #include <ATen/ops/maximum_cpu_dispatch.h>
380
+ #include <ATen/ops/mean_cpu_dispatch.h>
381
+ #include <ATen/ops/median_cpu_dispatch.h>
382
+ #include <ATen/ops/min_cpu_dispatch.h>
383
+ #include <ATen/ops/minimum_cpu_dispatch.h>
384
+ #include <ATen/ops/mish_cpu_dispatch.h>
385
+ #include <ATen/ops/mish_backward_cpu_dispatch.h>
386
+ #include <ATen/ops/mkldnn_rnn_layer_cpu_dispatch.h>
387
+ #include <ATen/ops/mkldnn_rnn_layer_backward_cpu_dispatch.h>
388
+ #include <ATen/ops/mm_cpu_dispatch.h>
389
+ #include <ATen/ops/mode_cpu_dispatch.h>
390
+ #include <ATen/ops/mse_loss_cpu_dispatch.h>
391
+ #include <ATen/ops/mse_loss_backward_cpu_dispatch.h>
392
+ #include <ATen/ops/mul_cpu_dispatch.h>
393
+ #include <ATen/ops/multi_margin_loss_cpu_dispatch.h>
394
+ #include <ATen/ops/multi_margin_loss_backward_cpu_dispatch.h>
395
+ #include <ATen/ops/multilabel_margin_loss_backward_cpu_dispatch.h>
396
+ #include <ATen/ops/multilabel_margin_loss_forward_cpu_dispatch.h>
397
+ #include <ATen/ops/multinomial_cpu_dispatch.h>
398
+ #include <ATen/ops/mvlgamma_cpu_dispatch.h>
399
+ #include <ATen/ops/nan_to_num_cpu_dispatch.h>
400
+ #include <ATen/ops/nanmedian_cpu_dispatch.h>
401
+ #include <ATen/ops/nansum_cpu_dispatch.h>
402
+ #include <ATen/ops/narrow_copy_cpu_dispatch.h>
403
+ #include <ATen/ops/native_batch_norm_cpu_dispatch.h>
404
+ #include <ATen/ops/native_batch_norm_backward_cpu_dispatch.h>
405
+ #include <ATen/ops/native_channel_shuffle_cpu_dispatch.h>
406
+ #include <ATen/ops/native_dropout_cpu_dispatch.h>
407
+ #include <ATen/ops/native_dropout_backward_cpu_dispatch.h>
408
+ #include <ATen/ops/native_group_norm_cpu_dispatch.h>
409
+ #include <ATen/ops/native_group_norm_backward_cpu_dispatch.h>
410
+ #include <ATen/ops/native_layer_norm_cpu_dispatch.h>
411
+ #include <ATen/ops/native_layer_norm_backward_cpu_dispatch.h>
412
+ #include <ATen/ops/ne_cpu_dispatch.h>
413
+ #include <ATen/ops/neg_cpu_dispatch.h>
414
+ #include <ATen/ops/nextafter_cpu_dispatch.h>
415
+ #include <ATen/ops/nll_loss2d_backward_cpu_dispatch.h>
416
+ #include <ATen/ops/nll_loss2d_forward_cpu_dispatch.h>
417
+ #include <ATen/ops/nll_loss_backward_cpu_dispatch.h>
418
+ #include <ATen/ops/nll_loss_forward_cpu_dispatch.h>
419
+ #include <ATen/ops/nonzero_cpu_dispatch.h>
420
+ #include <ATen/ops/nonzero_static_cpu_dispatch.h>
421
+ #include <ATen/ops/norm_cpu_dispatch.h>
422
+ #include <ATen/ops/normal_cpu_dispatch.h>
423
+ #include <ATen/ops/ormqr_cpu_dispatch.h>
424
+ #include <ATen/ops/pixel_shuffle_cpu_dispatch.h>
425
+ #include <ATen/ops/pixel_unshuffle_cpu_dispatch.h>
426
+ #include <ATen/ops/poisson_cpu_dispatch.h>
427
+ #include <ATen/ops/polar_cpu_dispatch.h>
428
+ #include <ATen/ops/polygamma_cpu_dispatch.h>
429
+ #include <ATen/ops/pow_cpu_dispatch.h>
430
+ #include <ATen/ops/prod_cpu_dispatch.h>
431
+ #include <ATen/ops/put_cpu_dispatch.h>
432
+ #include <ATen/ops/quantize_per_channel_cpu_dispatch.h>
433
+ #include <ATen/ops/quantize_per_tensor_cpu_dispatch.h>
434
+ #include <ATen/ops/quantize_per_tensor_dynamic_cpu_dispatch.h>
435
+ #include <ATen/ops/random_cpu_dispatch.h>
436
+ #include <ATen/ops/randperm_cpu_dispatch.h>
437
+ #include <ATen/ops/range_cpu_dispatch.h>
438
+ #include <ATen/ops/reciprocal_cpu_dispatch.h>
439
+ #include <ATen/ops/reflection_pad1d_cpu_dispatch.h>
440
+ #include <ATen/ops/reflection_pad1d_backward_cpu_dispatch.h>
441
+ #include <ATen/ops/reflection_pad2d_cpu_dispatch.h>
442
+ #include <ATen/ops/reflection_pad2d_backward_cpu_dispatch.h>
443
+ #include <ATen/ops/reflection_pad3d_cpu_dispatch.h>
444
+ #include <ATen/ops/reflection_pad3d_backward_cpu_dispatch.h>
445
+ #include <ATen/ops/relu_cpu_dispatch.h>
446
+ #include <ATen/ops/remainder_cpu_dispatch.h>
447
+ #include <ATen/ops/renorm_cpu_dispatch.h>
448
+ #include <ATen/ops/repeat_interleave_cpu_dispatch.h>
449
+ #include <ATen/ops/replication_pad1d_cpu_dispatch.h>
450
+ #include <ATen/ops/replication_pad1d_backward_cpu_dispatch.h>
451
+ #include <ATen/ops/replication_pad2d_cpu_dispatch.h>
452
+ #include <ATen/ops/replication_pad2d_backward_cpu_dispatch.h>
453
+ #include <ATen/ops/replication_pad3d_cpu_dispatch.h>
454
+ #include <ATen/ops/replication_pad3d_backward_cpu_dispatch.h>
455
+ #include <ATen/ops/resize_cpu_dispatch.h>
456
+ #include <ATen/ops/roll_cpu_dispatch.h>
457
+ #include <ATen/ops/round_cpu_dispatch.h>
458
+ #include <ATen/ops/rrelu_with_noise_cpu_dispatch.h>
459
+ #include <ATen/ops/rshift_cpu_dispatch.h>
460
+ #include <ATen/ops/rsqrt_cpu_dispatch.h>
461
+ #include <ATen/ops/rsub_cpu_dispatch.h>
462
+ #include <ATen/ops/scatter_cpu_dispatch.h>
463
+ #include <ATen/ops/scatter_add_cpu_dispatch.h>
464
+ #include <ATen/ops/scatter_reduce_cpu_dispatch.h>
465
+ #include <ATen/ops/searchsorted_cpu_dispatch.h>
466
+ #include <ATen/ops/segment_reduce_cpu_dispatch.h>
467
+ #include <ATen/ops/set_cpu_dispatch.h>
468
+ #include <ATen/ops/sgn_cpu_dispatch.h>
469
+ #include <ATen/ops/sigmoid_cpu_dispatch.h>
470
+ #include <ATen/ops/sigmoid_backward_cpu_dispatch.h>
471
+ #include <ATen/ops/sign_cpu_dispatch.h>
472
+ #include <ATen/ops/signbit_cpu_dispatch.h>
473
+ #include <ATen/ops/silu_cpu_dispatch.h>
474
+ #include <ATen/ops/silu_backward_cpu_dispatch.h>
475
+ #include <ATen/ops/sin_cpu_dispatch.h>
476
+ #include <ATen/ops/sinc_cpu_dispatch.h>
477
+ #include <ATen/ops/sinh_cpu_dispatch.h>
478
+ #include <ATen/ops/slow_conv3d_forward_cpu_dispatch.h>
479
+ #include <ATen/ops/slow_conv_dilated2d_cpu_dispatch.h>
480
+ #include <ATen/ops/slow_conv_dilated3d_cpu_dispatch.h>
481
+ #include <ATen/ops/slow_conv_transpose2d_cpu_dispatch.h>
482
+ #include <ATen/ops/slow_conv_transpose3d_cpu_dispatch.h>
483
+ #include <ATen/ops/smooth_l1_loss_cpu_dispatch.h>
484
+ #include <ATen/ops/smooth_l1_loss_backward_cpu_dispatch.h>
485
+ #include <ATen/ops/softplus_cpu_dispatch.h>
486
+ #include <ATen/ops/softplus_backward_cpu_dispatch.h>
487
+ #include <ATen/ops/softshrink_cpu_dispatch.h>
488
+ #include <ATen/ops/softshrink_backward_cpu_dispatch.h>
489
+ #include <ATen/ops/sort_cpu_dispatch.h>
490
+ #include <ATen/ops/sparse_dim_cpu_dispatch.h>
491
+ #include <ATen/ops/special_airy_ai_cpu_dispatch.h>
492
+ #include <ATen/ops/special_bessel_j0_cpu_dispatch.h>
493
+ #include <ATen/ops/special_bessel_j1_cpu_dispatch.h>
494
+ #include <ATen/ops/special_bessel_y0_cpu_dispatch.h>
495
+ #include <ATen/ops/special_bessel_y1_cpu_dispatch.h>
496
+ #include <ATen/ops/special_chebyshev_polynomial_t_cpu_dispatch.h>
497
+ #include <ATen/ops/special_chebyshev_polynomial_u_cpu_dispatch.h>
498
+ #include <ATen/ops/special_chebyshev_polynomial_v_cpu_dispatch.h>
499
+ #include <ATen/ops/special_chebyshev_polynomial_w_cpu_dispatch.h>
500
+ #include <ATen/ops/special_entr_cpu_dispatch.h>
501
+ #include <ATen/ops/special_erfcx_cpu_dispatch.h>
502
+ #include <ATen/ops/special_hermite_polynomial_h_cpu_dispatch.h>
503
+ #include <ATen/ops/special_hermite_polynomial_he_cpu_dispatch.h>
504
+ #include <ATen/ops/special_i0e_cpu_dispatch.h>
505
+ #include <ATen/ops/special_i1_cpu_dispatch.h>
506
+ #include <ATen/ops/special_i1e_cpu_dispatch.h>
507
+ #include <ATen/ops/special_laguerre_polynomial_l_cpu_dispatch.h>
508
+ #include <ATen/ops/special_legendre_polynomial_p_cpu_dispatch.h>
509
+ #include <ATen/ops/special_log_ndtr_cpu_dispatch.h>
510
+ #include <ATen/ops/special_modified_bessel_i0_cpu_dispatch.h>
511
+ #include <ATen/ops/special_modified_bessel_i1_cpu_dispatch.h>
512
+ #include <ATen/ops/special_modified_bessel_k0_cpu_dispatch.h>
513
+ #include <ATen/ops/special_modified_bessel_k1_cpu_dispatch.h>
514
+ #include <ATen/ops/special_ndtri_cpu_dispatch.h>
515
+ #include <ATen/ops/special_scaled_modified_bessel_k0_cpu_dispatch.h>
516
+ #include <ATen/ops/special_scaled_modified_bessel_k1_cpu_dispatch.h>
517
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t_cpu_dispatch.h>
518
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u_cpu_dispatch.h>
519
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v_cpu_dispatch.h>
520
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w_cpu_dispatch.h>
521
+ #include <ATen/ops/special_spherical_bessel_j0_cpu_dispatch.h>
522
+ #include <ATen/ops/special_xlog1py_cpu_dispatch.h>
523
+ #include <ATen/ops/special_zeta_cpu_dispatch.h>
524
+ #include <ATen/ops/sqrt_cpu_dispatch.h>
525
+ #include <ATen/ops/sspaddmm_cpu_dispatch.h>
526
+ #include <ATen/ops/std_cpu_dispatch.h>
527
+ #include <ATen/ops/std_mean_cpu_dispatch.h>
528
+ #include <ATen/ops/sub_cpu_dispatch.h>
529
+ #include <ATen/ops/sum_cpu_dispatch.h>
530
+ #include <ATen/ops/take_cpu_dispatch.h>
531
+ #include <ATen/ops/tan_cpu_dispatch.h>
532
+ #include <ATen/ops/tanh_cpu_dispatch.h>
533
+ #include <ATen/ops/tanh_backward_cpu_dispatch.h>
534
+ #include <ATen/ops/threshold_cpu_dispatch.h>
535
+ #include <ATen/ops/threshold_backward_cpu_dispatch.h>
536
+ #include <ATen/ops/to_mkldnn_cpu_dispatch.h>
537
+ #include <ATen/ops/topk_cpu_dispatch.h>
538
+ #include <ATen/ops/trace_cpu_dispatch.h>
539
+ #include <ATen/ops/triangular_solve_cpu_dispatch.h>
540
+ #include <ATen/ops/tril_cpu_dispatch.h>
541
+ #include <ATen/ops/tril_indices_cpu_dispatch.h>
542
+ #include <ATen/ops/triu_cpu_dispatch.h>
543
+ #include <ATen/ops/triu_indices_cpu_dispatch.h>
544
+ #include <ATen/ops/trunc_cpu_dispatch.h>
545
+ #include <ATen/ops/unfold_cpu_dispatch.h>
546
+ #include <ATen/ops/unfold_backward_cpu_dispatch.h>
547
+ #include <ATen/ops/uniform_cpu_dispatch.h>
548
+ #include <ATen/ops/unique_consecutive_cpu_dispatch.h>
549
+ #include <ATen/ops/unique_dim_cpu_dispatch.h>
550
+ #include <ATen/ops/unique_dim_consecutive_cpu_dispatch.h>
551
+ #include <ATen/ops/upsample_bicubic2d_cpu_dispatch.h>
552
+ #include <ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h>
553
+ #include <ATen/ops/upsample_bilinear2d_cpu_dispatch.h>
554
+ #include <ATen/ops/upsample_bilinear2d_backward_cpu_dispatch.h>
555
+ #include <ATen/ops/upsample_linear1d_cpu_dispatch.h>
556
+ #include <ATen/ops/upsample_linear1d_backward_cpu_dispatch.h>
557
+ #include <ATen/ops/upsample_nearest1d_cpu_dispatch.h>
558
+ #include <ATen/ops/upsample_nearest1d_backward_cpu_dispatch.h>
559
+ #include <ATen/ops/upsample_nearest2d_cpu_dispatch.h>
560
+ #include <ATen/ops/upsample_nearest2d_backward_cpu_dispatch.h>
561
+ #include <ATen/ops/upsample_nearest3d_cpu_dispatch.h>
562
+ #include <ATen/ops/upsample_nearest3d_backward_cpu_dispatch.h>
563
+ #include <ATen/ops/upsample_trilinear3d_cpu_dispatch.h>
564
+ #include <ATen/ops/upsample_trilinear3d_backward_cpu_dispatch.h>
565
+ #include <ATen/ops/var_cpu_dispatch.h>
566
+ #include <ATen/ops/var_mean_cpu_dispatch.h>
567
+ #include <ATen/ops/vdot_cpu_dispatch.h>
568
+ #include <ATen/ops/view_cpu_dispatch.h>
569
+ #include <ATen/ops/view_as_complex_cpu_dispatch.h>
570
+ #include <ATen/ops/view_as_real_cpu_dispatch.h>
571
+ #include <ATen/ops/where_cpu_dispatch.h>
572
+ #include <ATen/ops/xlogy_cpu_dispatch.h>
573
+ #include <ATen/ops/zero_cpu_dispatch.h>
574
+
575
+
576
+
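
The #error near the top of this generated header points anyone building with TORCH_ASSERT_ONLY_METHOD_OPERATORS toward the narrower per-operator headers instead of this umbrella include. A hedged sketch of that pattern (assumes mul is the only operator the translation unit needs):

#include <ATen/core/Tensor.h>
#include <ATen/ops/mul_cpu_dispatch.h>  // just the CPU fastpath declaration for mul

at::Tensor mul_on_cpu(const at::Tensor& a, const at::Tensor& b) {
  return at::cpu::mul(a, b);
}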
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradFunctions.h ADDED
@@ -0,0 +1,29 @@
+ #include <ATen/core/TensorBody.h>
+
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
+ //
+ // Note [Avoiding Include Cycles In Static Dispatch]
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+ //
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+ // directly inlined into TensorBody.h.
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+ // which include functions that have defaultable optional<Tensor> arguments.
+ // That requires knowing the full Tensor class definition.
+ //
+ // We break the cycle by doing the following:
+ // - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+ // - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
+ // - CPUFunctions_inl.h includes everything else
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+ // and then it includes CPUFunctions_inl.h.
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+ // - This also means that static dispatch build, CPUFunctions.h only needs to
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+ #include <ATen/CompositeExplicitAutogradFunctions_inl.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h ADDED
@@ -0,0 +1,29 @@
+ #include <ATen/core/TensorBody.h>
+
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
+ //
+ // Note [Avoiding Include Cycles In Static Dispatch]
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+ //
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+ // directly inlined into TensorBody.h.
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+ // which include functions that have defaultable optional<Tensor> arguments.
+ // That requires knowing the full Tensor class definition.
+ //
+ // We break the cycle by doing the following:
+ // - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+ // - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
+ // - CPUFunctions_inl.h includes everything else
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+ // and then it includes CPUFunctions_inl.h.
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+ // - This also means that static dispatch build, CPUFunctions.h only needs to
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+ #include <ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h ADDED
@@ -0,0 +1,323 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+ #error This change adds a dependency on all pytorch operators, meaning the \
+ file will need to be re-compiled every time an operator is changed or added. \
+ Consider including a specific operator from \
+ <ATen/ops/{my_operator}_compositeexplicitautogradnonfunctional_dispatch.h>. \
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+ #endif
+
19
+ #include <ATen/ops/_addmm_activation_compositeexplicitautogradnonfunctional_dispatch.h>
20
+ #include <ATen/ops/_conj_copy_compositeexplicitautogradnonfunctional_dispatch.h>
21
+ #include <ATen/ops/_convert_indices_from_coo_to_csr_compositeexplicitautogradnonfunctional_dispatch.h>
22
+ #include <ATen/ops/_convert_indices_from_csr_to_coo_compositeexplicitautogradnonfunctional_dispatch.h>
23
+ #include <ATen/ops/_fw_primal_copy_compositeexplicitautogradnonfunctional_dispatch.h>
24
+ #include <ATen/ops/_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
25
+ #include <ATen/ops/_linalg_det_compositeexplicitautogradnonfunctional_dispatch.h>
26
+ #include <ATen/ops/_linalg_eigh_compositeexplicitautogradnonfunctional_dispatch.h>
27
+ #include <ATen/ops/_linalg_slogdet_compositeexplicitautogradnonfunctional_dispatch.h>
28
+ #include <ATen/ops/_linalg_solve_ex_compositeexplicitautogradnonfunctional_dispatch.h>
29
+ #include <ATen/ops/_linalg_svd_compositeexplicitautogradnonfunctional_dispatch.h>
30
+ #include <ATen/ops/_log_softmax_compositeexplicitautogradnonfunctional_dispatch.h>
31
+ #include <ATen/ops/_log_softmax_backward_data_compositeexplicitautogradnonfunctional_dispatch.h>
32
+ #include <ATen/ops/_make_dual_copy_compositeexplicitautogradnonfunctional_dispatch.h>
33
+ #include <ATen/ops/_neg_view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
34
+ #include <ATen/ops/_nested_get_values_copy_compositeexplicitautogradnonfunctional_dispatch.h>
35
+ #include <ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautogradnonfunctional_dispatch.h>
36
+ #include <ATen/ops/_nested_view_from_jagged_copy_compositeexplicitautogradnonfunctional_dispatch.h>
37
+ #include <ATen/ops/_reshape_alias_copy_compositeexplicitautogradnonfunctional_dispatch.h>
38
+ #include <ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h>
39
+ #include <ATen/ops/_softmax_backward_data_compositeexplicitautogradnonfunctional_dispatch.h>
40
+ #include <ATen/ops/_sparse_broadcast_to_copy_compositeexplicitautogradnonfunctional_dispatch.h>
41
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
42
+ #include <ATen/ops/_trilinear_compositeexplicitautogradnonfunctional_dispatch.h>
43
+ #include <ATen/ops/_upsample_bicubic2d_aa_compositeexplicitautogradnonfunctional_dispatch.h>
44
+ #include <ATen/ops/_upsample_bicubic2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h>
45
+ #include <ATen/ops/_upsample_bilinear2d_aa_compositeexplicitautogradnonfunctional_dispatch.h>
46
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h>
47
+ #include <ATen/ops/_upsample_nearest_exact1d_compositeexplicitautogradnonfunctional_dispatch.h>
48
+ #include <ATen/ops/_upsample_nearest_exact1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
49
+ #include <ATen/ops/_upsample_nearest_exact2d_compositeexplicitautogradnonfunctional_dispatch.h>
50
+ #include <ATen/ops/_upsample_nearest_exact2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
51
+ #include <ATen/ops/_upsample_nearest_exact3d_compositeexplicitautogradnonfunctional_dispatch.h>
52
+ #include <ATen/ops/_upsample_nearest_exact3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
53
+ #include <ATen/ops/_values_copy_compositeexplicitautogradnonfunctional_dispatch.h>
54
+ #include <ATen/ops/acos_compositeexplicitautogradnonfunctional_dispatch.h>
55
+ #include <ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h>
56
+ #include <ATen/ops/adaptive_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
57
+ #include <ATen/ops/adaptive_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
58
+ #include <ATen/ops/adaptive_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
59
+ #include <ATen/ops/adaptive_max_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
60
+ #include <ATen/ops/add_compositeexplicitautogradnonfunctional_dispatch.h>
61
+ #include <ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h>
62
+ #include <ATen/ops/addcmul_compositeexplicitautogradnonfunctional_dispatch.h>
63
+ #include <ATen/ops/addmm_compositeexplicitautogradnonfunctional_dispatch.h>
64
+ #include <ATen/ops/addmv_compositeexplicitautogradnonfunctional_dispatch.h>
65
+ #include <ATen/ops/alias_copy_compositeexplicitautogradnonfunctional_dispatch.h>
66
+ #include <ATen/ops/all_compositeexplicitautogradnonfunctional_dispatch.h>
67
+ #include <ATen/ops/amax_compositeexplicitautogradnonfunctional_dispatch.h>
68
+ #include <ATen/ops/amin_compositeexplicitautogradnonfunctional_dispatch.h>
69
+ #include <ATen/ops/aminmax_compositeexplicitautogradnonfunctional_dispatch.h>
70
+ #include <ATen/ops/any_compositeexplicitautogradnonfunctional_dispatch.h>
71
+ #include <ATen/ops/argmax_compositeexplicitautogradnonfunctional_dispatch.h>
72
+ #include <ATen/ops/argmin_compositeexplicitautogradnonfunctional_dispatch.h>
73
+ #include <ATen/ops/as_strided_compositeexplicitautogradnonfunctional_dispatch.h>
74
+ #include <ATen/ops/as_strided_copy_compositeexplicitautogradnonfunctional_dispatch.h>
75
+ #include <ATen/ops/as_strided_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
76
+ #include <ATen/ops/asin_compositeexplicitautogradnonfunctional_dispatch.h>
77
+ #include <ATen/ops/asinh_compositeexplicitautogradnonfunctional_dispatch.h>
78
+ #include <ATen/ops/atan_compositeexplicitautogradnonfunctional_dispatch.h>
79
+ #include <ATen/ops/atan2_compositeexplicitautogradnonfunctional_dispatch.h>
80
+ #include <ATen/ops/atanh_compositeexplicitautogradnonfunctional_dispatch.h>
81
+ #include <ATen/ops/avg_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
82
+ #include <ATen/ops/avg_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
83
+ #include <ATen/ops/avg_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
84
+ #include <ATen/ops/avg_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
85
+ #include <ATen/ops/baddbmm_compositeexplicitautogradnonfunctional_dispatch.h>
86
+ #include <ATen/ops/bernoulli_compositeexplicitautogradnonfunctional_dispatch.h>
87
+ #include <ATen/ops/bitwise_and_compositeexplicitautogradnonfunctional_dispatch.h>
88
+ #include <ATen/ops/bitwise_left_shift_compositeexplicitautogradnonfunctional_dispatch.h>
89
+ #include <ATen/ops/bitwise_not_compositeexplicitautogradnonfunctional_dispatch.h>
90
+ #include <ATen/ops/bitwise_or_compositeexplicitautogradnonfunctional_dispatch.h>
91
+ #include <ATen/ops/bitwise_right_shift_compositeexplicitautogradnonfunctional_dispatch.h>
92
+ #include <ATen/ops/bitwise_xor_compositeexplicitautogradnonfunctional_dispatch.h>
93
+ #include <ATen/ops/bmm_compositeexplicitautogradnonfunctional_dispatch.h>
94
+ #include <ATen/ops/cat_compositeexplicitautogradnonfunctional_dispatch.h>
95
+ #include <ATen/ops/ccol_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
96
+ #include <ATen/ops/ceil_compositeexplicitautogradnonfunctional_dispatch.h>
97
+ #include <ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h>
98
+ #include <ATen/ops/clamp_max_compositeexplicitautogradnonfunctional_dispatch.h>
99
+ #include <ATen/ops/clamp_min_compositeexplicitautogradnonfunctional_dispatch.h>
100
+ #include <ATen/ops/col_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
101
+ #include <ATen/ops/copy_compositeexplicitautogradnonfunctional_dispatch.h>
102
+ #include <ATen/ops/copysign_compositeexplicitautogradnonfunctional_dispatch.h>
103
+ #include <ATen/ops/cos_compositeexplicitautogradnonfunctional_dispatch.h>
104
+ #include <ATen/ops/cosh_compositeexplicitautogradnonfunctional_dispatch.h>
105
+ #include <ATen/ops/crow_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
106
+ #include <ATen/ops/cumprod_compositeexplicitautogradnonfunctional_dispatch.h>
107
+ #include <ATen/ops/cumsum_compositeexplicitautogradnonfunctional_dispatch.h>
108
+ #include <ATen/ops/detach_copy_compositeexplicitautogradnonfunctional_dispatch.h>
109
+ #include <ATen/ops/diag_embed_compositeexplicitautogradnonfunctional_dispatch.h>
110
+ #include <ATen/ops/diagonal_copy_compositeexplicitautogradnonfunctional_dispatch.h>
111
+ #include <ATen/ops/diagonal_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
112
+ #include <ATen/ops/digamma_compositeexplicitautogradnonfunctional_dispatch.h>
113
+ #include <ATen/ops/div_compositeexplicitautogradnonfunctional_dispatch.h>
114
+ #include <ATen/ops/elu_compositeexplicitautogradnonfunctional_dispatch.h>
115
+ #include <ATen/ops/elu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
116
+ #include <ATen/ops/eq_compositeexplicitautogradnonfunctional_dispatch.h>
117
+ #include <ATen/ops/erf_compositeexplicitautogradnonfunctional_dispatch.h>
118
+ #include <ATen/ops/erfc_compositeexplicitautogradnonfunctional_dispatch.h>
119
+ #include <ATen/ops/erfinv_compositeexplicitautogradnonfunctional_dispatch.h>
120
+ #include <ATen/ops/exp_compositeexplicitautogradnonfunctional_dispatch.h>
121
+ #include <ATen/ops/exp2_compositeexplicitautogradnonfunctional_dispatch.h>
122
+ #include <ATen/ops/expand_copy_compositeexplicitautogradnonfunctional_dispatch.h>
123
+ #include <ATen/ops/expm1_compositeexplicitautogradnonfunctional_dispatch.h>
124
+ #include <ATen/ops/floor_compositeexplicitautogradnonfunctional_dispatch.h>
125
+ #include <ATen/ops/fmax_compositeexplicitautogradnonfunctional_dispatch.h>
126
+ #include <ATen/ops/fmin_compositeexplicitautogradnonfunctional_dispatch.h>
127
+ #include <ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h>
128
+ #include <ATen/ops/frac_compositeexplicitautogradnonfunctional_dispatch.h>
129
+ #include <ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h>
130
+ #include <ATen/ops/fractional_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
131
+ #include <ATen/ops/fractional_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h>
132
+ #include <ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h>
133
+ #include <ATen/ops/gcd_compositeexplicitautogradnonfunctional_dispatch.h>
134
+ #include <ATen/ops/ge_compositeexplicitautogradnonfunctional_dispatch.h>
135
+ #include <ATen/ops/gelu_compositeexplicitautogradnonfunctional_dispatch.h>
136
+ #include <ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
137
+ #include <ATen/ops/glu_compositeexplicitautogradnonfunctional_dispatch.h>
138
+ #include <ATen/ops/gt_compositeexplicitautogradnonfunctional_dispatch.h>
139
+ #include <ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h>
140
+ #include <ATen/ops/hardshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h>
141
+ #include <ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h>
142
+ #include <ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h>
143
+ #include <ATen/ops/heaviside_compositeexplicitautogradnonfunctional_dispatch.h>
144
+ #include <ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h>
145
+ #include <ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h>
146
+ #include <ATen/ops/igamma_compositeexplicitautogradnonfunctional_dispatch.h>
147
+ #include <ATen/ops/igammac_compositeexplicitautogradnonfunctional_dispatch.h>
148
+ #include <ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h>
149
+ #include <ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h>
150
+ #include <ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h>
151
+ #include <ATen/ops/index_reduce_compositeexplicitautogradnonfunctional_dispatch.h>
152
+ #include <ATen/ops/indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
153
+ #include <ATen/ops/isin_compositeexplicitautogradnonfunctional_dispatch.h>
154
+ #include <ATen/ops/isneginf_compositeexplicitautogradnonfunctional_dispatch.h>
155
+ #include <ATen/ops/isposinf_compositeexplicitautogradnonfunctional_dispatch.h>
156
+ #include <ATen/ops/lcm_compositeexplicitautogradnonfunctional_dispatch.h>
157
+ #include <ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h>
158
+ #include <ATen/ops/leaky_relu_compositeexplicitautogradnonfunctional_dispatch.h>
159
+ #include <ATen/ops/leaky_relu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
160
+ #include <ATen/ops/lerp_compositeexplicitautogradnonfunctional_dispatch.h>
161
+ #include <ATen/ops/lgamma_compositeexplicitautogradnonfunctional_dispatch.h>
162
+ #include <ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h>
163
+ #include <ATen/ops/linalg_cholesky_ex_compositeexplicitautogradnonfunctional_dispatch.h>
164
+ #include <ATen/ops/linalg_cross_compositeexplicitautogradnonfunctional_dispatch.h>
165
+ #include <ATen/ops/linalg_inv_ex_compositeexplicitautogradnonfunctional_dispatch.h>
166
+ #include <ATen/ops/linalg_ldl_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h>
167
+ #include <ATen/ops/linalg_ldl_solve_compositeexplicitautogradnonfunctional_dispatch.h>
168
+ #include <ATen/ops/linalg_lu_compositeexplicitautogradnonfunctional_dispatch.h>
169
+ #include <ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h>
170
+ #include <ATen/ops/linalg_lu_solve_compositeexplicitautogradnonfunctional_dispatch.h>
171
+ #include <ATen/ops/linalg_pinv_compositeexplicitautogradnonfunctional_dispatch.h>
172
+ #include <ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h>
173
+ #include <ATen/ops/linalg_vector_norm_compositeexplicitautogradnonfunctional_dispatch.h>
174
+ #include <ATen/ops/log_compositeexplicitautogradnonfunctional_dispatch.h>
175
+ #include <ATen/ops/log10_compositeexplicitautogradnonfunctional_dispatch.h>
176
+ #include <ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h>
177
+ #include <ATen/ops/log2_compositeexplicitautogradnonfunctional_dispatch.h>
178
+ #include <ATen/ops/logaddexp_compositeexplicitautogradnonfunctional_dispatch.h>
179
+ #include <ATen/ops/logaddexp2_compositeexplicitautogradnonfunctional_dispatch.h>
180
+ #include <ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h>
181
+ #include <ATen/ops/logsumexp_compositeexplicitautogradnonfunctional_dispatch.h>
182
+ #include <ATen/ops/lt_compositeexplicitautogradnonfunctional_dispatch.h>
183
+ #include <ATen/ops/lu_unpack_compositeexplicitautogradnonfunctional_dispatch.h>
184
+ #include <ATen/ops/max_compositeexplicitautogradnonfunctional_dispatch.h>
185
+ #include <ATen/ops/max_pool2d_with_indices_compositeexplicitautogradnonfunctional_dispatch.h>
186
+ #include <ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h>
187
+ #include <ATen/ops/maximum_compositeexplicitautogradnonfunctional_dispatch.h>
188
+ #include <ATen/ops/mean_compositeexplicitautogradnonfunctional_dispatch.h>
189
+ #include <ATen/ops/min_compositeexplicitautogradnonfunctional_dispatch.h>
190
+ #include <ATen/ops/minimum_compositeexplicitautogradnonfunctional_dispatch.h>
191
+ #include <ATen/ops/mish_compositeexplicitautogradnonfunctional_dispatch.h>
192
+ #include <ATen/ops/mm_compositeexplicitautogradnonfunctional_dispatch.h>
193
+ #include <ATen/ops/mse_loss_compositeexplicitautogradnonfunctional_dispatch.h>
194
+ #include <ATen/ops/mul_compositeexplicitautogradnonfunctional_dispatch.h>
195
+ #include <ATen/ops/narrow_copy_compositeexplicitautogradnonfunctional_dispatch.h>
196
+ #include <ATen/ops/ne_compositeexplicitautogradnonfunctional_dispatch.h>
197
+ #include <ATen/ops/neg_compositeexplicitautogradnonfunctional_dispatch.h>
198
+ #include <ATen/ops/new_empty_strided_compositeexplicitautogradnonfunctional_dispatch.h>
199
+ #include <ATen/ops/nextafter_compositeexplicitautogradnonfunctional_dispatch.h>
200
+ #include <ATen/ops/nll_loss_backward_compositeexplicitautogradnonfunctional_dispatch.h>
201
+ #include <ATen/ops/nll_loss_forward_compositeexplicitautogradnonfunctional_dispatch.h>
202
+ #include <ATen/ops/norm_compositeexplicitautogradnonfunctional_dispatch.h>
203
+ #include <ATen/ops/permute_copy_compositeexplicitautogradnonfunctional_dispatch.h>
204
+ #include <ATen/ops/pixel_shuffle_compositeexplicitautogradnonfunctional_dispatch.h>
205
+ #include <ATen/ops/pixel_unshuffle_compositeexplicitautogradnonfunctional_dispatch.h>
206
+ #include <ATen/ops/polygamma_compositeexplicitautogradnonfunctional_dispatch.h>
207
+ #include <ATen/ops/pow_compositeexplicitautogradnonfunctional_dispatch.h>
208
+ #include <ATen/ops/prod_compositeexplicitautogradnonfunctional_dispatch.h>
209
+ #include <ATen/ops/reciprocal_compositeexplicitautogradnonfunctional_dispatch.h>
210
+ #include <ATen/ops/reflection_pad1d_compositeexplicitautogradnonfunctional_dispatch.h>
211
+ #include <ATen/ops/reflection_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
212
+ #include <ATen/ops/reflection_pad3d_compositeexplicitautogradnonfunctional_dispatch.h>
213
+ #include <ATen/ops/reflection_pad3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
214
+ #include <ATen/ops/remainder_compositeexplicitautogradnonfunctional_dispatch.h>
215
+ #include <ATen/ops/renorm_compositeexplicitautogradnonfunctional_dispatch.h>
216
+ #include <ATen/ops/replication_pad1d_compositeexplicitautogradnonfunctional_dispatch.h>
217
+ #include <ATen/ops/replication_pad1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
218
+ #include <ATen/ops/replication_pad2d_compositeexplicitautogradnonfunctional_dispatch.h>
219
+ #include <ATen/ops/replication_pad3d_compositeexplicitautogradnonfunctional_dispatch.h>
220
+ #include <ATen/ops/round_compositeexplicitautogradnonfunctional_dispatch.h>
221
+ #include <ATen/ops/row_indices_copy_compositeexplicitautogradnonfunctional_dispatch.h>
222
+ #include <ATen/ops/rsqrt_compositeexplicitautogradnonfunctional_dispatch.h>
223
+ #include <ATen/ops/scatter_compositeexplicitautogradnonfunctional_dispatch.h>
224
+ #include <ATen/ops/scatter_add_compositeexplicitautogradnonfunctional_dispatch.h>
225
+ #include <ATen/ops/scatter_reduce_compositeexplicitautogradnonfunctional_dispatch.h>
226
+ #include <ATen/ops/select_backward_compositeexplicitautogradnonfunctional_dispatch.h>
227
+ #include <ATen/ops/select_copy_compositeexplicitautogradnonfunctional_dispatch.h>
228
+ #include <ATen/ops/select_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
229
+ #include <ATen/ops/sgn_compositeexplicitautogradnonfunctional_dispatch.h>
230
+ #include <ATen/ops/sigmoid_compositeexplicitautogradnonfunctional_dispatch.h>
231
+ #include <ATen/ops/sigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h>
232
+ #include <ATen/ops/sign_compositeexplicitautogradnonfunctional_dispatch.h>
233
+ #include <ATen/ops/signbit_compositeexplicitautogradnonfunctional_dispatch.h>
234
+ #include <ATen/ops/silu_compositeexplicitautogradnonfunctional_dispatch.h>
235
+ #include <ATen/ops/silu_backward_compositeexplicitautogradnonfunctional_dispatch.h>
236
+ #include <ATen/ops/sin_compositeexplicitautogradnonfunctional_dispatch.h>
237
+ #include <ATen/ops/sinc_compositeexplicitautogradnonfunctional_dispatch.h>
238
+ #include <ATen/ops/sinh_compositeexplicitautogradnonfunctional_dispatch.h>
239
+ #include <ATen/ops/slice_copy_compositeexplicitautogradnonfunctional_dispatch.h>
240
+ #include <ATen/ops/slice_scatter_compositeexplicitautogradnonfunctional_dispatch.h>
241
+ #include <ATen/ops/slow_conv_transpose2d_compositeexplicitautogradnonfunctional_dispatch.h>
242
+ #include <ATen/ops/smooth_l1_loss_compositeexplicitautogradnonfunctional_dispatch.h>
243
+ #include <ATen/ops/softplus_compositeexplicitautogradnonfunctional_dispatch.h>
244
+ #include <ATen/ops/softplus_backward_compositeexplicitautogradnonfunctional_dispatch.h>
245
+ #include <ATen/ops/softshrink_compositeexplicitautogradnonfunctional_dispatch.h>
246
+ #include <ATen/ops/softshrink_backward_compositeexplicitautogradnonfunctional_dispatch.h>
247
+ #include <ATen/ops/sort_compositeexplicitautogradnonfunctional_dispatch.h>
248
+ #include <ATen/ops/special_airy_ai_compositeexplicitautogradnonfunctional_dispatch.h>
249
+ #include <ATen/ops/special_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h>
250
+ #include <ATen/ops/special_bessel_j1_compositeexplicitautogradnonfunctional_dispatch.h>
251
+ #include <ATen/ops/special_bessel_y0_compositeexplicitautogradnonfunctional_dispatch.h>
252
+ #include <ATen/ops/special_bessel_y1_compositeexplicitautogradnonfunctional_dispatch.h>
253
+ #include <ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h>
254
+ #include <ATen/ops/special_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h>
255
+ #include <ATen/ops/special_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h>
256
+ #include <ATen/ops/special_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h>
257
+ #include <ATen/ops/special_entr_compositeexplicitautogradnonfunctional_dispatch.h>
258
+ #include <ATen/ops/special_erfcx_compositeexplicitautogradnonfunctional_dispatch.h>
259
+ #include <ATen/ops/special_hermite_polynomial_h_compositeexplicitautogradnonfunctional_dispatch.h>
260
+ #include <ATen/ops/special_hermite_polynomial_he_compositeexplicitautogradnonfunctional_dispatch.h>
261
+ #include <ATen/ops/special_i0e_compositeexplicitautogradnonfunctional_dispatch.h>
262
+ #include <ATen/ops/special_i1_compositeexplicitautogradnonfunctional_dispatch.h>
263
+ #include <ATen/ops/special_i1e_compositeexplicitautogradnonfunctional_dispatch.h>
264
+ #include <ATen/ops/special_laguerre_polynomial_l_compositeexplicitautogradnonfunctional_dispatch.h>
265
+ #include <ATen/ops/special_legendre_polynomial_p_compositeexplicitautogradnonfunctional_dispatch.h>
266
+ #include <ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h>
267
+ #include <ATen/ops/special_modified_bessel_i0_compositeexplicitautogradnonfunctional_dispatch.h>
268
+ #include <ATen/ops/special_modified_bessel_i1_compositeexplicitautogradnonfunctional_dispatch.h>
269
+ #include <ATen/ops/special_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h>
270
+ #include <ATen/ops/special_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h>
271
+ #include <ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h>
272
+ #include <ATen/ops/special_scaled_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h>
273
+ #include <ATen/ops/special_scaled_modified_bessel_k1_compositeexplicitautogradnonfunctional_dispatch.h>
274
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t_compositeexplicitautogradnonfunctional_dispatch.h>
275
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautogradnonfunctional_dispatch.h>
276
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v_compositeexplicitautogradnonfunctional_dispatch.h>
277
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w_compositeexplicitautogradnonfunctional_dispatch.h>
278
+ #include <ATen/ops/special_spherical_bessel_j0_compositeexplicitautogradnonfunctional_dispatch.h>
279
+ #include <ATen/ops/special_xlog1py_compositeexplicitautogradnonfunctional_dispatch.h>
280
+ #include <ATen/ops/special_zeta_compositeexplicitautogradnonfunctional_dispatch.h>
281
+ #include <ATen/ops/split_copy_compositeexplicitautogradnonfunctional_dispatch.h>
282
+ #include <ATen/ops/split_with_sizes_copy_compositeexplicitautogradnonfunctional_dispatch.h>
283
+ #include <ATen/ops/sqrt_compositeexplicitautogradnonfunctional_dispatch.h>
284
+ #include <ATen/ops/squeeze_copy_compositeexplicitautogradnonfunctional_dispatch.h>
285
+ #include <ATen/ops/sub_compositeexplicitautogradnonfunctional_dispatch.h>
286
+ #include <ATen/ops/sum_compositeexplicitautogradnonfunctional_dispatch.h>
287
+ #include <ATen/ops/t_copy_compositeexplicitautogradnonfunctional_dispatch.h>
288
+ #include <ATen/ops/tan_compositeexplicitautogradnonfunctional_dispatch.h>
289
+ #include <ATen/ops/tanh_compositeexplicitautogradnonfunctional_dispatch.h>
290
+ #include <ATen/ops/tanh_backward_compositeexplicitautogradnonfunctional_dispatch.h>
291
+ #include <ATen/ops/threshold_compositeexplicitautogradnonfunctional_dispatch.h>
292
+ #include <ATen/ops/threshold_backward_compositeexplicitautogradnonfunctional_dispatch.h>
293
+ #include <ATen/ops/topk_compositeexplicitautogradnonfunctional_dispatch.h>
294
+ #include <ATen/ops/transpose_copy_compositeexplicitautogradnonfunctional_dispatch.h>
295
+ #include <ATen/ops/triangular_solve_compositeexplicitautogradnonfunctional_dispatch.h>
296
+ #include <ATen/ops/tril_compositeexplicitautogradnonfunctional_dispatch.h>
297
+ #include <ATen/ops/triu_compositeexplicitautogradnonfunctional_dispatch.h>
298
+ #include <ATen/ops/trunc_compositeexplicitautogradnonfunctional_dispatch.h>
299
+ #include <ATen/ops/unbind_copy_compositeexplicitautogradnonfunctional_dispatch.h>
300
+ #include <ATen/ops/unfold_copy_compositeexplicitautogradnonfunctional_dispatch.h>
301
+ #include <ATen/ops/unsqueeze_copy_compositeexplicitautogradnonfunctional_dispatch.h>
302
+ #include <ATen/ops/upsample_bicubic2d_compositeexplicitautogradnonfunctional_dispatch.h>
303
+ #include <ATen/ops/upsample_bicubic2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
304
+ #include <ATen/ops/upsample_bilinear2d_compositeexplicitautogradnonfunctional_dispatch.h>
305
+ #include <ATen/ops/upsample_bilinear2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
306
+ #include <ATen/ops/upsample_linear1d_compositeexplicitautogradnonfunctional_dispatch.h>
307
+ #include <ATen/ops/upsample_linear1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
308
+ #include <ATen/ops/upsample_nearest1d_compositeexplicitautogradnonfunctional_dispatch.h>
309
+ #include <ATen/ops/upsample_nearest1d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
310
+ #include <ATen/ops/upsample_nearest2d_compositeexplicitautogradnonfunctional_dispatch.h>
311
+ #include <ATen/ops/upsample_nearest2d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
312
+ #include <ATen/ops/upsample_nearest3d_compositeexplicitautogradnonfunctional_dispatch.h>
313
+ #include <ATen/ops/upsample_nearest3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
314
+ #include <ATen/ops/upsample_trilinear3d_compositeexplicitautogradnonfunctional_dispatch.h>
315
+ #include <ATen/ops/upsample_trilinear3d_backward_compositeexplicitautogradnonfunctional_dispatch.h>
316
+ #include <ATen/ops/values_copy_compositeexplicitautogradnonfunctional_dispatch.h>
317
+ #include <ATen/ops/view_as_complex_copy_compositeexplicitautogradnonfunctional_dispatch.h>
318
+ #include <ATen/ops/view_as_real_copy_compositeexplicitautogradnonfunctional_dispatch.h>
319
+ #include <ATen/ops/view_copy_compositeexplicitautogradnonfunctional_dispatch.h>
320
+ #include <ATen/ops/xlogy_compositeexplicitautogradnonfunctional_dispatch.h>
321
+
322
+
323
+
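When TORCH_ASSERT_ONLY_METHOD_OPERATORS is in effect, the #error above directs callers to depend on individual per-operator headers instead of this aggregate one. A minimal sketch using the add operator from the list above; the namespace spelling is assumed to mirror the dispatch key:

    // Pull in a single operator's dispatch entry rather than all of them,
    // so this translation unit is not rebuilt when unrelated operators change.
    #include <ATen/ops/add_compositeexplicitautogradnonfunctional_dispatch.h>

    // assumed to expose at::compositeexplicitautogradnonfunctional::add(...)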
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradFunctions.h ADDED
@@ -0,0 +1,29 @@
1
+ #include <ATen/core/TensorBody.h>
2
+
3
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
4
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
5
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
6
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
7
+ //
8
+ // Note [Avoiding Include Cycles In Static Dispatch]
9
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
10
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
11
+ //
12
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
13
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
14
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
15
+ // directly inlined into TensorBody.h.
16
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
17
+ // which include functions that have defaultable optional<Tensor> arguments.
18
+ // That requires knowing the full Tensor class definition.
19
+ //
20
+ // We break the cycle by doing the following:
21
+ // - Split CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
22
+ // - CPUFunctions.h is a dummy file that just includes the Tensor class and CPUFunctions_inl.h,
23
+ // - CPUFunctions_inl.h includes everything else
24
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
25
+ // and then it includes CPUFunctions_inl.h.
26
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
27
+ // - This also means that in the static dispatch build, CPUFunctions.h only needs to
28
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
29
+ #include <ATen/CompositeImplicitAutogradFunctions_inl.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
12
+ #error This change adds a dependency on all pytorch operators, meaning the \
13
+ file will need to be re-compiled every time an operator is changed or added. \
14
+ Consider including a specific operator from \
15
+ <ATen/ops/{my_operator}_compositeimplicitautogradnestedtensor_dispatch.h>. \
16
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
17
+ #endif
18
+
19
+ #include <ATen/ops/randn_like_compositeimplicitautogradnestedtensor_dispatch.h>
20
+ #include <ATen/ops/reshape_compositeimplicitautogradnestedtensor_dispatch.h>
21
+ #include <ATen/ops/reshape_as_compositeimplicitautogradnestedtensor_dispatch.h>
22
+ #include <ATen/ops/zeros_like_compositeimplicitautogradnestedtensor_dispatch.h>
23
+
24
+
25
+
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Context.h ADDED
@@ -0,0 +1,560 @@
1
+ #pragma once
2
+
3
+ #include <ATen/CPUGeneratorImpl.h>
4
+ #include <ATen/DeviceAccelerator.h>
5
+ #include <ATen/LinalgBackend.h>
6
+ #include <ATen/core/ATenGeneral.h>
7
+ #include <ATen/core/DeprecatedTypeProperties.h>
8
+ #include <ATen/core/Generator.h>
9
+ #include <ATen/core/LegacyTypeDispatch.h>
10
+ #include <ATen/detail/AcceleratorHooksInterface.h>
11
+ #include <ATen/detail/CUDAHooksInterface.h>
12
+ #include <ATen/detail/HIPHooksInterface.h>
13
+ #include <ATen/detail/IPUHooksInterface.h>
14
+ #include <ATen/detail/MPSHooksInterface.h>
15
+ #include <ATen/detail/MTIAHooksInterface.h>
16
+ #include <ATen/detail/ORTHooksInterface.h>
17
+ #include <ATen/detail/PrivateUse1HooksInterface.h>
18
+ #include <ATen/detail/XPUHooksInterface.h>
19
+ #include <c10/core/QEngine.h>
20
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
21
+ #include <c10/util/CallOnce.h>
22
+ #include <c10/util/Exception.h>
23
+ #include <c10/util/env.h>
24
+ #include <c10/util/irange.h>
25
+
26
+ #include <cstdint>
27
+ #include <mutex>
28
+
29
+ namespace at {
30
+
31
+ class Tensor;
32
+
33
+ enum class TORCH_API Float32MatmulPrecision { HIGHEST, HIGH, MEDIUM };
34
+
35
+ class TORCH_API Context {
36
+ public:
37
+ Context();
38
+
39
+ const Generator& defaultGenerator(Device device) {
40
+ c10::DeviceType device_type = device.type();
41
+ initCUDAIfNeeded(device_type);
42
+ initHIPIfNeeded(device_type);
43
+ if (device_type == at::kCPU) {
44
+ return at::detail::getDefaultCPUGenerator();
45
+ } else if (device_type == at::kCUDA) {
46
+ return at::detail::getCUDAHooks().getDefaultCUDAGenerator(device.index());
47
+ } else if (device_type == at::kMPS) {
48
+ return at::detail::getMPSHooks().getDefaultMPSGenerator();
49
+ } else if (device_type == at::kXPU) {
50
+ return at::detail::getXPUHooks().getDefaultXPUGenerator(device.index());
51
+ } else if (device_type == at::kIPU) {
52
+ return at::detail::getIPUHooks().getDefaultIPUGenerator(device.index());
53
+ } else if (device_type == at::kPrivateUse1) {
54
+ return at::GetPrivateUse1HooksInterface()->getDefaultGenerator(
55
+ device.index());
56
+ } else {
57
+ AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
58
+ }
59
+ }
60
+ const AcceleratorHooksInterface& getAcceleratorHooksInterface(
61
+ c10::optional<c10::DeviceType> opt_device_type = c10::nullopt) {
62
+ c10::DeviceType device_type = opt_device_type.has_value()
63
+ ? opt_device_type.value()
64
+ : at::getAccelerator(true).value();
65
+ if (device_type == at::kCUDA) {
66
+ return at::detail::getCUDAHooks();
67
+ } else if (device_type == at::kMPS) {
68
+ return at::detail::getMPSHooks();
69
+ } else if (device_type == at::kPrivateUse1) {
70
+ return at::detail::getPrivateUse1Hooks();
71
+ } else {
72
+ AT_ERROR(
73
+ c10::DeviceTypeName(device_type), " device type not an accelerator.");
74
+ }
75
+ }
76
+ Device getDeviceFromPtr(void* data, c10::DeviceType device_type) {
77
+ initCUDAIfNeeded(device_type);
78
+ initHIPIfNeeded(device_type);
79
+ initXPUIfNeeded(device_type);
80
+ if (device_type == at::kCPU) {
81
+ return c10::DeviceType::CPU;
82
+ } else if (device_type == at::kCUDA) {
83
+ return at::detail::getCUDAHooks().getDeviceFromPtr(data);
84
+ } else if (device_type == at::kXPU) {
85
+ return at::detail::getXPUHooks().getDeviceFromPtr(data);
86
+ } else if (device_type == at::kPrivateUse1) {
87
+ return at::GetPrivateUse1HooksInterface()->getDeviceFromPtr(data);
88
+ } else {
89
+ AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
90
+ }
91
+ }
92
+ static bool isPinnedPtr(const void* data) {
93
+ return detail::getCUDAHooks().isPinnedPtr(data);
94
+ }
95
+ static bool hasOpenMP();
96
+ static bool hasMKL();
97
+ static bool hasLAPACK();
98
+ static bool hasMKLDNN();
99
+ static bool hasMAGMA() {
100
+ return detail::getCUDAHooks().hasMAGMA();
101
+ }
102
+ static bool hasCUDA() {
103
+ return detail::getCUDAHooks().hasCUDA();
104
+ }
105
+ static bool hasMTIA() {
106
+ return detail::getMTIAHooks().hasMTIA();
107
+ }
108
+ static bool hasCUDART() {
109
+ return detail::getCUDAHooks().hasCUDART();
110
+ }
111
+ static long versionCUDART() {
112
+ return detail::getCUDAHooks().versionCUDART();
113
+ }
114
+ static bool hasCuDNN() {
115
+ return detail::getCUDAHooks().hasCuDNN();
116
+ }
117
+ static long versionCuDNN() {
118
+ return detail::getCUDAHooks().versionCuDNN();
119
+ }
120
+ static bool hasCuSOLVER() {
121
+ return detail::getCUDAHooks().hasCuSOLVER();
122
+ }
123
+ static bool hasHIP() {
124
+ return detail::getHIPHooks().hasHIP();
125
+ }
126
+ static bool hasMPS() {
127
+ return detail::getMPSHooks().hasMPS();
128
+ }
129
+ static bool hasIPU() {
130
+ return c10::impl::hasDeviceGuardImpl(c10::DeviceType::IPU);
131
+ }
132
+ static bool hasXLA() {
133
+ return c10::impl::hasDeviceGuardImpl(c10::DeviceType::XLA);
134
+ }
135
+ static bool hasXPU() {
136
+ return detail::getXPUHooks().hasXPU();
137
+ }
138
+ static bool hasLazy() {
139
+ return c10::impl::hasDeviceGuardImpl(c10::DeviceType::Lazy);
140
+ }
141
+ static bool hasORT() {
142
+ return c10::impl::hasDeviceGuardImpl(c10::DeviceType::ORT);
143
+ }
144
+ // Defined in the header so that getNonVariableType can inline the
145
+ // call_once check; getNonVariableType is called fairly frequently.
146
+ void lazyInitCUDA() {
147
+ c10::call_once(thc_init, [&] { detail::getCUDAHooks().initCUDA(); });
148
+ }
149
+ void lazyInitHIP() {
150
+ c10::call_once(thh_init, [&] { detail::getHIPHooks().initHIP(); });
151
+ }
152
+ void lazyInitXPU() {
153
+ c10::call_once(thx_init, [&] { detail::getXPUHooks().initXPU(); });
154
+ }
155
+ void lazyInitPrivateUse1() {
156
+ c10::call_once(thp_init, [&] {
157
+ if (isPrivateUse1HooksRegistered()) {
158
+ at::GetPrivateUse1HooksInterface()->initPrivateUse1();
159
+ }
160
+ });
161
+ }
162
+ static const at::cuda::NVRTC& getNVRTC() {
163
+ return detail::getCUDAHooks().nvrtc();
164
+ }
165
+
166
+ static bool setFlushDenormal(bool on);
167
+
168
+ // NB: This method reports *purely* whether or not a user requested
169
+ // that CuDNN be enabled; it doesn't actually say anything about
170
+ // whether or not CuDNN is actually usable. Use cudnn_is_acceptable
171
+ // to test this instead.
172
+ bool userEnabledCuDNN() const;
173
+ void setUserEnabledCuDNN(bool e);
174
+ bool userEnabledMkldnn() const;
175
+ void setUserEnabledMkldnn(bool e);
176
+ bool benchmarkCuDNN() const;
177
+ void setBenchmarkCuDNN(bool);
178
+ int benchmarkLimitCuDNN() const;
179
+ void setBenchmarkLimitCuDNN(int);
180
+ bool deterministicCuDNN() const;
181
+ void setDeterministicCuDNN(bool);
182
+ bool userEnabledNNPACK() const;
183
+ void setUserEnabledNNPACK(bool e);
184
+
185
+ // Note [Disabling Fused SDP Kernels]
186
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
187
+ // Flash and Memory Efficient SDP kernels are enabled by default.
188
+ // However, they can be disabled by setting
189
+ // at::globalContext().setUserEnabledFlashSDP(false) flag.
190
+ // This is useful for debugging purposes. For example, if you want to
191
+ // compare the performance of the flash SDP kernels with the unfused
192
+ // kernel, you can disable the flash SDP kernels. By disabling
193
+ // the math SDP kernel, you can force your code to use flash kernels.
194
+ // The math SDP kernel can be disabled by setting
195
+ // at::globalContext().setUserEnabledMathSDP(false) flag.
196
+ void setSDPUseFlash(bool);
197
+ bool userEnabledFlashSDP() const;
198
+
199
+ void setSDPUseMemEfficient(bool);
200
+ bool userEnabledMemEfficientSDP() const;
201
+
202
+ void setSDPUseMath(bool);
203
+ bool userEnabledMathSDP() const;
204
+
205
+ void setSDPUseCuDNN(bool);
206
+ bool userEnabledCuDNNSDP() const;
207
+
208
+ at::LinalgBackend linalgPreferredBackend() const;
209
+ void setLinalgPreferredBackend(at::LinalgBackend);
210
+
211
+ // Note [Enabling Deterministic Operations]
212
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
213
+ // Operations in PyTorch that normally act nondeterministically, but have an
214
+ // alternate deterministic implementation, should satisfy the following
215
+ // requirements:
216
+ //
217
+ // * Include this comment: "See Note [Enabling Deterministic Operations]"
218
+ //
219
+ // * Check the value of `at::globalContext().deterministicAlgorithms()` to
220
+ // toggle
221
+ // between nondeterministic and deterministic implementations.
222
+ //
223
+ // * Have an entry in the list of PyTorch operations that toggle between
224
+ // nondeterministic
225
+ // and deterministic implementations, in the docstring of
226
+ // `use_deterministic_algorithms()` in torch/__init__.py
227
+ //
228
+ // `example_func()` below shows an example of toggling between
229
+ // nondeterministic and deterministic implementations:
230
+ //
231
+ // void example_func() {
232
+ // // See Note [Enabling Deterministic Operations]
233
+ // if (at::globalContext().deterministicAlgorithms()) {
234
+ // example_func_deterministic();
235
+ // } else {
236
+ // example_func_nondeterministic();
237
+ // }
238
+ // }
239
+
240
+ bool deterministicAlgorithms() const;
241
+ bool deterministicAlgorithmsWarnOnly() const;
242
+ void setDeterministicAlgorithms(bool, bool);
243
+ bool deterministicFillUninitializedMemory() const;
244
+ void setDeterministicFillUninitializedMemory(bool);
245
+
246
+ // Note [Writing Nondeterministic Operations]
247
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
248
+ // Operations in PyTorch that act nondeterministically and do not have an
249
+ // alternate deterministic implementation should satisfy the following
250
+ // requirements:
251
+ //
252
+ // * Include this comment: "See Note [Writing Nondeterministic Operations]"
253
+ //
254
+ // * Include a comment explaining why the operation is nondeterministic.
255
+ //
256
+ // * Throw an error when `Context::deterministicAlgorithms()` is true. Most
257
+ // of the time, this should be accomplished by calling
258
+ // `at::globalContext().alertNotDeterministic()`. However, if the
259
+ // nondeterministic behavior is caused by the CuBLAS workspace
260
+ // configuration in CUDA >= 10.2,
261
+ // `at::globalContext().alertCuBLASConfigNotDeterministic()` should be
262
+ // called instead (in this case, a comment explaining why the operation is
263
+ // nondeterministic is not necessary). See below for details on these
264
+ // methods.
265
+ //
266
+ // * Have an entry in the list of nondeterministic PyTorch operations in the
267
+ // docstring of `use_deterministic_algorithms()` in torch/__init__.py
268
+ //
269
+ // * Have a test function in `test/test_torch.py` whose name begins with
270
+ // `test_nondeterministic_alert_`. Alternatively, if CuBLAS workspace
271
+ // configuration is the reason for nondeterminism, the operation should be
272
+ // included in the `test_cublas_config_nondeterministic_alert` test. Any new
273
+ // tests should ideally follow a pattern similar to the existing ones.
274
+ //
275
+ // `example_func()` below shows an example of the comments and error-throwing
276
+ // code for a nondeterministic operation:
277
+ //
278
+ // void example_func() {
279
+ // // See Note [Writing Nondeterministic Operations]
280
+ // // Nondeterministic because <reason>
281
+ // at::globalContext().alertNotDeterministic("example_func");
282
+ // ...
283
+ // }
284
+
285
+ // Throws an error if `Context::deterministicAlgorithms()` is true
286
+ static void alertNotDeterministic(c10::string_view const& caller);
287
+
288
+ // Throws an error if `Context::deterministicAlgorithms()` is true, CUDA
289
+ // >= 10.2, and CUBLAS_WORKSPACE_CONFIG is not set to either ":16:8" or
290
+ // ":4096:8". For more details:
291
+ // https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility
292
+ void alertCuBLASConfigNotDeterministic() const;
293
+
294
+ void setFloat32MatmulPrecision(const std::string& s);
295
+ bool allowTF32CuDNN() const;
296
+ void setAllowTF32CuDNN(bool);
297
+ bool allowTF32CuBLAS() const;
298
+ void setAllowTF32CuBLAS(bool);
299
+ Float32MatmulPrecision float32MatmulPrecision() const;
300
+ void setFloat32MatmulPrecision(Float32MatmulPrecision p);
301
+ bool allowFP16ReductionCuBLAS() const;
302
+ void setAllowFP16ReductionCuBLAS(bool);
303
+ bool allowBF16ReductionCuBLAS() const;
304
+ void setAllowBF16ReductionCuBLAS(bool);
305
+ at::QEngine qEngine() const;
306
+ void setQEngine(at::QEngine e);
307
+ static const std::vector<at::QEngine>& supportedQEngines();
308
+ static bool isXNNPACKAvailable();
309
+ void setCheckSparseTensorInvariants(bool e);
310
+ bool checkSparseTensorInvariants() const;
311
+ // This method is used to release the original weight after pre-packing.
312
+ // It should be called once before loading/running the model.
313
+ // NB: By default it is set to true for mobile builds.
314
+ void setReleaseWeightsWhenPrepacking(bool e);
315
+ bool releaseWeightsWhenPrepacking() const;
316
+
317
+ void setDisplayVmapFallbackWarnings(bool enabled);
318
+ bool areVmapFallbackWarningsEnabled() const;
319
+
320
+ void setDefaultMobileCPUAllocator();
321
+ void unsetDefaultMobileCPUAllocator();
322
+ bool allowFP16ReductionCPU() const;
323
+ void setAllowFP16ReductionCPU(bool);
324
+
325
+ private:
326
+ void initCUDAIfNeeded(c10::DeviceType p) {
327
+ if (p == c10::DeviceType::CUDA) {
328
+ lazyInitCUDA();
329
+ }
330
+ }
331
+ void initHIPIfNeeded(c10::DeviceType p) {
332
+ if (p == c10::DeviceType::HIP) {
333
+ lazyInitHIP();
334
+ }
335
+ }
336
+ void initXPUIfNeeded(c10::DeviceType p) {
337
+ if (p == c10::DeviceType::XPU) {
338
+ lazyInitXPU();
339
+ }
340
+ }
341
+ static bool checkCuBLASConfigDeterministic();
342
+ c10::once_flag thc_init;
343
+ c10::once_flag thh_init;
344
+ c10::once_flag thx_init;
345
+ c10::once_flag thp_init;
346
+ bool enabled_cudnn = true;
347
+ bool deterministic_cudnn = false;
348
+ bool _deterministic_algorithms = false;
349
+ bool _deterministic_algorithms_warn_only = false;
350
+ bool _deterministic_fill_uninitialized_memory = true;
351
+ bool enabled_flashSDP = true;
352
+ bool enabled_mem_efficientSDP = true;
353
+ bool enabled_mathSDP = true;
354
+ bool enabled_cudnnSDP = false;
355
+ #ifdef USE_ROCM
356
+ bool benchmark_cudnn = true;
357
+ #else
358
+ bool benchmark_cudnn = false;
359
+ #endif
360
+ Float32MatmulPrecision float32_matmul_precision =
361
+ c10::utils::check_env("TORCH_ALLOW_TF32_CUBLAS_OVERRIDE") == true
362
+ ? at::Float32MatmulPrecision::HIGH
363
+ : at::Float32MatmulPrecision::HIGHEST;
364
+ int benchmark_limit_cudnn = 10;
365
+ bool allow_tf32_cudnn = true;
366
+ bool allow_fp16_reduction_cublas = true;
367
+ bool allow_bf16_reduction_cublas = true;
368
+ bool enabled_mkldnn = true;
369
+ bool enabled_nnpack = true;
370
+ at::LinalgBackend linalg_preferred_backend =
371
+ c10::utils::check_env("TORCH_LINALG_PREFER_CUSOLVER") == true
372
+ ? at::LinalgBackend::Cusolver
373
+ : at::LinalgBackend::Default;
374
+ #ifdef C10_MOBILE
375
+ bool release_original_weights = true;
376
+ #else
377
+ bool release_original_weights = false;
378
+ #endif
379
+ bool display_vmap_fallback_warnings_ = false;
380
+ c10::optional<at::QEngine> quantized_engine = c10::nullopt;
381
+ bool enable_sparse_tensor_invariant_checks = false;
382
+ bool allow_fp16_reduction_cpu = false;
383
+
384
+ Allocator* prev_allocator_ptr_{nullptr};
385
+ };
386
+
387
+ TORCH_API Context& globalContext();
388
+
389
+ static inline void init() {
390
+ globalContext();
391
+ }
392
+
393
+ TORCH_API Allocator* getCPUAllocator();
394
+
395
+ static inline DeprecatedTypeProperties& getDeprecatedTypeProperties(
396
+ Backend p,
397
+ ScalarType s) {
398
+ return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
399
+ p, s);
400
+ }
401
+
402
+ static inline DeprecatedTypeProperties& CPU(ScalarType s) {
403
+ return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
404
+ Backend::CPU, s);
405
+ }
406
+
407
+ static inline DeprecatedTypeProperties& CUDA(ScalarType s) {
408
+ return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
409
+ Backend::CUDA, s);
410
+ }
411
+
412
+ static inline DeprecatedTypeProperties& HIP(ScalarType s) {
413
+ return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
414
+ Backend::HIP, s);
415
+ }
416
+
417
+ static inline DeprecatedTypeProperties& MPS(ScalarType s) {
418
+ return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
419
+ Backend::MPS, s);
420
+ }
421
+
422
+ static inline bool hasCUDA() {
423
+ return globalContext().hasCUDA();
424
+ }
425
+
426
+ static inline bool hasMTIA() {
427
+ return globalContext().hasMTIA();
428
+ }
429
+
430
+ static inline bool hasHIP() {
431
+ return globalContext().hasHIP();
432
+ }
433
+
434
+ static inline bool hasIPU() {
435
+ return globalContext().hasIPU();
436
+ }
437
+
438
+ static inline bool hasXLA() {
439
+ return globalContext().hasXLA();
440
+ }
441
+
442
+ static inline bool hasMPS() {
443
+ return globalContext().hasMPS();
444
+ }
445
+
446
+ static inline bool hasORT() {
447
+ return globalContext().hasORT();
448
+ }
449
+
450
+ static inline bool hasXPU() {
451
+ return globalContext().hasXPU();
452
+ }
453
+
454
+ // Despite its name, this function returns the number of *CUDA* GPUs.
455
+ static inline size_t getNumGPUs() {
456
+ // WARNING: DO NOT ADD LOGIC TO HANDLE OTHER DEVICE TYPES TO THIS
457
+ // FUNCTION. If you are interested in interrogating the number of
458
+ // devices for a specific device type, add that function to the
459
+ // relevant library (e.g., similar to at::cuda::device_count())
460
+ if (hasCUDA() && hasHIP()) {
461
+ throw std::runtime_error(
462
+ "Enabling both CUDA and HIP in ATen is not supported, as HIP masquerades "
463
+ "to be CUDA (e.g., when you say CUDA, on a HIP build of ATen, this actually "
464
+ "means HIP. Rebuild PyTorch with one or the other disabled.");
465
+ } else if (hasCUDA()) {
466
+ return detail::getCUDAHooks().getNumGPUs();
467
+ } else if (hasHIP()) {
468
+ return detail::getHIPHooks().getNumGPUs();
469
+ } else {
470
+ return 0;
471
+ }
472
+ }
473
+
474
+ static inline bool hasOpenMP() {
475
+ return globalContext().hasOpenMP();
476
+ }
477
+
478
+ static inline bool hasMKL() {
479
+ return globalContext().hasMKL();
480
+ }
481
+
482
+ static inline bool hasLAPACK() {
483
+ return globalContext().hasLAPACK();
484
+ }
485
+
486
+ static inline bool hasMAGMA() {
487
+ return globalContext().hasMAGMA();
488
+ }
489
+
490
+ static inline bool hasMKLDNN() {
491
+ return globalContext().hasMKLDNN();
492
+ }
493
+
494
+ static inline void manual_seed(uint64_t seed) {
495
+ auto gen = globalContext().defaultGenerator(c10::DeviceType::CPU);
496
+ {
497
+ // See Note [Acquire lock when using random generators]
498
+ std::lock_guard<std::mutex> lock(gen.mutex());
499
+ gen.set_current_seed(seed);
500
+ }
501
+ // NB: Sometimes we build with CUDA, but we don't have any GPUs
502
+ // available. In that case, we must not seed CUDA; it will fail!
503
+ const auto cuda_num_gpus = detail::getCUDAHooks().getNumGPUs();
504
+ if (hasCUDA() && cuda_num_gpus > 0) {
505
+ for (const auto i : c10::irange(cuda_num_gpus)) {
506
+ auto cuda_gen = globalContext().defaultGenerator(
507
+ Device(at::kCUDA, static_cast<c10::DeviceIndex>(i)));
508
+ {
509
+ // See Note [Acquire lock when using random generators]
510
+ std::lock_guard<std::mutex> lock(cuda_gen.mutex());
511
+ cuda_gen.set_current_seed(seed);
512
+ }
513
+ }
514
+ }
515
+
516
+ const auto xpu_num_gpus = detail::getXPUHooks().getNumGPUs();
517
+ if (hasXPU() && xpu_num_gpus) {
518
+ for (const auto i : c10::irange(xpu_num_gpus)) {
519
+ auto xpu_gen = globalContext().defaultGenerator(
520
+ Device(at::kXPU, static_cast<c10::DeviceIndex>(i)));
521
+ {
522
+ // See Note [Acquire lock when using random generators]
523
+ std::lock_guard<std::mutex> lock(xpu_gen.mutex());
524
+ xpu_gen.set_current_seed(seed);
525
+ }
526
+ }
527
+ }
528
+
529
+ if (hasMPS()) {
530
+ auto mps_gen = globalContext().defaultGenerator(c10::DeviceType::MPS);
531
+ // See Note [Acquire lock when using random generators]
532
+ std::lock_guard<std::mutex> lock(mps_gen.mutex());
533
+ mps_gen.set_current_seed(seed);
534
+ }
535
+ }
536
+
537
+ // When the global flag `allow_tf32` is set to true, cuBLAS handles are
538
+ // automatically configured to use math mode CUBLAS_TF32_TENSOR_OP_MATH.
539
+ // For some operators, such as addmv, TF32 offers no performance improvement
540
+ // but causes precision loss. To handle this case, this class implements
541
+ // a RAII guard that can be used to quickly disable TF32 within its scope.
542
+ //
543
+ // Usage:
544
+ // NoTF32Guard disable_tf32;
545
+ struct TORCH_API NoTF32Guard {
546
+ NoTF32Guard();
547
+ ~NoTF32Guard();
548
+ static bool should_disable_tf32();
549
+
550
+ private:
551
+ bool changed = false;
552
+ };
553
+
554
+ struct TORCH_API ROCmBackwardPassGuard {
555
+ ROCmBackwardPassGuard();
556
+ ~ROCmBackwardPassGuard();
557
+ static bool is_backward_pass();
558
+ };
559
+
560
+ } // namespace at
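A brief usage sketch of the Context API declared above, written as hypothetical caller code; the parameter names in comments are illustrative, and the exact effect of each switch depends on how PyTorch was built:

    #include <ATen/Context.h>

    void configure_for_debugging() {
      // Process-wide switches; see the notes in the header for their semantics.
      at::globalContext().setDeterministicAlgorithms(/*enabled=*/true, /*warn_only=*/false);
      at::globalContext().setSDPUseFlash(false);   // see Note [Disabling Fused SDP Kernels]
      at::globalContext().setBenchmarkCuDNN(false);

      at::manual_seed(0);  // seeds the CPU generator plus CUDA/XPU/MPS when present

      if (at::hasCUDA()) {
        bool cudnn_available = at::globalContext().hasCuDNN();
        (void)cudnn_available;
      }
    }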
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DLConvertor.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/Tensor.h>
5
+ #include <ATen/dlpack.h>
6
+
7
+ // This convertor will:
8
+ // 1) take a Tensor object and wrap it in a DLPack tensor
9
+ // 2) take a DLPack tensor and convert it to an ATen Tensor
10
+
11
+ namespace at {
12
+
13
+ TORCH_API ScalarType toScalarType(const DLDataType& dtype);
14
+ TORCH_API DLManagedTensor* toDLPack(const Tensor& src);
15
+ TORCH_API Tensor fromDLPack(DLManagedTensor* src);
16
+ C10_DEPRECATED_MESSAGE("Please migrate to a non-const variant")
17
+ inline Tensor fromDLPack(const DLManagedTensor* src) {
18
+ return fromDLPack(const_cast<DLManagedTensor*>(src));
19
+ }
20
+ TORCH_API Tensor
21
+ fromDLPack(DLManagedTensor* src, std::function<void(void*)> deleter);
22
+ TORCH_API DLDataType getDLDataType(const Tensor& t);
23
+ TORCH_API DLDevice getDLContext(const Tensor& tensor, const int64_t& device_id);
24
+
25
+ } // namespace at
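A round-trip sketch for the DLPack conversion functions declared above; it assumes ownership of the DLManagedTensor transfers to fromDLPack:

    #include <ATen/DLConvertor.h>

    void dlpack_roundtrip(const at::Tensor& src) {
      // Wrap the ATen tensor in a DLPack managed tensor...
      DLManagedTensor* managed = at::toDLPack(src);
      // ...and convert it back. The returned tensor is assumed to take over
      // calling the managed tensor's deleter, so it is not freed manually here.
      at::Tensor back = at::fromDLPack(managed);
      TORCH_INTERNAL_ASSERT(back.sizes() == src.sizes());
    }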
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Device.h ADDED
@@ -0,0 +1,2 @@
1
+ #pragma once
2
+ #include <c10/core/Device.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DeviceAccelerator.h ADDED
@@ -0,0 +1,27 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/DeviceType.h>
4
+ #include <c10/macros/Macros.h>
5
+
6
+ #include <ATen/detail/MTIAHooksInterface.h>
7
+ #include <optional>
8
+
9
+ // This file defines the top level Accelerator concept for PyTorch.
10
+ // A device is an accelerator per the definition here if:
11
+ // - It is mutually exclusive with all other accelerators
12
+ // - It performs asynchronous compute via a Stream/Event system
13
+ // - It provides a set of common APIs as defined by AcceleratorHooksInterface
14
+ //
15
+ // As of today, accelerator devices are (in no particular order):
16
+ // CUDA, MTIA, PrivateUse1
17
+ // We want to add once all the proper APIs are supported and tested:
18
+ // HIP, MPS, XPU
19
+
20
+ namespace at {
21
+
22
+ // Ensures that only one accelerator is available (at
23
+ // compile time if possible) and returns it.
24
+ // When checked is true, the returned optional always has a value.
25
+ TORCH_API std::optional<c10::DeviceType> getAccelerator(bool checked = false);
26
+
27
+ } // namespace at
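A short sketch of querying the accelerator via the declaration above (hypothetical caller code):

    #include <ATen/DeviceAccelerator.h>

    void report_accelerator() {
      // With checked=false the optional may be empty when no accelerator is
      // built in; with checked=true it is documented to always hold a value.
      if (auto acc = at::getAccelerator(/*checked=*/false)) {
        c10::DeviceType type = *acc;  // e.g. CUDA, MTIA, or PrivateUse1
        (void)type;
      }
    }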
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DeviceGuard.h ADDED
@@ -0,0 +1,41 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/IListRef.h>
4
+ #include <ATen/core/Tensor.h>
5
+ #include <c10/core/DeviceGuard.h>
6
+ #include <c10/core/ScalarType.h> // TensorList whyyyyy
7
+
8
+ namespace at {
9
+
10
+ // Are you here because you're wondering why DeviceGuard(tensor) no
11
+ // longer works? For code organization reasons, we have temporarily(?)
12
+ // removed this constructor from DeviceGuard. The new way to
13
+ // spell it is:
14
+ //
15
+ // OptionalDeviceGuard guard(device_of(tensor));
16
+
17
+ /// Return the Device of a Tensor, if the Tensor is defined.
18
+ inline c10::optional<Device> device_of(const Tensor& t) {
19
+ if (t.defined()) {
20
+ return c10::make_optional(t.device());
21
+ } else {
22
+ return c10::nullopt;
23
+ }
24
+ }
25
+
26
+ inline c10::optional<Device> device_of(const c10::optional<Tensor>& t) {
27
+ return t.has_value() ? device_of(t.value()) : c10::nullopt;
28
+ }
29
+
30
+ /// Return the Device of a TensorList, if the list is non-empty and
31
+ /// the first Tensor is defined. (This function implicitly assumes
32
+ /// that all tensors in the list have the same device.)
33
+ inline c10::optional<Device> device_of(ITensorListRef t) {
34
+ if (!t.empty()) {
35
+ return device_of(t.front());
36
+ } else {
37
+ return c10::nullopt;
38
+ }
39
+ }
40
+
41
+ } // namespace at
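A minimal sketch of the spelling suggested in the comment above, using c10::OptionalDeviceGuard from <c10/core/DeviceGuard.h> (already included by this header):

    #include <ATen/DeviceGuard.h>

    void run_on_tensors_device(const at::Tensor& t) {
      // Makes t's device current for the lifetime of `guard`; it is a no-op
      // when device_of(t) is nullopt (i.e. t is undefined).
      c10::OptionalDeviceGuard guard(at::device_of(t));
      // ... launch work that assumes t's device is the current device ...
    }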
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Dimname.h ADDED
@@ -0,0 +1 @@
1
+ #include <ATen/core/Dimname.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ExpandBase.h ADDED
@@ -0,0 +1,30 @@
+ #include <ATen/core/TensorBase.h>
+
+ // Broadcasting utilities for working with TensorBase
+ namespace at {
+ namespace internal {
+ TORCH_API TensorBase expand_slow_path(const TensorBase& self, IntArrayRef size);
+ } // namespace internal
+
+ inline c10::MaybeOwned<TensorBase> expand_size(
+     const TensorBase& self,
+     IntArrayRef size) {
+   if (size.equals(self.sizes())) {
+     return c10::MaybeOwned<TensorBase>::borrowed(self);
+   }
+   return c10::MaybeOwned<TensorBase>::owned(
+       at::internal::expand_slow_path(self, size));
+ }
+ c10::MaybeOwned<TensorBase> expand_size(TensorBase&& self, IntArrayRef size) =
+     delete;
+
+ inline c10::MaybeOwned<TensorBase> expand_inplace(
+     const TensorBase& tensor,
+     const TensorBase& to_expand) {
+   return expand_size(to_expand, tensor.sizes());
+ }
+ c10::MaybeOwned<TensorBase> expand_inplace(
+     const TensorBase& tensor,
+     TensorBase&& to_expand) = delete;
+
+ } // namespace at
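
A minimal sketch of expand_size() as declared above (not part of the uploaded header; assumes libtorch is linked). It borrows the input when the shape already matches and otherwise returns an owned, expanded view without copying data:

#include <ATen/ATen.h>
#include <ATen/ExpandBase.h>

int main() {
  at::Tensor a = at::ones({3});      // shape [3]
  at::Tensor b = at::zeros({2, 3});  // shape [2, 3]
  // Same shape -> borrowed; different shape -> an owned expanded view.
  c10::MaybeOwned<at::TensorBase> a_exp = at::expand_size(a, b.sizes());
  TORCH_CHECK(a_exp->sizes().equals(b.sizes()));
  return 0;
}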
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Formatting.h ADDED
@@ -0,0 +1 @@
+ #include <ATen/core/Formatting.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/FunctionalStorageImpl.h ADDED
@@ -0,0 +1,126 @@
+ #pragma once
+
+ #include <ATen/Tensor.h>
+
+ namespace at::functionalization {
+
+ // See Note [Functionalization Pass In Core]
+
+ // ViewMeta is a class used by the functionalization pass to navigate between
+ // a base tensor and a view tensor.
+ // For example, if I call `b = a.view1(...)`
+ // the functionalization pass will generate and store a ViewMeta on b that looks
+ // like:
+ //
+ // ViewMeta(
+ //   [<captures>](const Tensor& base, int64_t mutated_view_idx) {
+ //     return base.view1(...);
+ //   },
+ //   [<captures>](const at::Tensor& base, const at::Tensor& mutated_view,
+ //       int64_t mutated_view_idx) -> at::Tensor {
+ //     return at::functionalization::impl::view1_inverse(base, mutated_view,
+ //         ...);
+ //   }
+ //
+ // The forward_fn lambda describes how to replay view1 on a tensor.
+ //
+ // The reverse_fn lambda describes how, given a tensor that is already a view,
+ // to get the corresponding base tensor. See Note [Functionalization Pass:
+ // View Inverses] for details.
+ struct ViewMeta {
+   ViewMeta(
+       std::function<Tensor(const Tensor&, int64_t)> forward,
+       std::function<Tensor(const Tensor&, const Tensor&, int64_t)> reverse,
+       bool is_multi_output = false,
+       int64_t out_idx = 0)
+       : forward_fn(std::move(forward)),
+         reverse_fn(std::move(reverse)),
+         out_index(out_idx),
+         is_multi_output(is_multi_output) {}
+
+   std::function<Tensor(const Tensor&, int64_t)> forward_fn;
+   std::function<Tensor(const Tensor&, const Tensor&, int64_t)> reverse_fn;
+   // See Note [out_idx in ViewMeta]
+   int64_t out_index;
+
+   // Tells us if this is a multi-output view
+   bool is_multi_output;
+
+   // Returns a copy of the current ViewMeta, if out_idx matches the current
+   // out_index. Otherwise, returns a new ViewMeta with the same forward/reverse
+   // functions, but a new out index.
+   ViewMeta to_out_idx(int64_t out_idx);
+ };
+
+ // FunctionalStorageImpl is a subclass of StorageImpl used by the
+ // functionalization pass. It has no underlying data (similar to meta storage).
+ // It also knows how to reflect mutations to tensors in the absence of a valid
+ // data pointer.
+ //
+ // A storage represents the state shared by (potentially multiple) views of the
+ // same tensor. For example, in the following code:
+ //
+ //   b = a.view1(...)
+ //   c = b.view2(...)
+ //   b.add_(1)
+ //   --> storage.add_update(b, {view1_meta})
+ //
+ // The call to add_(1) will result in a call to alias.add_update(b,
+ // {view1_meta}), queueing up the mutation from b onto the alias. Later, suppose
+ // c is used in an expression (e.g. you try to print c, or pass it to an
+ // operator). Doing so will involve "syncing" c. First we apply any pending
+ // updates to the alias, and then we regenerate c by replaying its views off of
+ // the updated alias. E.g.:
+ //
+ //   print(str(c))
+ //   --> c.sync_()
+ //   --> alias.apply_updates() // after this, the alias will be updated to
+ //       reflect the mutation to b
+ struct TORCH_API FunctionalStorageImpl : public c10::StorageImpl {
+  public:
+   struct Update {
+     // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
+     const at::Tensor new_val;
+     // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
+     const std::vector<ViewMeta> view_metas;
+   };
+
+   explicit FunctionalStorageImpl(const Tensor& value);
+
+   void add_update(
+       const Tensor& updated_val,
+       const std::vector<ViewMeta>& view_metas);
+   bool apply_updates();
+   const Tensor& base() {
+     return base_;
+   }
+   size_t generation() const {
+     return generation_;
+   }
+   void freeze() {
+     frozen_ = true;
+   }
+
+   ~FunctionalStorageImpl() override = default;
+
+  private:
+   // NB: base_ should always point to a tensor BELOW the current
+   // functionalization layer. This is mainly to avoid reference cycles. e.g.
+   // given `b = a.view(...)`, both a.storage_ and b.storage_ are a
+   // FunctionalStorageImpl containing an Alias, which contains a Tensor
+   // `base_`. In this case (where a and b are FunctionalTensorWrapper's), base_
+   // should point not to a, but to a's unwrapped value, `a.value_`. See Note
+   // [Functionalization: Alias Removal] for a diagram that shows this
+   // visually.
+   at::Tensor base_;
+   std::vector<Update> updates_;
+   // generation_ gets incremented every time a mutation is queued onto the
+   // alias. It is used to determine if a given tensor is "up to date", or if it
+   // needs to be regenerated from the alias.
+   size_t generation_ = 0;
+   // If frozen, no more mutations are allowed on this storage. Once frozen, a
+   // storage cannot be unfrozen.
+   bool frozen_ = false;
+ };
+
+ } // namespace at::functionalization
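
A small eager-mode illustration of the aliasing contract described in the comment above, i.e. what the functionalization pass has to reproduce without real views (not part of the uploaded header; assumes libtorch is linked):

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::zeros({4});
  at::Tensor b = a.view({2, 2});  // view1: b aliases a's storage
  at::Tensor c = b.select(0, 0);  // view2: c aliases the same storage
  b.add_(1);                      // under functionalization this is queued as
                                  // add_update(b, {view1_meta}) on the alias
  // Using c forces a sync: pending updates are applied to the alias and c is
  // regenerated by replaying its ViewMetas, so the mutation is visible here.
  TORCH_CHECK(c.sum().item<float>() == 2.0f);
  return 0;
}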
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Functions.h ADDED
@@ -0,0 +1,1427 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Functions.h
+
+ #ifdef TORCH_ASSERT_NO_OPERATORS
+ #error This change adds a dependency on native_functions.yaml, \
+ meaning the file will need to be re-compiled every time an operator \
+ is changed or added. Consider if your change would be better placed in \
+ another file, or if a more specific header might achieve the same goal. \
+ See NOTE: [Tensor vs. TensorBase]
+ #endif
+
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+ #error This change adds a dependency on all pytorch operators, meaning the \
+ file will need to be re-compiled every time an operator is changed or added. \
+ Consider including a specific operator from <ATen/ops/{my_operator}.h> and \
+ see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+ #endif
+
+ // NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
+ //
+ // In ATen, certain generated header files include the definitions of
+ // every single operator in PyTorch. Unfortunately this means every
+ // time an operator signature is updated or changed in
+ // native_functions.yaml, you (and every other PyTorch developer) need
+ // to recompile every source file that includes any of these headers.
+ //
+ // To break up these header dependencies and improve incremental
+ // build times for all PyTorch developers, these headers are split
+ // into per-operator headers in the `ATen/ops` folder. This limits
+ // incremental builds to only changes to methods of `Tensor`, or files
+ // that use the specific operator being changed. With `at::sum` as an
+ // example, you should include
+ //
+ //   <ATen/ops/sum.h>               // instead of ATen/Functions.h
+ //   <ATen/ops/sum_native.h>        // instead of ATen/NativeFunctions.h
+ //   <ATen/ops/sum_ops.h>           // instead of ATen/Operators.h
+ //   <ATen/ops/sum_cpu_dispatch.h>  // instead of ATen/CPUFunctions.h
+ //
+ // However, even if you're careful to use this in your own code,
+ // `Functions.h` might be included indirectly through another header
+ // without you realising. To avoid this, you can add
+ //
+ //   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
+ //
+ // to the top of your source file. This way any time the non-specific
+ // headers are included, the compiler will error out.
+ //
+ // Also, be aware that `ops` are not available in all build
+ // configurations (namely fb-internal) so you must guard these
+ // includes with `#ifdef AT_PER_OPERATOR_HEADERS`. e.g.
+ //
+ //   #ifndef AT_PER_OPERATOR_HEADERS
+ //   #include <ATen/Functions.h>
+ //   #else
+ //   #include <ATen/ops/sum.h>
+ //   #endif
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <c10/core/SymInt.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/util/OptionalArrayRef.h>
+
+ #include <ATen/ops/from_blob.h>
+ #include <ATen/ops/tensor.h>
+
77
+ #include <ATen/ops/_adaptive_avg_pool2d.h>
78
+ #include <ATen/ops/_adaptive_avg_pool2d_backward.h>
79
+ #include <ATen/ops/_adaptive_avg_pool3d.h>
80
+ #include <ATen/ops/_adaptive_avg_pool3d_backward.h>
81
+ #include <ATen/ops/_add_batch_dim.h>
82
+ #include <ATen/ops/_add_relu.h>
83
+ #include <ATen/ops/_addmm_activation.h>
84
+ #include <ATen/ops/_aminmax.h>
85
+ #include <ATen/ops/_amp_foreach_non_finite_check_and_unscale.h>
86
+ #include <ATen/ops/_amp_update_scale.h>
87
+ #include <ATen/ops/_assert_async.h>
88
+ #include <ATen/ops/_assert_scalar.h>
89
+ #include <ATen/ops/_assert_tensor_metadata.h>
90
+ #include <ATen/ops/_autocast_to_full_precision.h>
91
+ #include <ATen/ops/_autocast_to_reduced_precision.h>
92
+ #include <ATen/ops/_backward.h>
93
+ #include <ATen/ops/_batch_norm_impl_index.h>
94
+ #include <ATen/ops/_batch_norm_impl_index_backward.h>
95
+ #include <ATen/ops/_cast_Byte.h>
96
+ #include <ATen/ops/_cast_Char.h>
97
+ #include <ATen/ops/_cast_Double.h>
98
+ #include <ATen/ops/_cast_Float.h>
99
+ #include <ATen/ops/_cast_Half.h>
100
+ #include <ATen/ops/_cast_Int.h>
101
+ #include <ATen/ops/_cast_Long.h>
102
+ #include <ATen/ops/_cast_Short.h>
103
+ #include <ATen/ops/_cdist_backward.h>
104
+ #include <ATen/ops/_cdist_forward.h>
105
+ #include <ATen/ops/_cholesky_solve_helper.h>
106
+ #include <ATen/ops/_choose_qparams_per_tensor.h>
107
+ #include <ATen/ops/_chunk_cat.h>
108
+ #include <ATen/ops/_coalesce.h>
109
+ #include <ATen/ops/_coalesced.h>
110
+ #include <ATen/ops/_compute_linear_combination.h>
111
+ #include <ATen/ops/_conj.h>
112
+ #include <ATen/ops/_conj_copy.h>
113
+ #include <ATen/ops/_conj_physical.h>
114
+ #include <ATen/ops/_conv_depthwise2d.h>
115
+ #include <ATen/ops/_convert_indices_from_coo_to_csr.h>
116
+ #include <ATen/ops/_convert_indices_from_csr_to_coo.h>
117
+ #include <ATen/ops/_convert_weight_to_int4pack.h>
118
+ #include <ATen/ops/_convolution.h>
119
+ #include <ATen/ops/_convolution_double_backward.h>
120
+ #include <ATen/ops/_convolution_mode.h>
121
+ #include <ATen/ops/_copy_from.h>
122
+ #include <ATen/ops/_copy_from_and_resize.h>
123
+ #include <ATen/ops/_cslt_compress.h>
124
+ #include <ATen/ops/_cslt_sparse_mm.h>
125
+ #include <ATen/ops/_cslt_sparse_mm_search.h>
126
+ #include <ATen/ops/_ctc_loss.h>
127
+ #include <ATen/ops/_ctc_loss_backward.h>
128
+ #include <ATen/ops/_cudnn_ctc_loss.h>
129
+ #include <ATen/ops/_cudnn_init_dropout_state.h>
130
+ #include <ATen/ops/_cudnn_rnn.h>
131
+ #include <ATen/ops/_cudnn_rnn_backward.h>
132
+ #include <ATen/ops/_cudnn_rnn_flatten_weight.h>
133
+ #include <ATen/ops/_cufft_clear_plan_cache.h>
134
+ #include <ATen/ops/_cufft_get_plan_cache_max_size.h>
135
+ #include <ATen/ops/_cufft_get_plan_cache_size.h>
136
+ #include <ATen/ops/_cufft_set_plan_cache_max_size.h>
137
+ #include <ATen/ops/_cummax_helper.h>
138
+ #include <ATen/ops/_cummin_helper.h>
139
+ #include <ATen/ops/_debug_has_internal_overlap.h>
140
+ #include <ATen/ops/_dimI.h>
141
+ #include <ATen/ops/_dimV.h>
142
+ #include <ATen/ops/_dim_arange.h>
143
+ #include <ATen/ops/_dirichlet_grad.h>
144
+ #include <ATen/ops/_efficient_attention_backward.h>
145
+ #include <ATen/ops/_efficient_attention_forward.h>
146
+ #include <ATen/ops/_efficientzerotensor.h>
147
+ #include <ATen/ops/_embedding_bag.h>
148
+ #include <ATen/ops/_embedding_bag_backward.h>
149
+ #include <ATen/ops/_embedding_bag_dense_backward.h>
150
+ #include <ATen/ops/_embedding_bag_forward_only.h>
151
+ #include <ATen/ops/_embedding_bag_per_sample_weights_backward.h>
152
+ #include <ATen/ops/_embedding_bag_sparse_backward.h>
153
+ #include <ATen/ops/_empty_affine_quantized.h>
154
+ #include <ATen/ops/_empty_per_channel_affine_quantized.h>
155
+ #include <ATen/ops/_euclidean_dist.h>
156
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine.h>
157
+ #include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward.h>
158
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine.h>
159
+ #include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward.h>
160
+ #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h>
161
+ #include <ATen/ops/_fft_c2c.h>
162
+ #include <ATen/ops/_fft_c2r.h>
163
+ #include <ATen/ops/_fft_r2c.h>
164
+ #include <ATen/ops/_fill_mem_eff_dropout_mask.h>
165
+ #include <ATen/ops/_flash_attention_backward.h>
166
+ #include <ATen/ops/_flash_attention_forward.h>
167
+ #include <ATen/ops/_foobar.h>
168
+ #include <ATen/ops/_foreach_abs.h>
169
+ #include <ATen/ops/_foreach_acos.h>
170
+ #include <ATen/ops/_foreach_add.h>
171
+ #include <ATen/ops/_foreach_addcdiv.h>
172
+ #include <ATen/ops/_foreach_addcmul.h>
173
+ #include <ATen/ops/_foreach_asin.h>
174
+ #include <ATen/ops/_foreach_atan.h>
175
+ #include <ATen/ops/_foreach_ceil.h>
176
+ #include <ATen/ops/_foreach_clamp_max.h>
177
+ #include <ATen/ops/_foreach_clamp_min.h>
178
+ #include <ATen/ops/_foreach_copy.h>
179
+ #include <ATen/ops/_foreach_cos.h>
180
+ #include <ATen/ops/_foreach_cosh.h>
181
+ #include <ATen/ops/_foreach_div.h>
182
+ #include <ATen/ops/_foreach_erf.h>
183
+ #include <ATen/ops/_foreach_erfc.h>
184
+ #include <ATen/ops/_foreach_exp.h>
185
+ #include <ATen/ops/_foreach_expm1.h>
186
+ #include <ATen/ops/_foreach_floor.h>
187
+ #include <ATen/ops/_foreach_frac.h>
188
+ #include <ATen/ops/_foreach_lerp.h>
189
+ #include <ATen/ops/_foreach_lgamma.h>
190
+ #include <ATen/ops/_foreach_log.h>
191
+ #include <ATen/ops/_foreach_log10.h>
192
+ #include <ATen/ops/_foreach_log1p.h>
193
+ #include <ATen/ops/_foreach_log2.h>
194
+ #include <ATen/ops/_foreach_maximum.h>
195
+ #include <ATen/ops/_foreach_minimum.h>
196
+ #include <ATen/ops/_foreach_mul.h>
197
+ #include <ATen/ops/_foreach_neg.h>
198
+ #include <ATen/ops/_foreach_norm.h>
199
+ #include <ATen/ops/_foreach_pow.h>
200
+ #include <ATen/ops/_foreach_reciprocal.h>
201
+ #include <ATen/ops/_foreach_round.h>
202
+ #include <ATen/ops/_foreach_sigmoid.h>
203
+ #include <ATen/ops/_foreach_sign.h>
204
+ #include <ATen/ops/_foreach_sin.h>
205
+ #include <ATen/ops/_foreach_sinh.h>
206
+ #include <ATen/ops/_foreach_sqrt.h>
207
+ #include <ATen/ops/_foreach_sub.h>
208
+ #include <ATen/ops/_foreach_tan.h>
209
+ #include <ATen/ops/_foreach_tanh.h>
210
+ #include <ATen/ops/_foreach_trunc.h>
211
+ #include <ATen/ops/_foreach_zero.h>
212
+ #include <ATen/ops/_functional_assert_async.h>
213
+ #include <ATen/ops/_functional_assert_scalar.h>
214
+ #include <ATen/ops/_functional_sym_constrain_range.h>
215
+ #include <ATen/ops/_functional_sym_constrain_range_for_size.h>
216
+ #include <ATen/ops/_fused_adam.h>
217
+ #include <ATen/ops/_fused_adamw.h>
218
+ #include <ATen/ops/_fused_dropout.h>
219
+ #include <ATen/ops/_fused_moving_avg_obs_fq_helper.h>
220
+ #include <ATen/ops/_fused_sdp_choice.h>
221
+ #include <ATen/ops/_fused_sgd.h>
222
+ #include <ATen/ops/_fw_primal.h>
223
+ #include <ATen/ops/_fw_primal_copy.h>
224
+ #include <ATen/ops/_gather_sparse_backward.h>
225
+ #include <ATen/ops/_grid_sampler_2d_cpu_fallback.h>
226
+ #include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward.h>
227
+ #include <ATen/ops/_has_compatible_shallow_copy_type.h>
228
+ #include <ATen/ops/_has_same_storage_numel.h>
229
+ #include <ATen/ops/_histogramdd_bin_edges.h>
230
+ #include <ATen/ops/_histogramdd_from_bin_cts.h>
231
+ #include <ATen/ops/_histogramdd_from_bin_tensors.h>
232
+ #include <ATen/ops/_index_put_impl.h>
233
+ #include <ATen/ops/_indices.h>
234
+ #include <ATen/ops/_indices_copy.h>
235
+ #include <ATen/ops/_int_mm.h>
236
+ #include <ATen/ops/_is_all_true.h>
237
+ #include <ATen/ops/_is_any_true.h>
238
+ #include <ATen/ops/_is_zerotensor.h>
239
+ #include <ATen/ops/_lazy_clone.h>
240
+ #include <ATen/ops/_linalg_check_errors.h>
241
+ #include <ATen/ops/_linalg_det.h>
242
+ #include <ATen/ops/_linalg_eigh.h>
243
+ #include <ATen/ops/_linalg_eigvals.h>
244
+ #include <ATen/ops/_linalg_slogdet.h>
245
+ #include <ATen/ops/_linalg_solve_ex.h>
246
+ #include <ATen/ops/_linalg_svd.h>
247
+ #include <ATen/ops/_local_scalar_dense.h>
248
+ #include <ATen/ops/_log_softmax.h>
249
+ #include <ATen/ops/_log_softmax_backward_data.h>
250
+ #include <ATen/ops/_logcumsumexp.h>
251
+ #include <ATen/ops/_lstm_mps.h>
252
+ #include <ATen/ops/_lu_with_info.h>
253
+ #include <ATen/ops/_make_dep_token.h>
254
+ #include <ATen/ops/_make_dual.h>
255
+ #include <ATen/ops/_make_dual_copy.h>
256
+ #include <ATen/ops/_make_per_channel_quantized_tensor.h>
257
+ #include <ATen/ops/_make_per_tensor_quantized_tensor.h>
258
+ #include <ATen/ops/_masked_scale.h>
259
+ #include <ATen/ops/_masked_softmax.h>
260
+ #include <ATen/ops/_masked_softmax_backward.h>
261
+ #include <ATen/ops/_mixed_dtypes_linear.h>
262
+ #include <ATen/ops/_mkldnn_reshape.h>
263
+ #include <ATen/ops/_mkldnn_transpose.h>
264
+ #include <ATen/ops/_mps_convolution.h>
265
+ #include <ATen/ops/_mps_convolution_transpose.h>
266
+ #include <ATen/ops/_native_batch_norm_legit.h>
267
+ #include <ATen/ops/_native_batch_norm_legit_no_training.h>
268
+ #include <ATen/ops/_native_multi_head_attention.h>
269
+ #include <ATen/ops/_neg_view.h>
270
+ #include <ATen/ops/_neg_view_copy.h>
271
+ #include <ATen/ops/_nested_from_padded.h>
272
+ #include <ATen/ops/_nested_from_padded_and_nested_example.h>
273
+ #include <ATen/ops/_nested_get_jagged_dummy.h>
274
+ #include <ATen/ops/_nested_get_lengths.h>
275
+ #include <ATen/ops/_nested_get_offsets.h>
276
+ #include <ATen/ops/_nested_get_ragged_idx.h>
277
+ #include <ATen/ops/_nested_get_values.h>
278
+ #include <ATen/ops/_nested_get_values_copy.h>
279
+ #include <ATen/ops/_nested_select_backward.h>
280
+ #include <ATen/ops/_nested_sum_backward.h>
281
+ #include <ATen/ops/_nested_tensor_from_mask.h>
282
+ #include <ATen/ops/_nested_tensor_from_mask_left_aligned.h>
283
+ #include <ATen/ops/_nested_tensor_from_tensor_list.h>
284
+ #include <ATen/ops/_nested_tensor_size.h>
285
+ #include <ATen/ops/_nested_tensor_softmax_with_shape.h>
286
+ #include <ATen/ops/_nested_tensor_storage_offsets.h>
287
+ #include <ATen/ops/_nested_tensor_strides.h>
288
+ #include <ATen/ops/_nested_view_from_buffer.h>
289
+ #include <ATen/ops/_nested_view_from_buffer_copy.h>
290
+ #include <ATen/ops/_nested_view_from_jagged.h>
291
+ #include <ATen/ops/_nested_view_from_jagged_copy.h>
292
+ #include <ATen/ops/_new_zeros_with_same_feature_meta.h>
293
+ #include <ATen/ops/_nnpack_available.h>
294
+ #include <ATen/ops/_nnpack_spatial_convolution.h>
295
+ #include <ATen/ops/_nnz.h>
296
+ #include <ATen/ops/_pack_padded_sequence.h>
297
+ #include <ATen/ops/_pack_padded_sequence_backward.h>
298
+ #include <ATen/ops/_pad_circular.h>
299
+ #include <ATen/ops/_pad_enum.h>
300
+ #include <ATen/ops/_pad_packed_sequence.h>
301
+ #include <ATen/ops/_pdist_backward.h>
302
+ #include <ATen/ops/_pdist_forward.h>
303
+ #include <ATen/ops/_pin_memory.h>
304
+ #include <ATen/ops/_prelu_kernel.h>
305
+ #include <ATen/ops/_prelu_kernel_backward.h>
306
+ #include <ATen/ops/_print.h>
307
+ #include <ATen/ops/_propagate_xla_data.h>
308
+ #include <ATen/ops/_remove_batch_dim.h>
309
+ #include <ATen/ops/_reshape_alias.h>
310
+ #include <ATen/ops/_reshape_alias_copy.h>
311
+ #include <ATen/ops/_reshape_copy.h>
312
+ #include <ATen/ops/_reshape_from_tensor.h>
313
+ #include <ATen/ops/_resize_output.h>
314
+ #include <ATen/ops/_rowwise_prune.h>
315
+ #include <ATen/ops/_sample_dirichlet.h>
316
+ #include <ATen/ops/_saturate_weight_to_fp16.h>
317
+ #include <ATen/ops/_scaled_dot_product_attention_math.h>
318
+ #include <ATen/ops/_scaled_dot_product_cudnn_attention.h>
319
+ #include <ATen/ops/_scaled_dot_product_efficient_attention.h>
320
+ #include <ATen/ops/_scaled_dot_product_efficient_attention_backward.h>
321
+ #include <ATen/ops/_scaled_dot_product_flash_attention.h>
322
+ #include <ATen/ops/_scaled_dot_product_flash_attention_backward.h>
323
+ #include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu.h>
324
+ #include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward.h>
325
+ #include <ATen/ops/_scaled_mm.h>
326
+ #include <ATen/ops/_segment_reduce_backward.h>
327
+ #include <ATen/ops/_shape_as_tensor.h>
328
+ #include <ATen/ops/_slow_conv2d_backward.h>
329
+ #include <ATen/ops/_slow_conv2d_forward.h>
330
+ #include <ATen/ops/_sobol_engine_draw.h>
331
+ #include <ATen/ops/_sobol_engine_ff.h>
332
+ #include <ATen/ops/_sobol_engine_initialize_state.h>
333
+ #include <ATen/ops/_sobol_engine_scramble.h>
334
+ #include <ATen/ops/_softmax.h>
335
+ #include <ATen/ops/_softmax_backward_data.h>
336
+ #include <ATen/ops/_sparse_addmm.h>
337
+ #include <ATen/ops/_sparse_broadcast_to.h>
338
+ #include <ATen/ops/_sparse_broadcast_to_copy.h>
339
+ #include <ATen/ops/_sparse_bsc_tensor_unsafe.h>
340
+ #include <ATen/ops/_sparse_bsr_tensor_unsafe.h>
341
+ #include <ATen/ops/_sparse_compressed_tensor_unsafe.h>
342
+ #include <ATen/ops/_sparse_coo_tensor_unsafe.h>
343
+ #include <ATen/ops/_sparse_coo_tensor_with_dims.h>
344
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
345
+ #include <ATen/ops/_sparse_csc_tensor_unsafe.h>
346
+ #include <ATen/ops/_sparse_csr_prod.h>
347
+ #include <ATen/ops/_sparse_csr_sum.h>
348
+ #include <ATen/ops/_sparse_csr_tensor_unsafe.h>
349
+ #include <ATen/ops/_sparse_log_softmax.h>
350
+ #include <ATen/ops/_sparse_log_softmax_backward_data.h>
351
+ #include <ATen/ops/_sparse_mask_projection.h>
352
+ #include <ATen/ops/_sparse_mm.h>
353
+ #include <ATen/ops/_sparse_mm_reduce_impl.h>
354
+ #include <ATen/ops/_sparse_mm_reduce_impl_backward.h>
355
+ #include <ATen/ops/_sparse_semi_structured_linear.h>
356
+ #include <ATen/ops/_sparse_softmax.h>
357
+ #include <ATen/ops/_sparse_softmax_backward_data.h>
358
+ #include <ATen/ops/_sparse_sparse_matmul.h>
359
+ #include <ATen/ops/_sparse_sum.h>
360
+ #include <ATen/ops/_sparse_sum_backward.h>
361
+ #include <ATen/ops/_spdiags.h>
362
+ #include <ATen/ops/_stack.h>
363
+ #include <ATen/ops/_standard_gamma.h>
364
+ #include <ATen/ops/_standard_gamma_grad.h>
365
+ #include <ATen/ops/_test_ambiguous_defaults.h>
366
+ #include <ATen/ops/_test_autograd_multiple_dispatch.h>
367
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view.h>
368
+ #include <ATen/ops/_test_autograd_multiple_dispatch_view_copy.h>
369
+ #include <ATen/ops/_test_check_tensor.h>
370
+ #include <ATen/ops/_test_functorch_fallback.h>
371
+ #include <ATen/ops/_test_optional_filled_intlist.h>
372
+ #include <ATen/ops/_test_optional_floatlist.h>
373
+ #include <ATen/ops/_test_optional_intlist.h>
374
+ #include <ATen/ops/_test_parallel_materialize.h>
375
+ #include <ATen/ops/_test_serialization_subcmul.h>
376
+ #include <ATen/ops/_test_string_default.h>
377
+ #include <ATen/ops/_test_warn_in_autograd.h>
378
+ #include <ATen/ops/_thnn_differentiable_gru_cell_backward.h>
379
+ #include <ATen/ops/_thnn_differentiable_lstm_cell_backward.h>
380
+ #include <ATen/ops/_thnn_fused_gru_cell.h>
381
+ #include <ATen/ops/_thnn_fused_gru_cell_backward.h>
382
+ #include <ATen/ops/_thnn_fused_lstm_cell.h>
383
+ #include <ATen/ops/_thnn_fused_lstm_cell_backward.h>
384
+ #include <ATen/ops/_thnn_fused_lstm_cell_backward_impl.h>
385
+ #include <ATen/ops/_to_copy.h>
386
+ #include <ATen/ops/_to_cpu.h>
387
+ #include <ATen/ops/_to_dense.h>
388
+ #include <ATen/ops/_to_sparse.h>
389
+ #include <ATen/ops/_to_sparse_bsc.h>
390
+ #include <ATen/ops/_to_sparse_bsr.h>
391
+ #include <ATen/ops/_to_sparse_csc.h>
392
+ #include <ATen/ops/_to_sparse_csr.h>
393
+ #include <ATen/ops/_to_sparse_semi_structured.h>
394
+ #include <ATen/ops/_transform_bias_rescale_qkv.h>
395
+ #include <ATen/ops/_transformer_encoder_layer_fwd.h>
396
+ #include <ATen/ops/_trilinear.h>
397
+ #include <ATen/ops/_triton_multi_head_attention.h>
398
+ #include <ATen/ops/_triton_scaled_dot_attention.h>
399
+ #include <ATen/ops/_unique.h>
400
+ #include <ATen/ops/_unique2.h>
401
+ #include <ATen/ops/_unpack_dual.h>
402
+ #include <ATen/ops/_unsafe_index.h>
403
+ #include <ATen/ops/_unsafe_index_put.h>
404
+ #include <ATen/ops/_unsafe_view.h>
405
+ #include <ATen/ops/_upsample_bicubic2d_aa.h>
406
+ #include <ATen/ops/_upsample_bicubic2d_aa_backward.h>
407
+ #include <ATen/ops/_upsample_bilinear2d_aa.h>
408
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward.h>
409
+ #include <ATen/ops/_upsample_nearest_exact1d.h>
410
+ #include <ATen/ops/_upsample_nearest_exact1d_backward.h>
411
+ #include <ATen/ops/_upsample_nearest_exact2d.h>
412
+ #include <ATen/ops/_upsample_nearest_exact2d_backward.h>
413
+ #include <ATen/ops/_upsample_nearest_exact3d.h>
414
+ #include <ATen/ops/_upsample_nearest_exact3d_backward.h>
415
+ #include <ATen/ops/_use_cudnn_ctc_loss.h>
416
+ #include <ATen/ops/_use_cudnn_rnn_flatten_weight.h>
417
+ #include <ATen/ops/_validate_compressed_sparse_indices.h>
418
+ #include <ATen/ops/_validate_sparse_bsc_tensor_args.h>
419
+ #include <ATen/ops/_validate_sparse_bsr_tensor_args.h>
420
+ #include <ATen/ops/_validate_sparse_compressed_tensor_args.h>
421
+ #include <ATen/ops/_validate_sparse_coo_tensor_args.h>
422
+ #include <ATen/ops/_validate_sparse_csc_tensor_args.h>
423
+ #include <ATen/ops/_validate_sparse_csr_tensor_args.h>
424
+ #include <ATen/ops/_values.h>
425
+ #include <ATen/ops/_values_copy.h>
426
+ #include <ATen/ops/_version.h>
427
+ #include <ATen/ops/_weight_int4pack_mm.h>
428
+ #include <ATen/ops/_weight_int8pack_mm.h>
429
+ #include <ATen/ops/_weight_norm.h>
430
+ #include <ATen/ops/_weight_norm_differentiable_backward.h>
431
+ #include <ATen/ops/_weight_norm_interface.h>
432
+ #include <ATen/ops/_weight_norm_interface_backward.h>
433
+ #include <ATen/ops/abs.h>
434
+ #include <ATen/ops/absolute.h>
435
+ #include <ATen/ops/acos.h>
436
+ #include <ATen/ops/acosh.h>
437
+ #include <ATen/ops/adaptive_avg_pool1d.h>
438
+ #include <ATen/ops/adaptive_avg_pool2d.h>
439
+ #include <ATen/ops/adaptive_avg_pool3d.h>
440
+ #include <ATen/ops/adaptive_avg_pool3d_backward.h>
441
+ #include <ATen/ops/adaptive_max_pool1d.h>
442
+ #include <ATen/ops/adaptive_max_pool2d.h>
443
+ #include <ATen/ops/adaptive_max_pool2d_backward.h>
444
+ #include <ATen/ops/adaptive_max_pool3d.h>
445
+ #include <ATen/ops/adaptive_max_pool3d_backward.h>
446
+ #include <ATen/ops/add.h>
447
+ #include <ATen/ops/addbmm.h>
448
+ #include <ATen/ops/addcdiv.h>
449
+ #include <ATen/ops/addcmul.h>
450
+ #include <ATen/ops/addmm.h>
451
+ #include <ATen/ops/addmv.h>
452
+ #include <ATen/ops/addr.h>
453
+ #include <ATen/ops/adjoint.h>
454
+ #include <ATen/ops/affine_grid_generator.h>
455
+ #include <ATen/ops/affine_grid_generator_backward.h>
456
+ #include <ATen/ops/alias.h>
457
+ #include <ATen/ops/alias_copy.h>
458
+ #include <ATen/ops/align_as.h>
459
+ #include <ATen/ops/align_tensors.h>
460
+ #include <ATen/ops/align_to.h>
461
+ #include <ATen/ops/all.h>
462
+ #include <ATen/ops/allclose.h>
463
+ #include <ATen/ops/alpha_dropout.h>
464
+ #include <ATen/ops/amax.h>
465
+ #include <ATen/ops/amin.h>
466
+ #include <ATen/ops/aminmax.h>
467
+ #include <ATen/ops/and.h>
468
+ #include <ATen/ops/angle.h>
469
+ #include <ATen/ops/any.h>
470
+ #include <ATen/ops/arange.h>
471
+ #include <ATen/ops/arccos.h>
472
+ #include <ATen/ops/arccosh.h>
473
+ #include <ATen/ops/arcsin.h>
474
+ #include <ATen/ops/arcsinh.h>
475
+ #include <ATen/ops/arctan.h>
476
+ #include <ATen/ops/arctan2.h>
477
+ #include <ATen/ops/arctanh.h>
478
+ #include <ATen/ops/argmax.h>
479
+ #include <ATen/ops/argmin.h>
480
+ #include <ATen/ops/argsort.h>
481
+ #include <ATen/ops/argwhere.h>
482
+ #include <ATen/ops/as_strided.h>
483
+ #include <ATen/ops/as_strided_copy.h>
484
+ #include <ATen/ops/as_strided_scatter.h>
485
+ #include <ATen/ops/asin.h>
486
+ #include <ATen/ops/asinh.h>
487
+ #include <ATen/ops/atan.h>
488
+ #include <ATen/ops/atan2.h>
489
+ #include <ATen/ops/atanh.h>
490
+ #include <ATen/ops/atleast_1d.h>
491
+ #include <ATen/ops/atleast_2d.h>
492
+ #include <ATen/ops/atleast_3d.h>
493
+ #include <ATen/ops/avg_pool1d.h>
494
+ #include <ATen/ops/avg_pool2d.h>
495
+ #include <ATen/ops/avg_pool2d_backward.h>
496
+ #include <ATen/ops/avg_pool3d.h>
497
+ #include <ATen/ops/avg_pool3d_backward.h>
498
+ #include <ATen/ops/baddbmm.h>
499
+ #include <ATen/ops/bartlett_window.h>
500
+ #include <ATen/ops/batch_norm.h>
501
+ #include <ATen/ops/batch_norm_backward_elemt.h>
502
+ #include <ATen/ops/batch_norm_backward_reduce.h>
503
+ #include <ATen/ops/batch_norm_elemt.h>
504
+ #include <ATen/ops/batch_norm_gather_stats.h>
505
+ #include <ATen/ops/batch_norm_gather_stats_with_counts.h>
506
+ #include <ATen/ops/batch_norm_stats.h>
507
+ #include <ATen/ops/batch_norm_update_stats.h>
508
+ #include <ATen/ops/bernoulli.h>
509
+ #include <ATen/ops/bilinear.h>
510
+ #include <ATen/ops/binary_cross_entropy.h>
511
+ #include <ATen/ops/binary_cross_entropy_backward.h>
512
+ #include <ATen/ops/binary_cross_entropy_with_logits.h>
513
+ #include <ATen/ops/bincount.h>
514
+ #include <ATen/ops/binomial.h>
515
+ #include <ATen/ops/bitwise_and.h>
516
+ #include <ATen/ops/bitwise_left_shift.h>
517
+ #include <ATen/ops/bitwise_not.h>
518
+ #include <ATen/ops/bitwise_or.h>
519
+ #include <ATen/ops/bitwise_right_shift.h>
520
+ #include <ATen/ops/bitwise_xor.h>
521
+ #include <ATen/ops/blackman_window.h>
522
+ #include <ATen/ops/block_diag.h>
523
+ #include <ATen/ops/bmm.h>
524
+ #include <ATen/ops/broadcast_tensors.h>
525
+ #include <ATen/ops/broadcast_to.h>
526
+ #include <ATen/ops/bucketize.h>
527
+ #include <ATen/ops/can_cast.h>
528
+ #include <ATen/ops/cartesian_prod.h>
529
+ #include <ATen/ops/cat.h>
530
+ #include <ATen/ops/cauchy.h>
531
+ #include <ATen/ops/ccol_indices.h>
532
+ #include <ATen/ops/ccol_indices_copy.h>
533
+ #include <ATen/ops/cdist.h>
534
+ #include <ATen/ops/ceil.h>
535
+ #include <ATen/ops/celu.h>
536
+ #include <ATen/ops/chain_matmul.h>
537
+ #include <ATen/ops/chalf.h>
538
+ #include <ATen/ops/channel_shuffle.h>
539
+ #include <ATen/ops/cholesky.h>
540
+ #include <ATen/ops/cholesky_inverse.h>
541
+ #include <ATen/ops/cholesky_solve.h>
542
+ #include <ATen/ops/choose_qparams_optimized.h>
543
+ #include <ATen/ops/chunk.h>
544
+ #include <ATen/ops/clamp.h>
545
+ #include <ATen/ops/clamp_max.h>
546
+ #include <ATen/ops/clamp_min.h>
547
+ #include <ATen/ops/clip.h>
548
+ #include <ATen/ops/clone.h>
549
+ #include <ATen/ops/coalesce.h>
550
+ #include <ATen/ops/col2im.h>
551
+ #include <ATen/ops/col_indices.h>
552
+ #include <ATen/ops/col_indices_copy.h>
553
+ #include <ATen/ops/column_stack.h>
554
+ #include <ATen/ops/combinations.h>
555
+ #include <ATen/ops/complex.h>
556
+ #include <ATen/ops/concat.h>
557
+ #include <ATen/ops/concatenate.h>
558
+ #include <ATen/ops/conj.h>
559
+ #include <ATen/ops/conj_physical.h>
560
+ #include <ATen/ops/constant_pad_nd.h>
561
+ #include <ATen/ops/contiguous.h>
562
+ #include <ATen/ops/conv1d.h>
563
+ #include <ATen/ops/conv2d.h>
564
+ #include <ATen/ops/conv3d.h>
565
+ #include <ATen/ops/conv_depthwise3d.h>
566
+ #include <ATen/ops/conv_tbc.h>
567
+ #include <ATen/ops/conv_tbc_backward.h>
568
+ #include <ATen/ops/conv_transpose1d.h>
569
+ #include <ATen/ops/conv_transpose2d.h>
570
+ #include <ATen/ops/conv_transpose3d.h>
571
+ #include <ATen/ops/convolution.h>
572
+ #include <ATen/ops/convolution_backward.h>
573
+ #include <ATen/ops/convolution_backward_overrideable.h>
574
+ #include <ATen/ops/convolution_overrideable.h>
575
+ #include <ATen/ops/copy.h>
576
+ #include <ATen/ops/copy_sparse_to_sparse.h>
577
+ #include <ATen/ops/copysign.h>
578
+ #include <ATen/ops/corrcoef.h>
579
+ #include <ATen/ops/cos.h>
580
+ #include <ATen/ops/cosh.h>
581
+ #include <ATen/ops/cosine_embedding_loss.h>
582
+ #include <ATen/ops/cosine_similarity.h>
583
+ #include <ATen/ops/count_nonzero.h>
584
+ #include <ATen/ops/cov.h>
585
+ #include <ATen/ops/cross.h>
586
+ #include <ATen/ops/cross_entropy_loss.h>
587
+ #include <ATen/ops/crow_indices.h>
588
+ #include <ATen/ops/crow_indices_copy.h>
589
+ #include <ATen/ops/ctc_loss.h>
590
+ #include <ATen/ops/cudnn_affine_grid_generator.h>
591
+ #include <ATen/ops/cudnn_affine_grid_generator_backward.h>
592
+ #include <ATen/ops/cudnn_batch_norm.h>
593
+ #include <ATen/ops/cudnn_batch_norm_backward.h>
594
+ #include <ATen/ops/cudnn_convolution.h>
595
+ #include <ATen/ops/cudnn_convolution_add_relu.h>
596
+ #include <ATen/ops/cudnn_convolution_relu.h>
597
+ #include <ATen/ops/cudnn_convolution_transpose.h>
598
+ #include <ATen/ops/cudnn_grid_sampler.h>
599
+ #include <ATen/ops/cudnn_grid_sampler_backward.h>
600
+ #include <ATen/ops/cudnn_is_acceptable.h>
601
+ #include <ATen/ops/cummax.h>
602
+ #include <ATen/ops/cummaxmin_backward.h>
603
+ #include <ATen/ops/cummin.h>
604
+ #include <ATen/ops/cumprod.h>
605
+ #include <ATen/ops/cumprod_backward.h>
606
+ #include <ATen/ops/cumsum.h>
607
+ #include <ATen/ops/cumulative_trapezoid.h>
608
+ #include <ATen/ops/data.h>
609
+ #include <ATen/ops/deg2rad.h>
610
+ #include <ATen/ops/dense_dim.h>
611
+ #include <ATen/ops/dequantize.h>
612
+ #include <ATen/ops/det.h>
613
+ #include <ATen/ops/detach.h>
614
+ #include <ATen/ops/detach_copy.h>
615
+ #include <ATen/ops/diag.h>
616
+ #include <ATen/ops/diag_embed.h>
617
+ #include <ATen/ops/diagflat.h>
618
+ #include <ATen/ops/diagonal.h>
619
+ #include <ATen/ops/diagonal_backward.h>
620
+ #include <ATen/ops/diagonal_copy.h>
621
+ #include <ATen/ops/diagonal_scatter.h>
622
+ #include <ATen/ops/diff.h>
623
+ #include <ATen/ops/digamma.h>
624
+ #include <ATen/ops/dist.h>
625
+ #include <ATen/ops/div.h>
626
+ #include <ATen/ops/divide.h>
627
+ #include <ATen/ops/dot.h>
628
+ #include <ATen/ops/dropout.h>
629
+ #include <ATen/ops/dsplit.h>
630
+ #include <ATen/ops/dstack.h>
631
+ #include <ATen/ops/einsum.h>
632
+ #include <ATen/ops/elu.h>
633
+ #include <ATen/ops/elu_backward.h>
634
+ #include <ATen/ops/embedding.h>
635
+ #include <ATen/ops/embedding_backward.h>
636
+ #include <ATen/ops/embedding_bag.h>
637
+ #include <ATen/ops/embedding_dense_backward.h>
638
+ #include <ATen/ops/embedding_renorm.h>
639
+ #include <ATen/ops/embedding_sparse_backward.h>
640
+ #include <ATen/ops/empty.h>
641
+ #include <ATen/ops/empty_like.h>
642
+ #include <ATen/ops/empty_permuted.h>
643
+ #include <ATen/ops/empty_quantized.h>
644
+ #include <ATen/ops/empty_strided.h>
645
+ #include <ATen/ops/eq.h>
646
+ #include <ATen/ops/equal.h>
647
+ #include <ATen/ops/erf.h>
648
+ #include <ATen/ops/erfc.h>
649
+ #include <ATen/ops/erfinv.h>
650
+ #include <ATen/ops/exp.h>
651
+ #include <ATen/ops/exp2.h>
652
+ #include <ATen/ops/expand.h>
653
+ #include <ATen/ops/expand_as.h>
654
+ #include <ATen/ops/expand_copy.h>
655
+ #include <ATen/ops/expm1.h>
656
+ #include <ATen/ops/exponential.h>
657
+ #include <ATen/ops/eye.h>
658
+ #include <ATen/ops/fake_quantize_per_channel_affine.h>
659
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask.h>
660
+ #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward.h>
661
+ #include <ATen/ops/fake_quantize_per_tensor_affine.h>
662
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask.h>
663
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward.h>
664
+ #include <ATen/ops/fbgemm_linear_fp16_weight.h>
665
+ #include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation.h>
666
+ #include <ATen/ops/fbgemm_linear_int8_weight.h>
667
+ #include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h>
668
+ #include <ATen/ops/fbgemm_linear_quantize_weight.h>
669
+ #include <ATen/ops/fbgemm_pack_gemm_matrix_fp16.h>
670
+ #include <ATen/ops/fbgemm_pack_quantized_matrix.h>
671
+ #include <ATen/ops/feature_alpha_dropout.h>
672
+ #include <ATen/ops/feature_dropout.h>
673
+ #include <ATen/ops/fft_fft.h>
674
+ #include <ATen/ops/fft_fft2.h>
675
+ #include <ATen/ops/fft_fftfreq.h>
676
+ #include <ATen/ops/fft_fftn.h>
677
+ #include <ATen/ops/fft_fftshift.h>
678
+ #include <ATen/ops/fft_hfft.h>
679
+ #include <ATen/ops/fft_hfft2.h>
680
+ #include <ATen/ops/fft_hfftn.h>
681
+ #include <ATen/ops/fft_ifft.h>
682
+ #include <ATen/ops/fft_ifft2.h>
683
+ #include <ATen/ops/fft_ifftn.h>
684
+ #include <ATen/ops/fft_ifftshift.h>
685
+ #include <ATen/ops/fft_ihfft.h>
686
+ #include <ATen/ops/fft_ihfft2.h>
687
+ #include <ATen/ops/fft_ihfftn.h>
688
+ #include <ATen/ops/fft_irfft.h>
689
+ #include <ATen/ops/fft_irfft2.h>
690
+ #include <ATen/ops/fft_irfftn.h>
691
+ #include <ATen/ops/fft_rfft.h>
692
+ #include <ATen/ops/fft_rfft2.h>
693
+ #include <ATen/ops/fft_rfftfreq.h>
694
+ #include <ATen/ops/fft_rfftn.h>
695
+ #include <ATen/ops/fill.h>
696
+ #include <ATen/ops/fill_diagonal.h>
697
+ #include <ATen/ops/fix.h>
698
+ #include <ATen/ops/flatten.h>
699
+ #include <ATen/ops/flatten_dense_tensors.h>
700
+ #include <ATen/ops/flip.h>
701
+ #include <ATen/ops/fliplr.h>
702
+ #include <ATen/ops/flipud.h>
703
+ #include <ATen/ops/float_power.h>
704
+ #include <ATen/ops/floor.h>
705
+ #include <ATen/ops/floor_divide.h>
706
+ #include <ATen/ops/fmax.h>
707
+ #include <ATen/ops/fmin.h>
708
+ #include <ATen/ops/fmod.h>
709
+ #include <ATen/ops/frac.h>
710
+ #include <ATen/ops/fractional_max_pool2d.h>
711
+ #include <ATen/ops/fractional_max_pool2d_backward.h>
712
+ #include <ATen/ops/fractional_max_pool3d.h>
713
+ #include <ATen/ops/fractional_max_pool3d_backward.h>
714
+ #include <ATen/ops/frexp.h>
715
+ #include <ATen/ops/frobenius_norm.h>
716
+ #include <ATen/ops/from_file.h>
717
+ #include <ATen/ops/full.h>
718
+ #include <ATen/ops/full_like.h>
719
+ #include <ATen/ops/fused_moving_avg_obs_fake_quant.h>
720
+ #include <ATen/ops/gather.h>
721
+ #include <ATen/ops/gather_backward.h>
722
+ #include <ATen/ops/gcd.h>
723
+ #include <ATen/ops/ge.h>
724
+ #include <ATen/ops/gelu.h>
725
+ #include <ATen/ops/gelu_backward.h>
726
+ #include <ATen/ops/geometric.h>
727
+ #include <ATen/ops/geqrf.h>
728
+ #include <ATen/ops/ger.h>
729
+ #include <ATen/ops/glu.h>
730
+ #include <ATen/ops/glu_backward.h>
731
+ #include <ATen/ops/glu_backward_jvp.h>
732
+ #include <ATen/ops/glu_jvp.h>
733
+ #include <ATen/ops/gradient.h>
734
+ #include <ATen/ops/greater.h>
735
+ #include <ATen/ops/greater_equal.h>
736
+ #include <ATen/ops/grid_sampler.h>
737
+ #include <ATen/ops/grid_sampler_2d.h>
738
+ #include <ATen/ops/grid_sampler_2d_backward.h>
739
+ #include <ATen/ops/grid_sampler_3d.h>
740
+ #include <ATen/ops/grid_sampler_3d_backward.h>
741
+ #include <ATen/ops/group_norm.h>
742
+ #include <ATen/ops/gru.h>
743
+ #include <ATen/ops/gru_cell.h>
744
+ #include <ATen/ops/gt.h>
745
+ #include <ATen/ops/hamming_window.h>
746
+ #include <ATen/ops/hann_window.h>
747
+ #include <ATen/ops/hardshrink.h>
748
+ #include <ATen/ops/hardshrink_backward.h>
749
+ #include <ATen/ops/hardsigmoid.h>
750
+ #include <ATen/ops/hardsigmoid_backward.h>
751
+ #include <ATen/ops/hardswish.h>
752
+ #include <ATen/ops/hardswish_backward.h>
753
+ #include <ATen/ops/hardtanh.h>
754
+ #include <ATen/ops/hardtanh_backward.h>
755
+ #include <ATen/ops/heaviside.h>
756
+ #include <ATen/ops/hinge_embedding_loss.h>
757
+ #include <ATen/ops/histc.h>
758
+ #include <ATen/ops/histogram.h>
759
+ #include <ATen/ops/histogramdd.h>
760
+ #include <ATen/ops/hsplit.h>
761
+ #include <ATen/ops/hspmm.h>
762
+ #include <ATen/ops/hstack.h>
763
+ #include <ATen/ops/huber_loss.h>
764
+ #include <ATen/ops/huber_loss_backward.h>
765
+ #include <ATen/ops/hypot.h>
766
+ #include <ATen/ops/i0.h>
767
+ #include <ATen/ops/igamma.h>
768
+ #include <ATen/ops/igammac.h>
769
+ #include <ATen/ops/im2col.h>
770
+ #include <ATen/ops/imag.h>
771
+ #include <ATen/ops/index.h>
772
+ #include <ATen/ops/index_add.h>
773
+ #include <ATen/ops/index_copy.h>
774
+ #include <ATen/ops/index_fill.h>
775
+ #include <ATen/ops/index_put.h>
776
+ #include <ATen/ops/index_reduce.h>
777
+ #include <ATen/ops/index_select.h>
778
+ #include <ATen/ops/index_select_backward.h>
779
+ #include <ATen/ops/indices.h>
780
+ #include <ATen/ops/indices_copy.h>
781
+ #include <ATen/ops/infinitely_differentiable_gelu_backward.h>
782
+ #include <ATen/ops/inner.h>
783
+ #include <ATen/ops/instance_norm.h>
784
+ #include <ATen/ops/int_repr.h>
785
+ #include <ATen/ops/inverse.h>
786
+ #include <ATen/ops/is_coalesced.h>
787
+ #include <ATen/ops/is_complex.h>
788
+ #include <ATen/ops/is_conj.h>
789
+ #include <ATen/ops/is_distributed.h>
790
+ #include <ATen/ops/is_floating_point.h>
791
+ #include <ATen/ops/is_inference.h>
792
+ #include <ATen/ops/is_leaf.h>
793
+ #include <ATen/ops/is_neg.h>
794
+ #include <ATen/ops/is_nonzero.h>
795
+ #include <ATen/ops/is_pinned.h>
796
+ #include <ATen/ops/is_same_size.h>
797
+ #include <ATen/ops/is_set_to.h>
798
+ #include <ATen/ops/is_signed.h>
799
+ #include <ATen/ops/is_vulkan_available.h>
800
+ #include <ATen/ops/isclose.h>
801
+ #include <ATen/ops/isfinite.h>
802
+ #include <ATen/ops/isin.h>
803
+ #include <ATen/ops/isinf.h>
804
+ #include <ATen/ops/isnan.h>
805
+ #include <ATen/ops/isneginf.h>
806
+ #include <ATen/ops/isposinf.h>
807
+ #include <ATen/ops/isreal.h>
808
+ #include <ATen/ops/istft.h>
809
+ #include <ATen/ops/item.h>
810
+ #include <ATen/ops/kaiser_window.h>
811
+ #include <ATen/ops/kl_div.h>
812
+ #include <ATen/ops/kron.h>
813
+ #include <ATen/ops/kthvalue.h>
814
+ #include <ATen/ops/l1_loss.h>
815
+ #include <ATen/ops/layer_norm.h>
816
+ #include <ATen/ops/lcm.h>
817
+ #include <ATen/ops/ldexp.h>
818
+ #include <ATen/ops/le.h>
819
+ #include <ATen/ops/leaky_relu.h>
820
+ #include <ATen/ops/leaky_relu_backward.h>
821
+ #include <ATen/ops/lerp.h>
822
+ #include <ATen/ops/less.h>
823
+ #include <ATen/ops/less_equal.h>
824
+ #include <ATen/ops/lgamma.h>
825
+ #include <ATen/ops/lift.h>
826
+ #include <ATen/ops/lift_fresh.h>
827
+ #include <ATen/ops/lift_fresh_copy.h>
828
+ #include <ATen/ops/linalg_cholesky.h>
829
+ #include <ATen/ops/linalg_cholesky_ex.h>
830
+ #include <ATen/ops/linalg_cond.h>
831
+ #include <ATen/ops/linalg_cross.h>
832
+ #include <ATen/ops/linalg_det.h>
833
+ #include <ATen/ops/linalg_diagonal.h>
834
+ #include <ATen/ops/linalg_eig.h>
835
+ #include <ATen/ops/linalg_eigh.h>
836
+ #include <ATen/ops/linalg_eigvals.h>
837
+ #include <ATen/ops/linalg_eigvalsh.h>
838
+ #include <ATen/ops/linalg_householder_product.h>
839
+ #include <ATen/ops/linalg_inv.h>
840
+ #include <ATen/ops/linalg_inv_ex.h>
841
+ #include <ATen/ops/linalg_ldl_factor.h>
842
+ #include <ATen/ops/linalg_ldl_factor_ex.h>
843
+ #include <ATen/ops/linalg_ldl_solve.h>
844
+ #include <ATen/ops/linalg_lstsq.h>
845
+ #include <ATen/ops/linalg_lu.h>
846
+ #include <ATen/ops/linalg_lu_factor.h>
847
+ #include <ATen/ops/linalg_lu_factor_ex.h>
848
+ #include <ATen/ops/linalg_lu_solve.h>
849
+ #include <ATen/ops/linalg_matmul.h>
850
+ #include <ATen/ops/linalg_matrix_exp.h>
851
+ #include <ATen/ops/linalg_matrix_norm.h>
852
+ #include <ATen/ops/linalg_matrix_power.h>
853
+ #include <ATen/ops/linalg_matrix_rank.h>
854
+ #include <ATen/ops/linalg_multi_dot.h>
855
+ #include <ATen/ops/linalg_norm.h>
856
+ #include <ATen/ops/linalg_pinv.h>
857
+ #include <ATen/ops/linalg_qr.h>
858
+ #include <ATen/ops/linalg_slogdet.h>
859
+ #include <ATen/ops/linalg_solve.h>
860
+ #include <ATen/ops/linalg_solve_ex.h>
861
+ #include <ATen/ops/linalg_solve_triangular.h>
862
+ #include <ATen/ops/linalg_svd.h>
863
+ #include <ATen/ops/linalg_svdvals.h>
864
+ #include <ATen/ops/linalg_tensorinv.h>
865
+ #include <ATen/ops/linalg_tensorsolve.h>
866
+ #include <ATen/ops/linalg_vander.h>
867
+ #include <ATen/ops/linalg_vecdot.h>
868
+ #include <ATen/ops/linalg_vector_norm.h>
869
+ #include <ATen/ops/linear.h>
870
+ #include <ATen/ops/linear_backward.h>
871
+ #include <ATen/ops/linspace.h>
872
+ #include <ATen/ops/log.h>
873
+ #include <ATen/ops/log10.h>
874
+ #include <ATen/ops/log1p.h>
875
+ #include <ATen/ops/log2.h>
876
+ #include <ATen/ops/log_normal.h>
877
+ #include <ATen/ops/log_sigmoid.h>
878
+ #include <ATen/ops/log_sigmoid_backward.h>
879
+ #include <ATen/ops/log_sigmoid_forward.h>
880
+ #include <ATen/ops/log_softmax.h>
881
+ #include <ATen/ops/logaddexp.h>
882
+ #include <ATen/ops/logaddexp2.h>
883
+ #include <ATen/ops/logcumsumexp.h>
884
+ #include <ATen/ops/logdet.h>
885
+ #include <ATen/ops/logical_and.h>
886
+ #include <ATen/ops/logical_not.h>
887
+ #include <ATen/ops/logical_or.h>
888
+ #include <ATen/ops/logical_xor.h>
889
+ #include <ATen/ops/logit.h>
890
+ #include <ATen/ops/logit_backward.h>
891
+ #include <ATen/ops/logspace.h>
892
+ #include <ATen/ops/logsumexp.h>
893
+ #include <ATen/ops/lshift.h>
894
+ #include <ATen/ops/lstm.h>
895
+ #include <ATen/ops/lstm_cell.h>
896
+ #include <ATen/ops/lstm_mps_backward.h>
897
+ #include <ATen/ops/lt.h>
898
+ #include <ATen/ops/lu_solve.h>
899
+ #include <ATen/ops/lu_unpack.h>
900
+ #include <ATen/ops/mH.h>
901
+ #include <ATen/ops/mT.h>
902
+ #include <ATen/ops/margin_ranking_loss.h>
903
+ #include <ATen/ops/masked_fill.h>
904
+ #include <ATen/ops/masked_scatter.h>
905
+ #include <ATen/ops/masked_scatter_backward.h>
906
+ #include <ATen/ops/masked_select.h>
907
+ #include <ATen/ops/masked_select_backward.h>
908
+ #include <ATen/ops/matmul.h>
909
+ #include <ATen/ops/matmul_backward.h>
910
+ #include <ATen/ops/matrix_H.h>
911
+ #include <ATen/ops/matrix_exp.h>
912
+ #include <ATen/ops/matrix_exp_backward.h>
913
+ #include <ATen/ops/matrix_power.h>
914
+ #include <ATen/ops/max.h>
915
+ #include <ATen/ops/max_pool1d.h>
916
+ #include <ATen/ops/max_pool1d_with_indices.h>
917
+ #include <ATen/ops/max_pool2d.h>
918
+ #include <ATen/ops/max_pool2d_backward.h>
919
+ #include <ATen/ops/max_pool2d_with_indices.h>
920
+ #include <ATen/ops/max_pool2d_with_indices_backward.h>
921
+ #include <ATen/ops/max_pool3d.h>
922
+ #include <ATen/ops/max_pool3d_with_indices.h>
923
+ #include <ATen/ops/max_pool3d_with_indices_backward.h>
924
+ #include <ATen/ops/max_unpool2d.h>
925
+ #include <ATen/ops/max_unpool3d.h>
926
+ #include <ATen/ops/maximum.h>
927
+ #include <ATen/ops/mean.h>
928
+ #include <ATen/ops/median.h>
929
+ #include <ATen/ops/meshgrid.h>
930
+ #include <ATen/ops/min.h>
931
+ #include <ATen/ops/minimum.h>
932
+ #include <ATen/ops/miopen_batch_norm.h>
933
+ #include <ATen/ops/miopen_batch_norm_backward.h>
934
+ #include <ATen/ops/miopen_convolution.h>
935
+ #include <ATen/ops/miopen_convolution_add_relu.h>
936
+ #include <ATen/ops/miopen_convolution_relu.h>
937
+ #include <ATen/ops/miopen_convolution_transpose.h>
938
+ #include <ATen/ops/miopen_depthwise_convolution.h>
939
+ #include <ATen/ops/miopen_rnn.h>
940
+ #include <ATen/ops/miopen_rnn_backward.h>
941
+ #include <ATen/ops/mish.h>
942
+ #include <ATen/ops/mish_backward.h>
943
+ #include <ATen/ops/mkldnn_adaptive_avg_pool2d.h>
944
+ #include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h>
945
+ #include <ATen/ops/mkldnn_convolution.h>
946
+ #include <ATen/ops/mkldnn_linear.h>
947
+ #include <ATen/ops/mkldnn_linear_backward.h>
948
+ #include <ATen/ops/mkldnn_linear_backward_input.h>
949
+ #include <ATen/ops/mkldnn_linear_backward_weights.h>
950
+ #include <ATen/ops/mkldnn_max_pool2d.h>
951
+ #include <ATen/ops/mkldnn_max_pool2d_backward.h>
952
+ #include <ATen/ops/mkldnn_max_pool3d.h>
953
+ #include <ATen/ops/mkldnn_max_pool3d_backward.h>
954
+ #include <ATen/ops/mkldnn_reorder_conv2d_weight.h>
955
+ #include <ATen/ops/mkldnn_reorder_conv3d_weight.h>
956
+ #include <ATen/ops/mkldnn_rnn_layer.h>
957
+ #include <ATen/ops/mkldnn_rnn_layer_backward.h>
958
+ #include <ATen/ops/mm.h>
959
+ #include <ATen/ops/mode.h>
960
+ #include <ATen/ops/moveaxis.h>
961
+ #include <ATen/ops/movedim.h>
962
+ #include <ATen/ops/mps_convolution_backward.h>
963
+ #include <ATen/ops/mps_convolution_transpose_backward.h>
964
+ #include <ATen/ops/mse_loss.h>
965
+ #include <ATen/ops/mse_loss_backward.h>
966
+ #include <ATen/ops/msort.h>
967
+ #include <ATen/ops/mul.h>
968
+ #include <ATen/ops/multi_margin_loss.h>
969
+ #include <ATen/ops/multi_margin_loss_backward.h>
970
+ #include <ATen/ops/multilabel_margin_loss.h>
971
+ #include <ATen/ops/multilabel_margin_loss_backward.h>
972
+ #include <ATen/ops/multilabel_margin_loss_forward.h>
973
+ #include <ATen/ops/multinomial.h>
974
+ #include <ATen/ops/multiply.h>
975
+ #include <ATen/ops/mv.h>
976
+ #include <ATen/ops/mvlgamma.h>
977
+ #include <ATen/ops/nan_to_num.h>
978
+ #include <ATen/ops/nanmean.h>
979
+ #include <ATen/ops/nanmedian.h>
980
+ #include <ATen/ops/nanquantile.h>
981
+ #include <ATen/ops/nansum.h>
982
+ #include <ATen/ops/narrow.h>
983
+ #include <ATen/ops/narrow_copy.h>
984
+ #include <ATen/ops/native_batch_norm.h>
985
+ #include <ATen/ops/native_batch_norm_backward.h>
986
+ #include <ATen/ops/native_channel_shuffle.h>
987
+ #include <ATen/ops/native_dropout.h>
988
+ #include <ATen/ops/native_dropout_backward.h>
989
+ #include <ATen/ops/native_group_norm.h>
990
+ #include <ATen/ops/native_group_norm_backward.h>
991
+ #include <ATen/ops/native_layer_norm.h>
992
+ #include <ATen/ops/native_layer_norm_backward.h>
993
+ #include <ATen/ops/native_norm.h>
994
+ #include <ATen/ops/ne.h>
995
+ #include <ATen/ops/neg.h>
996
+ #include <ATen/ops/negative.h>
997
+ #include <ATen/ops/nested_to_padded_tensor.h>
998
+ #include <ATen/ops/new_empty.h>
999
+ #include <ATen/ops/new_empty_strided.h>
1000
+ #include <ATen/ops/new_full.h>
1001
+ #include <ATen/ops/new_ones.h>
1002
+ #include <ATen/ops/new_zeros.h>
1003
+ #include <ATen/ops/nextafter.h>
1004
+ #include <ATen/ops/nll_loss.h>
1005
+ #include <ATen/ops/nll_loss2d.h>
1006
+ #include <ATen/ops/nll_loss2d_backward.h>
1007
+ #include <ATen/ops/nll_loss2d_forward.h>
1008
+ #include <ATen/ops/nll_loss_backward.h>
1009
+ #include <ATen/ops/nll_loss_forward.h>
1010
+ #include <ATen/ops/nll_loss_nd.h>
1011
+ #include <ATen/ops/nonzero.h>
1012
+ #include <ATen/ops/nonzero_numpy.h>
1013
+ #include <ATen/ops/nonzero_static.h>
1014
+ #include <ATen/ops/norm.h>
1015
+ #include <ATen/ops/norm_except_dim.h>
1016
+ #include <ATen/ops/normal.h>
1017
+ #include <ATen/ops/not_equal.h>
1018
+ #include <ATen/ops/nuclear_norm.h>
1019
+ #include <ATen/ops/numpy_T.h>
1020
+ #include <ATen/ops/one_hot.h>
1021
+ #include <ATen/ops/ones.h>
1022
+ #include <ATen/ops/ones_like.h>
1023
+ #include <ATen/ops/or.h>
1024
+ #include <ATen/ops/orgqr.h>
1025
+ #include <ATen/ops/ormqr.h>
1026
+ #include <ATen/ops/outer.h>
1027
+ #include <ATen/ops/output_nr.h>
1028
+ #include <ATen/ops/pad.h>
1029
+ #include <ATen/ops/pad_sequence.h>
1030
+ #include <ATen/ops/pairwise_distance.h>
1031
+ #include <ATen/ops/pdist.h>
1032
+ #include <ATen/ops/permute.h>
1033
+ #include <ATen/ops/permute_copy.h>
1034
+ #include <ATen/ops/pin_memory.h>
1035
+ #include <ATen/ops/pinverse.h>
1036
+ #include <ATen/ops/pixel_shuffle.h>
1037
+ #include <ATen/ops/pixel_unshuffle.h>
1038
+ #include <ATen/ops/poisson.h>
1039
+ #include <ATen/ops/poisson_nll_loss.h>
1040
+ #include <ATen/ops/polar.h>
1041
+ #include <ATen/ops/polygamma.h>
1042
+ #include <ATen/ops/positive.h>
1043
+ #include <ATen/ops/pow.h>
1044
+ #include <ATen/ops/prelu.h>
1045
+ #include <ATen/ops/prod.h>
1046
+ #include <ATen/ops/promote_types.h>
1047
+ #include <ATen/ops/put.h>
1048
+ #include <ATen/ops/q_per_channel_axis.h>
1049
+ #include <ATen/ops/q_per_channel_scales.h>
1050
+ #include <ATen/ops/q_per_channel_zero_points.h>
1051
+ #include <ATen/ops/q_scale.h>
1052
+ #include <ATen/ops/q_zero_point.h>
1053
+ #include <ATen/ops/qr.h>
1054
+ #include <ATen/ops/qscheme.h>
1055
+ #include <ATen/ops/quantile.h>
1056
+ #include <ATen/ops/quantize_per_channel.h>
1057
+ #include <ATen/ops/quantize_per_tensor.h>
1058
+ #include <ATen/ops/quantize_per_tensor_dynamic.h>
1059
+ #include <ATen/ops/quantized_batch_norm.h>
1060
+ #include <ATen/ops/quantized_gru_cell.h>
1061
+ #include <ATen/ops/quantized_lstm_cell.h>
1062
+ #include <ATen/ops/quantized_max_pool1d.h>
1063
+ #include <ATen/ops/quantized_max_pool2d.h>
1064
+ #include <ATen/ops/quantized_max_pool3d.h>
1065
+ #include <ATen/ops/quantized_rnn_relu_cell.h>
1066
+ #include <ATen/ops/quantized_rnn_tanh_cell.h>
1067
+ #include <ATen/ops/rad2deg.h>
1068
+ #include <ATen/ops/rand.h>
1069
+ #include <ATen/ops/rand_like.h>
1070
+ #include <ATen/ops/randint.h>
1071
+ #include <ATen/ops/randint_like.h>
1072
+ #include <ATen/ops/randn.h>
1073
+ #include <ATen/ops/randn_like.h>
1074
+ #include <ATen/ops/random.h>
1075
+ #include <ATen/ops/randperm.h>
1076
+ #include <ATen/ops/range.h>
1077
+ #include <ATen/ops/ravel.h>
1078
+ #include <ATen/ops/real.h>
1079
+ #include <ATen/ops/reciprocal.h>
1080
+ #include <ATen/ops/record_stream.h>
1081
+ #include <ATen/ops/refine_names.h>
1082
+ #include <ATen/ops/reflection_pad1d.h>
1083
+ #include <ATen/ops/reflection_pad1d_backward.h>
1084
+ #include <ATen/ops/reflection_pad2d.h>
1085
+ #include <ATen/ops/reflection_pad2d_backward.h>
1086
+ #include <ATen/ops/reflection_pad3d.h>
1087
+ #include <ATen/ops/reflection_pad3d_backward.h>
1088
+ #include <ATen/ops/relu.h>
1089
+ #include <ATen/ops/relu6.h>
1090
+ #include <ATen/ops/remainder.h>
1091
+ #include <ATen/ops/rename.h>
1092
+ #include <ATen/ops/renorm.h>
1093
+ #include <ATen/ops/repeat.h>
1094
+ #include <ATen/ops/repeat_interleave.h>
1095
+ #include <ATen/ops/replication_pad1d.h>
1096
+ #include <ATen/ops/replication_pad1d_backward.h>
1097
+ #include <ATen/ops/replication_pad2d.h>
1098
+ #include <ATen/ops/replication_pad2d_backward.h>
1099
+ #include <ATen/ops/replication_pad3d.h>
1100
+ #include <ATen/ops/replication_pad3d_backward.h>
1101
+ #include <ATen/ops/requires_grad.h>
1102
+ #include <ATen/ops/reshape.h>
1103
+ #include <ATen/ops/reshape_as.h>
1104
+ #include <ATen/ops/resize.h>
1105
+ #include <ATen/ops/resize_as.h>
1106
+ #include <ATen/ops/resize_as_sparse.h>
1107
+ #include <ATen/ops/resolve_conj.h>
1108
+ #include <ATen/ops/resolve_neg.h>
1109
+ #include <ATen/ops/result_type.h>
1110
+ #include <ATen/ops/retain_grad.h>
1111
+ #include <ATen/ops/retains_grad.h>
1112
+ #include <ATen/ops/rnn_relu.h>
1113
+ #include <ATen/ops/rnn_relu_cell.h>
1114
+ #include <ATen/ops/rnn_tanh.h>
1115
+ #include <ATen/ops/rnn_tanh_cell.h>
1116
+ #include <ATen/ops/roll.h>
1117
+ #include <ATen/ops/rot90.h>
1118
+ #include <ATen/ops/round.h>
1119
+ #include <ATen/ops/row_indices.h>
1120
+ #include <ATen/ops/row_indices_copy.h>
1121
+ #include <ATen/ops/row_stack.h>
1122
+ #include <ATen/ops/rrelu.h>
1123
+ #include <ATen/ops/rrelu_with_noise.h>
1124
+ #include <ATen/ops/rrelu_with_noise_backward.h>
1125
+ #include <ATen/ops/rshift.h>
1126
+ #include <ATen/ops/rsqrt.h>
1127
+ #include <ATen/ops/rsub.h>
1128
+ #include <ATen/ops/scalar_tensor.h>
1129
+ #include <ATen/ops/scaled_dot_product_attention.h>
1130
+ #include <ATen/ops/scatter.h>
1131
+ #include <ATen/ops/scatter_add.h>
1132
+ #include <ATen/ops/scatter_reduce.h>
1133
+ #include <ATen/ops/searchsorted.h>
1134
+ #include <ATen/ops/segment_reduce.h>
1135
+ #include <ATen/ops/select.h>
1136
+ #include <ATen/ops/select_backward.h>
1137
+ #include <ATen/ops/select_copy.h>
1138
+ #include <ATen/ops/select_scatter.h>
1139
+ #include <ATen/ops/selu.h>
1140
+ #include <ATen/ops/set.h>
1141
+ #include <ATen/ops/set_data.h>
1142
+ #include <ATen/ops/sgn.h>
1143
+ #include <ATen/ops/sigmoid.h>
1144
+ #include <ATen/ops/sigmoid_backward.h>
1145
+ #include <ATen/ops/sign.h>
1146
+ #include <ATen/ops/signbit.h>
1147
+ #include <ATen/ops/silu.h>
1148
+ #include <ATen/ops/silu_backward.h>
1149
+ #include <ATen/ops/sin.h>
1150
+ #include <ATen/ops/sinc.h>
1151
+ #include <ATen/ops/sinh.h>
1152
+ #include <ATen/ops/size.h>
1153
+ #include <ATen/ops/slice.h>
1154
+ #include <ATen/ops/slice_backward.h>
1155
+ #include <ATen/ops/slice_copy.h>
1156
+ #include <ATen/ops/slice_inverse.h>
1157
+ #include <ATen/ops/slice_scatter.h>
1158
+ #include <ATen/ops/slogdet.h>
1159
+ #include <ATen/ops/slow_conv3d.h>
1160
+ #include <ATen/ops/slow_conv3d_forward.h>
1161
+ #include <ATen/ops/slow_conv_dilated2d.h>
1162
+ #include <ATen/ops/slow_conv_dilated3d.h>
1163
+ #include <ATen/ops/slow_conv_transpose2d.h>
1164
+ #include <ATen/ops/slow_conv_transpose3d.h>
1165
+ #include <ATen/ops/smm.h>
1166
+ #include <ATen/ops/smooth_l1_loss.h>
1167
+ #include <ATen/ops/smooth_l1_loss_backward.h>
1168
+ #include <ATen/ops/soft_margin_loss.h>
1169
+ #include <ATen/ops/soft_margin_loss_backward.h>
1170
+ #include <ATen/ops/softmax.h>
1171
+ #include <ATen/ops/softplus.h>
1172
+ #include <ATen/ops/softplus_backward.h>
1173
+ #include <ATen/ops/softshrink.h>
1174
+ #include <ATen/ops/softshrink_backward.h>
1175
+ #include <ATen/ops/sort.h>
1176
+ #include <ATen/ops/sparse_bsc_tensor.h>
1177
+ #include <ATen/ops/sparse_bsr_tensor.h>
1178
+ #include <ATen/ops/sparse_compressed_tensor.h>
1179
+ #include <ATen/ops/sparse_coo_tensor.h>
1180
+ #include <ATen/ops/sparse_csc_tensor.h>
1181
+ #include <ATen/ops/sparse_csr_tensor.h>
1182
+ #include <ATen/ops/sparse_dim.h>
1183
+ #include <ATen/ops/sparse_mask.h>
1184
+ #include <ATen/ops/sparse_resize.h>
1185
+ #include <ATen/ops/sparse_resize_and_clear.h>
1186
+ #include <ATen/ops/sparse_sampled_addmm.h>
1187
+ #include <ATen/ops/special_airy_ai.h>
1188
+ #include <ATen/ops/special_bessel_j0.h>
1189
+ #include <ATen/ops/special_bessel_j1.h>
1190
+ #include <ATen/ops/special_bessel_y0.h>
1191
+ #include <ATen/ops/special_bessel_y1.h>
1192
+ #include <ATen/ops/special_chebyshev_polynomial_t.h>
1193
+ #include <ATen/ops/special_chebyshev_polynomial_u.h>
1194
+ #include <ATen/ops/special_chebyshev_polynomial_v.h>
1195
+ #include <ATen/ops/special_chebyshev_polynomial_w.h>
1196
+ #include <ATen/ops/special_digamma.h>
1197
+ #include <ATen/ops/special_entr.h>
1198
+ #include <ATen/ops/special_erf.h>
1199
+ #include <ATen/ops/special_erfc.h>
1200
+ #include <ATen/ops/special_erfcx.h>
1201
+ #include <ATen/ops/special_erfinv.h>
1202
+ #include <ATen/ops/special_exp2.h>
1203
+ #include <ATen/ops/special_expit.h>
1204
+ #include <ATen/ops/special_expm1.h>
1205
+ #include <ATen/ops/special_gammainc.h>
1206
+ #include <ATen/ops/special_gammaincc.h>
1207
+ #include <ATen/ops/special_gammaln.h>
1208
+ #include <ATen/ops/special_hermite_polynomial_h.h>
1209
+ #include <ATen/ops/special_hermite_polynomial_he.h>
1210
+ #include <ATen/ops/special_i0.h>
1211
+ #include <ATen/ops/special_i0e.h>
1212
+ #include <ATen/ops/special_i1.h>
1213
+ #include <ATen/ops/special_i1e.h>
1214
+ #include <ATen/ops/special_laguerre_polynomial_l.h>
1215
+ #include <ATen/ops/special_legendre_polynomial_p.h>
1216
+ #include <ATen/ops/special_log1p.h>
1217
+ #include <ATen/ops/special_log_ndtr.h>
1218
+ #include <ATen/ops/special_log_softmax.h>
1219
+ #include <ATen/ops/special_logit.h>
1220
+ #include <ATen/ops/special_logsumexp.h>
1221
+ #include <ATen/ops/special_modified_bessel_i0.h>
1222
+ #include <ATen/ops/special_modified_bessel_i1.h>
1223
+ #include <ATen/ops/special_modified_bessel_k0.h>
1224
+ #include <ATen/ops/special_modified_bessel_k1.h>
1225
+ #include <ATen/ops/special_multigammaln.h>
1226
+ #include <ATen/ops/special_ndtr.h>
1227
+ #include <ATen/ops/special_ndtri.h>
1228
+ #include <ATen/ops/special_polygamma.h>
1229
+ #include <ATen/ops/special_psi.h>
1230
+ #include <ATen/ops/special_round.h>
1231
+ #include <ATen/ops/special_scaled_modified_bessel_k0.h>
1232
+ #include <ATen/ops/special_scaled_modified_bessel_k1.h>
1233
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
1234
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
1235
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
1236
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
1237
+ #include <ATen/ops/special_sinc.h>
1238
+ #include <ATen/ops/special_softmax.h>
1239
+ #include <ATen/ops/special_spherical_bessel_j0.h>
1240
+ #include <ATen/ops/special_xlog1py.h>
1241
+ #include <ATen/ops/special_xlogy.h>
1242
+ #include <ATen/ops/special_zeta.h>
1243
+ #include <ATen/ops/split.h>
1244
+ #include <ATen/ops/split_copy.h>
1245
+ #include <ATen/ops/split_with_sizes.h>
1246
+ #include <ATen/ops/split_with_sizes_copy.h>
1247
+ #include <ATen/ops/sqrt.h>
1248
+ #include <ATen/ops/square.h>
1249
+ #include <ATen/ops/squeeze.h>
1250
+ #include <ATen/ops/squeeze_copy.h>
1251
+ #include <ATen/ops/sspaddmm.h>
1252
+ #include <ATen/ops/stack.h>
1253
+ #include <ATen/ops/std.h>
1254
+ #include <ATen/ops/std_mean.h>
1255
+ #include <ATen/ops/stft.h>
1256
+ #include <ATen/ops/stride.h>
1257
+ #include <ATen/ops/sub.h>
1258
+ #include <ATen/ops/subtract.h>
1259
+ #include <ATen/ops/sum.h>
1260
+ #include <ATen/ops/sum_to_size.h>
1261
+ #include <ATen/ops/svd.h>
1262
+ #include <ATen/ops/swapaxes.h>
1263
+ #include <ATen/ops/swapdims.h>
1264
+ #include <ATen/ops/sym_constrain_range.h>
1265
+ #include <ATen/ops/sym_constrain_range_for_size.h>
1266
+ #include <ATen/ops/sym_numel.h>
1267
+ #include <ATen/ops/sym_size.h>
1268
+ #include <ATen/ops/sym_storage_offset.h>
1269
+ #include <ATen/ops/sym_stride.h>
1270
+ #include <ATen/ops/t.h>
1271
+ #include <ATen/ops/t_copy.h>
1272
+ #include <ATen/ops/take.h>
1273
+ #include <ATen/ops/take_along_dim.h>
1274
+ #include <ATen/ops/tan.h>
1275
+ #include <ATen/ops/tanh.h>
1276
+ #include <ATen/ops/tanh_backward.h>
1277
+ #include <ATen/ops/tensor_split.h>
1278
+ #include <ATen/ops/tensordot.h>
1279
+ #include <ATen/ops/thnn_conv2d.h>
1280
+ #include <ATen/ops/threshold.h>
1281
+ #include <ATen/ops/threshold_backward.h>
1282
+ #include <ATen/ops/tile.h>
1283
+ #include <ATen/ops/to.h>
1284
+ #include <ATen/ops/to_dense.h>
1285
+ #include <ATen/ops/to_dense_backward.h>
1286
+ #include <ATen/ops/to_mkldnn.h>
1287
+ #include <ATen/ops/to_mkldnn_backward.h>
1288
+ #include <ATen/ops/to_padded_tensor.h>
1289
+ #include <ATen/ops/to_sparse.h>
1290
+ #include <ATen/ops/to_sparse_bsc.h>
1291
+ #include <ATen/ops/to_sparse_bsr.h>
1292
+ #include <ATen/ops/to_sparse_csc.h>
1293
+ #include <ATen/ops/to_sparse_csr.h>
1294
+ #include <ATen/ops/topk.h>
1295
+ #include <ATen/ops/trace.h>
1296
+ #include <ATen/ops/trace_backward.h>
1297
+ #include <ATen/ops/transpose.h>
1298
+ #include <ATen/ops/transpose_copy.h>
1299
+ #include <ATen/ops/trapezoid.h>
1300
+ #include <ATen/ops/trapz.h>
1301
+ #include <ATen/ops/triangular_solve.h>
1302
+ #include <ATen/ops/tril.h>
1303
+ #include <ATen/ops/tril_indices.h>
1304
+ #include <ATen/ops/triplet_margin_loss.h>
1305
+ #include <ATen/ops/triu.h>
1306
+ #include <ATen/ops/triu_indices.h>
1307
+ #include <ATen/ops/true_divide.h>
1308
+ #include <ATen/ops/trunc.h>
1309
+ #include <ATen/ops/type_as.h>
1310
+ #include <ATen/ops/unbind.h>
1311
+ #include <ATen/ops/unbind_copy.h>
1312
+ #include <ATen/ops/unflatten.h>
1313
+ #include <ATen/ops/unflatten_dense_tensors.h>
1314
+ #include <ATen/ops/unfold.h>
1315
+ #include <ATen/ops/unfold_backward.h>
1316
+ #include <ATen/ops/unfold_copy.h>
1317
+ #include <ATen/ops/uniform.h>
1318
+ #include <ATen/ops/unique_consecutive.h>
1319
+ #include <ATen/ops/unique_dim.h>
1320
+ #include <ATen/ops/unique_dim_consecutive.h>
1321
+ #include <ATen/ops/unsafe_chunk.h>
1322
+ #include <ATen/ops/unsafe_split.h>
1323
+ #include <ATen/ops/unsafe_split_with_sizes.h>
1324
+ #include <ATen/ops/unsqueeze.h>
1325
+ #include <ATen/ops/unsqueeze_copy.h>
1326
+ #include <ATen/ops/upsample_bicubic2d.h>
1327
+ #include <ATen/ops/upsample_bicubic2d_backward.h>
1328
+ #include <ATen/ops/upsample_bilinear2d.h>
1329
+ #include <ATen/ops/upsample_bilinear2d_backward.h>
1330
+ #include <ATen/ops/upsample_linear1d.h>
1331
+ #include <ATen/ops/upsample_linear1d_backward.h>
1332
+ #include <ATen/ops/upsample_nearest1d.h>
1333
+ #include <ATen/ops/upsample_nearest1d_backward.h>
1334
+ #include <ATen/ops/upsample_nearest2d.h>
1335
+ #include <ATen/ops/upsample_nearest2d_backward.h>
1336
+ #include <ATen/ops/upsample_nearest3d.h>
1337
+ #include <ATen/ops/upsample_nearest3d_backward.h>
1338
+ #include <ATen/ops/upsample_trilinear3d.h>
1339
+ #include <ATen/ops/upsample_trilinear3d_backward.h>
1340
+ #include <ATen/ops/value_selecting_reduction_backward.h>
1341
+ #include <ATen/ops/values.h>
1342
+ #include <ATen/ops/values_copy.h>
1343
+ #include <ATen/ops/vander.h>
1344
+ #include <ATen/ops/var.h>
1345
+ #include <ATen/ops/var_mean.h>
1346
+ #include <ATen/ops/vdot.h>
1347
+ #include <ATen/ops/view.h>
1348
+ #include <ATen/ops/view_as.h>
1349
+ #include <ATen/ops/view_as_complex.h>
1350
+ #include <ATen/ops/view_as_complex_copy.h>
1351
+ #include <ATen/ops/view_as_real.h>
1352
+ #include <ATen/ops/view_as_real_copy.h>
1353
+ #include <ATen/ops/view_copy.h>
1354
+ #include <ATen/ops/vsplit.h>
1355
+ #include <ATen/ops/vstack.h>
1356
+ #include <ATen/ops/where.h>
1357
+ #include <ATen/ops/xlogy.h>
1358
+ #include <ATen/ops/xor.h>
1359
+ #include <ATen/ops/zero.h>
1360
+ #include <ATen/ops/zeros.h>
1361
+ #include <ATen/ops/zeros_like.h>
1362
+
1363
+ namespace at {
1364
+
1365
+
1366
+
1367
+ // Special C++ only overloads for std()-like functions (See gh-40287)
1368
+ // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
1369
+ // So, for example std(0) would select the std(unbiased=False) overload
1370
+ TORCH_API inline Tensor var(const Tensor& self, int dim) {
1371
+ return at::var(self, IntArrayRef{dim});
1372
+ }
1373
+ TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
1374
+ return at::var_mean(self, IntArrayRef{dim});
1375
+ }
1376
+ TORCH_API inline Tensor std(const Tensor& self, int dim) {
1377
+ return at::std(self, IntArrayRef{dim});
1378
+ }
1379
+ TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
1380
+ return at::std_mean(self, IntArrayRef{dim});
1381
+ }
1382
+
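// Illustrative sketch (not part of the upstream header): with the int
// overloads above, a literal dimension argument resolves to the IntArrayRef
// form instead of the bool `unbiased` overload. Assuming <ATen/ATen.h>:
//
//   at::Tensor t = at::rand({3, 4});
//   at::Tensor a = at::std(t, 0);                  // reduces over dim 0 via the overload above
//   at::Tensor b = at::std(t, at::IntArrayRef{0}); // equivalent explicit spelling
//
// Without these overloads, `at::std(t, 0)` would bind 0 to the bool
// `unbiased` parameter, as the comment above explains.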
1383
+ inline int64_t numel(const Tensor& tensor) {
1384
+ return tensor.numel();
1385
+ }
1386
+
1387
+ inline int64_t size(const Tensor& tensor, int64_t dim) {
1388
+ return tensor.size(dim);
1389
+ }
1390
+
1391
+ inline int64_t stride(const Tensor& tensor, int64_t dim) {
1392
+ return tensor.stride(dim);
1393
+ }
1394
+
1395
+ inline bool is_complex(const Tensor& tensor) {
1396
+ return tensor.is_complex();
1397
+ }
1398
+
1399
+ inline bool is_floating_point(const Tensor& tensor) {
1400
+ return tensor.is_floating_point();
1401
+ }
1402
+
1403
+ inline bool is_signed(const Tensor& tensor) {
1404
+ return tensor.is_signed();
1405
+ }
1406
+
1407
+ inline bool is_inference(const Tensor& tensor) {
1408
+ return tensor.is_inference();
1409
+ }
1410
+
1411
+ inline bool _is_zerotensor(const Tensor& tensor) {
1412
+ return tensor._is_zerotensor();
1413
+ }
1414
+
1415
+ inline bool is_conj(const Tensor& tensor) {
1416
+ return tensor.is_conj();
1417
+ }
1418
+
1419
+ inline Tensor conj(const Tensor& tensor) {
1420
+ return tensor.conj();
1421
+ }
1422
+
1423
+ inline bool is_neg(const Tensor& tensor) {
1424
+ return tensor.is_neg();
1425
+ }
1426
+
1427
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/InferSize.h ADDED
@@ -0,0 +1,87 @@
1
+ #pragma once
2
+
3
+ #include <ATen/DimVector.h>
4
+ #include <c10/core/ScalarType.h>
5
+ #include <c10/core/SymIntArrayRef.h>
6
+ #include <c10/util/DimVector.h>
7
+ #include <c10/util/Optional.h>
8
+ #include <sstream>
9
+ #include <vector>
10
+
11
+ namespace at {
12
+
13
+ // Infers the size of a dim with size -1, if it exists. Also checks that new
14
+ // shape is compatible with the number of elements.
15
+ //
16
+ // templated to handle std::vector<int64_t> and DimVector use cases, see
17
+ // below
18
+ //
19
+ template <typename InputArrayRef, typename NumelType, typename ResultVec>
20
+ inline void infer_size_impl(
21
+ InputArrayRef shape,
22
+ NumelType numel,
23
+ ResultVec& res) {
24
+ NumelType newsize = 1;
25
+ // N.B. this is an index, not a sym dim!
26
+ auto infer_dim = c10::optional<int64_t>();
27
+ for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) {
28
+ if (shape[dim] == -1) {
29
+ if (infer_dim) {
30
+ throw std::runtime_error("only one dimension can be inferred");
31
+ }
32
+ infer_dim = dim;
33
+ } else if (shape[dim] >= 0) {
34
+ newsize *= shape[dim];
35
+ } else {
36
+ AT_ERROR("invalid shape dimension ", shape[dim]);
37
+ }
38
+ }
39
+
40
+ if (numel == newsize || (infer_dim && newsize > 0 && numel % newsize == 0)) {
41
+ if (infer_dim) {
42
+ // We have a degree of freedom here to select the dimension size; follow
43
+ // NumPy semantics and just bail. However, a nice error message is needed
44
+ // because users often use `view` as a way to flatten & unflatten
45
+ // dimensions and will otherwise be confused why
46
+ // empty_tensor.view( 0, 0)
47
+ // works yet
48
+ // empty_tensor.view(-1, 0)
49
+ // doesn't.
50
+ TORCH_CHECK(
51
+ newsize != 0,
52
+ "cannot reshape tensor of 0 elements into shape ",
53
+ shape,
54
+ " because the unspecified dimension size -1 can be any "
55
+ "value and is ambiguous");
56
+ res[*infer_dim] = numel / newsize;
57
+ }
58
+ return;
59
+ }
60
+
61
+ std::ostringstream ss;
62
+ ss << "shape '" << shape << "' is invalid for input of size " << numel;
63
+ throw std::runtime_error(ss.str());
64
+ }
65
+
66
+ inline std::vector<int64_t> infer_size(IntArrayRef shape, int64_t numel) {
67
+ auto res = shape.vec();
68
+ infer_size_impl(shape, numel, res);
69
+ return res;
70
+ }
71
+
72
+ inline at::DimVector infer_size_dv(IntArrayRef shape, int64_t numel) {
73
+ auto res = at::DimVector(shape);
74
+ infer_size_impl(shape, numel, res);
75
+ return res;
76
+ }
77
+
78
+ inline at::SymDimVector infer_size_dv(
79
+ c10::SymIntArrayRef shape,
80
+ c10::SymInt numel) {
81
+ auto res = at::SymDimVector(shape);
82
+ infer_size_impl<c10::SymIntArrayRef, c10::SymInt, at::SymDimVector>(
83
+ shape, std::move(numel), res);
84
+ return res;
85
+ }
86
+
87
+ } // namespace at
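// Illustrative sketch (not part of the upstream header): a single -1 entry is
// replaced so that the product of sizes matches numel; a second -1 or an
// incompatible numel throws.
//
//   std::vector<int64_t> sizes = at::infer_size({2, -1, 3}, /*numel=*/24);
//   // sizes == {2, 4, 3}
//   at::DimVector dv = at::infer_size_dv({-1}, /*numel=*/10);   // dv == {10}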
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/InitialTensorOptions.h ADDED
@@ -0,0 +1,15 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/TensorOptions.h>
4
+
5
+ namespace at {
6
+
7
+ // Represents the initial TensorOptions, before the "defaults" are ever changed.
8
+ // This is designed to be used in library code, where the explicit devices,
9
+ // dtypes, etc. are known. NOTE: this is not a stable API.
10
+ inline TensorOptions initialTensorOptions() {
11
+ return TensorOptions(kCPU).dtype(kFloat).layout(kStrided).requires_grad(
12
+ false);
13
+ }
14
+
15
+ } // namespace at
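// Illustrative sketch (not part of the upstream header): library code can start
// from these fixed defaults and override only the fields it knows explicitly.
//
//   at::TensorOptions opts = at::initialTensorOptions().dtype(at::kLong);
//   at::Tensor t = at::empty({2, 3}, opts);   // CPU, strided, int64, requires_grad=false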
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h ADDED
@@ -0,0 +1,31 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/Exception.h>
4
+
5
+ #include <ostream>
6
+ #include <string>
7
+
8
+ namespace at {
9
+
10
+ enum class LinalgBackend : int8_t { Default, Cusolver, Magma };
11
+
12
+ inline std::string LinalgBackendToString(at::LinalgBackend backend) {
13
+ switch (backend) {
14
+ case LinalgBackend::Default:
15
+ return "at::LinalgBackend::Default";
16
+ case LinalgBackend::Cusolver:
17
+ return "at::LinalgBackend::Cusolver";
18
+ case LinalgBackend::Magma:
19
+ return "at::LinalgBackend::Magma";
20
+ default:
21
+ TORCH_CHECK(false, "Unknown linalg backend");
22
+ }
23
+ }
24
+
25
+ inline std::ostream& operator<<(
26
+ std::ostream& stream,
27
+ at::LinalgBackend backend) {
28
+ return stream << LinalgBackendToString(backend);
29
+ }
30
+
31
+ } // namespace at
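// Illustrative sketch (not part of the upstream header), assuming <iostream>:
//
//   at::LinalgBackend backend = at::LinalgBackend::Cusolver;
//   std::cout << backend << '\n';   // prints "at::LinalgBackend::Cusolver"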
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MapAllocator.h ADDED
@@ -0,0 +1,139 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/util/string_view.h>
5
+
6
+ namespace at {
7
+
8
+ enum MappedAllocatorModes {
9
+ ALLOCATOR_MAPPED_SHARED = 1,
10
+ ALLOCATOR_MAPPED_SHAREDMEM = 2,
11
+ ALLOCATOR_MAPPED_EXCLUSIVE = 4,
12
+ ALLOCATOR_MAPPED_NOCREATE = 8,
13
+ ALLOCATOR_MAPPED_KEEPFD = 16,
14
+ ALLOCATOR_MAPPED_FROMFD = 32,
15
+ ALLOCATOR_MAPPED_UNLINK = 64
16
+ };
17
+
18
+ // Sentinel value/type to help distinguish the file descriptor constructor from
19
+ // the non-file descriptor constructor
20
+ enum WithFd { WITH_FD };
21
+
22
+ TORCH_API std::string NewProcessWideShmHandle();
23
+
24
+ class TORCH_API MapAllocator {
25
+ public:
26
+ MapAllocator(c10::string_view filename, int flags, size_t size);
27
+ MapAllocator(
28
+ WithFd,
29
+ c10::string_view filename,
30
+ int fd,
31
+ int flags,
32
+ size_t size);
33
+ MapAllocator(const MapAllocator&) = delete;
34
+ MapAllocator& operator=(const MapAllocator&) = delete;
35
+ MapAllocator(MapAllocator&&) = delete;
36
+ MapAllocator& operator=(MapAllocator&&) = delete;
37
+
38
+ const char* filename() const {
39
+ return filename_.c_str();
40
+ }
41
+ int fd() const {
42
+ #ifdef _WIN32
43
+ TORCH_CHECK(false, "MapAllocator::fd() is unsupported on Windows");
44
+ #else
45
+ return fd_;
46
+ #endif
47
+ }
48
+ ptrdiff_t size() const {
49
+ return size_;
50
+ }
51
+ // Return a pointer to the actual data for this allocator
52
+ // (in the case of the refcounted allocator, this is offset
53
+ // from the base pointer.)
54
+ virtual void* data() const {
55
+ return base_ptr_;
56
+ }
57
+
58
+ static MapAllocator* fromDataPtr(const at::DataPtr&);
59
+ static at::DataPtr makeDataPtr(
60
+ c10::string_view filename,
61
+ int flags,
62
+ size_t size,
63
+ size_t* actual_size_out);
64
+ static at::DataPtr makeDataPtr(
65
+ WithFd,
66
+ const char* filename,
67
+ int fd,
68
+ int flags,
69
+ size_t size,
70
+ size_t* actual_size_out);
71
+
72
+ // Closes the data. Helps us avoid destructor shenanigans
73
+ virtual void close();
74
+
75
+ // This is very dangerous. You have to redefine this destructor for each
76
+ // subclass
77
+ virtual ~MapAllocator();
78
+
79
+ protected:
80
+ bool closed_ = false;
81
+ std::string filename_;
82
+ int flags_ = 0;
83
+ ptrdiff_t size_; /* mapped size */
84
+ #ifdef _WIN32
85
+ void* handle_;
86
+ void* event_;
87
+ std::string eventname_;
88
+ #else
89
+ int fd_ = -1;
90
+ #endif
91
+ void* base_ptr_ = nullptr;
92
+ };
93
+
94
+ // Base-from-member idiom
95
+ struct TORCH_API RefcountedMapAllocatorArgCheck {
96
+ RefcountedMapAllocatorArgCheck(int flags);
97
+ };
98
+
99
+ class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck,
100
+ public MapAllocator {
101
+ public:
102
+ RefcountedMapAllocator(const char* filename, int flags, size_t size);
103
+ RefcountedMapAllocator(
104
+ WithFd,
105
+ const char* filename,
106
+ int fd,
107
+ int flags,
108
+ size_t size);
109
+
110
+ static RefcountedMapAllocator* fromDataPtr(const at::DataPtr&);
111
+ static at::DataPtr makeDataPtr(
112
+ const char* filename,
113
+ int flags,
114
+ size_t size,
115
+ size_t* actual_size_out);
116
+ static at::DataPtr makeDataPtr(
117
+ WithFd,
118
+ const char* filename,
119
+ int fd,
120
+ int flags,
121
+ size_t size,
122
+ size_t* actual_size_out);
123
+
124
+ void* data() const override;
125
+
126
+ void incref();
127
+ int decref();
128
+ void close() override;
129
+
130
+ ~RefcountedMapAllocator() override {
131
+ RefcountedMapAllocator::close();
132
+ }
133
+
134
+ protected:
135
+ void checkFlags();
136
+ void initializeAlloc();
137
+ };
138
+
139
+ } // namespace at
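// Illustrative sketch (not part of the upstream header): creating a shared
// file-backed DataPtr through the factory above. The path is a placeholder and
// the flags are a bitmask of the MappedAllocatorModes values.
//
//   size_t actual_size = 0;
//   at::DataPtr ptr = at::MapAllocator::makeDataPtr(
//       "/tmp/example_mapping",        // hypothetical file to map
//       at::ALLOCATOR_MAPPED_SHARED,   // share changes with other mappings
//       /*size=*/4096,
//       &actual_size);
//   void* base = ptr.get();            // start of the mapped region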
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MatrixRef.h ADDED
@@ -0,0 +1,109 @@
1
+ #pragma once
2
+ #include <ATen/Utils.h>
3
+ #include <c10/util/ArrayRef.h>
4
+
5
+ #include <vector>
6
+
7
+ namespace at {
8
+ /// MatrixRef - Like an ArrayRef, but with an extra recorded stride so that
9
+ /// we can easily view it as a multidimensional array.
10
+ ///
11
+ /// Like ArrayRef, this class does not own the underlying data, it is expected
12
+ /// to be used in situations where the data resides in some other buffer.
13
+ ///
14
+ /// This is intended to be trivially copyable, so it should be passed by
15
+ /// value.
16
+ ///
17
+ /// For now, 2D only (so the copies are actually cheap, without having
18
+ /// to write a SmallVector class) and contiguous only (so we can
19
+ /// return non-strided ArrayRef on index).
20
+ ///
21
+ /// P.S. dimension 0 indexes rows, dimension 1 indexes columns
22
+ template <typename T>
23
+ class MatrixRef {
24
+ public:
25
+ typedef size_t size_type;
26
+
27
+ private:
28
+ /// Underlying ArrayRef
29
+ ArrayRef<T> arr;
30
+
31
+ /// Stride of dim 0 (outer dimension)
32
+ size_type stride0;
33
+
34
+ // Stride of dim 1 is assumed to be 1
35
+
36
+ public:
37
+ /// Construct an empty MatrixRef.
38
+ /*implicit*/ MatrixRef() : arr(nullptr), stride0(0) {}
39
+
40
+ /// Construct a MatrixRef from an ArrayRef and outer stride.
41
+ /*implicit*/ MatrixRef(ArrayRef<T> arr, size_type stride0)
42
+ : arr(arr), stride0(stride0) {
43
+ TORCH_CHECK(
44
+ arr.size() % stride0 == 0,
45
+ "MatrixRef: ArrayRef size ",
46
+ arr.size(),
47
+ " not divisible by stride ",
48
+ stride0)
49
+ }
50
+
51
+ /// @}
52
+ /// @name Simple Operations
53
+ /// @{
54
+
55
+ /// empty - Check if the matrix is empty.
56
+ bool empty() const {
57
+ return arr.empty();
58
+ }
59
+
60
+ const T* data() const {
61
+ return arr.data();
62
+ }
63
+
64
+ /// size - Get the size of a dimension
65
+ size_t size(size_t dim) const {
66
+ if (dim == 0) {
67
+ return arr.size() / stride0;
68
+ } else if (dim == 1) {
69
+ return stride0;
70
+ } else {
71
+ TORCH_CHECK(
72
+ 0, "MatrixRef: out of bounds dimension ", dim, "; expected 0 or 1");
73
+ }
74
+ }
75
+
76
+ size_t numel() const {
77
+ return arr.size();
78
+ }
79
+
80
+ /// equals - Check for element-wise equality.
81
+ bool equals(MatrixRef RHS) const {
82
+ return stride0 == RHS.stride0 && arr.equals(RHS.arr);
83
+ }
84
+
85
+ /// @}
86
+ /// @name Operator Overloads
87
+ /// @{
88
+ ArrayRef<T> operator[](size_t Index) const {
89
+ return arr.slice(Index * stride0, stride0);
90
+ }
91
+
92
+ /// Disallow accidental assignment from a temporary.
93
+ ///
94
+ /// The declaration here is extra complicated so that "arrayRef = {}"
95
+ /// continues to select the move assignment operator.
96
+ template <typename U>
97
+ std::enable_if_t<std::is_same_v<U, T>, MatrixRef<T>>& operator=(
98
+ U&& Temporary) = delete;
99
+
100
+ /// Disallow accidental assignment from a temporary.
101
+ ///
102
+ /// The declaration here is extra complicated so that "arrayRef = {}"
103
+ /// continues to select the move assignment operator.
104
+ template <typename U>
105
+ std::enable_if_t<std::is_same_v<U, T>, MatrixRef<T>>& operator=(
106
+ std::initializer_list<U>) = delete;
107
+ };
108
+
109
+ } // end namespace at
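// Illustrative sketch (not part of the upstream header): viewing a flat,
// row-major buffer of 6 floats as a 2 x 3 matrix; stride0 is the row length.
//
//   std::vector<float> buf = {1, 2, 3, 4, 5, 6};
//   at::MatrixRef<float> m(at::ArrayRef<float>(buf), /*stride0=*/3);
//   // m.size(0) == 2 (rows), m.size(1) == 3 (columns)
//   at::ArrayRef<float> row1 = m[1];   // refers to {4, 5, 6}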
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MemoryOverlap.h ADDED
@@ -0,0 +1,42 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+
5
+ namespace c10 {
6
+ struct TensorImpl;
7
+ }
8
+
9
+ namespace at {
10
+ class TensorBase;
11
+
12
+ // MemOverlap: Whether or not there is memory overlap
13
+ //
14
+ // No: Absolutely no memory overlap
15
+ // Yes: Absolutely yes memory overlap
16
+ // TooHard: There might be memory overlap, but it was too expensive to compute.
17
+ //
18
+ // NB: Please update the python test for these if you renumber them.
19
+ enum class MemOverlap { No, Yes, TooHard };
20
+
21
+ enum class MemOverlapStatus { Full, Partial, No, TooHard };
22
+
23
+ TORCH_API MemOverlap has_internal_overlap(const TensorBase& t);
24
+ TORCH_API MemOverlap has_internal_overlap(c10::TensorImpl* t);
25
+
26
+ TORCH_API void assert_no_internal_overlap(const TensorBase& t);
27
+ TORCH_API void assert_no_internal_overlap(c10::TensorImpl* t);
28
+
29
+ TORCH_API MemOverlapStatus
30
+ get_overlap_status(const TensorBase& a, const TensorBase& b);
31
+ TORCH_API MemOverlapStatus
32
+ get_overlap_status(const c10::TensorImpl* a, const c10::TensorImpl* b);
33
+
34
+ TORCH_API void assert_no_partial_overlap(
35
+ const TensorBase& a,
36
+ const TensorBase& b);
37
+ void assert_no_partial_overlap(c10::TensorImpl* a, c10::TensorImpl* b);
38
+
39
+ TORCH_API void assert_no_overlap(const TensorBase& a, const TensorBase& b);
40
+ TORCH_API void assert_no_overlap(c10::TensorImpl* a, c10::TensorImpl* b);
41
+
42
+ } // namespace at
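// Illustrative sketch (not part of the upstream header): checking overlap
// between a tensor and a view into part of its storage.
//
//   at::Tensor a = at::zeros({8});
//   at::Tensor b = a.slice(/*dim=*/0, /*start=*/0, /*end=*/4);
//   at::MemOverlapStatus s = at::get_overlap_status(a, b);  // overlapping, but not identical
//   at::assert_no_internal_overlap(a);                      // passes: `a` is contiguous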
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/MetaFunctions_inl.h ADDED
@@ -0,0 +1,324 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
12
+ #error This change adds a dependency on all pytorch operators, meaning the \
13
+ file will need to be re-compiled every time an operator is changed or added. \
14
+ Consider including a specific operator from \
15
+ <ATen/ops/{my_operator}_meta_dispatch.h>. \
16
+ See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
17
+ #endif
18
+
19
+ #include <ATen/ops/_add_relu_meta_dispatch.h>
20
+ #include <ATen/ops/_addmm_activation_meta_dispatch.h>
21
+ #include <ATen/ops/_amp_update_scale_meta_dispatch.h>
22
+ #include <ATen/ops/_coalesced_meta_dispatch.h>
23
+ #include <ATen/ops/_convert_indices_from_coo_to_csr_meta_dispatch.h>
24
+ #include <ATen/ops/_convert_indices_from_csr_to_coo_meta_dispatch.h>
25
+ #include <ATen/ops/_ctc_loss_meta_dispatch.h>
26
+ #include <ATen/ops/_efficientzerotensor_meta_dispatch.h>
27
+ #include <ATen/ops/_fill_mem_eff_dropout_mask_meta_dispatch.h>
28
+ #include <ATen/ops/_fused_sdp_choice_meta_dispatch.h>
29
+ #include <ATen/ops/_index_put_impl_meta_dispatch.h>
30
+ #include <ATen/ops/_linalg_det_meta_dispatch.h>
31
+ #include <ATen/ops/_linalg_eigh_meta_dispatch.h>
32
+ #include <ATen/ops/_linalg_slogdet_meta_dispatch.h>
33
+ #include <ATen/ops/_linalg_solve_ex_meta_dispatch.h>
34
+ #include <ATen/ops/_linalg_svd_meta_dispatch.h>
35
+ #include <ATen/ops/_log_softmax_meta_dispatch.h>
36
+ #include <ATen/ops/_log_softmax_backward_data_meta_dispatch.h>
37
+ #include <ATen/ops/_mkldnn_transpose_meta_dispatch.h>
38
+ #include <ATen/ops/_reshape_alias_meta_dispatch.h>
39
+ #include <ATen/ops/_resize_output_meta_dispatch.h>
40
+ #include <ATen/ops/_softmax_meta_dispatch.h>
41
+ #include <ATen/ops/_softmax_backward_data_meta_dispatch.h>
42
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_meta_dispatch.h>
43
+ #include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_meta_dispatch.h>
44
+ #include <ATen/ops/_upsample_bicubic2d_aa_meta_dispatch.h>
45
+ #include <ATen/ops/_upsample_bicubic2d_aa_backward_meta_dispatch.h>
46
+ #include <ATen/ops/_upsample_bilinear2d_aa_meta_dispatch.h>
47
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward_meta_dispatch.h>
48
+ #include <ATen/ops/_upsample_nearest_exact1d_meta_dispatch.h>
49
+ #include <ATen/ops/_upsample_nearest_exact1d_backward_meta_dispatch.h>
50
+ #include <ATen/ops/_upsample_nearest_exact2d_meta_dispatch.h>
51
+ #include <ATen/ops/_upsample_nearest_exact2d_backward_meta_dispatch.h>
52
+ #include <ATen/ops/_upsample_nearest_exact3d_meta_dispatch.h>
53
+ #include <ATen/ops/_upsample_nearest_exact3d_backward_meta_dispatch.h>
54
+ #include <ATen/ops/acos_meta_dispatch.h>
55
+ #include <ATen/ops/acosh_meta_dispatch.h>
56
+ #include <ATen/ops/adaptive_max_pool2d_meta_dispatch.h>
57
+ #include <ATen/ops/adaptive_max_pool2d_backward_meta_dispatch.h>
58
+ #include <ATen/ops/adaptive_max_pool3d_meta_dispatch.h>
59
+ #include <ATen/ops/adaptive_max_pool3d_backward_meta_dispatch.h>
60
+ #include <ATen/ops/add_meta_dispatch.h>
61
+ #include <ATen/ops/addbmm_meta_dispatch.h>
62
+ #include <ATen/ops/addcdiv_meta_dispatch.h>
63
+ #include <ATen/ops/addcmul_meta_dispatch.h>
64
+ #include <ATen/ops/addmm_meta_dispatch.h>
65
+ #include <ATen/ops/addmv_meta_dispatch.h>
66
+ #include <ATen/ops/all_meta_dispatch.h>
67
+ #include <ATen/ops/amax_meta_dispatch.h>
68
+ #include <ATen/ops/amin_meta_dispatch.h>
69
+ #include <ATen/ops/aminmax_meta_dispatch.h>
70
+ #include <ATen/ops/any_meta_dispatch.h>
71
+ #include <ATen/ops/arange_meta_dispatch.h>
72
+ #include <ATen/ops/argmax_meta_dispatch.h>
73
+ #include <ATen/ops/argmin_meta_dispatch.h>
74
+ #include <ATen/ops/as_strided_meta_dispatch.h>
75
+ #include <ATen/ops/asin_meta_dispatch.h>
76
+ #include <ATen/ops/asinh_meta_dispatch.h>
77
+ #include <ATen/ops/atan_meta_dispatch.h>
78
+ #include <ATen/ops/atan2_meta_dispatch.h>
79
+ #include <ATen/ops/atanh_meta_dispatch.h>
80
+ #include <ATen/ops/avg_pool2d_meta_dispatch.h>
81
+ #include <ATen/ops/avg_pool2d_backward_meta_dispatch.h>
82
+ #include <ATen/ops/avg_pool3d_meta_dispatch.h>
83
+ #include <ATen/ops/avg_pool3d_backward_meta_dispatch.h>
84
+ #include <ATen/ops/baddbmm_meta_dispatch.h>
85
+ #include <ATen/ops/bernoulli_meta_dispatch.h>
86
+ #include <ATen/ops/bitwise_and_meta_dispatch.h>
87
+ #include <ATen/ops/bitwise_left_shift_meta_dispatch.h>
88
+ #include <ATen/ops/bitwise_not_meta_dispatch.h>
89
+ #include <ATen/ops/bitwise_or_meta_dispatch.h>
90
+ #include <ATen/ops/bitwise_right_shift_meta_dispatch.h>
91
+ #include <ATen/ops/bitwise_xor_meta_dispatch.h>
92
+ #include <ATen/ops/bmm_meta_dispatch.h>
93
+ #include <ATen/ops/cat_meta_dispatch.h>
94
+ #include <ATen/ops/cauchy_meta_dispatch.h>
95
+ #include <ATen/ops/ceil_meta_dispatch.h>
96
+ #include <ATen/ops/clamp_meta_dispatch.h>
97
+ #include <ATen/ops/clamp_max_meta_dispatch.h>
98
+ #include <ATen/ops/clamp_min_meta_dispatch.h>
99
+ #include <ATen/ops/copy_sparse_to_sparse_meta_dispatch.h>
100
+ #include <ATen/ops/copysign_meta_dispatch.h>
101
+ #include <ATen/ops/cos_meta_dispatch.h>
102
+ #include <ATen/ops/cosh_meta_dispatch.h>
103
+ #include <ATen/ops/cumprod_meta_dispatch.h>
104
+ #include <ATen/ops/cumsum_meta_dispatch.h>
105
+ #include <ATen/ops/digamma_meta_dispatch.h>
106
+ #include <ATen/ops/div_meta_dispatch.h>
107
+ #include <ATen/ops/elu_meta_dispatch.h>
108
+ #include <ATen/ops/elu_backward_meta_dispatch.h>
109
+ #include <ATen/ops/embedding_renorm_meta_dispatch.h>
110
+ #include <ATen/ops/empty_meta_dispatch.h>
111
+ #include <ATen/ops/empty_strided_meta_dispatch.h>
112
+ #include <ATen/ops/eq_meta_dispatch.h>
113
+ #include <ATen/ops/erf_meta_dispatch.h>
114
+ #include <ATen/ops/erfc_meta_dispatch.h>
115
+ #include <ATen/ops/erfinv_meta_dispatch.h>
116
+ #include <ATen/ops/exp_meta_dispatch.h>
117
+ #include <ATen/ops/exp2_meta_dispatch.h>
118
+ #include <ATen/ops/expm1_meta_dispatch.h>
119
+ #include <ATen/ops/exponential_meta_dispatch.h>
120
+ #include <ATen/ops/eye_meta_dispatch.h>
121
+ #include <ATen/ops/fill_meta_dispatch.h>
122
+ #include <ATen/ops/floor_meta_dispatch.h>
123
+ #include <ATen/ops/floor_divide_meta_dispatch.h>
124
+ #include <ATen/ops/fmax_meta_dispatch.h>
125
+ #include <ATen/ops/fmin_meta_dispatch.h>
126
+ #include <ATen/ops/fmod_meta_dispatch.h>
127
+ #include <ATen/ops/frac_meta_dispatch.h>
128
+ #include <ATen/ops/fractional_max_pool2d_meta_dispatch.h>
129
+ #include <ATen/ops/fractional_max_pool2d_backward_meta_dispatch.h>
130
+ #include <ATen/ops/fractional_max_pool3d_meta_dispatch.h>
131
+ #include <ATen/ops/gather_meta_dispatch.h>
132
+ #include <ATen/ops/gcd_meta_dispatch.h>
133
+ #include <ATen/ops/ge_meta_dispatch.h>
134
+ #include <ATen/ops/gelu_meta_dispatch.h>
135
+ #include <ATen/ops/gelu_backward_meta_dispatch.h>
136
+ #include <ATen/ops/geometric_meta_dispatch.h>
137
+ #include <ATen/ops/glu_meta_dispatch.h>
138
+ #include <ATen/ops/gt_meta_dispatch.h>
139
+ #include <ATen/ops/hardshrink_meta_dispatch.h>
140
+ #include <ATen/ops/hardshrink_backward_meta_dispatch.h>
141
+ #include <ATen/ops/hardsigmoid_meta_dispatch.h>
142
+ #include <ATen/ops/hardsigmoid_backward_meta_dispatch.h>
143
+ #include <ATen/ops/hardswish_meta_dispatch.h>
144
+ #include <ATen/ops/hardtanh_meta_dispatch.h>
145
+ #include <ATen/ops/heaviside_meta_dispatch.h>
146
+ #include <ATen/ops/hypot_meta_dispatch.h>
147
+ #include <ATen/ops/i0_meta_dispatch.h>
148
+ #include <ATen/ops/igamma_meta_dispatch.h>
149
+ #include <ATen/ops/igammac_meta_dispatch.h>
150
+ #include <ATen/ops/index_meta_dispatch.h>
151
+ #include <ATen/ops/index_add_meta_dispatch.h>
152
+ #include <ATen/ops/index_copy_meta_dispatch.h>
153
+ #include <ATen/ops/index_fill_meta_dispatch.h>
154
+ #include <ATen/ops/index_reduce_meta_dispatch.h>
155
+ #include <ATen/ops/isin_meta_dispatch.h>
156
+ #include <ATen/ops/isneginf_meta_dispatch.h>
157
+ #include <ATen/ops/isposinf_meta_dispatch.h>
158
+ #include <ATen/ops/lcm_meta_dispatch.h>
159
+ #include <ATen/ops/le_meta_dispatch.h>
160
+ #include <ATen/ops/leaky_relu_meta_dispatch.h>
161
+ #include <ATen/ops/leaky_relu_backward_meta_dispatch.h>
162
+ #include <ATen/ops/lerp_meta_dispatch.h>
163
+ #include <ATen/ops/lgamma_meta_dispatch.h>
164
+ #include <ATen/ops/linalg_cholesky_ex_meta_dispatch.h>
165
+ #include <ATen/ops/linalg_cross_meta_dispatch.h>
166
+ #include <ATen/ops/linalg_inv_ex_meta_dispatch.h>
167
+ #include <ATen/ops/linalg_ldl_factor_ex_meta_dispatch.h>
168
+ #include <ATen/ops/linalg_ldl_solve_meta_dispatch.h>
169
+ #include <ATen/ops/linalg_lu_meta_dispatch.h>
170
+ #include <ATen/ops/linalg_lu_factor_ex_meta_dispatch.h>
171
+ #include <ATen/ops/linalg_lu_solve_meta_dispatch.h>
172
+ #include <ATen/ops/linalg_qr_meta_dispatch.h>
173
+ #include <ATen/ops/linalg_vector_norm_meta_dispatch.h>
174
+ #include <ATen/ops/linspace_meta_dispatch.h>
175
+ #include <ATen/ops/log_meta_dispatch.h>
176
+ #include <ATen/ops/log10_meta_dispatch.h>
177
+ #include <ATen/ops/log1p_meta_dispatch.h>
178
+ #include <ATen/ops/log2_meta_dispatch.h>
179
+ #include <ATen/ops/log_normal_meta_dispatch.h>
180
+ #include <ATen/ops/logaddexp_meta_dispatch.h>
181
+ #include <ATen/ops/logaddexp2_meta_dispatch.h>
182
+ #include <ATen/ops/logit_meta_dispatch.h>
183
+ #include <ATen/ops/logit_backward_meta_dispatch.h>
184
+ #include <ATen/ops/logspace_meta_dispatch.h>
185
+ #include <ATen/ops/lshift_meta_dispatch.h>
186
+ #include <ATen/ops/lt_meta_dispatch.h>
187
+ #include <ATen/ops/lu_unpack_meta_dispatch.h>
188
+ #include <ATen/ops/masked_fill_meta_dispatch.h>
189
+ #include <ATen/ops/masked_scatter_meta_dispatch.h>
190
+ #include <ATen/ops/max_meta_dispatch.h>
191
+ #include <ATen/ops/max_pool2d_with_indices_meta_dispatch.h>
192
+ #include <ATen/ops/max_pool2d_with_indices_backward_meta_dispatch.h>
193
+ #include <ATen/ops/maximum_meta_dispatch.h>
194
+ #include <ATen/ops/mean_meta_dispatch.h>
195
+ #include <ATen/ops/min_meta_dispatch.h>
196
+ #include <ATen/ops/minimum_meta_dispatch.h>
197
+ #include <ATen/ops/mish_meta_dispatch.h>
198
+ #include <ATen/ops/mm_meta_dispatch.h>
199
+ #include <ATen/ops/mse_loss_meta_dispatch.h>
200
+ #include <ATen/ops/mul_meta_dispatch.h>
201
+ #include <ATen/ops/ne_meta_dispatch.h>
202
+ #include <ATen/ops/neg_meta_dispatch.h>
203
+ #include <ATen/ops/nextafter_meta_dispatch.h>
204
+ #include <ATen/ops/nll_loss_backward_meta_dispatch.h>
205
+ #include <ATen/ops/nll_loss_forward_meta_dispatch.h>
206
+ #include <ATen/ops/norm_meta_dispatch.h>
207
+ #include <ATen/ops/normal_meta_dispatch.h>
208
+ #include <ATen/ops/polygamma_meta_dispatch.h>
209
+ #include <ATen/ops/pow_meta_dispatch.h>
210
+ #include <ATen/ops/prod_meta_dispatch.h>
211
+ #include <ATen/ops/put_meta_dispatch.h>
212
+ #include <ATen/ops/random_meta_dispatch.h>
213
+ #include <ATen/ops/range_meta_dispatch.h>
214
+ #include <ATen/ops/reciprocal_meta_dispatch.h>
215
+ #include <ATen/ops/reflection_pad1d_meta_dispatch.h>
216
+ #include <ATen/ops/reflection_pad1d_backward_meta_dispatch.h>
217
+ #include <ATen/ops/reflection_pad3d_meta_dispatch.h>
218
+ #include <ATen/ops/reflection_pad3d_backward_meta_dispatch.h>
219
+ #include <ATen/ops/relu_meta_dispatch.h>
220
+ #include <ATen/ops/remainder_meta_dispatch.h>
221
+ #include <ATen/ops/renorm_meta_dispatch.h>
222
+ #include <ATen/ops/replication_pad1d_meta_dispatch.h>
223
+ #include <ATen/ops/replication_pad1d_backward_meta_dispatch.h>
224
+ #include <ATen/ops/replication_pad2d_meta_dispatch.h>
225
+ #include <ATen/ops/replication_pad3d_meta_dispatch.h>
226
+ #include <ATen/ops/resize_meta_dispatch.h>
227
+ #include <ATen/ops/resize_as_sparse_meta_dispatch.h>
228
+ #include <ATen/ops/round_meta_dispatch.h>
229
+ #include <ATen/ops/rrelu_with_noise_meta_dispatch.h>
230
+ #include <ATen/ops/rshift_meta_dispatch.h>
231
+ #include <ATen/ops/rsqrt_meta_dispatch.h>
232
+ #include <ATen/ops/scatter_meta_dispatch.h>
233
+ #include <ATen/ops/scatter_add_meta_dispatch.h>
234
+ #include <ATen/ops/scatter_reduce_meta_dispatch.h>
235
+ #include <ATen/ops/set_meta_dispatch.h>
236
+ #include <ATen/ops/sgn_meta_dispatch.h>
237
+ #include <ATen/ops/sigmoid_meta_dispatch.h>
238
+ #include <ATen/ops/sigmoid_backward_meta_dispatch.h>
239
+ #include <ATen/ops/sign_meta_dispatch.h>
240
+ #include <ATen/ops/signbit_meta_dispatch.h>
241
+ #include <ATen/ops/silu_meta_dispatch.h>
242
+ #include <ATen/ops/silu_backward_meta_dispatch.h>
243
+ #include <ATen/ops/sin_meta_dispatch.h>
244
+ #include <ATen/ops/sinc_meta_dispatch.h>
245
+ #include <ATen/ops/sinh_meta_dispatch.h>
246
+ #include <ATen/ops/slow_conv_transpose2d_meta_dispatch.h>
247
+ #include <ATen/ops/smooth_l1_loss_meta_dispatch.h>
248
+ #include <ATen/ops/softplus_meta_dispatch.h>
249
+ #include <ATen/ops/softplus_backward_meta_dispatch.h>
250
+ #include <ATen/ops/softshrink_meta_dispatch.h>
251
+ #include <ATen/ops/softshrink_backward_meta_dispatch.h>
252
+ #include <ATen/ops/sort_meta_dispatch.h>
253
+ #include <ATen/ops/sparse_resize_meta_dispatch.h>
254
+ #include <ATen/ops/sparse_resize_and_clear_meta_dispatch.h>
255
+ #include <ATen/ops/special_airy_ai_meta_dispatch.h>
256
+ #include <ATen/ops/special_bessel_j0_meta_dispatch.h>
257
+ #include <ATen/ops/special_bessel_j1_meta_dispatch.h>
258
+ #include <ATen/ops/special_bessel_y0_meta_dispatch.h>
259
+ #include <ATen/ops/special_bessel_y1_meta_dispatch.h>
260
+ #include <ATen/ops/special_chebyshev_polynomial_t_meta_dispatch.h>
261
+ #include <ATen/ops/special_chebyshev_polynomial_u_meta_dispatch.h>
262
+ #include <ATen/ops/special_chebyshev_polynomial_v_meta_dispatch.h>
263
+ #include <ATen/ops/special_chebyshev_polynomial_w_meta_dispatch.h>
264
+ #include <ATen/ops/special_entr_meta_dispatch.h>
265
+ #include <ATen/ops/special_erfcx_meta_dispatch.h>
266
+ #include <ATen/ops/special_hermite_polynomial_h_meta_dispatch.h>
267
+ #include <ATen/ops/special_hermite_polynomial_he_meta_dispatch.h>
268
+ #include <ATen/ops/special_i0e_meta_dispatch.h>
269
+ #include <ATen/ops/special_i1_meta_dispatch.h>
270
+ #include <ATen/ops/special_i1e_meta_dispatch.h>
271
+ #include <ATen/ops/special_laguerre_polynomial_l_meta_dispatch.h>
272
+ #include <ATen/ops/special_legendre_polynomial_p_meta_dispatch.h>
273
+ #include <ATen/ops/special_log_ndtr_meta_dispatch.h>
274
+ #include <ATen/ops/special_modified_bessel_i0_meta_dispatch.h>
275
+ #include <ATen/ops/special_modified_bessel_i1_meta_dispatch.h>
276
+ #include <ATen/ops/special_modified_bessel_k0_meta_dispatch.h>
277
+ #include <ATen/ops/special_modified_bessel_k1_meta_dispatch.h>
278
+ #include <ATen/ops/special_ndtri_meta_dispatch.h>
279
+ #include <ATen/ops/special_scaled_modified_bessel_k0_meta_dispatch.h>
280
+ #include <ATen/ops/special_scaled_modified_bessel_k1_meta_dispatch.h>
281
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_t_meta_dispatch.h>
282
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_u_meta_dispatch.h>
283
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_v_meta_dispatch.h>
284
+ #include <ATen/ops/special_shifted_chebyshev_polynomial_w_meta_dispatch.h>
285
+ #include <ATen/ops/special_spherical_bessel_j0_meta_dispatch.h>
286
+ #include <ATen/ops/special_xlog1py_meta_dispatch.h>
287
+ #include <ATen/ops/special_zeta_meta_dispatch.h>
288
+ #include <ATen/ops/sqrt_meta_dispatch.h>
289
+ #include <ATen/ops/sub_meta_dispatch.h>
290
+ #include <ATen/ops/sum_meta_dispatch.h>
291
+ #include <ATen/ops/tan_meta_dispatch.h>
292
+ #include <ATen/ops/tanh_meta_dispatch.h>
293
+ #include <ATen/ops/tanh_backward_meta_dispatch.h>
294
+ #include <ATen/ops/threshold_meta_dispatch.h>
295
+ #include <ATen/ops/threshold_backward_meta_dispatch.h>
296
+ #include <ATen/ops/topk_meta_dispatch.h>
297
+ #include <ATen/ops/triangular_solve_meta_dispatch.h>
298
+ #include <ATen/ops/tril_meta_dispatch.h>
299
+ #include <ATen/ops/triu_meta_dispatch.h>
300
+ #include <ATen/ops/trunc_meta_dispatch.h>
301
+ #include <ATen/ops/unfold_meta_dispatch.h>
302
+ #include <ATen/ops/uniform_meta_dispatch.h>
303
+ #include <ATen/ops/upsample_bicubic2d_meta_dispatch.h>
304
+ #include <ATen/ops/upsample_bicubic2d_backward_meta_dispatch.h>
305
+ #include <ATen/ops/upsample_bilinear2d_meta_dispatch.h>
306
+ #include <ATen/ops/upsample_bilinear2d_backward_meta_dispatch.h>
307
+ #include <ATen/ops/upsample_linear1d_meta_dispatch.h>
308
+ #include <ATen/ops/upsample_linear1d_backward_meta_dispatch.h>
309
+ #include <ATen/ops/upsample_nearest1d_meta_dispatch.h>
310
+ #include <ATen/ops/upsample_nearest1d_backward_meta_dispatch.h>
311
+ #include <ATen/ops/upsample_nearest2d_meta_dispatch.h>
312
+ #include <ATen/ops/upsample_nearest2d_backward_meta_dispatch.h>
313
+ #include <ATen/ops/upsample_nearest3d_meta_dispatch.h>
314
+ #include <ATen/ops/upsample_nearest3d_backward_meta_dispatch.h>
315
+ #include <ATen/ops/upsample_trilinear3d_meta_dispatch.h>
316
+ #include <ATen/ops/upsample_trilinear3d_backward_meta_dispatch.h>
317
+ #include <ATen/ops/view_meta_dispatch.h>
318
+ #include <ATen/ops/view_as_complex_meta_dispatch.h>
319
+ #include <ATen/ops/view_as_real_meta_dispatch.h>
320
+ #include <ATen/ops/xlogy_meta_dispatch.h>
321
+ #include <ATen/ops/zero_meta_dispatch.h>
322
+
323
+
324
+
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NamedTensorUtils.h ADDED
@@ -0,0 +1,215 @@
1
+ #pragma once
2
+ #include <ATen/NamedTensor.h>
3
+ #include <ATen/TensorNames.h>
4
+ #include <ATen/WrapDimUtilsMulti.h>
5
+
6
+ #include <ATen/core/DimVector.h>
7
+ #include <ATen/core/Tensor.h>
8
+ #include <functional>
9
+
10
+ namespace at {
11
+
12
+ using NameVector = SmallVector<Dimname, kDimVectorStaticSize>;
13
+
14
+ inline bool has_names(const ITensorListRef& tensors) {
15
+ return std::any_of(tensors.begin(), tensors.end(), [](const Tensor& t) {
16
+ return t.has_names();
17
+ });
18
+ }
19
+
20
+ // Converts dim to a positional index. Errors if `dim` cannot be used to
21
+ // refer to any dimension of tensor.
22
+ TORCH_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim);
23
+ TORCH_API std::vector<int64_t> dimnames_to_positions(
24
+ const Tensor& tensor,
25
+ DimnameList dims);
26
+
27
+ // Unifies two DimnameLists to produce a third. This is useful for implementing
28
+ // the named inference rule for binary broadcasting operations like add.
29
+ //
30
+ // There are three main constraints:
31
+ // 1) Check matching: Names must match positionally from the right.
32
+ // 2) Check misaligned: If a name `n` is in `names`, then it must appear at
33
+ // the same index from the right in other.
34
+ // 3) The output names are obtained by unifying the names individually from the
35
+ // right.
36
+ TORCH_API std::vector<Dimname> unify_from_right(
37
+ DimnameList names,
38
+ DimnameList other,
39
+ const char* action = "broadcast");
40
+
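// Illustrative sketch (not part of the upstream header) of the "match from the
// right" rule described above; the Dimname factory calls are assumptions based
// on the public Dimname API:
//
//   auto N = at::Dimname::fromSymbol(c10::Symbol::dimname("N"));
//   auto C = at::Dimname::fromSymbol(c10::Symbol::dimname("C"));
//   auto W = at::Dimname::wildcard();
//   // unify_from_right({N, C}, {W, C}) == {N, C}   (the wildcard unifies with C)
//   // unify_from_right({N, C}, {C, N})             (error: names are misaligned)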
41
+ [[noreturn]] inline void reportNYIDimnameOverload(const char* op_name) {
42
+ TORCH_CHECK(
43
+ false,
44
+ op_name,
45
+ ": You passed a dimname (string) to this op in place of a dimension "
46
+ "index but it does not yet support this behavior. Please pass a dimension "
47
+ "index to work around this.");
48
+ }
49
+
50
+ // [NOTE] Writing name inference rules
51
+ //
52
+ // Operators that support named tensors are either composed of operations that
53
+ // support named tensors or implement some name inference rule. An op that
54
+ // implements its own name inference rule generally looks like the following:
55
+ //
56
+ // Tensor op(...) {
57
+ // perform_shape_checks(...);
58
+ // # (1)
59
+ // auto maybe_outnames = compute_outnames(...);
60
+ // auto result = [&]() {
61
+ // NoNamesGuard guard;
62
+ // return op_impl(...);
63
+ // }();
64
+ // # (2)
65
+ // propagate_names_if_nonempty(result, maybe_outnames);
66
+ //
67
+ // Each op has (1) a compute outnames step and (2) a propagate names step.
68
+ //
69
+ // compute_outnames is responsible for checking that input names match and
70
+ // determining what the output names should be. It returns either:
71
+ // - {} (if the input tensors are all unnamed)
72
+ // - non-empty outnames.
73
+ //
74
+ // propagate_names_if_nonempty propagates the outnames if they exist to the
75
+ // result tensors.
76
+ //
77
+ // The {} case is an optimization; if the user does not use named tensors they
78
+ // pay no perf cost for it.
79
+
80
+ namespace namedinference {
81
+
82
+ const Tensor& propagate_names_if_present_and_nonempty(
83
+ const Tensor& result,
84
+ c10::optional<DimnameList> maybe_names,
85
+ bool validate_names = false);
86
+ // Propagates `names` to `result` if `names` is not empty.
87
+ // `names` can be empty; see [NOTE] Writing name inference rules
88
+ // If `names` is not empty, `names.size()` should equal `result.dim()`.
89
+ // When in doubt, use this overload instead of the others.
90
+ TORCH_API const Tensor& propagate_names_if_nonempty(
91
+ const Tensor& result,
92
+ DimnameList maybe_names,
93
+ bool validate_names = false);
94
+
95
+ // Propagates `names` to `result`. Only use this if we are certain that there
96
+ // are names to propagate (that names is not empty).
97
+ TORCH_API const Tensor& propagate_names(
98
+ const Tensor& result,
99
+ DimnameList names,
100
+ bool validate_names = false);
101
+
102
+ // Propagates all names from src to result.
103
+ TORCH_API void propagate_names(const Tensor& result, const Tensor& src);
104
+
105
+ // Propagates all names except for those at the excluded_idxs.
106
+ TORCH_API void propagate_names_except(
107
+ const Tensor& result,
108
+ const Tensor& src,
109
+ IntArrayRef excluded_idxs);
110
+
111
+ // Used for reduction ops that have a `keepdim` arg.
112
+ TORCH_API void propagate_names_for_reduction(
113
+ const Tensor& result,
114
+ const Tensor& src,
115
+ IntArrayRef excluded_idxs,
116
+ bool keepdim);
117
+
118
+ TORCH_API void propagate_names_for_expand(
119
+ const Tensor& result,
120
+ const Tensor& self);
121
+
122
+ TORCH_API std::vector<Dimname> compute_cat_outnames(
123
+ const MaterializedITensorListRef& tensors);
124
+
125
+ TORCH_API std::vector<Dimname> compute_broadcast_outnames(
126
+ const Tensor& self,
127
+ const Tensor& other);
128
+
129
+ TORCH_API std::vector<Dimname> broadcast_to_outnames(
130
+ const Tensor& tensor,
131
+ const Tensor& reference_tensor,
132
+ const char* op_name);
133
+
134
+ TORCH_API std::vector<Dimname> compute_matmul_outnames(
135
+ const Tensor& self,
136
+ const Tensor& other);
137
+
138
+ TORCH_API std::vector<Dimname> compute_cdist_outnames(
139
+ const Tensor& self,
140
+ const Tensor& other);
141
+
142
+ TORCH_API std::vector<Dimname> compute_bmm_outnames(
143
+ const Tensor& result,
144
+ const Tensor& self,
145
+ const Tensor& other);
146
+
147
+ TORCH_API std::vector<Dimname> compute_squeeze_outnames(const Tensor& tensor);
148
+ TORCH_API std::vector<Dimname> compute_squeeze_outnames(
149
+ const Tensor& tensor,
150
+ std::bitset<dim_bitset_size> dims);
151
+
152
+ std::vector<Dimname> compute_diagonal_outnames(
153
+ const Tensor& tensor,
154
+ int64_t dim1,
155
+ int64_t dim2);
156
+
157
+ // TensorImpl* overloads for Legacy TH/THC code. Use these sparingly.
158
+
159
+ TORCH_API TensorImpl* propagate_names_if_nonempty(
160
+ TensorImpl* result,
161
+ DimnameList maybe_names,
162
+ bool validate_names = false);
163
+
164
+ TORCH_API TensorImpl* propagate_names(
165
+ TensorImpl* result,
166
+ DimnameList names,
167
+ bool validate_names = false);
168
+
169
+ TORCH_API void propagate_names(TensorImpl* result, /*const */ TensorImpl* src);
170
+
171
+ TORCH_API inline void propagate_names(
172
+ const TensorBase& result,
173
+ DimnameList names,
174
+ bool validate_names = false) {
175
+ propagate_names(result.unsafeGetTensorImpl(), names, validate_names);
176
+ }
177
+
178
+ TORCH_API inline void propagate_names_if_nonempty(
179
+ const TensorBase& result,
180
+ DimnameList names,
181
+ bool validate_names = false) {
182
+ propagate_names_if_nonempty(
183
+ result.unsafeGetTensorImpl(), names, validate_names);
184
+ }
185
+
186
+ TORCH_API inline void propagate_names(
187
+ const TensorBase& result,
188
+ const TensorBase& src) {
189
+ propagate_names(result.unsafeGetTensorImpl(), src.unsafeGetTensorImpl());
190
+ }
191
+
192
+ // result = m1 @ m2 + bias
193
+ TORCH_API std::vector<Dimname> propagate_names_for_addmm(
194
+ const Tensor& m1,
195
+ const Tensor& m2,
196
+ const Tensor& bias);
197
+
198
+ TORCH_API std::vector<Dimname> propagate_names_for_addmv(
199
+ const Tensor& mat,
200
+ const Tensor& vec,
201
+ const Tensor& bias);
202
+
203
+ TORCH_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2);
204
+
205
+ TORCH_API std::vector<Dimname> compute_baddbmm_outnames(
206
+ const Tensor& result,
207
+ const Tensor& self,
208
+ const Tensor& other,
209
+ const Tensor& bias);
210
+
211
+ TORCH_API bool are_names_equal(TensorImpl* self, TensorImpl* other);
212
+
213
+ } // namespace namedinference
214
+
215
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NestedTensorImpl.h ADDED
@@ -0,0 +1,283 @@
1
+ #pragma once
2
+ #include <ATen/MemoryOverlap.h>
3
+ #include <ATen/Tensor.h>
4
+ #include <c10/core/DispatchKey.h>
5
+ #include <c10/core/DispatchKeySet.h>
6
+ #include <c10/core/MemoryFormat.h>
7
+ #include <c10/core/TensorImpl.h>
8
+ #include <c10/util/ArrayRef.h>
9
+ #include <c10/util/Exception.h>
10
+ #include <c10/util/Metaprogramming.h>
11
+ #include <c10/util/irange.h>
12
+
13
+ namespace at::native {
14
+ struct NestedTensorImpl;
15
+ inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt);
16
+ int64_t get_numel_from_nested_size_tensor(const at::Tensor& tensor);
17
+
18
+ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
19
+ explicit NestedTensorImpl(
20
+ Storage storage,
21
+ c10::DispatchKeySet key_set,
22
+ const caffe2::TypeMeta data_type,
23
+ at::Tensor nested_sizes,
24
+ at::Tensor nested_strides,
25
+ at::Tensor storage_offsets);
26
+
27
+ explicit NestedTensorImpl(
28
+ const at::Tensor& buffer,
29
+ at::Tensor nested_sizes,
30
+ at::Tensor nested_strides,
31
+ at::Tensor storage_offsets);
32
+ // assume contiguous, `nested_strides` and `offsets`
33
+ // can be inferred from `nested_sizes`
34
+ explicit NestedTensorImpl(
35
+ const at::Tensor& buffer,
36
+ const at::Tensor& nested_sizes);
37
+
38
+ // This constructor is used for creating view tensors from nested tensors
39
+ explicit NestedTensorImpl(
40
+ c10::TensorImpl::ImplType impl_type,
41
+ const at::Tensor& base_tensor,
42
+ at::Tensor nested_sizes,
43
+ at::Tensor nested_strides,
44
+ at::Tensor storage_offsets);
45
+
46
+ // TODO: don't expose private implementation details like this; in
47
+ // particular, resizing this tensor will mess up our dim() and
48
+ // callers cannot fix it.
49
+ const Tensor& get_nested_sizes() const {
50
+ return nested_sizes_;
51
+ }
52
+ // TODO: don't expose private implementation details like this
53
+ const Tensor& get_nested_strides() const {
54
+ return nested_strides_;
55
+ }
56
+ const Tensor& get_storage_offsets() const {
57
+ return storage_offsets_;
58
+ }
59
+ // Returns nullopt if the ith dimension is irregular. The ith dimension
60
+ // of a NestedTensor is regular if the unbound tensors match in
61
+ // size at the (i-1)th dimension.
62
+ c10::optional<int64_t> opt_size(int64_t d) const;
63
+
64
+ int64_t size(int64_t d) const {
65
+ c10::optional<int64_t> optional_size = this->opt_size(d);
66
+ TORCH_CHECK(
67
+ optional_size.has_value(),
68
+ "Given dimension ",
69
+ d,
70
+ " is irregular and does not have a size.");
71
+ return *optional_size;
72
+ }
73
+ /**
74
+ * Return a view of the nested tensor as a 1 dimensional contiguous tensor.
75
+ *
76
+ * The buffer tensor created by this function shares the same storage_impl as
77
+ * the original nested tensor, and therefore can be seen as a view.
78
+ *
79
+ * @return A newly constructed view tensor
80
+ */
81
+ at::Tensor get_buffer() const {
82
+ TORCH_CHECK(
83
+ nested_tensor_impl_is_contiguous(this),
84
+ "NestedTensor must be contiguous to get buffer.");
85
+ return get_unsafe_storage_as_tensor();
86
+ }
87
+ /**
88
+ * If possible use get_buffer() instead. This function returns the storage
89
+ * as a tensor directly, which is not safe to use in general. If using this
90
+ * function, the caller must account for nested_sizes,
91
+ * nested_strides and storage_offsets.
92
+ *
93
+ * @return A newly constructed view tensor
94
+ */
95
+ at::Tensor get_unsafe_storage_as_tensor() const {
96
+ auto buffer_key_set_ = generate_buffer_key_set();
97
+ const auto buffer_size = get_buffer_size();
98
+ auto buffer_tensor_impl = c10::make_intrusive<TensorImpl>(
99
+ c10::TensorImpl::VIEW, Storage(storage_), buffer_key_set_, data_type_);
100
+ buffer_tensor_impl->set_sizes_contiguous(
101
+ c10::makeArrayRef(static_cast<int64_t>(buffer_size)));
102
+ return Tensor(buffer_tensor_impl);
103
+ }
104
+
105
+ size_t get_buffer_size() const {
106
+ return storage_.nbytes() / data_type_.itemsize();
107
+ }
108
+
109
+ protected:
110
+ const char* tensorimpl_type_name() const override;
111
+
112
+ // TODO: numel_custom and is_contiguous_custom can be profitably overridden
113
+ // with real implementations
114
+ int64_t numel_custom() const override;
115
+ c10::SymInt sym_numel_custom() const override;
116
+ bool is_contiguous_custom(MemoryFormat) const override;
117
+ int64_t size_custom(int64_t d) const override {
118
+ return this->size(d);
119
+ }
120
+ c10::SymInt sym_size_custom(int64_t d) const override {
121
+ return c10::SymInt{this->size(d)};
122
+ }
123
+ IntArrayRef sizes_custom() const override;
124
+ c10::SymIntArrayRef sym_sizes_custom() const override;
125
+ IntArrayRef strides_custom() const override;
126
+ c10::SymIntArrayRef sym_strides_custom() const override;
127
+
128
+ // this one is real
129
+ int64_t dim_custom() const override;
130
+
131
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
132
+ const c10::VariableVersion& version_counter,
133
+ bool allow_tensor_metadata_change) const override;
134
+
135
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
136
+ c10::VariableVersion&& version_counter,
137
+ bool allow_tensor_metadata_change) const override;
138
+
139
+ void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
140
+ copy_tensor_metadata(
141
+ /*src_impl=*/impl.get(),
142
+ /*dest_impl=*/this,
143
+ /*version_counter=*/version_counter(),
144
+ /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
145
+ }
146
+
147
+ private:
148
+ // Must be called after any changes to our dim() to sync the state
149
+ // to TensorImpl.
150
+ void refresh_dim();
151
+
152
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
153
+ const at::Tensor nested_sizes_, nested_strides_;
154
+ // The starting positions of the underlying tensors in contiguous buffer
155
+ // i.e. the buffer memory offsets to get the underlying tensors
156
+ // The reason to keep this metadata is that, without strong enough constraints,
157
+ // it cannot be derived from `nested_sizes_`
158
+ // and `nested_strides_`:
159
+ // 1. when buffer has blanks, e.g. [tensor1, blank, tensor2]
160
+ // this can happen e.g. after slicing a nested tensor
161
+ // 2. when multiple tensors share a same memory
162
+ // 3. when the nesting ordering is changed, e.g. [tensor1, tensor3, tensor2]
163
+ // Some strong enough constraints are:
164
+ // 1. every underlying tensor is contiguous in memory
165
+ // && nesting in ascending order
166
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
167
+ const at::Tensor storage_offsets_;
168
+ // NOTE: -1 here means the size is missing
169
+ // Optional to allow it to be computed lazily from nested.
170
+ // TODO: maybe we can remove this metadata since
171
+ // we can compute it from `nested_sizes_`
172
+ mutable c10::optional<std::vector<int64_t>> opt_sizes_;
173
+
174
+ template <typename VariableVersion>
175
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core(
176
+ VariableVersion&& version_counter,
177
+ bool allow_tensor_metadata_change) const;
178
+
179
+ /**
180
+ * Generates a non-nested key_set from a nested tensor.
181
+ *
182
+ * For many nested tensor kernel implementations a buffer tensor
183
+ * is generated and redispatched to a non-nested kernel; this function
184
+ * generates the key set used by that buffer tensor.
185
+ *
186
+ * @return Appropriate key set for non-nested tensor
187
+ */
188
+ inline c10::DispatchKeySet generate_buffer_key_set() const {
189
+ auto buffer_key_set = this->key_set();
190
+ const bool Autograd = buffer_key_set.has_any(c10::autograd_dispatch_keyset);
191
+ // Remove nested tensor specific keys
192
+ buffer_key_set = buffer_key_set -
193
+ c10::DispatchKeySet{
194
+ c10::DispatchKey::NestedTensor,
195
+ c10::DispatchKey::AutogradNestedTensor};
196
+
197
+ // Add dense tensor specific keys
198
+ buffer_key_set =
199
+ buffer_key_set | c10::DispatchKeySet{c10::DispatchKey::Dense};
200
+ buffer_key_set = Autograd
201
+ ? c10::DispatchKeySet{c10::DispatchKey::Autograd} | buffer_key_set
202
+ : buffer_key_set;
203
+
204
+ return buffer_key_set;
205
+ }
206
+ };
207
+
208
+ inline NestedTensorImpl* get_nested_tensor_impl_or_null(
209
+ const at::Tensor& tensor) {
210
+ if (tensor.is_nested()) {
211
+ return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl());
212
+ }
213
+ return nullptr;
214
+ }
215
+
216
+ inline NestedTensorImpl* get_nested_tensor_impl(const at::Tensor& tensor) {
217
+ TORCH_CHECK(
218
+ tensor.is_nested(), "get_nested_tensor_impl requires a NestedTensor.");
219
+ return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl());
220
+ }
221
+
222
+ inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt) {
223
+ int64_t ntensors = nt->size(0);
224
+ if (ntensors == 0) {
225
+ return true;
226
+ }
227
+ const Tensor &sizemat = nt->get_nested_sizes(),
228
+ &stridemat = nt->get_nested_strides();
229
+ int64_t* offsets_ptr = nt->get_storage_offsets().data_ptr<int64_t>();
230
+ int64_t orig_dim = sizemat.size(1);
231
+ // nesting scalars
232
+ if (orig_dim == 0) {
233
+ // each scalar must be contiguous
234
+ // if there is blank memory between underlying scalars
235
+ for (int64_t i = 0; i < ntensors; i++) {
236
+ if (offsets_ptr[i] != i) {
237
+ return false;
238
+ }
239
+ }
240
+ }
241
+ // nesting tensors
242
+ else {
243
+ // if any underlying tensor is non-contiguous
244
+ const int64_t *sizemat_ptr = sizemat.data_ptr<int64_t>(),
245
+ *stridemat_ptr = stridemat.data_ptr<int64_t>();
246
+ for (int64_t i = 0; i < ntensors; i++) {
247
+ if (stridemat_ptr[orig_dim - 1] != 1) {
248
+ return false;
249
+ }
250
+ int64_t product = sizemat_ptr[orig_dim - 1];
251
+ for (int64_t j = orig_dim - 2; j >= 0; j--) {
252
+ if (stridemat_ptr[j] != product) {
253
+ return false;
254
+ }
255
+ product *= sizemat_ptr[j];
256
+ }
257
+ sizemat_ptr += orig_dim;
258
+ stridemat_ptr += orig_dim;
259
+ }
260
+ // if there is blank memory between underlying tensors
261
+ if (offsets_ptr[0] != 0) {
262
+ return false;
263
+ }
264
+ sizemat_ptr = sizemat.data_ptr<int64_t>();
265
+ stridemat_ptr = stridemat.data_ptr<int64_t>();
266
+ for (int64_t i = 1; i < ntensors; i++) {
267
+ if (offsets_ptr[i] !=
268
+ offsets_ptr[i - 1] + *sizemat_ptr * *stridemat_ptr) {
269
+ return false;
270
+ }
271
+ sizemat_ptr += orig_dim;
272
+ stridemat_ptr += orig_dim;
273
+ }
274
+ }
275
+ // everything is fine
276
+ return true;
277
+ }
278
+
279
+ inline const at::Tensor& get_nested_sizes(const at::Tensor& tensor) {
280
+ return get_nested_tensor_impl(tensor)->get_nested_sizes();
281
+ }
282
+
283
+ } // namespace at::native
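A short usage sketch for the accessors above: `get_nested_tensor_impl` and `opt_size` come straight from this header, while the printing helper is only illustrative:

#include <ATen/ATen.h>
#include <ATen/NestedTensorImpl.h>
#include <cstdint>
#include <iostream>

// Report, per dimension, whether a nested tensor is regular (all unbound
// tensors agree on that size) or irregular (ragged), using opt_size().
void print_nested_dims(const at::Tensor& nt) {
  auto* impl = at::native::get_nested_tensor_impl(nt);  // throws if not nested
  for (int64_t d = 0; d < nt.dim(); ++d) {
    auto sz = impl->opt_size(d);
    if (sz.has_value()) {
      std::cout << "dim " << d << ": regular, size " << *sz << '\n';
    } else {
      std::cout << "dim " << d << ": irregular\n";
    }
  }
}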
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/NumericUtils.h ADDED
@@ -0,0 +1,203 @@
1
+ #pragma once
2
+
3
+ #ifdef __HIPCC__
4
+ #include <hip/hip_runtime.h>
5
+ #endif
6
+
7
+ #include <c10/macros/Macros.h>
8
+ #include <c10/util/BFloat16.h>
9
+ #include <c10/util/Float8_e4m3fn.h>
10
+ #include <c10/util/Float8_e4m3fnuz.h>
11
+ #include <c10/util/Float8_e5m2.h>
12
+ #include <c10/util/Float8_e5m2fnuz.h>
13
+ #include <c10/util/Half.h>
14
+ #include <c10/util/complex.h>
15
+
16
+ #include <cmath>
17
+ #include <type_traits>
18
+
19
+ namespace at {
20
+
21
+ // std::isnan isn't performant to use on integral types; it will
22
+ // (uselessly) convert to floating point and then do the test.
23
+ // This function is.
24
+
25
+ template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
26
+ inline C10_HOST_DEVICE bool _isnan(T /*val*/) {
27
+ return false;
28
+ }
29
+
30
+ template <typename T, std::enable_if_t<std::is_floating_point_v<T>, int> = 0>
31
+ inline C10_HOST_DEVICE bool _isnan(T val) {
32
+ #if defined(__CUDACC__) || defined(__HIPCC__)
33
+ return ::isnan(val);
34
+ #else
35
+ return std::isnan(val);
36
+ #endif
37
+ }
38
+
39
+ template <typename T, std::enable_if_t<c10::is_complex<T>::value, int> = 0>
40
+ inline C10_HOST_DEVICE bool _isnan(T val) {
41
+ return std::isnan(val.real()) || std::isnan(val.imag());
42
+ }
43
+
44
+ template <typename T, std::enable_if_t<std::is_same_v<T, at::Half>, int> = 0>
45
+ inline C10_HOST_DEVICE bool _isnan(T val) {
46
+ return at::_isnan(static_cast<float>(val));
47
+ }
48
+
49
+ template <
50
+ typename T,
51
+ std::enable_if_t<std::is_same_v<T, at::BFloat16>, int> = 0>
52
+ inline C10_HOST_DEVICE bool _isnan(at::BFloat16 val) {
53
+ return at::_isnan(static_cast<float>(val));
54
+ }
55
+
56
+ inline C10_HOST_DEVICE bool _isnan(at::BFloat16 val) {
57
+ return at::_isnan(static_cast<float>(val));
58
+ }
59
+
60
+ template <
61
+ typename T,
62
+ std::enable_if_t<std::is_same_v<T, at::Float8_e5m2>, int> = 0>
63
+ inline C10_HOST_DEVICE bool _isnan(T val) {
64
+ return val.isnan();
65
+ }
66
+
67
+ template <
68
+ typename T,
69
+ std::enable_if_t<std::is_same_v<T, at::Float8_e4m3fn>, int> = 0>
70
+ inline C10_HOST_DEVICE bool _isnan(T val) {
71
+ return val.isnan();
72
+ }
73
+
74
+ template <
75
+ typename T,
76
+ std::enable_if_t<std::is_same_v<T, at::Float8_e5m2fnuz>, int> = 0>
77
+ inline C10_HOST_DEVICE bool _isnan(T val) {
78
+ return val.isnan();
79
+ }
80
+
81
+ template <
82
+ typename T,
83
+ std::enable_if_t<std::is_same_v<T, at::Float8_e4m3fnuz>, int> = 0>
84
+ inline C10_HOST_DEVICE bool _isnan(T val) {
85
+ return val.isnan();
86
+ }
87
+
88
+ // std::isinf isn't performant to use on integral types; it will
89
+ // (uselessly) convert to floating point and then do the test.
90
+ // This function is.
91
+
92
+ template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
93
+ inline C10_HOST_DEVICE bool _isinf(T /*val*/) {
94
+ return false;
95
+ }
96
+
97
+ template <typename T, std::enable_if_t<std::is_floating_point_v<T>, int> = 0>
98
+ inline C10_HOST_DEVICE bool _isinf(T val) {
99
+ #if defined(__CUDACC__) || defined(__HIPCC__)
100
+ return ::isinf(val);
101
+ #else
102
+ return std::isinf(val);
103
+ #endif
104
+ }
105
+
106
+ inline C10_HOST_DEVICE bool _isinf(at::Half val) {
107
+ return at::_isinf(static_cast<float>(val));
108
+ }
109
+
110
+ inline C10_HOST_DEVICE bool _isinf(at::BFloat16 val) {
111
+ return at::_isinf(static_cast<float>(val));
112
+ }
113
+
114
+ inline C10_HOST_DEVICE bool _isinf(at::Float8_e5m2 val) {
115
+ return val.isinf();
116
+ }
117
+
118
+ inline C10_HOST_DEVICE bool _isinf(at::Float8_e4m3fn val) {
119
+ return false;
120
+ }
121
+
122
+ inline C10_HOST_DEVICE bool _isinf(at::Float8_e5m2fnuz val) {
123
+ return false;
124
+ }
125
+
126
+ inline C10_HOST_DEVICE bool _isinf(at::Float8_e4m3fnuz val) {
127
+ return false;
128
+ }
129
+
130
+ template <typename T>
131
+ C10_HOST_DEVICE inline T exp(T x) {
132
+ static_assert(
133
+ !std::is_same_v<T, double>,
134
+ "this template must be used with float or less precise type");
135
+ #if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
136
+ // use __expf fast approximation for peak bandwidth
137
+ return __expf(x);
138
+ #else
139
+ return ::exp(x);
140
+ #endif
141
+ }
142
+
143
+ template <>
144
+ C10_HOST_DEVICE inline double exp<double>(double x) {
145
+ return ::exp(x);
146
+ }
147
+
148
+ template <typename T>
149
+ C10_HOST_DEVICE inline T log(T x) {
150
+ static_assert(
151
+ !std::is_same_v<T, double>,
152
+ "this template must be used with float or less precise type");
153
+ #if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
154
+ // use __logf fast approximation for peak bandwidth
155
+ return __logf(x);
156
+ #else
157
+ return ::log(x);
158
+ #endif
159
+ }
160
+
161
+ template <>
162
+ C10_HOST_DEVICE inline double log<double>(double x) {
163
+ return ::log(x);
164
+ }
165
+
166
+ template <typename T>
167
+ C10_HOST_DEVICE inline T log1p(T x) {
168
+ static_assert(
169
+ !std::is_same_v<T, double>,
170
+ "this template must be used with float or less precise type");
171
+ #if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
172
+ // use __logf fast approximation for peak bandwidth
173
+ // NOTE: There is no __log1pf so unfortunately we lose precision.
174
+ return __logf(1.0f + x);
175
+ #else
176
+ return ::log1p(x);
177
+ #endif
178
+ }
179
+
180
+ template <>
181
+ C10_HOST_DEVICE inline double log1p<double>(double x) {
182
+ return ::log1p(x);
183
+ }
184
+
185
+ template <typename T>
186
+ C10_HOST_DEVICE inline T tan(T x) {
187
+ static_assert(
188
+ !std::is_same_v<T, double>,
189
+ "this template must be used with float or less precise type");
190
+ #if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
191
+ // use __tanf fast approximation for peak bandwidth
192
+ return __tanf(x);
193
+ #else
194
+ return ::tan(x);
195
+ #endif
196
+ }
197
+
198
+ template <>
199
+ C10_HOST_DEVICE inline double tan<double>(double x) {
200
+ return ::tan(x);
201
+ }
202
+
203
+ } // namespace at
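A minimal illustration of the overload set above: `at::_isnan` is a constant `false` for integral types (no float conversion), and dispatches to the appropriate floating-point test otherwise. The sample values are arbitrary:

#include <ATen/NumericUtils.h>
#include <cmath>
#include <cstdint>

bool isnan_demo() {
  int64_t i = 42;            // integral overload: always false
  float f = std::nanf("");   // floating-point overload: ::isnan / std::isnan
  at::Half h(1.0f);          // reduced precision: tested via a float cast
  return at::_isnan(i) || at::_isnan(f) || at::_isnan(h);  // false || true || false
}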
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/OpMathType.h ADDED
@@ -0,0 +1,69 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/util/BFloat16.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Float8_e4m3fn.h>
7
+ #include <c10/util/Float8_e4m3fnuz.h>
8
+ #include <c10/util/Float8_e5m2.h>
9
+ #include <c10/util/Float8_e5m2fnuz.h>
10
+ #include <c10/util/Half.h>
11
+
12
+ namespace at {
13
+
14
+ // For FP16 or BFloat16 inputs, ops should perform internal math in FP32.
15
+ template <typename scalar_t>
16
+ struct OpMathType {
17
+ using type = scalar_t;
18
+ };
19
+ template <>
20
+ struct OpMathType<at::Half> {
21
+ using type = float;
22
+ };
23
+ template <>
24
+ struct OpMathType<at::BFloat16> {
25
+ using type = float;
26
+ };
27
+ template <>
28
+ struct OpMathType<at::Float8_e5m2> {
29
+ using type = float;
30
+ };
31
+ template <>
32
+ struct OpMathType<at::Float8_e4m3fn> {
33
+ using type = float;
34
+ };
35
+ template <>
36
+ struct OpMathType<at::Float8_e5m2fnuz> {
37
+ using type = float;
38
+ };
39
+ template <>
40
+ struct OpMathType<at::Float8_e4m3fnuz> {
41
+ using type = float;
42
+ };
43
+ template <>
44
+ struct OpMathType<c10::complex<Half>> {
45
+ using type = c10::complex<float>;
46
+ };
47
+
48
+ template <typename T>
49
+ using opmath_type = typename OpMathType<T>::type;
50
+
51
+ namespace {
52
+
53
+ inline c10::ScalarType toOpMathType(const c10::ScalarType type) {
54
+ switch (type) {
55
+ #define DEFINE_CASE(scalar_t, TypeNum) \
56
+ case ScalarType::TypeNum: \
57
+ return CppTypeToScalarType<at::opmath_type<scalar_t>>::value;
58
+
59
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE)
60
+ #undef DEFINE_CASE
61
+
62
+ default:
63
+ TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type);
64
+ }
65
+ }
66
+
67
+ } // namespace
68
+
69
+ } // namespace at
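A sketch of the intended pattern: accumulate in `opmath_type<scalar_t>` so that Half, BFloat16, and the Float8 variants do their internal math in float. The function name is hypothetical:

#include <ATen/OpMathType.h>
#include <cstdint>
#include <type_traits>

static_assert(std::is_same_v<at::opmath_type<at::Half>, float>,
              "Half ops do their internal math in float");

// Sum n elements, accumulating in the op-math type of scalar_t.
template <typename scalar_t>
at::opmath_type<scalar_t> sum_in_opmath(const scalar_t* data, int64_t n) {
  using opmath_t = at::opmath_type<scalar_t>;
  opmath_t acc(0);
  for (int64_t i = 0; i < n; ++i) {
    acc += static_cast<opmath_t>(data[i]);
  }
  return acc;
}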
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/PTThreadPool.h ADDED
@@ -0,0 +1,17 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Parallel.h>
4
+ #include <c10/core/thread_pool.h>
5
+
6
+ namespace at {
7
+
8
+ class TORCH_API PTThreadPool : public c10::ThreadPool {
9
+ public:
10
+ explicit PTThreadPool(int pool_size, int numa_node_id = -1)
11
+ : c10::ThreadPool(pool_size, numa_node_id, []() {
12
+ c10::setThreadName("PTThreadPool");
13
+ at::init_num_threads();
14
+ }) {}
15
+ };
16
+
17
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelFuture.h ADDED
@@ -0,0 +1,13 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <functional>
6
+
7
+ namespace at {
8
+
9
+ // Launches intra-op parallel task, returns a future
10
+ TORCH_API c10::intrusive_ptr<c10::ivalue::Future> intraop_launch_future(
11
+ std::function<void()> func);
12
+
13
+ } // namespace at
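Usage sketch, assuming a libtorch build with the intra-op thread pool available; the lambda body is a placeholder:

#include <ATen/ParallelFuture.h>

void launch_and_wait() {
  auto future = at::intraop_launch_future([] {
    // work to run on an intra-op worker thread
  });
  future->wait();  // block until the task has finished
}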
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelNativeTBB.h ADDED
@@ -0,0 +1,52 @@
1
+ #pragma once
2
+
3
+ #include <atomic>
4
+ #include <cstddef>
5
+ #include <exception>
6
+
7
+ #include <c10/util/Exception.h>
8
+
9
+ #ifdef _WIN32
10
+ #ifndef WIN32_LEAN_AND_MEAN
11
+ #define WIN32_LEAN_AND_MEAN
12
+ #endif
13
+ #endif
14
+ #include <tbb/tbb.h>
15
+
16
+ #define INTRA_OP_PARALLEL
17
+
18
+ namespace at::internal {
19
+
20
+ template <typename F>
21
+ inline void invoke_parallel(
22
+ const int64_t begin,
23
+ const int64_t end,
24
+ const int64_t grain_size,
25
+ const F& f) {
26
+ // Choose number of tasks based on grain size and number of threads.
27
+ int64_t chunk_size = divup((end - begin), get_num_threads());
28
+ // Make sure each task is at least grain_size size.
29
+ chunk_size = std::max(grain_size, chunk_size);
30
+
31
+ std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
32
+ std::exception_ptr eptr;
33
+ tbb::parallel_for(
34
+ tbb::blocked_range<int64_t>(begin, end, chunk_size),
35
+ [&eptr, &err_flag, f](const tbb::blocked_range<int64_t>& r) {
36
+ try {
37
+ internal::ThreadIdGuard tid_guard(
38
+ tbb::this_task_arena::current_thread_index());
39
+ f(r.begin(), r.end());
40
+ } catch (...) {
41
+ if (!err_flag.test_and_set()) {
42
+ eptr = std::current_exception();
43
+ }
44
+ }
45
+ },
46
+ tbb::static_partitioner{});
47
+ if (eptr) {
48
+ std::rethrow_exception(eptr);
49
+ }
50
+ }
51
+
52
+ } // namespace at::internal
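A worked example of the chunking arithmetic above, under assumed numbers (10000 iterations, 8 threads, grain_size 2048): dividing the range by the thread count gives 1250 iterations per thread, which is below the grain size, so each TBB task is padded up to 2048 iterations (roughly 5 tasks instead of 8 small ones). The helper below only mirrors that arithmetic for illustration:

#include <algorithm>
#include <cstdint>

constexpr int64_t divup_demo(int64_t x, int64_t y) { return (x + y - 1) / y; }
static_assert(std::max<int64_t>(2048, divup_demo(10000, 8)) == 2048,
              "chunk_size is clamped up to grain_size");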
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h ADDED
@@ -0,0 +1,54 @@
1
+ #pragma once
2
+
3
+ #include <algorithm>
4
+ #include <atomic>
5
+ #include <cstddef>
6
+ #include <exception>
7
+
8
+ #ifdef _OPENMP
9
+ #define INTRA_OP_PARALLEL
10
+
11
+ #include <omp.h>
12
+ #endif
13
+
14
+ #ifdef _OPENMP
15
+ namespace at::internal {
16
+ template <typename F>
17
+ inline void invoke_parallel(
18
+ int64_t begin,
19
+ int64_t end,
20
+ int64_t grain_size,
21
+ const F& f) {
22
+ std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
23
+ std::exception_ptr eptr;
24
+
25
+ #pragma omp parallel
26
+ {
27
+ // choose number of tasks based on grain size and number of threads
28
+ // can't use num_threads clause due to bugs in GOMP's thread pool (See
29
+ // #32008)
30
+ int64_t num_threads = omp_get_num_threads();
31
+ if (grain_size > 0) {
32
+ num_threads = std::min(num_threads, divup((end - begin), grain_size));
33
+ }
34
+
35
+ int64_t tid = omp_get_thread_num();
36
+ int64_t chunk_size = divup((end - begin), num_threads);
37
+ int64_t begin_tid = begin + tid * chunk_size;
38
+ if (begin_tid < end) {
39
+ try {
40
+ internal::ThreadIdGuard tid_guard(tid);
41
+ f(begin_tid, std::min(end, chunk_size + begin_tid));
42
+ } catch (...) {
43
+ if (!err_flag.test_and_set()) {
44
+ eptr = std::current_exception();
45
+ }
46
+ }
47
+ }
48
+ }
49
+ if (eptr) {
50
+ std::rethrow_exception(eptr);
51
+ }
52
+ }
53
+ } // namespace at::internal
54
+ #endif // _OPENMP
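Both backends implement the same `invoke_parallel` contract: split `[begin, end)` into chunks no smaller than `grain_size`, run them on worker threads, and rethrow the first captured exception. User code normally reaches them through `at::parallel_for` from `ATen/Parallel.h`; a small sketch:

#include <ATen/Parallel.h>
#include <cstdint>
#include <vector>

// Square every element, letting ATen pick the backend (OpenMP, TBB, or the
// native pool) and the chunking based on grain_size.
void square_all(std::vector<float>& v) {
  at::parallel_for(
      0, static_cast<int64_t>(v.size()), /*grain_size=*/2048,
      [&](int64_t begin, int64_t end) {
        for (int64_t i = begin; i < end; ++i) {
          v[i] *= v[i];
        }
      });
}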
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/RedispatchFunctions.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/RegistrationDeclarations.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Scalar.h ADDED
@@ -0,0 +1,3 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Scalar.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/SmallVector.h ADDED
@@ -0,0 +1,2 @@
1
+ #pragma once
2
+ #include <c10/util/SmallVector.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/SparseTensorImpl.h ADDED
@@ -0,0 +1,400 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <c10/core/TensorImpl.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/irange.h>
7
+
8
+ #ifndef AT_PER_OPERATOR_HEADERS
9
+ #include <ATen/Functions.h>
10
+ #else
11
+ #include <ATen/ops/empty.h>
12
+ #include <ATen/ops/resize.h>
13
+ #endif
14
+
15
+ namespace at {
16
+ struct TORCH_API SparseTensorImpl : public TensorImpl {
17
+ // Stored in COO format, indices + values.
18
+
19
+ // INVARIANTS:
20
+ // sparse_dim: range [0, len(shape)]; sparse_dim + dense_dim = len(shape)
21
+ // dense_dim : range [0, len(shape)]; sparse_dim + dense_dim = len(shape)
22
+ // _indices.shape: dimensionality: 2, shape: (sparse_dim, nnz)
23
+ // _values.shape: dimensionality: 1 + dense_dim. shape: (nnz,
24
+ // shape[sparse_dim:])
25
+
26
+ int64_t sparse_dim_ = 0; // number of sparse dimensions
27
+ int64_t dense_dim_ = 0; // number of dense dimensions
28
+
29
+ Tensor indices_; // always a LongTensor
30
+ Tensor values_;
31
+
32
+ // A sparse tensor is 'coalesced' if every index occurs at most once in
33
+ // the indices tensor, and the indices are in sorted order. (This means
34
+ // that it is very easy to convert a coalesced tensor to CSR format: you
35
+ // need only compute CSR format indices.)
36
+ //
37
+ // Most math operations can only be performed on coalesced sparse tensors,
38
+ // because many algorithms proceed by merging two sorted lists (of indices).
39
+ bool coalesced_ = false;
40
+
41
+ // compute_numel with integer multiplication overflow check, see gh-57542
42
+ void refresh_numel() {
43
+ TensorImpl::safe_refresh_numel();
44
+ }
45
+
46
+ public:
47
+ // Public for now...
48
+ explicit SparseTensorImpl(at::DispatchKeySet, const caffe2::TypeMeta);
49
+
50
+ void release_resources() override;
51
+
52
+ int64_t nnz() const {
53
+ return values_.size(0);
54
+ }
55
+
56
+ c10::SymInt sym_nnz() const {
57
+ return values_.sym_size(0);
58
+ }
59
+ int64_t sparse_dim() const {
60
+ return sparse_dim_;
61
+ }
62
+ int64_t dense_dim() const {
63
+ return dense_dim_;
64
+ }
65
+ bool coalesced() const {
66
+ return coalesced_;
67
+ }
68
+ Tensor indices() const {
69
+ return indices_;
70
+ }
71
+ Tensor values() const {
72
+ return values_;
73
+ }
74
+
75
+ void set_size(int64_t dim, int64_t new_size) override;
76
+ void set_stride(int64_t dim, int64_t new_stride) override;
77
+ void set_storage_offset(int64_t storage_offset) override;
78
+
79
+ #ifdef DEBUG
80
+ bool has_storage() const override;
81
+ #endif
82
+
83
+ // WARNING: This function does NOT preserve invariants of sparse_dim/dense_dim
84
+ // with respect to indices and values
85
+ void raw_resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
86
+ TORCH_CHECK(
87
+ allow_tensor_metadata_change(),
88
+ "raw_resize_ ",
89
+ err_msg_tensor_metadata_change_not_allowed);
90
+ TORCH_CHECK(
91
+ !has_symbolic_sizes_strides_,
92
+ "raw_resize_ called on tensor with symbolic shape")
93
+ set_sizes_and_strides(size, std::vector<int64_t>(size.size()));
94
+ sparse_dim_ = sparse_dim;
95
+ dense_dim_ = dense_dim;
96
+ refresh_numel();
97
+ }
98
+
99
+ // NOTE: This function preserves invariants of sparse_dim/dense_dim with
100
+ // respect to indices and values.
101
+ //
102
+ // NOTE: This function supports the following cases:
103
+ // 1. When we keep the number of dense dimensions unchanged, and NOT shrinking
104
+ // the size of any of the dense dimensions.
105
+ // 2. When we keep the number of sparse dimensions unchanged, and NOT
106
+ // shrinking the size of any of the sparse dimensions.
107
+ // 3. When the sparse tensor has zero nnz, in which case we are free to change
108
+ // the shapes of both its sparse and dense dimensions.
109
+ //
110
+ // This function DOESN'T support (and will throw an error) the following
111
+ // cases:
112
+ // 1. When we attempt to change the number of sparse dimensions on a non-empty
113
+ // sparse tensor (such an operation will invalidate the indices stored).
114
+ // 2. When we attempt to change the number of dense dimensions on a non-empty
115
+ // sparse tensor (such an operation will behave differently from an equivalent
116
+ // dense tensor's resize method, and for API consistency we don't support it).
117
+ // 3. When we attempt to shrink the size of any of the dense dimensions on a
118
+ // non-empty sparse tensor (such an operation will behave differently from an
119
+ // equivalent dense tensor's resize method, and for API consistency we don't
120
+ // support it).
121
+ // 4. When we attempt to shrink the size of any of the sparse dimensions on a
122
+ // non-empty sparse tensor (this could make some of the stored indices
123
+ // out-of-bound and thus unsafe).
124
+ template <typename T>
125
+ void _resize_(int64_t sparse_dim, int64_t dense_dim, ArrayRef<T> size) {
126
+ TORCH_CHECK(
127
+ allow_tensor_metadata_change(),
128
+ "resize_ ",
129
+ err_msg_tensor_metadata_change_not_allowed);
130
+ TORCH_CHECK(
131
+ !has_symbolic_sizes_strides_,
132
+ "resize_ called on tensor with symbolic shape")
133
+ TORCH_CHECK(
134
+ sparse_dim + dense_dim == static_cast<int64_t>(size.size()),
135
+ "number of dimensions must be sparse_dim (",
136
+ sparse_dim,
137
+ ") + dense_dim (",
138
+ dense_dim,
139
+ "), but got ",
140
+ size.size());
141
+ if (nnz() > 0) {
142
+ auto alt_options_msg =
143
+ "You could try the following options:\n\
144
+ 1. If you need an empty sparse tensor of this size, call `x = torch.sparse_coo_tensor(size)`.\n\
145
+ 2. If you need to resize this tensor, you have the following options:\n\
146
+ 1. For both sparse and dense dimensions, keep the number of them constant and the size of them non-shrinking, and then try the same call again.\n\
147
+ 2. Or, create a new sparse tensor with the correct indices and values from this sparse tensor.";
148
+
149
+ TORCH_CHECK(
150
+ sparse_dim == sparse_dim_,
151
+ "changing the number of sparse dimensions (from ",
152
+ sparse_dim_,
153
+ " to ",
154
+ sparse_dim,
155
+ ") on a non-empty sparse tensor is not supported.\n",
156
+ alt_options_msg);
157
+
158
+ TORCH_CHECK(
159
+ dense_dim == dense_dim_,
160
+ "changing the number of dense dimensions (from ",
161
+ dense_dim_,
162
+ " to ",
163
+ dense_dim,
164
+ ") on a non-empty sparse tensor is not supported.\n",
165
+ alt_options_msg);
166
+
167
+ bool shrinking_sparse_dims = false;
168
+ bool shrinking_dense_dim = false;
169
+ auto sparse_size_original = generic_sizes<T>().slice(0, sparse_dim);
170
+ auto sparse_size_new = size.slice(0, sparse_dim);
171
+ for (const auto i : c10::irange(sparse_dim)) {
172
+ if (sparse_size_new[i] < sparse_size_original[i]) {
173
+ shrinking_sparse_dims = true;
174
+ break;
175
+ }
176
+ }
177
+ auto dense_size_original = generic_sizes<T>().slice(sparse_dim);
178
+ auto dense_size_new = size.slice(sparse_dim);
179
+ for (const auto i : c10::irange(dense_dim)) {
180
+ if (dense_size_new[i] < dense_size_original[i]) {
181
+ shrinking_dense_dim = true;
182
+ break;
183
+ }
184
+ }
185
+
186
+ TORCH_CHECK(
187
+ !shrinking_sparse_dims,
188
+ "shrinking the size of sparse dimensions (from ",
189
+ sparse_size_original,
190
+ " to ",
191
+ sparse_size_new,
192
+ ") on a non-empty sparse tensor is not supported.\n",
193
+ alt_options_msg);
194
+
195
+ TORCH_CHECK(
196
+ !shrinking_dense_dim,
197
+ "shrinking the size of dense dimensions (from ",
198
+ dense_size_original,
199
+ " to ",
200
+ dense_size_new,
201
+ ") on a non-empty sparse tensor is not supported.\n",
202
+ alt_options_msg);
203
+ }
204
+
205
+ auto sizes_and_strides = generic_sizes<T>();
206
+ const bool size_equals_sizes = std::equal(
207
+ size.begin(),
208
+ size.end(),
209
+ sizes_and_strides.begin(),
210
+ sizes_and_strides.end());
211
+ if ((!size_equals_sizes) || (sparse_dim != sparse_dim_) ||
212
+ (dense_dim != dense_dim_)) {
213
+ auto nnz = at::symint::sizes<T>(values())[0];
214
+ std::vector<T> values_size = {nnz};
215
+ auto dense_size = size.slice(sparse_dim);
216
+ values_size.insert(
217
+ values_size.end(), dense_size.begin(), dense_size.end());
218
+ at::symint::resize_<T>(values_, values_size);
219
+ at::symint::resize_<T>(indices_, {T(sparse_dim), nnz});
220
+ }
221
+
222
+ if (!size_equals_sizes) {
223
+ set_sizes_and_strides(size, std::vector<T>(size.size()));
224
+ }
225
+ sparse_dim_ = sparse_dim;
226
+ dense_dim_ = dense_dim;
227
+ refresh_numel();
228
+ }
229
+
230
+ void resize_(int64_t sparse_dim, int64_t dense_dim, ArrayRef<int64_t> size) {
231
+ return _resize_(sparse_dim, dense_dim, size);
232
+ }
233
+
234
+ void resize_(
235
+ int64_t sparse_dim,
236
+ int64_t dense_dim,
237
+ ArrayRef<c10::SymInt> size) {
238
+ return _resize_(sparse_dim, dense_dim, size);
239
+ }
240
+
241
+ // NOTE: this function will resize the sparse tensor and also set `indices`
242
+ // and `values` to empty.
243
+ void resize_and_clear_(
244
+ int64_t sparse_dim,
245
+ int64_t dense_dim,
246
+ IntArrayRef size) {
247
+ TORCH_CHECK(
248
+ allow_tensor_metadata_change(),
249
+ "resize_and_clear_ ",
250
+ err_msg_tensor_metadata_change_not_allowed);
251
+ TORCH_CHECK(
252
+ !has_symbolic_sizes_strides_,
253
+ "resize_and_clear_ called on tensor with symbolic shape")
254
+ TORCH_CHECK(
255
+ sparse_dim + dense_dim == static_cast<int64_t>(size.size()),
256
+ "number of dimensions must be sparse_dim (",
257
+ sparse_dim,
258
+ ") + dense_dim (",
259
+ dense_dim,
260
+ "), but got ",
261
+ size.size());
262
+
263
+ set_sizes_and_strides(size, std::vector<int64_t>(size.size()));
264
+ sparse_dim_ = sparse_dim;
265
+ dense_dim_ = dense_dim;
266
+
267
+ auto empty_indices = at::empty({sparse_dim, 0}, indices().options());
268
+ std::vector<int64_t> values_size = {0};
269
+ auto dense_size = sizes().slice(sparse_dim);
270
+ values_size.insert(values_size.end(), dense_size.begin(), dense_size.end());
271
+ auto empty_values = at::empty(values_size, values().options());
272
+ set_indices_and_values_unsafe(empty_indices, empty_values);
273
+ refresh_numel();
274
+ }
275
+
276
+ void set_coalesced(bool coalesced) {
277
+ TORCH_CHECK(
278
+ allow_tensor_metadata_change(),
279
+ "set_coalesced ",
280
+ err_msg_tensor_metadata_change_not_allowed);
281
+ coalesced_ = coalesced;
282
+ }
283
+
284
+ // NOTE: this function is only used internally and not exposed to Python
285
+ // frontend
286
+ void set_nnz_and_narrow(int64_t new_nnz) {
287
+ TORCH_CHECK(
288
+ allow_tensor_metadata_change(),
289
+ "set_nnz_and_narrow ",
290
+ err_msg_tensor_metadata_change_not_allowed);
291
+ AT_ASSERT(new_nnz <= nnz());
292
+ indices_ = indices_.narrow(1, 0, new_nnz);
293
+ values_ = values_.narrow(0, 0, new_nnz);
294
+ if (new_nnz < 2) {
295
+ coalesced_ = true;
296
+ }
297
+ }
298
+
299
+ // Takes indices and values and directly puts them into the sparse tensor, no
300
+ // copy. NOTE: this function is unsafe because it doesn't check whether any
301
+ // indices are out of boundaries of `sizes`, so it should ONLY be used where
302
+ // we know that the indices are guaranteed to be within bounds. This used to
303
+ // be called THSTensor_(_move) NB: This used to be able to avoid a refcount
304
+ // bump, but I was too lazy to make it happen
305
+ void set_indices_and_values_unsafe(
306
+ const Tensor& indices,
307
+ const Tensor& values);
308
+
309
+ /**
310
+ * Return a TensorImpl that is a shallow-copy of this TensorImpl.
311
+ *
312
+ * For usage of `version_counter` and `allow_tensor_metadata_change`,
313
+ * see NOTE [ TensorImpl Shallow-Copying ].
314
+ */
315
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
316
+ const c10::VariableVersion& version_counter,
317
+ bool allow_tensor_metadata_change) const override {
318
+ auto impl = c10::make_intrusive<SparseTensorImpl>(key_set(), dtype());
319
+ copy_tensor_metadata(
320
+ /*src_sparse_impl=*/this,
321
+ /*dest_sparse_impl=*/impl.get(),
322
+ /*version_counter=*/version_counter,
323
+ /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
324
+ impl->refresh_numel();
325
+ return impl;
326
+ }
327
+
328
+ /**
329
+ * Return a TensorImpl that is a shallow-copy of this TensorImpl.
330
+ *
331
+ * For usage of `version_counter` and `allow_tensor_metadata_change`,
332
+ * see NOTE [ TensorImpl Shallow-Copying ].
333
+ */
334
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
335
+ c10::VariableVersion&& version_counter,
336
+ bool allow_tensor_metadata_change) const override {
337
+ auto impl = c10::make_intrusive<SparseTensorImpl>(key_set(), dtype());
338
+ copy_tensor_metadata(
339
+ /*src_sparse_impl=*/this,
340
+ /*dest_sparse_impl=*/impl.get(),
341
+ /*version_counter=*/std::move(version_counter),
342
+ /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
343
+ impl->refresh_numel();
344
+ return impl;
345
+ }
346
+
347
+ /**
348
+ * Shallow-copies data from another TensorImpl into this TensorImpl.
349
+ *
350
+ * For why this function doesn't check this TensorImpl's
351
+ * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ].
352
+ */
353
+ void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
354
+ AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set()));
355
+ auto sparse_impl = static_cast<const SparseTensorImpl*>(impl.get());
356
+ copy_tensor_metadata(
357
+ /*src_sparse_impl=*/sparse_impl,
358
+ /*dest_sparse_impl=*/this,
359
+ /*version_counter=*/version_counter(),
360
+ /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
361
+ refresh_numel();
362
+ }
363
+
364
+ private:
365
+ explicit SparseTensorImpl(
366
+ at::DispatchKeySet,
367
+ const caffe2::TypeMeta,
368
+ at::Tensor indices,
369
+ at::Tensor values);
370
+
371
+ /**
372
+ * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
373
+ * storage_offset) from one TensorImpl to another TensorImpl.
374
+ *
375
+ * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
376
+ * [ TensorImpl Shallow-Copying ].
377
+ */
378
+ static void copy_tensor_metadata(
379
+ const SparseTensorImpl* src_sparse_impl,
380
+ SparseTensorImpl* dest_sparse_impl,
381
+ c10::VariableVersion version_counter,
382
+ bool allow_tensor_metadata_change) {
383
+ TensorImpl::copy_tensor_metadata(
384
+ src_sparse_impl,
385
+ dest_sparse_impl,
386
+ std::move(version_counter),
387
+ allow_tensor_metadata_change);
388
+
389
+ // Sparse-specific fields
390
+ dest_sparse_impl->sparse_dim_ = src_sparse_impl->sparse_dim();
391
+ dest_sparse_impl->dense_dim_ = src_sparse_impl->dense_dim();
392
+ dest_sparse_impl->indices_ = src_sparse_impl->indices();
393
+ dest_sparse_impl->values_ = src_sparse_impl->values();
394
+ dest_sparse_impl->coalesced_ = src_sparse_impl->coalesced();
395
+ }
396
+
397
+ const char* tensorimpl_type_name() const override;
398
+ };
399
+
400
+ } // namespace at
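The invariants documented at the top of this struct (indices of shape `(sparse_dim, nnz)`, values of shape `(nnz, shape[sparse_dim:])`) are easiest to observe through the public COO API; a small round-trip sketch:

#include <ATen/ATen.h>

void coo_invariants_demo() {
  auto dense = at::eye(3);            // 3x3 identity, 3 non-zeros
  auto sparse = dense.to_sparse();    // sparse_dim = 2, dense_dim = 0
  auto c = sparse.coalesce();
  // indices: (sparse_dim, nnz) = (2, 3); values: (nnz,) = (3,)
  TORCH_CHECK(c._indices().size(0) == 2 && c._indices().size(1) == 3);
  TORCH_CHECK(c._values().size(0) == 3);
}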
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorGeometry.h ADDED
@@ -0,0 +1,144 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/TensorBase.h>
4
+ #include <c10/core/WrapDimMinimal.h>
5
+
6
+ namespace at {
7
+
8
+ // Return if the tensor geometry represented by `sizes` and `strides` is
9
+ // contiguous Although we cache is_contiguous in tensor now, this is till useful
10
+ // because it allows checking if a particular geometry is contiguous without
11
+ // explicitly constructing a tensor, e.g., when you want to choose a kernel
12
+ // strategy based on whether a subgeometry is contiguous.
13
+ TORCH_API bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides);
14
+
15
+ struct TORCH_API TensorGeometry {
16
+ TensorGeometry() = default;
17
+
18
+ explicit TensorGeometry(c10::SymIntArrayRef sizes)
19
+ : sizes_(sizes.vec()),
20
+ strides_(sizes.size()),
21
+ has_symbolic_sizes_strides_(
22
+ !c10::asIntArrayRefSlowOpt(sizes).has_value()) {
23
+ int64_t dim = static_cast<int64_t>(sizes.size());
24
+ c10::SymInt expected_stride = 1;
25
+ for (int64_t i = dim - 1; i >= 0; i--) {
26
+ strides_[i] = expected_stride;
27
+ expected_stride *= sizes_[i];
28
+ }
29
+ numel_ = expected_stride;
30
+ }
31
+
32
+ explicit TensorGeometry(const TensorBase& t)
33
+ : sizes_(t.sym_sizes().vec()),
34
+ strides_(t.sym_strides().vec()),
35
+ storage_offset_(t.sym_storage_offset()),
36
+ numel_(t.sym_numel()),
37
+ has_symbolic_sizes_strides_(
38
+ t.unsafeGetTensorImpl()->has_symbolic_sizes_strides()) {}
39
+
40
+ // true if the tensor is contiguous
41
+ bool is_contiguous() const;
42
+
43
+ int64_t dim() const {
44
+ return static_cast<int64_t>(sizes_.size());
45
+ }
46
+
47
+ int64_t size(int64_t dim) const {
48
+ TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
49
+ dim = c10::maybe_wrap_dim(dim, this->dim());
50
+ return sizes_.at(static_cast<size_t>(dim)).as_int_unchecked();
51
+ }
52
+ c10::IntArrayRef sizes() const {
53
+ TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
54
+ return c10::asIntArrayRefUnchecked(sizes_);
55
+ }
56
+ int64_t stride(int64_t dim) const {
57
+ TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
58
+ dim = c10::maybe_wrap_dim(dim, this->dim());
59
+ return strides_.at(static_cast<size_t>(dim)).as_int_unchecked();
60
+ }
61
+ c10::IntArrayRef strides() const {
62
+ TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
63
+ return c10::asIntArrayRefUnchecked(strides_);
64
+ }
65
+ int64_t storage_offset() const {
66
+ TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
67
+ return storage_offset_.as_int_unchecked();
68
+ }
69
+ int64_t numel() const {
70
+ TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
71
+ return numel_.as_int_unchecked();
72
+ }
73
+
74
+ c10::SymInt sym_size(int64_t dim) const {
75
+ dim = c10::maybe_wrap_dim(dim, this->dim());
76
+ return sizes_.at(static_cast<size_t>(dim));
77
+ }
78
+ c10::SymIntArrayRef sym_sizes() const {
79
+ return sizes_;
80
+ }
81
+ c10::SymInt sym_stride(int64_t dim) const {
82
+ dim = c10::maybe_wrap_dim(dim, this->dim());
83
+ return strides_.at(static_cast<size_t>(dim));
84
+ }
85
+ c10::SymIntArrayRef sym_strides() const {
86
+ return strides_;
87
+ }
88
+ c10::SymInt sym_storage_offset() const {
89
+ return storage_offset_;
90
+ }
91
+ c10::SymInt sym_numel() const {
92
+ return numel_;
93
+ }
94
+
95
+ TensorGeometry transpose(int64_t dim0, int64_t dim1) {
96
+ TensorGeometry r = *this; // copy
97
+ TORCH_CHECK(
98
+ dim0 < dim(),
99
+ "transpose: dim0=",
100
+ dim0,
101
+ " out of range (dim=",
102
+ dim(),
103
+ ")")
104
+ TORCH_CHECK(
105
+ dim1 < dim(),
106
+ "transpose: dim1=",
107
+ dim1,
108
+ " out of range (dim=",
109
+ dim(),
110
+ ")")
111
+ std::swap(r.sizes_[dim0], r.sizes_[dim1]);
112
+ std::swap(r.strides_[dim0], r.strides_[dim1]);
113
+ return r;
114
+ }
115
+
116
+ std::vector<c10::SymInt>& mutable_sizes() {
117
+ return sizes_;
118
+ }
119
+ std::vector<c10::SymInt>& mutable_strides() {
120
+ return strides_;
121
+ }
122
+ c10::SymInt& mutable_storage_offset() {
123
+ return storage_offset_;
124
+ }
125
+ void recompute() {
126
+ // recalculate numel after a change
127
+ c10::SymInt numel = 1;
128
+ for (const auto& i : sizes_) {
129
+ numel = numel * i;
130
+ }
131
+ numel_ = std::move(numel);
132
+ has_symbolic_sizes_strides_ =
133
+ !c10::asIntArrayRefSlowOpt(sizes_).has_value();
134
+ }
135
+
136
+ private:
137
+ std::vector<c10::SymInt> sizes_;
138
+ std::vector<c10::SymInt> strides_;
139
+ c10::SymInt storage_offset_;
140
+ c10::SymInt numel_;
141
+ bool has_symbolic_sizes_strides_{false};
142
+ };
143
+
144
+ } // namespace at
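A quick illustration of `geometry_is_contiguous`, which answers the contiguity question from raw sizes and strides without constructing a tensor (values chosen arbitrarily):

#include <ATen/TensorGeometry.h>

bool geometry_demo() {
  // Row-major 2x3 layout: strides {3, 1} -> contiguous.
  bool row_major = at::geometry_is_contiguous({2, 3}, {3, 1});
  // Column-major 2x3 layout: strides {1, 2} -> not contiguous.
  bool col_major = at::geometry_is_contiguous({2, 3}, {1, 2});
  return row_major && !col_major;
}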
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorIndexing.h ADDED
@@ -0,0 +1,735 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ExpandUtils.h>
4
+ #include <ATen/ScalarOps.h>
5
+ #include <ATen/core/Tensor.h>
6
+ #include <ATen/core/TensorBody.h>
7
+ #include <c10/core/SymInt.h>
8
+ #include <c10/util/Optional.h>
9
+ #include <c10/util/irange.h>
10
+
11
+ #ifndef AT_PER_OPERATOR_HEADERS
12
+ #include <ATen/Functions.h>
13
+ #include <ATen/NativeFunctions.h>
14
+ #else
15
+ #include <ATen/ops/alias.h>
16
+ #include <ATen/ops/empty.h>
17
+ #include <ATen/ops/scalar_tensor.h>
18
+ #include <ATen/ops/zeros.h>
19
+ #endif
20
+
21
+ #include <ATen/core/List.h>
22
+
23
+ #include <utility>
24
+
25
+ namespace at::indexing {
26
+
27
+ constexpr int64_t INDEX_MIN = c10::SymInt::min_representable_int();
28
+ constexpr int64_t INDEX_MAX = -(INDEX_MIN + 1);
29
+
30
+ enum class TensorIndexType { None, Ellipsis, SymInt, Boolean, Slice, Tensor };
31
+
32
+ constexpr c10::nullopt_t None = c10::nullopt;
33
+
34
+ struct TORCH_API EllipsisIndexType final {
35
+ EllipsisIndexType() = default;
36
+ };
37
+ TORCH_API extern const EllipsisIndexType Ellipsis;
38
+
39
+ struct TORCH_API Slice final {
40
+ public:
41
+ Slice(
42
+ c10::optional<c10::SymInt> start_index = c10::nullopt,
43
+ c10::optional<c10::SymInt> stop_index = c10::nullopt,
44
+ c10::optional<c10::SymInt> step_index = c10::nullopt) {
45
+ if (!step_index.has_value()) {
46
+ step_ = c10::SymInt(1);
47
+ } else {
48
+ step_ = std::move(step_index).value();
49
+ }
50
+
51
+ TORCH_CHECK_VALUE(step_ != 0, "slice step cannot be zero");
52
+
53
+ if (!start_index.has_value()) {
54
+ start_ = c10::SymInt(step_ < 0 ? INDEX_MAX : 0);
55
+ } else {
56
+ start_ = std::move(start_index).value();
57
+ }
58
+
59
+ if (!stop_index.has_value()) {
60
+ stop_ = c10::SymInt(step_ < 0 ? INDEX_MIN : INDEX_MAX);
61
+ } else {
62
+ stop_ = std::move(stop_index).value();
63
+ }
64
+ }
65
+
66
+ inline c10::SymInt start() const {
67
+ return start_;
68
+ }
69
+
70
+ inline c10::SymInt stop() const {
71
+ return stop_;
72
+ }
73
+
74
+ inline c10::SymInt step() const {
75
+ return step_;
76
+ }
77
+
78
+ private:
79
+ c10::SymInt start_;
80
+ c10::SymInt stop_;
81
+ c10::SymInt step_;
82
+ };
83
+
84
+ TORCH_API std::ostream& operator<<(std::ostream& stream, const Slice& slice);
85
+
86
+ // `at::indexing::TensorIndex` is used for converting C++ tensor indices such as
87
+ // `{None, "...", Ellipsis, 0, true, Slice(1, None, 2), torch::tensor({1, 2})}`
88
+ // into its equivalent `std::vector<TensorIndex>`, so that further tensor
89
+ // indexing operations can be performed using the supplied indices.
90
+ //
91
+ // There is one-to-one correspondence between Python and C++ tensor index types:
92
+ // Python | C++
93
+ // -----------------------------------------------------
94
+ // `None` | `at::indexing::None`
95
+ // `Ellipsis` | `at::indexing::Ellipsis`
96
+ // `...` | `"..."`
97
+ // `123` | `123`
98
+ // `True` / `False` | `true` / `false`
99
+ // `:` | `Slice()` / `Slice(None, None)`
100
+ // `::` | `Slice()` / `Slice(None, None, None)`
101
+ // `1:` | `Slice(1, None)`
102
+ // `1::` | `Slice(1, None, None)`
103
+ // `:3` | `Slice(None, 3)`
104
+ // `:3:` | `Slice(None, 3, None)`
105
+ // `::2` | `Slice(None, None, 2)`
106
+ // `1:3` | `Slice(1, 3)`
107
+ // `1::2` | `Slice(1, None, 2)`
108
+ // `:3:2` | `Slice(None, 3, 2)`
109
+ // `1:3:2` | `Slice(1, 3, 2)`
110
+ // `torch.tensor([1, 2])`) | `torch::tensor({1, 2})`
111
+ struct TORCH_API TensorIndex final {
112
+ // Case 1: `at::indexing::None`
113
+ TensorIndex(c10::nullopt_t) : type_(TensorIndexType::None) {}
114
+
115
+ // Case 2: "..." / `at::indexing::Ellipsis`
116
+ TensorIndex(at::indexing::EllipsisIndexType)
117
+ : type_(TensorIndexType::Ellipsis) {}
118
+ TensorIndex(const char* str) : TensorIndex(at::indexing::Ellipsis) {
119
+ TORCH_CHECK_VALUE(
120
+ strcmp(str, "...") == 0,
121
+ "Expected \"...\" to represent an ellipsis index, but got \"",
122
+ str,
123
+ "\"");
124
+ }
125
+
126
+ // Case 3: (Sym) Integer value
127
+ TensorIndex(SymInt integer)
128
+ : integer_(std::move(integer)), type_(TensorIndexType::SymInt) {}
129
+ TensorIndex(int64_t integer) : TensorIndex(SymInt(integer)) {}
130
+ TensorIndex(int integer) : TensorIndex(SymInt(integer)) {}
131
+
132
+ // Case 4: Boolean value
133
+ template <class T, class = std::enable_if_t<std::is_same_v<bool, T>>>
134
+ TensorIndex(T boolean) : boolean_(boolean), type_(TensorIndexType::Boolean) {}
135
+
136
+ // Case 5: Slice represented in `at::indexing::Slice` form
137
+ TensorIndex(Slice slice)
138
+ : slice_(std::move(slice)), type_(TensorIndexType::Slice) {}
139
+
140
+ // Case 6: Tensor value
141
+ TensorIndex(Tensor tensor)
142
+ : tensor_(std::move(tensor)), type_(TensorIndexType::Tensor) {}
143
+
144
+ inline bool is_none() const {
145
+ return type_ == TensorIndexType::None;
146
+ }
147
+
148
+ inline bool is_ellipsis() const {
149
+ return type_ == TensorIndexType::Ellipsis;
150
+ }
151
+
152
+ inline bool is_integer() const {
153
+ return type_ == TensorIndexType::SymInt;
154
+ }
155
+
156
+ inline SymInt integer() const {
157
+ return integer_;
158
+ }
159
+
160
+ inline bool is_boolean() const {
161
+ return type_ == TensorIndexType::Boolean;
162
+ }
163
+
164
+ inline bool boolean() const {
165
+ return boolean_;
166
+ }
167
+
168
+ inline bool is_slice() const {
169
+ return type_ == TensorIndexType::Slice;
170
+ }
171
+
172
+ inline const Slice& slice() const {
173
+ return slice_;
174
+ }
175
+
176
+ inline bool is_tensor() const {
177
+ return type_ == TensorIndexType::Tensor;
178
+ }
179
+
180
+ inline const Tensor& tensor() const {
181
+ return tensor_;
182
+ }
183
+
184
+ private:
185
+ SymInt integer_ = 0;
186
+ bool boolean_ = false;
187
+ Slice slice_;
188
+ Tensor tensor_;
189
+ TensorIndexType type_;
190
+ };
191
+
192
+ TORCH_API std::ostream& operator<<(
193
+ std::ostream& stream,
194
+ const TensorIndex& tensor_index);
195
+ TORCH_API std::ostream& operator<<(
196
+ std::ostream& stream,
197
+ const std::vector<TensorIndex>& tensor_indices);
198
+
199
+ namespace impl {
200
+ static inline Tensor applySlice(
201
+ const Tensor& self,
202
+ int64_t dim,
203
+ c10::SymInt start,
204
+ c10::SymInt stop,
205
+ c10::SymInt step,
206
+ bool disable_slice_optimization,
207
+ const at::Device& self_device,
208
+ const c10::optional<SymIntArrayRef>& self_sizes) {
209
+ // TODO: implement negative step
210
+ TORCH_CHECK_VALUE(step > 0, "step must be greater than zero");
211
+
212
+ // See NOTE [nested tensor size for indexing]
213
+ if (self_sizes.has_value()) {
214
+ // Skip this optimization if we are tracing, as the trace may be polymorphic
215
+ // over the shape of the `self` tensor, and we still want to record
216
+ // the slice.
217
+ SymInt length = (self_device == at::kCPU || self_device == at::kCUDA)
218
+ ? (*self_sizes)[dim]
219
+ : self.sym_size(dim);
220
+ if (!disable_slice_optimization &&
221
+ TORCH_GUARD_SIZE_OBLIVIOUS(start.sym_eq(0)) && length == stop &&
222
+ step == 1) {
223
+ return self;
224
+ }
225
+ }
226
+ return self.slice_symint(
227
+ dim, std::move(start), std::move(stop), std::move(step));
228
+ }
229
+
230
+ static inline Tensor applySelect(
231
+ const Tensor& self,
232
+ int64_t dim,
233
+ SymInt index,
234
+ int64_t real_dim,
235
+ const at::Device& /*self_device*/,
236
+ const c10::optional<SymIntArrayRef>& self_sizes) {
237
+ // See NOTE [nested tensor size for indexing]
238
+ if (self_sizes.has_value()) {
239
+ auto maybe_index = index.maybe_as_int();
240
+ if (maybe_index.has_value()) {
241
+ TORCH_CHECK_INDEX(
242
+ !(maybe_index.value() == 0 && dim == 0 && self_sizes->empty()),
243
+ "invalid index of a 0-dim tensor. ",
244
+ "Use `tensor.item()` in Python or `tensor.item<T>()` in C++ to convert a 0-dim tensor to a number");
245
+ }
246
+
247
+ auto size = (*self_sizes)[dim];
248
+ // Note: `size >= -index` is not equivalent to `size > -1 - index` if index
249
+ // is INT64_MIN For std::numeric_limits<int64_t>::min() result of unary
250
+ // minus is undefined by the standard but in practice is equal to self. On
251
+ // the other hand, indexing wraping is valid for all negative int64_t
252
+ // values, as x[INT64_MIN] is the same as x[INT64_MAX]
253
+ TORCH_CHECK_INDEX(
254
+ size > -1 - index && size > index,
255
+ "index ",
256
+ index,
257
+ " is out of bounds for dimension ",
258
+ real_dim,
259
+ " with size ",
260
+ size);
261
+ }
262
+
263
+ // if the index is negative, do not normalize it because that would fix the
264
+ // index on the current tensor size in the tracer. aten::select also works on
265
+ // negative indices
266
+ return self.select_symint(dim, std::move(index));
267
+ }
268
+
269
+ static inline Tensor boolToIndexingTensorCPUOrCUDA(
270
+ const Tensor& self,
271
+ bool value) {
272
+ // booleans add a dimension of size 1. true indexes this dimension as if 0:,
273
+ // false as empty.
274
+ if (value) {
275
+ return at::empty({1}, self.options().dtype(kLong)).fill_(0.);
276
+ } else {
277
+ return at::empty({0}, self.options().dtype(kLong));
278
+ }
279
+ }
280
+
281
+ static inline Tensor boolToIndexingTensorNonNativeDeviceType(
282
+ const Tensor& self,
283
+ bool value) {
284
+ // booleans add a dimension of size 1. true indexes this dimension as if 0:,
285
+ // false as empty.
286
+ if (value) {
287
+ return at::zeros({1}, self.options().dtype(kLong));
288
+ } else {
289
+ return at::empty({0}, self.options().dtype(kLong));
290
+ }
291
+ }
292
+
293
+ static inline Tensor boolToIndexingTensor(
294
+ const Tensor& self,
295
+ bool value,
296
+ const at::Device& self_device) {
297
+ if (self_device == at::kCPU || self_device == at::kCUDA) {
298
+ return boolToIndexingTensorCPUOrCUDA(self, value);
299
+ } else {
300
+ return boolToIndexingTensorNonNativeDeviceType(self, value);
301
+ }
302
+ }
303
+
304
+ static inline Tensor scalarToTensorNonNativeDeviceType(
305
+ const Scalar& v,
306
+ const TensorOptions& options) {
307
+ return at::scalar_tensor(v, options);
308
+ }
309
+
310
+ static inline void recordTensorIndex(
311
+ const Tensor& tensor,
312
+ std::vector<Tensor>& outIndices,
313
+ int64_t* dim_ptr) {
314
+ // TODO: check scalarType
315
+ outIndices.resize(*dim_ptr + 1);
316
+ outIndices[*dim_ptr] = tensor;
317
+ (*dim_ptr)++;
318
+ };
319
+
320
+ static inline c10::List<c10::optional<Tensor>> typeConvertIndices(
321
+ const Tensor& /*self*/,
322
+ std::vector<Tensor>&& indices) {
323
+ c10::List<c10::optional<Tensor>> converted_inds;
324
+ converted_inds.reserve(indices.size());
325
+ for (auto&& i : std::move(indices)) {
326
+ converted_inds.push_back(std::move(i));
327
+ }
328
+ return converted_inds;
329
+ }
330
+
331
+ // NOTE: Why do we mirror instead of replace the `count_specified_dimensions`
332
+ // function in torch/csrc/autograd/python_variable_indexing.cpp? It's because
333
+ // `count_specified_dimensions` is on the hot path of Python tensor multi-dim
334
+ // indexing (i.e. it's called by `applySlicing` which is called by
335
+ // `THPVariable_getitem` / `THPVariable_setitem` when handling indexing of more
336
+ // than one dimension). If we were to merge the Python/C++
337
+ // `count_specified_dimensions` function, on the Python side we would have to
338
+ // construct a `std::vector` container to be consumed by the C++
339
+ // `count_specified_dimensions` function, which adds 100s of nanoseconds
340
+ // overhead and is undesirable.
341
+ static inline int64_t count_specified_dimensions(
342
+ const ArrayRef<TensorIndex>& indices) {
343
+ // Count the number of indexed dimensions (everything but ellipsis and None)
344
+ int64_t count = 0;
345
+ for (auto& obj : indices) {
346
+ if (obj.is_tensor()) {
347
+ auto& tensor = obj.tensor();
348
+ if (tensor.scalar_type() == kByte || tensor.scalar_type() == kBool) {
349
+ count += tensor.dim();
350
+ } else {
351
+ count++;
352
+ }
353
+ } else if (!obj.is_none() && !obj.is_ellipsis() && !obj.is_boolean()) {
354
+ count++;
355
+ }
356
+ }
357
+ return count;
358
+ }
359
+ } // namespace impl
360
+
361
+ // NOTE: Many functions below are only for consumption from Python indexing
362
+ // implementation, they include:
363
+ //
364
+ // - `Tensor scalarToTensor(...)`
365
+ // - `IntArrayRef slicePrefix1sSize(...)`
366
+ // - `void copy_to(...)`
367
+ // - `Tensor handleDimInMultiDimIndexing(...)`
368
+ // - `Tensor dispatch_index(...)`
369
+ // - `Tensor dispatch_index_put_(...)`
370
+ // - `Tensor get_item(...)`
371
+ // - `void set_item(...)`
372
+ //
373
+ // The rest of the functions are in `at::indexing::impl` namespace, signifying
374
+ // that they shouldn't be used from Python indexing implementation.
375
+ static inline Tensor scalarToTensor(
376
+ const Scalar& v,
377
+ const TensorOptions& options,
378
+ const at::Device& self_device) {
379
+ if (self_device == at::kCPU && !v.isSymbolic()) {
380
+ return at::detail::scalar_tensor_static(
381
+ v, options.dtype_opt()->toScalarType(), self_device);
382
+ } else {
383
+ return impl::scalarToTensorNonNativeDeviceType(v, options);
384
+ }
385
+ }
386
+
387
+ // To match numpy semantics:
388
+ // As a special case for backwards compatibility,
389
+ // strip away unit dimensions from the left of 'src'
390
+ static inline SymIntArrayRef slicePrefix1sSize(const SymIntArrayRef& sizes) {
391
+ size_t first_non1_src = sizes.size();
392
+ for (const auto i : c10::irange(sizes.size())) {
393
+ // Unbacked SymInt has different behavior, but this is sound because
394
+ // failing to slice will only ever cause an error, not divergent
395
+ // behavior
396
+ if (!sizes[i].has_hint() || sizes[i] != 1) {
397
+ first_non1_src = i;
398
+ break;
399
+ }
400
+ }
401
+
402
+ return sizes.slice(first_non1_src);
403
+ }
404
+
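+ // For example (a sketch): sizes [1, 1, 3, 4] become [3, 4], while sizes
+ // [2, 1, 3] are returned unchanged because the leading dimension is not 1.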
405
+ static inline void copy_to(const Tensor& dst, const Tensor& src) {
406
+ if (dst.sym_sizes().equals(src.sym_sizes())) {
407
+ // A shortcut to avoid generating hard-coded constant sizes during tracing.
408
+ // This is not a perfect solution: when src & dst have different shapes,
409
+ // constants will still appear. Users can work around that case by
410
+ // dst[index..] = src.reshape(..)
411
+ dst.copy_(src);
412
+ return;
413
+ } else if (src.dim() == 0 && src.device().type() == at::kCPU) {
414
+ dst.fill_(src);
415
+ return;
416
+ }
417
+ auto src_view = src.view_symint(slicePrefix1sSize(src.sym_sizes()));
418
+ c10::MaybeOwned<Tensor> b_src = expand_inplace(dst, src_view, "setitem");
419
+ dst.copy_(*b_src);
420
+ }
421
+
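+ // For example (a sketch): with `dst` of shape [2, 3] and `src` of shape
+ // [1, 1, 2, 3], the leading unit dimensions of `src` are stripped via
+ // slicePrefix1sSize and the resulting view is expanded into `dst` before the
+ // copy; a 0-dim CPU `src` instead takes the cheaper fill_ path above.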
422
+ // See NOTE [ Setting `disable_slice_optimization` when calling C++ tensor
423
+ // indexing functions from Python ]
424
+ static inline Tensor handleDimInMultiDimIndexing(
425
+ const Tensor& prev_dim_result,
426
+ const Tensor& original_tensor,
427
+ const TensorIndex& index,
428
+ int64_t* dim_ptr,
429
+ int64_t* specified_dims_ptr,
430
+ int64_t real_dim,
431
+ std::vector<Tensor>& outIndices,
432
+ bool disable_slice_optimization,
433
+ const at::Device& original_tensor_device,
434
+ const c10::optional<SymIntArrayRef>& prev_dim_result_sizes) {
435
+ if (index.is_integer()) {
436
+ return impl::applySelect(
437
+ prev_dim_result,
438
+ *dim_ptr,
439
+ index.integer(),
440
+ real_dim,
441
+ original_tensor_device,
442
+ prev_dim_result_sizes);
443
+ } else if (index.is_slice()) {
444
+ Tensor result = impl::applySlice(
445
+ prev_dim_result,
446
+ *dim_ptr,
447
+ index.slice().start(),
448
+ index.slice().stop(),
449
+ index.slice().step(),
450
+ /*disable_slice_optimization=*/disable_slice_optimization,
451
+ original_tensor_device,
452
+ prev_dim_result_sizes);
453
+ (*dim_ptr)++;
454
+ return result;
455
+ } else if (index.is_ellipsis()) {
456
+ (*dim_ptr) += original_tensor.dim() - (*specified_dims_ptr);
457
+ return prev_dim_result;
458
+ } else if (index.is_none()) {
459
+ Tensor result = prev_dim_result.unsqueeze(*dim_ptr);
460
+ (*dim_ptr)++;
461
+ return result;
462
+ } else if (index.is_boolean()) {
463
+ Tensor result = prev_dim_result.unsqueeze(*dim_ptr);
464
+ impl::recordTensorIndex(
465
+ impl::boolToIndexingTensor(
466
+ result, index.boolean(), original_tensor_device),
467
+ outIndices,
468
+ dim_ptr);
469
+ return result;
470
+ } else if (index.is_tensor()) {
471
+ Tensor result = prev_dim_result;
472
+ const Tensor& tensor = index.tensor();
473
+ auto scalar_type = tensor.scalar_type();
474
+ if (tensor.dim() == 0 &&
475
+ at::isIntegralType(scalar_type, /*includeBool=*/true)) {
476
+ if (scalar_type != at::kByte && scalar_type != at::kBool) {
477
+ result = impl::applySelect(
478
+ result,
479
+ *dim_ptr,
480
+ tensor.item<int64_t>(),
481
+ real_dim,
482
+ original_tensor_device,
483
+ prev_dim_result_sizes);
484
+ } else {
485
+ result = result.unsqueeze(*dim_ptr);
486
+ if (scalar_type == at::kBool) {
487
+ impl::recordTensorIndex(
488
+ impl::boolToIndexingTensor(
489
+ result, tensor.item<bool>() != 0, original_tensor_device),
490
+ outIndices,
491
+ dim_ptr);
492
+ } else {
493
+ impl::recordTensorIndex(
494
+ impl::boolToIndexingTensor(
495
+ result, tensor.item<uint8_t>() != 0, original_tensor_device),
496
+ outIndices,
497
+ dim_ptr);
498
+ }
499
+ }
500
+ } else {
501
+ impl::recordTensorIndex(tensor, outIndices, dim_ptr);
502
+ }
503
+ return result;
504
+ } else {
505
+ TORCH_INTERNAL_ASSERT(false, "Invalid TensorIndex type");
506
+ }
507
+ }
508
+
509
+ namespace impl {
510
+ // This mirrors `applySlicing` in
511
+ // torch/csrc/autograd/python_variable_indexing.cpp
512
+ static inline Tensor applySlicing(
513
+ const Tensor& self,
514
+ const ArrayRef<TensorIndex>& indices,
515
+ std::vector<Tensor>& outIndices,
516
+ bool disable_slice_optimization,
517
+ const at::Device& self_device,
518
+ const c10::optional<SymIntArrayRef>& self_sizes) {
519
+ int64_t dim = 0;
520
+ int64_t specified_dims = impl::count_specified_dimensions(indices);
521
+
522
+ // See NOTE [nested tensor size for indexing]
523
+ if (self_sizes.has_value()) {
524
+ TORCH_CHECK_INDEX(
525
+ specified_dims <= (int64_t)self_sizes->size(),
526
+ "too many indices for tensor of dimension ",
527
+ (int)self_sizes->size());
528
+ }
529
+
530
+ Tensor result = self;
531
+ for (const auto i : c10::irange(indices.size())) {
532
+ auto& obj = indices[i];
533
+ // See NOTE [nested tensor size for indexing]
534
+ c10::optional<SymIntArrayRef> result_sizes = result.is_nested()
535
+ ? c10::optional<SymIntArrayRef>(c10::nullopt)
536
+ : c10::optional<SymIntArrayRef>(result.sym_sizes());
537
+ result = handleDimInMultiDimIndexing(
538
+ /*prev_dim_result=*/result,
539
+ /*original_tensor=*/self,
540
+ /*index=*/obj,
541
+ /*dim_ptr=*/&dim,
542
+ /*specified_dims_ptr=*/&specified_dims,
543
+ /*real_dim=*/static_cast<int64_t>(i),
544
+ /*outIndices=*/outIndices,
545
+ /*disable_slice_optimization=*/disable_slice_optimization,
546
+ /*original_tensor_device=*/self_device,
547
+ /*prev_dim_result_sizes=*/result_sizes);
548
+ }
549
+ return result;
550
+ }
551
+ } // namespace impl
552
+
553
+ static inline Tensor dispatch_index(
554
+ const Tensor& self,
555
+ std::vector<Tensor>&& indices) {
556
+ return self.index(impl::typeConvertIndices(self, std::move(indices)));
557
+ }
558
+
559
+ static inline Tensor dispatch_index_put_(
560
+ Tensor& self,
561
+ std::vector<Tensor>&& indices,
562
+ const Tensor& value) {
563
+ return self.index_put_(
564
+ impl::typeConvertIndices(self, std::move(indices)), value);
565
+ }
566
+
567
+ // NOTE [ Setting `disable_slice_optimization` when calling C++ tensor indexing
568
+ // functions from Python ]
569
+ //
570
+ // Question: When should we set `disable_slice_optimization` to `true` when
571
+ // calling C++ tensor indexing functions from Python indexing code?
572
+ //
573
+ // Answer: What "slice optimization" means: when we have a slicing expression
574
+ // like `x[0:5, 0]`, where the sliced tensor was of size 5 in dimension 0, we
575
+ // would skip dispatching the actual slice call as an optimization. However,
576
+ // here are the cases where we DON'T want this optimization:
577
+ //
578
+ // 1. When we are doing 1-D slicing (e.g. `tensor[:]`).
579
+ // Reason: we always return a shallow copy for expressions such as
580
+ // `tensor[:]` / `tensor[...]` / `tensor[:, :]`. (Note that for `tensor[:,
581
+ // :]`, we return an alias of `tensor` by doing the following:
582
+ // ```
583
+ // Tensor sliced = impl::applySlicing(self, indices, tensorIndices,
584
+ // disable_slice_optimization, self_device, self_sizes); if
585
+ // (tensorIndices.empty()) {
586
+ // if (sliced.is_same(self)) {
587
+ // // ensure we return a shallow copy for things like x[...]
588
+ // sliced = at::alias(sliced);
589
+ // }
590
+ // return sliced;
591
+ // }
592
+ // ```)
593
+ // 2. When we are doing JIT tracing.
594
+ // Reason: JIT tracing needs the `self.slice(...)` call to properly trace the
595
+ // slice operation.
596
+
597
+ // This mirrors `THPVariable_getitem` in
598
+ // torch/csrc/autograd/python_variable_indexing.cpp See NOTE [ Setting
599
+ // `disable_slice_optimization` when calling C++ tensor indexing functions from
600
+ // Python ]
601
+ static inline Tensor get_item(
602
+ const Tensor& self,
603
+ const ArrayRef<TensorIndex>& indices,
604
+ bool disable_slice_optimization = false) {
605
+ at::Device self_device = self.device();
606
+ // NOTE [nested tensor size for indexing]
607
+ // nested tensor does not have a size (yet) so for now we represent its size
608
+ // as null; this may need to be changed once we reach a better solution for
609
+ // nested tensor sizes.
610
+ c10::optional<SymIntArrayRef> self_sizes = self.is_nested()
611
+ ? c10::optional<SymIntArrayRef>(c10::nullopt)
612
+ : c10::optional<SymIntArrayRef>(self.sym_sizes());
613
+
614
+ // handle simple types: integers, slices, none, ellipsis, bool
615
+ if (indices.size() == 1) {
616
+ const TensorIndex& index = indices[0];
617
+ if (index.is_integer()) {
618
+ return impl::applySelect(
619
+ self, 0, index.integer(), 0, self_device, self_sizes);
620
+ } else if (index.is_slice()) {
621
+ return impl::applySlice(
622
+ self,
623
+ 0,
624
+ index.slice().start(),
625
+ index.slice().stop(),
626
+ index.slice().step(),
627
+ /*disable_slice_optimization=*/true,
628
+ self_device,
629
+ self_sizes);
630
+ } else if (index.is_none()) {
631
+ return self.unsqueeze(0);
632
+ } else if (index.is_ellipsis()) {
633
+ return at::alias(self);
634
+ } else if (index.is_boolean()) {
635
+ Tensor result = self.unsqueeze(0);
636
+ return dispatch_index(
637
+ result,
638
+ std::vector<Tensor>{impl::boolToIndexingTensor(
639
+ result, index.boolean(), self_device)});
640
+ }
641
+ }
642
+
643
+ std::vector<Tensor> tensorIndices;
644
+ Tensor sliced = impl::applySlicing(
645
+ self,
646
+ indices,
647
+ tensorIndices,
648
+ disable_slice_optimization,
649
+ self_device,
650
+ self_sizes);
651
+ if (tensorIndices.empty()) {
652
+ if (sliced.is_same(self)) {
653
+ // ensure we return a shallow copy for things like x[...]
654
+ sliced = at::alias(sliced);
655
+ }
656
+ return sliced;
657
+ }
658
+
659
+ // indexing by tensors ("advanced" indexing)
660
+ return dispatch_index(sliced, std::move(tensorIndices));
661
+ }
662
+
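+ // Rough usage sketch from C++ (assuming the `Tensor::index` /
+ // `Tensor::index_put_` overloads that accept `TensorIndex` lists, which are
+ // built on top of get_item / set_item):
+ //   using namespace at::indexing;
+ //   Tensor t = at::arange(12).reshape({3, 4});
+ //   Tensor row   = t.index({1});                        // like t[1]
+ //   Tensor block = t.index({Slice(0, 2), Slice(1, 3)}); // like t[0:2, 1:3]
+ //   t.index_put_({Slice(), 0}, 0);                      // like t[:, 0] = 0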
663
+ // This mirrors `THPVariable_setitem` in
664
+ // torch/csrc/autograd/python_variable_indexing.cpp for "the assigned value is a
665
+ // Tensor" case See NOTE [ Setting `disable_slice_optimization` when calling C++
666
+ // tensor indexing functions from Python ]
667
+ static inline void set_item(
668
+ const Tensor& self,
669
+ const ArrayRef<TensorIndex>& indices,
670
+ const Tensor& value,
671
+ bool disable_slice_optimization = false) {
672
+ at::Device self_device = self.device();
673
+ SymIntArrayRef self_sizes = self.sym_sizes();
674
+
675
+ // handle simple types: integers, slices, ellipsis, bool
676
+ if (indices.size() == 1) {
677
+ const TensorIndex& index = indices[0];
678
+ if (index.is_boolean() && !index.boolean()) {
679
+ // do nothing for false (technically we should check the size, but we
680
+ // don't have real 0-sized shapes.)
681
+ return;
682
+ } else if (index.is_ellipsis()) {
683
+ copy_to(self, value);
684
+ return;
685
+ } else if (index.is_none() || (index.is_boolean() && index.boolean())) {
686
+ copy_to(self.unsqueeze(0), value);
687
+ return;
688
+ } else if (index.is_integer()) {
689
+ copy_to(
690
+ impl::applySelect(
691
+ self, 0, index.integer(), 0, self_device, self_sizes),
692
+ value);
693
+ return;
694
+ } else if (index.is_slice()) {
695
+ copy_to(
696
+ impl::applySlice(
697
+ self,
698
+ 0,
699
+ index.slice().start(),
700
+ index.slice().stop(),
701
+ index.slice().step(),
702
+ /*disable_slice_optimization=*/disable_slice_optimization,
703
+ self_device,
704
+ self_sizes),
705
+ value);
706
+ return;
707
+ }
708
+ }
709
+
710
+ std::vector<Tensor> tensorIndices;
711
+ Tensor sliced = impl::applySlicing(
712
+ self,
713
+ indices,
714
+ tensorIndices,
715
+ disable_slice_optimization,
716
+ self_device,
717
+ self_sizes);
718
+ if (tensorIndices.empty()) {
719
+ copy_to(sliced, value);
720
+ return;
721
+ }
722
+
723
+ SymIntArrayRef valueSizes = value.sym_sizes();
724
+ SymIntArrayRef slicedValueSizes = slicePrefix1sSize(valueSizes);
725
+ Tensor valuesSliced;
726
+ if (!valueSizes.equals(slicedValueSizes)) {
727
+ valuesSliced = value.view_symint(slicedValueSizes);
728
+ } else {
729
+ valuesSliced = value;
730
+ }
731
+ dispatch_index_put_(sliced, std::move(tensorIndices), valuesSliced);
732
+ return;
733
+ }
734
+
735
+ } // namespace at::indexing
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorMeta.h ADDED
@@ -0,0 +1,137 @@
1
+ #pragma once
2
+
3
+ #include <ATen/DimVector.h>
4
+ #include <ATen/core/Dimname.h>
5
+ #include <c10/core/TensorOptions.h>
6
+ #include <c10/util/strides.h>
7
+
8
+ namespace at {
9
+
10
+ class Tensor;
11
+
12
+ namespace impl {
13
+
14
+ // Use this to define the prototype for a meta function. There are two
15
+ // versions: one that takes one argument (just the operator name), and a FUNC2
16
+ // variant that takes two arguments (operator name and overload name).
17
+ //
18
+ // Example usage:
19
+ //
20
+ // TORCH_META_FUNC2(add, Tensor) (
21
+ // const Tensor& self, const Tensor& other
22
+ // ) {
23
+ // ... compute sizes and options ...
24
+ // set_output(sizes, options);
25
+ // }
26
+ //
27
+ #define TORCH_META_FUNC(name) void structured_##name::meta
28
+ #define TORCH_META_FUNC2(name, overload) \
29
+ void structured_##name##_##overload::meta
30
+
31
+ // These are versions of TORCH_META_FUNC(2) that include a precompute_out struct
32
+ // as a return value. They should be used when the kernel in question has
33
+ // precomputed values declared in native_functions.yaml and the corresponding
34
+ // implementation should return an instance of the aforementioned struct.
35
+ #define TORCH_PRECOMPUTE_META_FUNC(name) \
36
+ structured_##name::meta_return_ty structured_##name::meta
37
+ #define TORCH_PRECOMPUTE_META_FUNC2(name, overload) \
38
+ structured_##name##_##overload::meta_return_ty \
39
+ structured_##name##_##overload::meta
40
+
41
+ // Use this to create a precompute struct in a meta function.
42
+ #define TORCH_PRECOMPUTE_STRUCT(name) structured_##name::precompute_out<>
43
+ #define TORCH_PRECOMPUTE_STRUCT2(name, overload) \
44
+ structured_##name##_##overload::precompute_out<>
45
+
46
+ // Use this to define the prototype for an implementation. This takes only
47
+ // one argument, which is the name of the dispatch key entry you're
48
+ // implementing.
49
+ //
50
+ // Example usage:
51
+ //
52
+ // TORCH_IMPL_FUNC(add_cpu) (
53
+ // Tensor& result, const Tensor& self, const Tensor& other
54
+ // ) {
55
+ // ... do the actual implementation ...
56
+ // }
57
+ //
58
+ #define TORCH_IMPL_FUNC(name) void structured_##name::impl
59
+
60
+ // Base class for all structured kernel classes. The set_output virtual
61
+ // method is varied depending on whether or not the operator is
62
+ // functional/out/inplace, and could also be specialized for CPU/CUDA/etc
63
+ // (although presently it isn't).
64
+ //
65
+ // A notable subclass of this interface is TensorIteratorBase.
66
+ struct TORCH_API MetaBase {
67
+ MetaBase() = default;
68
+ MetaBase(const MetaBase&) = default;
69
+ MetaBase& operator=(const MetaBase&) = default;
70
+ MetaBase(MetaBase&&) noexcept = default;
71
+ MetaBase& operator=(MetaBase&&) noexcept = default;
72
+ virtual const Tensor& maybe_get_output(int64_t output_idx) = 0;
73
+
74
+ // Note: [set_output_*]
75
+ // See: https://github.com/pytorch/pytorch/issues/69813
76
+ // Whenever defining the output properties in the META function of a
77
+ // structured kernel (what was usually done with `set_output`), use one of
78
+ // these 3 variants, instead. In order to decide which variant to use, check
79
+ // the following decision tree:
80
+ //
81
+ // - Can the kernel you are going to implement support output tensors
82
+ // with arbitrary strides?
83
+ // |
84
+ // -- YES: `set_output_raw_strided`
85
+ // |
86
+ // -- NO: Should the output tensor strides be contiguous?
87
+ // |
88
+ // -- YES: `set_output_contiguous`
89
+ // |
90
+ // -- NO: `set_output_strided`
91
+ //
92
+ // Use this function whenever the kernel requires specific strides for the
93
+ // output. If `strides` does not match the given output strides, proxy outputs
94
+ // will be created and passed to the IMPL function.
95
+ virtual void set_output_strided(
96
+ int64_t output_idx,
97
+ IntArrayRef sizes,
98
+ IntArrayRef strides,
99
+ TensorOptions options,
100
+ DimnameList names = {}) {
101
+ TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
102
+ }
103
+
104
+ // Use this function whenever the kernel knows how to handle arbitrary strided
105
+ // outputs. This function has the same behavior as the old `set_output`: it
106
+ // will only re-stride if the given output was resized.
107
+ virtual void set_output_raw_strided(
108
+ int64_t output_idx,
109
+ IntArrayRef sizes,
110
+ IntArrayRef strides_hint,
111
+ TensorOptions options,
112
+ DimnameList names = {}) {
113
+ TORCH_INTERNAL_ASSERT(false, "set_output_raw_strided not implemented.");
114
+ }
115
+
116
+ // Use this function if the kernel requires contiguous strides.
117
+ // Alias for `set_output_strided`, but with contiguous strides.
118
+ void set_output_contiguous(
119
+ int64_t output_idx,
120
+ IntArrayRef sizes,
121
+ TensorOptions options,
122
+ DimnameList names = {}) {
123
+ auto strides = c10::contiguous_strides(sizes);
124
+ set_output_strided(output_idx, sizes, strides, options, names);
125
+ }
126
+
127
+ // Returns a reference to an undefined tensor if there is no presupplied
128
+ // output
129
+ const Tensor& maybe_get_output() {
130
+ return maybe_get_output(0);
131
+ }
132
+ virtual ~MetaBase() = default;
133
+ };
134
+
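+ // Rough sketch of how these pieces are typically wired together for a
+ // hypothetical structured operator `my_op` declared in native_functions.yaml
+ // (the names here are illustrative, not real operators):
+ //
+ //   TORCH_META_FUNC(my_op) (const Tensor& self) {
+ //     // same shape and dtype as the input, contiguous output
+ //     set_output_contiguous(0, self.sizes(), self.options());
+ //   }
+ //
+ //   TORCH_IMPL_FUNC(my_op_cpu) (const Tensor& self, const Tensor& result) {
+ //     // `result` has already been allocated according to the meta function
+ //   }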
135
+ } // namespace impl
136
+
137
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorNames.h ADDED
@@ -0,0 +1,75 @@
1
+ #pragma once
2
+
3
+ #include <ATen/WrapDimUtils.h>
4
+
5
+ namespace at::namedinference {
6
+
7
+ // TensorName and TensorNames are wrappers around Dimname and DimnameList
8
+ // that contain helper functions to make writing name inference rules easier.
9
+ //
10
+ // A TensorName represents a Dimname associated with some DimnameList (from a
11
+ // Tensor). This encapsulates all the information that is needed to check if
12
+ // names *match* and to *unify* names.
13
+ //
14
+ // Definition: Two names in two tensors *match* if they are equal, or if at
15
+ // least one of them is a wildcard that can be *refined* to the other name.
16
+ //
17
+ // Definition: unify(name, other) fails if the names do not match. Otherwise,
18
+ // it returns the most refined of name and other.
19
+ //
20
+ // Here is an example of checking if two names match.
21
+ // tensor: Tensor[A, None]
22
+ // other: Tensor[A]
23
+ //
24
+ // Let's say we wish to check if tensor.names[-1] matches other.names[-1].
25
+ // None (in tensor) cannot match A (in other) because if the None were refined
26
+ // to A, `tensor` would have duplicate names [A, A]. Therefore we need to check
27
+ // tensor.names [A, None] for the existence of A.
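+ //
+ // A few more illustrative cases of unifying names from the right (a sketch):
+ //   Tensor[N, C] with Tensor[C]       -> [N, C]
+ //   Tensor[N, None] with Tensor[N, C] -> [N, C]   (None is refined to C)
+ //   Tensor[N, C] with Tensor[N, D]    -> error: C and D do not match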
28
+ struct TORCH_API TensorName {
29
+ explicit TensorName(ArrayRef<Dimname> origin, int origin_idx)
30
+ : origin_(origin),
31
+ name_(origin[maybe_wrap_dim(
32
+ origin_idx,
33
+ static_cast<int64_t>(origin.size()))]),
34
+ origin_idx_(origin_idx) {}
35
+
36
+ // op_name is only used for error reporting.
37
+ const TensorName& unify(const TensorName& other, const char* op_name) const;
38
+ Dimname toDimname() const;
39
+
40
+ private:
41
+ ArrayRef<Dimname> origin_;
42
+ Dimname name_;
43
+ int origin_idx_; // A named tensor can have at most 64 dims.
44
+
45
+ TORCH_API friend std::ostream& operator<<(
46
+ std::ostream& out,
47
+ const TensorName& tensorname);
48
+ };
49
+
50
+ using TensorNameVec = SmallVector<TensorName, 10>;
51
+
52
+ struct TORCH_API TensorNames {
53
+ explicit TensorNames(ArrayRef<Dimname> names);
54
+
55
+ // Create TensorNames from names[start:end]. Each individual TensorName stores
56
+ // `names`, NOT names[start:end], because the original tensor's names are
57
+ // `names`.
58
+ explicit TensorNames(ArrayRef<Dimname> names, int64_t start, int64_t end);
59
+
60
+ // op_name is only used for error reporting.
61
+ TensorNames& unifyFromRightInplace(
62
+ const TensorNames& other,
63
+ const char* op_name = "unify");
64
+ void checkUnique(const char* op_name) const;
65
+
66
+ void append(TensorName name);
67
+ std::vector<Dimname> toDimnameVec() const;
68
+
69
+ private:
70
+ explicit TensorNames(TensorNameVec&& names) : names_(std::move(names)){};
71
+
72
+ TensorNameVec names_;
73
+ };
74
+
75
+ } // namespace at::namedinference
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorOperators.h ADDED
@@ -0,0 +1,51 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <c10/core/Scalar.h>
5
+
6
+ #ifndef AT_PER_OPERATOR_HEADERS
7
+ #include <ATen/Functions.h>
8
+ #else
9
+ #include <ATen/ops/empty_like.h>
10
+ #endif
11
+
12
+ namespace at {
13
+
14
+ #define AT_FORALL_BINARY_OPS(_) \
15
+ _(+, x.add(y), y.add(x)) \
16
+ _(*, x.mul(y), y.mul(x)) \
17
+ _(-, \
18
+ x.sub(y), \
19
+ ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).sub_(y)) \
20
+ _(/, \
21
+ x.div(y), \
22
+ ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).div_(y)) \
23
+ _(%, \
24
+ x.remainder(y), \
25
+ ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).remainder_(y)) \
26
+ _(&, x.bitwise_and(y), y.bitwise_and(x)) \
27
+ _(|, x.bitwise_or(y), y.bitwise_or(x)) \
28
+ _(^, x.bitwise_xor(y), y.bitwise_xor(x)) \
29
+ _(<, x.lt(y), y.gt(x)) \
30
+ _(<=, x.le(y), y.ge(x)) \
31
+ _(>, x.gt(y), y.lt(x)) \
32
+ _(>=, x.ge(y), y.le(x)) \
33
+ _(==, x.eq(y), y.eq(x)) \
34
+ _(!=, x.ne(y), y.ne(x))
35
+
36
+ #define DEFINE_OPERATOR(op, body, reverse_scalar_body) \
37
+ static inline Tensor operator op(const Tensor& x, const Tensor& y) { \
38
+ return body; \
39
+ } \
40
+ static inline Tensor operator op(const Tensor& x, const Scalar& y) { \
41
+ return body; \
42
+ } \
43
+ static inline Tensor operator op(const Scalar& x, const Tensor& y) { \
44
+ return reverse_scalar_body; \
45
+ }
46
+
47
+ AT_FORALL_BINARY_OPS(DEFINE_OPERATOR)
48
+ #undef DEFINE_OPERATOR
49
+ #undef AT_FORALL_BINARY_OPS
50
+
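+ // Illustrative effect of the operators generated above (a sketch):
+ //   Tensor t = at::ones({2, 2});
+ //   Tensor a = t + t;    // t.add(t)
+ //   Tensor b = t - 3;    // t.sub(3)
+ //   Tensor c = 3 - t;    // at::empty_like(t).fill_(3).sub_(t)
+ //   Tensor d = t >= 1;   // t.ge(1)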
51
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorOptions.h ADDED
@@ -0,0 +1,2 @@
1
+ #pragma once
2
+ #include <c10/core/TensorOptions.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorSubclassLikeUtils.h ADDED
@@ -0,0 +1,86 @@
1
+ #pragma once
2
+ #include <ATen/core/List.h>
3
+ #include <ATen/core/Tensor.h>
4
+ #include <c10/core/impl/TorchDispatchModeTLS.h>
5
+
6
+ #ifndef AT_PER_OPERATOR_HEADERS
7
+ #include <ATen/Functions.h>
8
+ #else
9
+ #include <ATen/ops/equal.h>
10
+ #endif
11
+
12
+ namespace at {
13
+
14
+ // Note [Tensor-subclass-like Tensors]
15
+ // Tensor-subclass-like is defined as:
16
+ // - a Tensor subclass (via __torch_dispatch__ in Python or extending
17
+ // TensorImpl in C++)
18
+ // - anything else that shares the same perils as Tensor subclasses.
19
+ // For example, many Tensor subclasses do not have storage and meta Tensors
20
+ // do not have storage either, so meta Tensors belong here.
21
+ //
22
+ // We should ensure that PyTorch internals support Tensor-subclass-like
23
+ // objects. In particular, Tensor-subclass-like objects struggle with two
24
+ // classes of operations that are problematic for Tensor subclasses:
25
+ // 1. Because some Tensor subclasses do not have storage, .item() or
26
+ // .data_ptr() calls are not good.
27
+ // 2. Certain in-place operations can eliminate the typing of the Tensor
28
+ // subclass. For example:
29
+ // >>> torch.zeros(input.sizes(), grad.options()).diag().copy_(input)
30
+ // If input is a Tensor subclass, then the above ends up either erroring out
31
+ // or returning a regular non-Tensor-subclass Tensor!
32
+
33
+ constexpr auto kFunctorchWrappedTensors = DispatchKeySet(
34
+ {DispatchKey::FuncTorchGradWrapper,
35
+ DispatchKey::FuncTorchBatched,
36
+ DispatchKey::Functionalize});
37
+
38
+ constexpr auto kTensorSubclassLike =
39
+ kFunctorchWrappedTensors |
40
+ DispatchKeySet(
41
+ {// WARNING: DO NOT put combined backend component + functionality keys
42
+ // here, you will incorrectly always match on the functionality key
43
+ // no matter the backend component
44
+ DispatchKey::Batched,
45
+ DispatchKey::Sparse,
46
+ DispatchKey::SparseCsr,
47
+ DispatchKey::Python}) |
48
+ DispatchKeySet(BackendComponent::MetaBit);
49
+
50
+ inline bool isTensorSubclassLike(const Tensor& tensor) {
51
+ if (c10::impl::dispatch_mode_enabled())
52
+ return true;
53
+ auto key_set = tensor.unsafeGetTensorImpl()->key_set();
54
+ return !(key_set & kTensorSubclassLike).empty();
55
+ }
56
+
57
+ inline bool areAnyTensorSubclassLike(TensorList tensors) {
58
+ if (c10::impl::dispatch_mode_enabled())
59
+ return true;
60
+ return std::any_of(tensors.begin(), tensors.end(), isTensorSubclassLike);
61
+ }
62
+
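+ // Typical usage sketch (hypothetical kernel code): guard a fast path that
+ // would break for Tensor-subclass-like inputs.
+ //   if (!at::areAnyTensorSubclassLike({self, other})) {
+ //     // safe to rely on storage / data_ptr() here
+ //   } else {
+ //     // fall back to a composite-compliant implementation
+ //   }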
63
+ inline bool areAnyOptionalTensorSubclassLike(
64
+ const c10::List<c10::optional<Tensor>>& tensors) {
65
+ if (c10::impl::dispatch_mode_enabled())
66
+ return true;
67
+ return std::any_of(
68
+ tensors.begin(), tensors.end(), [](const optional<Tensor>& opt_tensor) {
69
+ return (
70
+ opt_tensor.has_value() && isTensorSubclassLike(opt_tensor.value()));
71
+ });
72
+ }
73
+
74
+ // Helper function to test the truthfulness of a scalar tensor
75
+ // in a Composite Compliant manner.
76
+ // NOTE: This function expects a scalar tensor of boolean dtype.
77
+ // Eg.
78
+ // Non-Composite Compliant Pattern : (t == 0).all().item<bool>()
79
+ // Composite Compliant Pattern : is_scalar_tensor_true((t == 0).all())
80
+ inline bool is_scalar_tensor_true(const Tensor& t) {
81
+ TORCH_INTERNAL_ASSERT(t.dim() == 0)
82
+ TORCH_INTERNAL_ASSERT(t.scalar_type() == kBool)
83
+ return at::equal(t, t.new_ones({}, t.options()));
84
+ }
85
+
86
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ThreadLocalPythonObjects.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SafePyObject.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <unordered_map>
6
+
7
+ namespace at::impl {
8
+
9
+ struct TORCH_API ThreadLocalPythonObjects {
10
+ static void set(const std::string& key, std::shared_ptr<SafePyObject> value);
11
+ static const std::shared_ptr<SafePyObject>& get(const std::string& key);
12
+ static bool contains(const std::string& key);
13
+
14
+ static const ThreadLocalPythonObjects& get_state();
15
+ static void set_state(ThreadLocalPythonObjects state);
16
+
17
+ private:
18
+ std::unordered_map<std::string, std::shared_ptr<c10::SafePyObject>> obj_dict_;
19
+ };
20
+
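+ // Usage sketch (hypothetical key name; `obj` is a std::shared_ptr<c10::SafePyObject>):
+ //   at::impl::ThreadLocalPythonObjects::set("my_key", obj);
+ //   if (at::impl::ThreadLocalPythonObjects::contains("my_key")) {
+ //     const auto& stored = at::impl::ThreadLocalPythonObjects::get("my_key");
+ //   }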
21
+ } // namespace at::impl
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/Version.h ADDED
@@ -0,0 +1,18 @@
1
+ #include <ATen/Context.h>
2
+
3
+ namespace at {
4
+
5
+ /// Returns a detailed string describing the configuration PyTorch.
6
+ TORCH_API std::string show_config();
7
+
8
+ TORCH_API std::string get_mkl_version();
9
+
10
+ TORCH_API std::string get_mkldnn_version();
11
+
12
+ TORCH_API std::string get_openmp_version();
13
+
14
+ TORCH_API std::string get_cxx_flags();
15
+
16
+ TORCH_API std::string get_cpu_capability();
17
+
18
+ } // namespace at