applied-ai-018 committed
Commit e30745c · verified · 1 Parent(s): 396e7f7

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Activation.h +98 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/AdaptivePooling.h +39 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/AmpKernels.h +28 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h +321 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/BinaryOps.h +119 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/BucketizationUtils.h +173 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/CPUBlas.h +189 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/CPUFallback.h +45 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h +13 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ComplexHelper.h +97 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/CompositeRandomAccessor.h +34 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/CompositeRandomAccessorCommon.h +263 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ConvUtils.h +446 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ConvolutionMM3d.h +14 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Copy.h +20 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Cross.h +14 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/DilatedConvolutionUtils.h +229 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h +315 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/DistributionTemplates.h +394 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Distributions.h +518 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Fill.h +21 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h +371 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/FractionalMaxPooling.h +80 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/FunctionOfAMatrixUtils.h +20 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h +298 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/GridSamplerUtils.h +109 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Histogram.h +16 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/IndexingUtils.h +160 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Lerp.h +46 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/LossMulti.h +72 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Math.h +0 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h +71 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/MathBitsFallback.h +157 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/NonSymbolicBC.h +26 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h +11 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Padding.h +62 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/PixelShuffle.h +47 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Pool.h +340 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/RNN.h +53 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/RangeFactories.h +12 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ReduceAllOps.h +16 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOps.h +56 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ReductionType.h +40 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ResizeCommon.h +75 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ScatterGatherChecks.h +128 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/SegmentReduce.h +50 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/SharedReduceOps.h +544 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/SobolEngineOpsUtils.h +55 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/SortingUtils.h +88 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/SparseTensorUtils.h +190 -0
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Activation.h ADDED
@@ -0,0 +1,98 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/string_view.h>
+
+ namespace c10 {
+ class Scalar;
+ }
+
+ namespace at {
+ struct TensorIterator;
+ struct TensorIteratorBase;
+ class TensorBase;
+ }
+
+ namespace at::native {
+
+ // These constants control the approximation behavior of gelu function.
+ enum class GeluType {
+ None, // Baseline Gelu
+ Tanh, // Tahn Gelu Approximation
+ END
+ };
+
+ static GeluType get_gelutype_enum(const c10::string_view approximate) {
+ if (approximate == "none") {
+ return GeluType::None;
+ } else if (approximate == "tanh") {
+ return GeluType::Tanh;
+ } else {
+ TORCH_CHECK(false, "approximate argument must be either none or tanh.");
+ }
+ }
+
+ static std::string gelutype_to_string(const GeluType type) {
+ switch(type) {
+ case GeluType::None: return "none";
+ case GeluType::Tanh: return "tanh";
+ default: TORCH_CHECK(false, "unknown GELU type: ", static_cast<int>(type));
+ }
+ }
+
+ using structured_activation_fn = void (*)(TensorIteratorBase&);
+ using structured_activation_backward_fn = void (*)(TensorIteratorBase&);
+
+ using activation_fn = void (*)(TensorIterator&);
+ using activation_backward_fn = void (*)(TensorIterator&);
+ using softplus_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
+ using softplus_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
+ using threshold_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
+ using hardtanh_backward_fn = void (*)(TensorIterator&, const c10::Scalar&, const c10::Scalar&);
+ using hardsigmoid_fn = void(*)(TensorIteratorBase&);
+ using hardsigmoid_backward_fn = void(*)(TensorIteratorBase&);
+ using hardswish_fn = void(*)(TensorIterator&);
+ using hardswish_backward_fn = void(*)(TensorIterator&);
+ using shrink_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+ using softshrink_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+ using shrink_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+ using elu_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&, const c10::Scalar&);
+ using elu_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&, const c10::Scalar&, bool);
+ using leaky_relu_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+ using leaky_relu_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+ using log_sigmoid_cpu_fn = void (*)(TensorBase&, TensorBase&, const TensorBase&);
+ using gelu_fn = void (*)(TensorIteratorBase&, GeluType);
+ using gelu_backward_fn = void (*)(TensorIteratorBase&, GeluType);
+ using glu_jvp_fn = void (*)(TensorIteratorBase&);
+
+ DECLARE_DISPATCH(elu_fn, elu_stub);
+ DECLARE_DISPATCH(elu_backward_fn, elu_backward_stub);
+ DECLARE_DISPATCH(softplus_fn, softplus_stub);
+ DECLARE_DISPATCH(softplus_backward_fn, softplus_backward_stub);
+ DECLARE_DISPATCH(log_sigmoid_cpu_fn, log_sigmoid_cpu_stub);
+ DECLARE_DISPATCH(activation_backward_fn, log_sigmoid_backward_stub);
+ DECLARE_DISPATCH(threshold_fn, threshold_stub);
+ DECLARE_DISPATCH(gelu_fn, GeluKernel);
+ DECLARE_DISPATCH(gelu_backward_fn, GeluBackwardKernel);
+ DECLARE_DISPATCH(hardtanh_backward_fn, hardtanh_backward_stub);
+ DECLARE_DISPATCH(hardsigmoid_fn, hardsigmoid_stub);
+ DECLARE_DISPATCH(hardsigmoid_backward_fn, hardsigmoid_backward_stub);
+ DECLARE_DISPATCH(hardswish_fn, hardswish_stub);
+ DECLARE_DISPATCH(hardswish_backward_fn, hardswish_backward_stub);
+ DECLARE_DISPATCH(shrink_fn, hardshrink_stub);
+ DECLARE_DISPATCH(softshrink_fn, softshrink_stub);
+ DECLARE_DISPATCH(shrink_backward_fn, shrink_backward_stub);
+ DECLARE_DISPATCH(leaky_relu_fn, leaky_relu_stub);
+ DECLARE_DISPATCH(leaky_relu_backward_fn, leaky_relu_backward_stub);
+ DECLARE_DISPATCH(structured_activation_fn, glu_stub);
+ DECLARE_DISPATCH(activation_backward_fn, glu_backward_stub);
+ DECLARE_DISPATCH(glu_jvp_fn, glu_jvp_stub);
+ DECLARE_DISPATCH(structured_activation_fn, silu_stub);
+ DECLARE_DISPATCH(structured_activation_backward_fn, silu_backward_stub);
+ DECLARE_DISPATCH(structured_activation_fn, mish_stub);
+ DECLARE_DISPATCH(activation_backward_fn, mish_backward_stub);
+ DECLARE_DISPATCH(activation_fn, prelu_stub);
+ DECLARE_DISPATCH(activation_backward_fn, prelu_backward_stub);
+
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/AdaptivePooling.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/DispatchStub.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/irange.h>
+ #include <cmath>
+
+ namespace at::native {
+
+ using adaptive_avg_pooling_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
+ using adaptive_avg_pooling_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
+ DECLARE_DISPATCH(adaptive_avg_pooling_fn, adaptive_avg_pool2d_kernel);
+ DECLARE_DISPATCH(adaptive_avg_pooling_backward_fn, adaptive_avg_pool2d_backward_kernel);
+
+ using adaptive_max_pooling_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
+ using adaptive_max_pooling_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
+ DECLARE_DISPATCH(adaptive_max_pooling_fn, adaptive_max_pool2d_kernel);
+ DECLARE_DISPATCH(adaptive_max_pooling_backward_fn, adaptive_max_pool2d_backward_kernel);
+
+ static inline int64_t start_index(int64_t a, int64_t b, int64_t c) {
+ return (a / b) * c + ((a % b) * c) / b;
+ }
+
+ static inline int64_t end_index(int64_t a, int64_t b, int64_t c) {
+ return 1 + ((a + 1) * c - 1) / b;
+ }
+
+ static inline void adaptive_pool_empty_output_check(const Tensor& gradOutput_, const char* arg_name) {
+ int64_t ndim = gradOutput_.ndimension();
+ for (const auto i : c10::irange(1, ndim)) {
+ TORCH_CHECK(gradOutput_.size(i) > 0,
+ arg_name, "(): Expected grad_output to have non-zero size for non-batch dimensions, "
+ "but grad_output has sizes ", gradOutput_.sizes(), " with dimension ", i,
+ " being empty");
+ }
+ }
+
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/AmpKernels.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ class Tensor;
+
+ namespace native {
+
+ using _amp_foreach_non_finite_check_and_unscale_cpu__fn = void (*)(
+ TensorList,
+ Tensor&,
+ const Tensor&);
+
+ using _amp_update_scale_cpu__fn = Tensor& (*)(
+ Tensor&,
+ Tensor&,
+ const Tensor&,
+ double,
+ double,
+ int64_t);
+
+ DECLARE_DISPATCH(_amp_foreach_non_finite_check_and_unscale_cpu__fn, _amp_foreach_non_finite_check_and_unscale_cpu_stub);
+ DECLARE_DISPATCH(_amp_update_scale_cpu__fn, _amp_update_scale_cpu_stub);
+
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h ADDED
@@ -0,0 +1,321 @@
+ #pragma once
+
+ #include <c10/util/Optional.h>
+ #include <c10/util/string_view.h>
+ #include <ATen/Config.h>
+ #include <ATen/native/DispatchStub.h>
+
+ // Forward declare TI
+ namespace at {
+ class Tensor;
+ struct TensorIterator;
+
+ namespace native {
+ enum class TransposeType;
+ }
+
+ }
+
+ namespace at::native {
+
+ enum class LapackLstsqDriverType : int64_t { Gels, Gelsd, Gelsy, Gelss};
+
+ #if AT_BUILD_WITH_LAPACK()
+ // Define per-batch functions to be used in the implementation of batched
+ // linear algebra operations
+
+ template <class scalar_t>
+ void lapackCholesky(char uplo, int n, scalar_t *a, int lda, int *info);
+
+ template <class scalar_t>
+ void lapackCholeskyInverse(char uplo, int n, scalar_t *a, int lda, int *info);
+
+ template <class scalar_t, class value_t=scalar_t>
+ void lapackEig(char jobvl, char jobvr, int n, scalar_t *a, int lda, scalar_t *w, scalar_t* vl, int ldvl, scalar_t *vr, int ldvr, scalar_t *work, int lwork, value_t *rwork, int *info);
+
+ template <class scalar_t>
+ void lapackGeqrf(int m, int n, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info);
+
+ template <class scalar_t>
+ void lapackOrgqr(int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info);
+
+ template <class scalar_t>
+ void lapackOrmqr(char side, char trans, int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *c, int ldc, scalar_t *work, int lwork, int *info);
+
+ template <class scalar_t, class value_t = scalar_t>
+ void lapackSyevd(char jobz, char uplo, int n, scalar_t* a, int lda, value_t* w, scalar_t* work, int lwork, value_t* rwork, int lrwork, int* iwork, int liwork, int* info);
+
+ template <class scalar_t>
+ void lapackGels(char trans, int m, int n, int nrhs,
+ scalar_t *a, int lda, scalar_t *b, int ldb,
+ scalar_t *work, int lwork, int *info);
+
+ template <class scalar_t, class value_t = scalar_t>
+ void lapackGelsd(int m, int n, int nrhs,
+ scalar_t *a, int lda, scalar_t *b, int ldb,
+ value_t *s, value_t rcond, int *rank,
+ scalar_t* work, int lwork,
+ value_t *rwork, int* iwork, int *info);
+
+ template <class scalar_t, class value_t = scalar_t>
+ void lapackGelsy(int m, int n, int nrhs,
+ scalar_t *a, int lda, scalar_t *b, int ldb,
+ int *jpvt, value_t rcond, int *rank,
+ scalar_t *work, int lwork, value_t* rwork, int *info);
+
+ template <class scalar_t, class value_t = scalar_t>
+ void lapackGelss(int m, int n, int nrhs,
+ scalar_t *a, int lda, scalar_t *b, int ldb,
+ value_t *s, value_t rcond, int *rank,
+ scalar_t *work, int lwork,
+ value_t *rwork, int *info);
+
+ template <LapackLstsqDriverType, class scalar_t, class value_t = scalar_t>
+ struct lapackLstsq_impl;
+
+ template <class scalar_t, class value_t>
+ struct lapackLstsq_impl<LapackLstsqDriverType::Gels, scalar_t, value_t> {
+ static void call(
+ char trans, int m, int n, int nrhs,
+ scalar_t *a, int lda, scalar_t *b, int ldb,
+ scalar_t *work, int lwork, int *info, // Gels flavor
+ int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+ value_t *s, // Gelss flavor
+ int *iwork // Gelsd flavor
+ ) {
+ lapackGels<scalar_t>(
+ trans, m, n, nrhs,
+ a, lda, b, ldb,
+ work, lwork, info);
+ }
+ };
+
+ template <class scalar_t, class value_t>
+ struct lapackLstsq_impl<LapackLstsqDriverType::Gelsy, scalar_t, value_t> {
+ static void call(
+ char trans, int m, int n, int nrhs,
+ scalar_t *a, int lda, scalar_t *b, int ldb,
+ scalar_t *work, int lwork, int *info, // Gels flavor
+ int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+ value_t *s, // Gelss flavor
+ int *iwork // Gelsd flavor
+ ) {
+ lapackGelsy<scalar_t, value_t>(
+ m, n, nrhs,
+ a, lda, b, ldb,
+ jpvt, rcond, rank,
+ work, lwork, rwork, info);
+ }
+ };
+
+ template <class scalar_t, class value_t>
+ struct lapackLstsq_impl<LapackLstsqDriverType::Gelsd, scalar_t, value_t> {
+ static void call(
+ char trans, int m, int n, int nrhs,
+ scalar_t *a, int lda, scalar_t *b, int ldb,
+ scalar_t *work, int lwork, int *info, // Gels flavor
+ int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+ value_t *s, // Gelss flavor
+ int *iwork // Gelsd flavor
+ ) {
+ lapackGelsd<scalar_t, value_t>(
+ m, n, nrhs,
+ a, lda, b, ldb,
+ s, rcond, rank,
+ work, lwork,
+ rwork, iwork, info);
+ }
+ };
+
+ template <class scalar_t, class value_t>
+ struct lapackLstsq_impl<LapackLstsqDriverType::Gelss, scalar_t, value_t> {
+ static void call(
+ char trans, int m, int n, int nrhs,
+ scalar_t *a, int lda, scalar_t *b, int ldb,
+ scalar_t *work, int lwork, int *info, // Gels flavor
+ int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+ value_t *s, // Gelss flavor
+ int *iwork // Gelsd flavor
+ ) {
+ lapackGelss<scalar_t, value_t>(
+ m, n, nrhs,
+ a, lda, b, ldb,
+ s, rcond, rank,
+ work, lwork,
+ rwork, info);
+ }
+ };
+
+ template <LapackLstsqDriverType driver_type, class scalar_t, class value_t = scalar_t>
+ void lapackLstsq(
+ char trans, int m, int n, int nrhs,
+ scalar_t *a, int lda, scalar_t *b, int ldb,
+ scalar_t *work, int lwork, int *info, // Gels flavor
+ int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+ value_t *s, // Gelss flavor
+ int *iwork // Gelsd flavor
+ ) {
+ lapackLstsq_impl<driver_type, scalar_t, value_t>::call(
+ trans, m, n, nrhs,
+ a, lda, b, ldb,
+ work, lwork, info,
+ jpvt, rcond, rank, rwork,
+ s,
+ iwork);
+ }
+
+ template <class scalar_t>
+ void lapackLuSolve(char trans, int n, int nrhs, scalar_t *a, int lda, int *ipiv, scalar_t *b, int ldb, int *info);
+
+ template <class scalar_t>
+ void lapackLu(int m, int n, scalar_t *a, int lda, int *ipiv, int *info);
+
+ template <class scalar_t>
+ void lapackLdlHermitian(
+ char uplo,
+ int n,
+ scalar_t* a,
+ int lda,
+ int* ipiv,
+ scalar_t* work,
+ int lwork,
+ int* info);
+
+ template <class scalar_t>
+ void lapackLdlSymmetric(
+ char uplo,
+ int n,
+ scalar_t* a,
+ int lda,
+ int* ipiv,
+ scalar_t* work,
+ int lwork,
+ int* info);
+
+ template <class scalar_t>
+ void lapackLdlSolveHermitian(
+ char uplo,
+ int n,
+ int nrhs,
+ scalar_t* a,
+ int lda,
+ int* ipiv,
+ scalar_t* b,
+ int ldb,
+ int* info);
+
+ template <class scalar_t>
+ void lapackLdlSolveSymmetric(
+ char uplo,
+ int n,
+ int nrhs,
+ scalar_t* a,
+ int lda,
+ int* ipiv,
+ scalar_t* b,
+ int ldb,
+ int* info);
+
+ template<class scalar_t, class value_t=scalar_t>
+ void lapackSvd(char jobz, int m, int n, scalar_t *a, int lda, value_t *s, scalar_t *u, int ldu, scalar_t *vt, int ldvt, scalar_t *work, int lwork, value_t *rwork, int *iwork, int *info);
+ #endif
+
+ #if AT_BUILD_WITH_BLAS()
+ template <class scalar_t>
+ void blasTriangularSolve(char side, char uplo, char trans, char diag, int n, int nrhs, scalar_t* a, int lda, scalar_t* b, int ldb);
+ #endif
+
+ using cholesky_fn = void (*)(const Tensor& /*input*/, const Tensor& /*info*/, bool /*upper*/);
+ DECLARE_DISPATCH(cholesky_fn, cholesky_stub);
+
+ using cholesky_inverse_fn = Tensor& (*)(Tensor& /*result*/, Tensor& /*infos*/, bool /*upper*/);
+
+ DECLARE_DISPATCH(cholesky_inverse_fn, cholesky_inverse_stub);
+
+ using linalg_eig_fn = void (*)(Tensor& /*eigenvalues*/, Tensor& /*eigenvectors*/, Tensor& /*infos*/, const Tensor& /*input*/, bool /*compute_eigenvectors*/);
+
+ DECLARE_DISPATCH(linalg_eig_fn, linalg_eig_stub);
+
+ using geqrf_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/);
+ DECLARE_DISPATCH(geqrf_fn, geqrf_stub);
+
+ using orgqr_fn = Tensor& (*)(Tensor& /*result*/, const Tensor& /*tau*/);
+ DECLARE_DISPATCH(orgqr_fn, orgqr_stub);
+
+ using ormqr_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/, const Tensor& /*other*/, bool /*left*/, bool /*transpose*/);
+ DECLARE_DISPATCH(ormqr_fn, ormqr_stub);
+
+ using linalg_eigh_fn = void (*)(
+ const Tensor& /*eigenvalues*/,
+ const Tensor& /*eigenvectors*/,
+ const Tensor& /*infos*/,
+ bool /*upper*/,
+ bool /*compute_eigenvectors*/);
+ DECLARE_DISPATCH(linalg_eigh_fn, linalg_eigh_stub);
+
+ using lstsq_fn = void (*)(
+ const Tensor& /*a*/,
+ Tensor& /*b*/,
+ Tensor& /*rank*/,
+ Tensor& /*singular_values*/,
+ Tensor& /*infos*/,
+ double /*rcond*/,
+ std::string /*driver_name*/);
+ DECLARE_DISPATCH(lstsq_fn, lstsq_stub);
+
+ using triangular_solve_fn = void (*)(
+ const Tensor& /*A*/,
+ const Tensor& /*B*/,
+ bool /*left*/,
+ bool /*upper*/,
+ TransposeType /*transpose*/,
+ bool /*unitriangular*/);
+ DECLARE_DISPATCH(triangular_solve_fn, triangular_solve_stub);
+
+ using lu_factor_fn = void (*)(
+ const Tensor& /*input*/,
+ const Tensor& /*pivots*/,
+ const Tensor& /*infos*/,
+ bool /*compute_pivots*/);
+ DECLARE_DISPATCH(lu_factor_fn, lu_factor_stub);
+
+ using unpack_pivots_fn = void(*)(
+ TensorIterator& iter,
+ const int64_t dim_size,
+ const int64_t max_pivot);
+ DECLARE_DISPATCH(unpack_pivots_fn, unpack_pivots_stub);
+
+ using lu_solve_fn = void (*)(
+ const Tensor& /*LU*/,
+ const Tensor& /*pivots*/,
+ const Tensor& /*B*/,
+ TransposeType /*trans*/);
+ DECLARE_DISPATCH(lu_solve_fn, lu_solve_stub);
+
+ using ldl_factor_fn = void (*)(
+ const Tensor& /*LD*/,
+ const Tensor& /*pivots*/,
+ const Tensor& /*info*/,
+ bool /*upper*/,
+ bool /*hermitian*/);
+ DECLARE_DISPATCH(ldl_factor_fn, ldl_factor_stub);
+
+ using svd_fn = void (*)(
+ const Tensor& /*A*/,
+ const bool /*full_matrices*/,
+ const bool /*compute_uv*/,
+ const c10::optional<c10::string_view>& /*driver*/,
+ const Tensor& /*U*/,
+ const Tensor& /*S*/,
+ const Tensor& /*Vh*/,
+ const Tensor& /*info*/);
+ DECLARE_DISPATCH(svd_fn, svd_stub);
+
+ using ldl_solve_fn = void (*)(
+ const Tensor& /*LD*/,
+ const Tensor& /*pivots*/,
+ const Tensor& /*result*/,
+ bool /*upper*/,
+ bool /*hermitian*/);
+ DECLARE_DISPATCH(ldl_solve_fn, ldl_solve_stub);
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/BinaryOps.h ADDED
@@ -0,0 +1,119 @@
+ #pragma once
+
+ #include <ATen/core/TensorBase.h>
+ #include <ATen/native/DispatchStub.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/util/TypeSafeSignMath.h>
+
+
+ namespace at {
+ struct TensorIterator;
+ struct TensorIteratorBase;
+ }
+
+ namespace at::native {
+
+ inline void alpha_check(const ScalarType dtype, const Scalar& alpha) {
+ TORCH_CHECK(! alpha.isBoolean() || dtype == ScalarType::Bool,
+ "Boolean alpha only supported for Boolean results.");
+ TORCH_CHECK(isFloatingType(dtype) || isComplexType(dtype)
+ || alpha.isIntegral(true),
+ "For integral input tensors, argument alpha must not be a floating point number.");
+ TORCH_CHECK(isComplexType(dtype) || !alpha.isComplex(),
+ "For non-complex input tensors, argument alpha must not be a complex number.")
+ }
+
+ // Basic checking for all sub functions.
+ inline void sub_check(const TensorBase& self, const TensorBase& other) {
+ TORCH_CHECK(self.scalar_type() != kBool || other.scalar_type() != kBool,
+ "Subtraction, the `-` operator, with two bool tensors is not supported. "
+ "Use the `^` or `logical_xor()` operator instead.")
+ TORCH_CHECK(self.scalar_type() != kBool && other.scalar_type() != kBool,
+ "Subtraction, the `-` operator, with a bool tensor is not supported. "
+ "If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
+ }
+
+ inline void sub_check(const TensorBase& self, const Scalar& scalar) {
+ TORCH_CHECK(self.scalar_type() != kBool || !scalar.isBoolean(),
+ "Subtraction, the `-` operator, with two bool tensors is not supported. "
+ "Use the `^` or `logical_xor()` operator instead.")
+ TORCH_CHECK(self.scalar_type() != kBool && !scalar.isBoolean(),
+ "Subtraction, the `-` operator, with a bool tensor is not supported. "
+ "If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
+ }
+
+ using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
+ using structured_binary_fn_double = void(*)(TensorIteratorBase&, double);
+ using structured_binary_fn = void(*)(TensorIteratorBase&);
+
+ using binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
+ using binary_fn_double = void(*)(TensorIterator&, double);
+ using binary_fn = void(*)(TensorIterator&);
+ using binary_clamp_fn_alpha =
+ void(*)(TensorIterator&, const Scalar& alpha, const Scalar& min_val, const Scalar& max_val);
+
+ // NB: codegenned
+ DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub);
+
+ DECLARE_DISPATCH(binary_clamp_fn_alpha, add_clamp_stub);
+ DECLARE_DISPATCH(structured_binary_fn_alpha, sub_stub);
+ DECLARE_DISPATCH(structured_binary_fn, mul_stub);
+ DECLARE_DISPATCH(structured_binary_fn, div_true_stub);
+ DECLARE_DISPATCH(structured_binary_fn, div_floor_stub);
+ DECLARE_DISPATCH(structured_binary_fn, div_trunc_stub);
+ DECLARE_DISPATCH(structured_binary_fn, atan2_stub);
+ DECLARE_DISPATCH(structured_binary_fn, remainder_stub);
+ DECLARE_DISPATCH(structured_binary_fn, bitwise_and_stub);
+ DECLARE_DISPATCH(structured_binary_fn, bitwise_or_stub);
+ DECLARE_DISPATCH(structured_binary_fn, bitwise_xor_stub);
+ DECLARE_DISPATCH(structured_binary_fn, lshift_stub);
+ DECLARE_DISPATCH(structured_binary_fn, rshift_stub);
+ DECLARE_DISPATCH(binary_fn, logical_xor_stub);
+ DECLARE_DISPATCH(binary_fn, logical_and_stub);
+ DECLARE_DISPATCH(binary_fn, logical_or_stub);
+ DECLARE_DISPATCH(structured_binary_fn, lt_stub);
+ DECLARE_DISPATCH(structured_binary_fn, le_stub);
+ DECLARE_DISPATCH(structured_binary_fn, gt_stub);
+ DECLARE_DISPATCH(structured_binary_fn, ge_stub);
+ DECLARE_DISPATCH(structured_binary_fn, eq_stub);
+ DECLARE_DISPATCH(structured_binary_fn, ne_stub);
+ DECLARE_DISPATCH(binary_fn, max_elementwise_stub);
+ DECLARE_DISPATCH(binary_fn, min_elementwise_stub);
+ DECLARE_DISPATCH(structured_binary_fn, maximum_stub);
+ DECLARE_DISPATCH(structured_binary_fn, minimum_stub);
+ DECLARE_DISPATCH(structured_binary_fn, fmax_stub);
+ DECLARE_DISPATCH(structured_binary_fn, fmin_stub);
+ DECLARE_DISPATCH(structured_binary_fn_double, smooth_l1_stub);
+ DECLARE_DISPATCH(binary_fn_double, huber_stub);
+ DECLARE_DISPATCH(structured_binary_fn, sigmoid_backward_stub);
+ DECLARE_DISPATCH(binary_fn_alpha, logit_backward_stub);
+ DECLARE_DISPATCH(structured_binary_fn, tanh_backward_stub);
+ DECLARE_DISPATCH(structured_binary_fn, mse_stub);
+ DECLARE_DISPATCH(structured_binary_fn, fmod_stub);
+ DECLARE_DISPATCH(structured_binary_fn, logaddexp_stub);
+ DECLARE_DISPATCH(structured_binary_fn, logaddexp2_stub);
+ DECLARE_DISPATCH(structured_binary_fn, gcd_stub);
+ DECLARE_DISPATCH(structured_binary_fn, lcm_stub);
+ DECLARE_DISPATCH(structured_binary_fn, hypot_stub);
+ DECLARE_DISPATCH(structured_binary_fn, igamma_stub);
+ DECLARE_DISPATCH(structured_binary_fn, igammac_stub);
+ DECLARE_DISPATCH(structured_binary_fn, nextafter_stub);
+ DECLARE_DISPATCH(structured_binary_fn, heaviside_stub);
+ DECLARE_DISPATCH(structured_binary_fn, copysign_stub);
+ DECLARE_DISPATCH(structured_binary_fn, xlogy_stub);
+ DECLARE_DISPATCH(structured_binary_fn, xlog1py_stub);
+ DECLARE_DISPATCH(structured_binary_fn, zeta_stub);
+ DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_t_stub);
+ DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_u_stub);
+ DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_v_stub);
+ DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_w_stub);
+ DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_h_stub);
+ DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_he_stub);
+ DECLARE_DISPATCH(structured_binary_fn, laguerre_polynomial_l_stub);
+ DECLARE_DISPATCH(structured_binary_fn, legendre_polynomial_p_stub);
+ DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_t_stub);
+ DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_u_stub);
+ DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_v_stub);
+ DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_w_stub);
+
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/BucketizationUtils.h ADDED
@@ -0,0 +1,173 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/TypeProperties.h>
+ #include <ATen/ScalarOps.h>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/NativeFunctions.h>
+ #else
+ #include <ATen/ops/result_type.h>
+ #endif
+
+ namespace at::native {
+
+ // original values given by raw_*. If an original value is not contiguous, will make a contiguous copy to
+ // the corresponding trimmed_* value. Additionally, if the dtypes of the boundary and input tensor do not
+ // match, will change them to be a common super type so comparisons are done between the same types.
+ // For any trimmed_* tensor, if its outgoing value matches what it was incoming (typically null), then the
+ // corresponding raw_* version should be used since it was already contiguous of the right type.
+ inline void searchsorted_maybe_trim_input_tensors(
+ Tensor& trimmed_input,
+ Tensor& trimmed_boundaries,
+ Tensor& trimmed_sorter,
+ const Tensor& raw_input,
+ const Tensor& raw_boundaries,
+ const Tensor& raw_sorter) {
+ bool in_is_contiguous = raw_input.is_contiguous();
+ bool bd_is_contiguous = raw_boundaries.is_contiguous();
+ bool sort_is_contiguous = raw_sorter.is_contiguous();
+
+ if (!in_is_contiguous) {
+ TORCH_WARN_ONCE("torch.searchsorted(): input value tensor is non-contiguous, this will lower the performance due "
+ "to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous input value "
+ "tensor if possible. This message will only appear once per program.");
+ trimmed_input = raw_input.contiguous();
+ }
+ if (!bd_is_contiguous) {
+ TORCH_WARN_ONCE("torch.searchsorted(): boundary tensor is non-contiguous, this will lower the performance due "
+ "to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous boundary "
+ "tensor if possible. This message will only appear once per program.");
+ trimmed_boundaries = raw_boundaries.contiguous();
+ }
+ if (!sort_is_contiguous) {
+ TORCH_WARN_ONCE("torch.searchsorted(): sorter tensor is non-contiguous, this will lower the performance due "
+ "to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous sorter "
+ "tensor if possible. This message will only appear once per program.");
+ trimmed_sorter = raw_sorter.contiguous();
+ }
+ if (raw_input.dtype() != raw_boundaries.dtype()) {
+ at::native::ResultTypeState state = {};
+ state = at::native::update_result_type_state(raw_boundaries, state);
+ state = at::native::update_result_type_state(raw_input, state);
+ ScalarType common_stype = at::native::result_type(state);
+
+ TORCH_INTERNAL_ASSERT(common_stype != ScalarType::Undefined);
+ if (common_stype != raw_input.scalar_type()) {
+ trimmed_input = in_is_contiguous ? raw_input.to(common_stype) : trimmed_input.to(common_stype);
+ }
+ if (common_stype != raw_boundaries.scalar_type()) {
+ trimmed_boundaries = bd_is_contiguous ? raw_boundaries.to(common_stype) : trimmed_boundaries.to(common_stype);
+ }
+ }
+ }
+
+ /* unused but needed for internal jagged tensor class */
+ inline void searchsorted_maybe_trim_input_tensors(
+ Tensor& trimmed_input,
+ Tensor& trimmed_boundaries,
+ const Tensor& raw_input,
+ const Tensor& raw_boundaries) {
+ Tensor trimmed_sorter;
+ Tensor raw_sorter;
+ return searchsorted_maybe_trim_input_tensors(
+ trimmed_input,
+ trimmed_boundaries,
+ trimmed_sorter,
+ raw_input,
+ raw_boundaries,
+ raw_sorter);
+ }
+
+ inline bool searchsorted_dims_matched_before_last_dim(const Tensor& boundaries, const Tensor& input) {
+ if (boundaries.dim() != input.dim()) {
+ return false;
+ }
+ const auto& dims_bd = boundaries.sizes();
+ const auto& dims_in = input.sizes();
+ for (int64_t dim = 0; dim + 1 < boundaries.dim(); ++dim) {
+ if (dims_bd[dim] != dims_in[dim]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ inline Tensor searchsorted_scalar_tensor(const Scalar& scalar, const c10::Device& device) {
+ auto tensor = c10::scalar_to_tensor(scalar, device);
+ // This is to adopt the scalar promotion rules defined in native/TypeProperties.h
+ // So we have the same type promotion rules as binary operations.
+ tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
+ return tensor;
+ }
+
+ inline void searchsorted_pre_check(
+ const Tensor& boundaries,
+ const Tensor& input,
+ const Tensor& output,
+ const bool out_int32,
+ const bool right,
+ const c10::optional<c10::string_view> side_opt,
+ const Tensor& sorter) {
+ if (side_opt) {
+ const c10::string_view side = *side_opt;
+ TORCH_CHECK(side == "left" || side == "right", "torch.searchsorted(): side can only be 'left' or 'right' but ",
+ "got ", side);
+
+ // assume the user has not explicitly set (right=False, side="right")
+ TORCH_CHECK(!right || side == "right", "torch.searchsorted(): side and right can't be set to opposites, got side "
+ "of ", side, " while right was True");
+ }
+
+ TORCH_CHECK(boundaries.device() == input.device(), "torch.searchsorted(): boundaries and input value tensors ",
+ "should have same device type, but got boundaries tensor device type ", boundaries.device(), " and input value ",
+ "tensor device type ", input.device());
+
+ if (sorter.defined()) {
+ TORCH_CHECK(sorter.device() == boundaries.device(), "torch.searchsorted(): sorter and boundary tensors should ",
+ "have same device type, but got sorter tensor device type ", sorter.device(), " and input value tensor ",
+ "device type ", boundaries.device());
+
+ TORCH_CHECK(sorter.sizes() == boundaries.sizes(), "torch.searchsorted(): boundary and sorter must have the same "
+ "size, but got boundary tensor ", boundaries.sizes(), "and got sorter tensor ", sorter.sizes());
+
+ TORCH_CHECK(sorter.scalar_type() == ScalarType::Long, "torch.searchsorted(): sorter must be a tensor of long ",
+ "dtype but got dtype ", sorter.scalar_type());
+
+ if (sorter.numel() > 0) {
+ auto minmax = sorter.aminmax();
+ int64_t vmin = std::get<0>(minmax).item().toLong();
+ int64_t vmax = std::get<1>(minmax).item().toLong();
+ TORCH_CHECK(vmin >= 0 && vmax < sorter.sizes().back(), "torch.searchsorted(): sorter index out of range");
+ }
+ }
+
+ TORCH_CHECK(input.dim() > 0 || (input.dim() == 0 && input.numel() == 1 && boundaries.dim() == 1),
+ "torch.searchsorted(): input value can be a scalar only when boundaries tensor dimension is 1, but we got ",
+ "boundaries tensor dim(", boundaries.dim(), ") and input value's dim(", input.dim(), ") numel(",
+ input.numel(), ")");
+
+ TORCH_CHECK(boundaries.dim() != 0, "torch.searchsorted(): boundaries tensor should have positive dimension, but ",
+ "got 0 dimension");
+
+ TORCH_CHECK(boundaries.dim() == 1 || searchsorted_dims_matched_before_last_dim(boundaries, input),
+ "torch.searchsorted(): boundaries tensor should be 1 dimension or the first N-1 dimensions of boundaries tensor ",
+ "and input value tensor must match, but we got boundaries tensor ", boundaries.sizes(), " and input value tensor ",
+ input.sizes());
+
+ ScalarType output_dtype = output.scalar_type();
+ TORCH_CHECK(
+ (output_dtype == ScalarType::Long && !out_int32) ||
+ (output_dtype == ScalarType::Int && out_int32),
+ "torch.searchsorted(): output tensor's dtype is wrong, it can only be Int(int32) or Long(int64) depending on ",
+ "whether out_int32 flag is True, but we got output tensor's dtype ", output_dtype,
+ " and out_int32 flag is ", (out_int32 ? "True" : "False"));
+
+ if (out_int32) {
+ TORCH_CHECK(boundaries.sizes().back() < INT_MAX,
+ "torch.searchsorted(): the size of boundaries' last dimension should be less than ", INT_MAX, ", but we got ",
+ boundaries.sizes().back());
+ }
+ }
+
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/CPUBlas.h ADDED
@@ -0,0 +1,189 @@
+ #pragma once
+
+ #include <ATen/OpMathType.h>
+ #include <ATen/native/DispatchStub.h>
+ #include <ATen/native/TransposeType.h>
+ #include <c10/util/complex.h>
+ #include <c10/core/ScalarType.h>
+ #include <c10/core/Scalar.h>
+
+ namespace at::native::cpublas {
+
+ namespace internal {
+ void normalize_last_dims(
+ TransposeType transa, TransposeType transb,
+ int64_t m, int64_t n, int64_t k,
+ int64_t *lda, int64_t *ldb, int64_t *ldc);
+ } // namespace internal
+
+ using gemm_fn = void(*)(
+ at::ScalarType type,
+ TransposeType transa, TransposeType transb,
+ int64_t m, int64_t n, int64_t k,
+ const Scalar& alpha,
+ const void *a, int64_t lda,
+ const void *b, int64_t ldb,
+ const Scalar& beta,
+ void *c, int64_t ldc);
+
+ DECLARE_DISPATCH(gemm_fn, gemm_stub);
+
+ template <typename scalar_t>
+ void gemm(
+ TransposeType transa, TransposeType transb,
+ int64_t m, int64_t n, int64_t k,
+ at::opmath_type<scalar_t> alpha,
+ const scalar_t *a, int64_t lda,
+ const scalar_t *b, int64_t ldb,
+ at::opmath_type<scalar_t> beta,
+ scalar_t *c, int64_t ldc) {
+ internal::normalize_last_dims(transa, transb, m, n, k, &lda, &ldb, &ldc);
+ gemm_stub(
+ kCPU, c10::CppTypeToScalarType<scalar_t>::value,
+ transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
+ }
+
+ void gemm(
+ TransposeType transa, TransposeType transb,
+ int64_t m, int64_t n, int64_t k,
+ double alpha,
+ const double *a, int64_t lda,
+ const double *b, int64_t ldb,
+ double beta,
+ double *c, int64_t ldc);
+
+ void gemm(
+ TransposeType transa, TransposeType transb,
+ int64_t m, int64_t n, int64_t k,
+ float alpha,
+ const float *a, int64_t lda,
+ const float *b, int64_t ldb,
+ float beta,
+ float *c, int64_t ldc);
+
+ void gemm(
+ TransposeType transa, TransposeType transb,
+ int64_t m, int64_t n, int64_t k,
+ float alpha,
+ const at::BFloat16 *a, int64_t lda,
+ const at::BFloat16 *b, int64_t ldb,
+ float beta,
+ at::BFloat16 *c, int64_t ldc);
+
+ void gemm(
+ TransposeType transa, TransposeType transb,
+ int64_t m, int64_t n, int64_t k,
+ const float alpha,
+ const at::BFloat16 *a, int64_t lda,
+ const at::BFloat16 *b, int64_t ldb,
+ const float beta,
+ float *c, int64_t ldc);
+
+ void gemm(
+ TransposeType transa, TransposeType transb,
+ int64_t m, int64_t n, int64_t k,
+ float alpha,
+ const at::Half *a, int64_t lda,
+ const at::Half *b, int64_t ldb,
+ float beta,
+ at::Half *c, int64_t ldc);
+
+ void gemm(
+ TransposeType transa, TransposeType transb,
+ int64_t m, int64_t n, int64_t k,
+ const float alpha,
+ const at::Half *a, int64_t lda,
+ const at::Half *b, int64_t ldb,
+ const float beta,
+ float *c, int64_t ldc);
+
+ void gemm(
+ TransposeType transa, TransposeType transb,
+ int64_t m, int64_t n, int64_t k,
+ c10::complex<double> alpha,
+ const c10::complex<double> *a, int64_t lda,
+ const c10::complex<double> *b, int64_t ldb,
+ c10::complex<double> beta,
+ c10::complex<double> *c, int64_t ldc);
+
+ void gemm(
+ TransposeType transa, TransposeType transb,
+ int64_t m, int64_t n, int64_t k,
+ c10::complex<float> alpha,
+ const c10::complex<float> *a, int64_t lda,
+ const c10::complex<float> *b, int64_t ldb,
+ c10::complex<float> beta,
+ c10::complex<float> *c, int64_t ldc);
+
+ void gemm(
+ TransposeType transa, TransposeType transb,
+ int64_t m, int64_t n, int64_t k,
+ int64_t alpha,
+ const int64_t *a, int64_t lda,
+ const int64_t *b, int64_t ldb,
+ int64_t beta,
+ int64_t *c, int64_t ldc);
+
+ template <typename scalar_t>
+ void gemm_batched(
+ TransposeType transa, TransposeType transb,
+ int64_t batch_size, int64_t m, int64_t n, int64_t k,
+ scalar_t alpha,
+ const scalar_t * const *a, int64_t lda,
+ const scalar_t * const *b, int64_t ldb,
+ const scalar_t beta,
+ scalar_t * const *c, int64_t ldc);
+
+ template <typename scalar_t>
+ void gemm_batched_with_stride(
+ TransposeType transa, TransposeType transb,
+ int64_t batch_size, int64_t m, int64_t n, int64_t k,
+ scalar_t alpha,
+ const scalar_t *a, int64_t lda, int64_t batch_stride_a,
+ const scalar_t *b, int64_t ldb, int64_t batch_stride_b,
+ scalar_t beta,
+ scalar_t *c, int64_t ldc, int64_t batch_stride_c);
+
+ using axpy_fn = void(*)(at::ScalarType type, int64_t n, const Scalar& a, const void *x, int64_t incx, void *y, int64_t incy);
+
+ DECLARE_DISPATCH(axpy_fn, axpy_stub);
+
+ template<typename scalar_t>
+ void axpy(int64_t n, scalar_t a, const scalar_t *x, int64_t incx, scalar_t *y, int64_t incy){
+ if(n == 1)
+ {
+ incx = 1;
+ incy = 1;
+ }
+ axpy_stub(
+ kCPU, c10::CppTypeToScalarType<scalar_t>::value,
+ n, a, x, incx, y, incy);
+ }
+
+ void axpy(int64_t n, double a, const double *x, int64_t incx, double *y, int64_t incy);
+ void axpy(int64_t n, float a, const float *x, int64_t incx, float *y, int64_t incy);
+ void axpy(int64_t n, c10::complex<double> a, const c10::complex<double> *x, int64_t incx, c10::complex<double> *y, int64_t incy);
+ void axpy(int64_t n, c10::complex<float> a, const c10::complex<float> *x, int64_t incx, c10::complex<float> *y, int64_t incy);
+
+ using copy_fn = void(*)(at::ScalarType type, int64_t n, const void *x, int64_t incx, void *y, int64_t incy);
+
+ DECLARE_DISPATCH(copy_fn, copy_stub);
+
+ template<typename scalar_t>
+ void copy(int64_t n, const scalar_t *x, int64_t incx, scalar_t *y, int64_t incy) {
+ if(n == 1)
+ {
+ incx = 1;
+ incy = 1;
+ }
+ copy_stub(
+ kCPU, c10::CppTypeToScalarType<scalar_t>::value,
+ n, x, incx, y, incy);
+ }
+
+ void copy(int64_t n, const double *x, int64_t incx, double *y, int64_t incy);
+ void copy(int64_t n, const float *x, int64_t incx, float *y, int64_t incy);
+ void copy(int64_t n, const c10::complex<double> *x, int64_t incx, c10::complex<double> *y, int64_t incy);
+ void copy(int64_t n, const c10::complex<float> *x, int64_t incx, c10::complex<float> *y, int64_t incy);
+
+ } // namespace at::native::cpublas
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/CPUFallback.h ADDED
@@ -0,0 +1,45 @@
+ #pragma once
+
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/stack.h>
+ #include <ATen/core/boxing/KernelFunction.h>
+ #include <ATen/core/dispatch/Dispatcher.h>
+ #include <c10/util/Metaprogramming.h>
+ #include <torch/library.h>
+
+ namespace at::native {
+
+ // This function implements a boxed fallback to CPU.
+ // External backends can add their own custom logging on top if it to customize their own CPU fallbacks.
+ TORCH_API void cpu_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool error_on_views = false);
+
+ // This is a helper function that backends can use to directly call their boxed CPU fallback
+ // TODO: update and add a usage example after https://github.com/pytorch/pytorch/pull/58092 lands.
+ template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op, bool symint, class ReturnType, class... ParameterTypes>
+ struct _call_fallback_fn final {};
+
+ template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op, bool symint, class ReturnType, class... ParameterTypes>
+ struct _call_fallback_fn<fallback_fn, Op, symint, ReturnType(ParameterTypes...)> final {
+ static ReturnType call(typename c10::maybe_keep_symint<symint, ParameterTypes>::type... args) {
+ auto op = c10::Dispatcher::singleton()
+ // TODO: figure out how to make compiler happy without dynamic casts
+ .findSchemaOrThrow((const char*) Op::name, (const char*) Op::overload_name)
+ //.findSchemaOrThrow("a", "b")
+ .typed<ReturnType (typename c10::maybe_keep_symint<symint, ParameterTypes>::type...)>();
+ return c10::impl::BoxedKernelWrapper<ReturnType (typename c10::maybe_keep_symint<symint, ParameterTypes>::type...)>::call(
+ c10::BoxedKernel::makeFromFunction<fallback_fn>(),
+ op,
+ c10::DispatchKeySet(), // we know that the cpu_fallback doesn't use the dispatch keyset.
+ // TODO: get std::forward<> to work
+ args...
+ );
+ }
+ };
+
+ template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op>
+ using call_fallback_fn_symint = _call_fallback_fn<fallback_fn, Op, true, typename Op::schema>;
+
+ template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op>
+ using call_fallback_fn = _call_fallback_fn<fallback_fn, Op, false, typename Op::schema>;
+
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+ #include <c10/macros/Export.h>
+ #include <limits>
+
+ namespace at {
+ class TensorBase;
+ }
+
+ namespace at::native {
+
+ TORCH_API bool canUse32BitIndexMath(const at::TensorBase &t, int64_t max_elem=std::numeric_limits<int32_t>::max());
+
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ComplexHelper.h ADDED
@@ -0,0 +1,97 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <c10/util/irange.h>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/NativeFunctions.h>
+ #else
+ #include <ATen/ops/view_as_real_native.h>
+ #include <ATen/ops/view_as_complex_native.h>
+
+ #include <utility>
+ #endif
+
+ // WARNING: this header contains non-inline functions and should be only
+ // included from ONE cpp file
+
+ namespace at::native {
+
+ // View tensor with new dtype, storage offset, sizes and strides
+ inline Tensor view_tensor(
+ const Tensor &tensor, ScalarType dtype,
+ c10::SymInt offset, SymIntArrayRef sizes, SymIntArrayRef strides) {
+ Storage storage = tensor.storage();
+ auto key_set = tensor.key_set().remove(DispatchKey::Conjugate);
+ auto new_tensor = detail::make_tensor<TensorImpl>(
+ c10::TensorImpl::VIEW, std::move(storage), key_set, scalarTypeToTypeMeta(dtype));
+ auto * impl = new_tensor.unsafeGetTensorImpl();
+ impl->set_sizes_and_strides(sizes, strides, offset);
+ return new_tensor;
+ }
+
+ inline SymDimVector computeStrideForViewAsReal(SymIntArrayRef oldstride) {
+ SymDimVector res(oldstride.size() + 1);
+ for (const auto i : c10::irange(oldstride.size())) {
+ res[i] = oldstride[i] * 2;
+ }
+ res.back() = 1;
+ return res;
+ }
+
+ inline Tensor _view_as_real_physical(const Tensor& self) {
+ TORCH_CHECK(self.is_complex(), "view_as_real is only supported for complex tensors");
+ auto old_sizes = self.sym_sizes();
+ SymDimVector new_sizes(old_sizes.size() + 1);
+ std::copy(old_sizes.begin(), old_sizes.end(), new_sizes.begin());
+ // last dimension will always have two elements containing the real and imag vals
+ new_sizes.back() = 2;
+ auto new_strides = computeStrideForViewAsReal(self.sym_strides());
+ auto new_storage_offset = self.sym_storage_offset() * 2;
+ const auto float_type = c10::toRealValueType(self.scalar_type());
+ auto real_tensor = view_tensor(self, float_type, std::move(new_storage_offset), new_sizes, new_strides);
+ return real_tensor;
+ }
+
+ // expects as input a complex tensor and returns back a tensor
+ // with corresponding real dtype containing the complex values
+ // in the last two dimensions
+ Tensor view_as_real(const Tensor& self) {
+ TORCH_CHECK(!self.is_conj(), "view_as_real doesn't work on unresolved conjugated tensors. To resolve the conjugate tensor so you can view it as real, use self.resolve_conj(); however, be warned that the resulting tensor will NOT alias the original.");
+ return _view_as_real_physical(self);
+ }
+
+ inline SymDimVector computeStrideForViewAsComplex(SymIntArrayRef oldstride) {
+ const int64_t dim = oldstride.size();
+ TORCH_CHECK(oldstride[dim-1] == 1, "Tensor must have a last dimension with stride 1");
+
+ SymDimVector res(dim - 1);
+ for (const auto i : c10::irange(res.size())) {
+ TORCH_CHECK(oldstride[i] % 2 == 0, "Tensor must have a stride divisible by 2 for all but last dimension");
+ res[i] = oldstride[i] / 2;
+ }
+ return res;
+ }
+
+ // expects as input a float or double tensor with last dimension of size 2
+ // and returns back a tensor with corresponding complex dtype
+ Tensor view_as_complex(const Tensor& self) {
+ TORCH_CHECK(
+ self.scalar_type() == kFloat || self.scalar_type() == kDouble || self.scalar_type() == kHalf,
+ "view_as_complex is only supported for half, float and double tensors, but got a tensor of scalar type: ", self.scalar_type());
+
+ auto old_sizes = self.sym_sizes();
+ TORCH_CHECK(!old_sizes.empty(), "Input tensor must have one or more dimensions");
+ TORCH_CHECK(old_sizes[old_sizes.size()-1] == 2, "Tensor must have a last dimension of size 2");
+ SymDimVector new_sizes(old_sizes.begin(), old_sizes.end() - 1);
+
+ const auto new_strides = computeStrideForViewAsComplex(self.sym_strides());
+ const auto complex_type = c10::toComplexType(self.scalar_type());
+
+ TORCH_CHECK(self.sym_storage_offset() % 2 == 0, "Tensor must have a storage_offset divisible by 2");
+ const auto new_storage_offset = self.sym_storage_offset() / 2;
+
+ return view_tensor(self, complex_type, new_storage_offset, new_sizes, new_strides);
+ }
+
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/CompositeRandomAccessor.h ADDED
@@ -0,0 +1,34 @@
+ #pragma once
+
+ #include <ATen/native/CompositeRandomAccessorCommon.h>
+
+ namespace at::native {
+
+ struct TupleInfoCPU {
+ template <typename ...Types>
+ using tuple = std::tuple<Types...>;
+
+ template <typename ...Types>
+ static constexpr auto tie(Types&... args) noexcept {
+ return std::tie(args...);
+ }
+ };
+
+ template <typename KeyAccessor, typename ValueAccessor>
+ using CompositeRandomAccessorCPU =
+ CompositeRandomAccessor<KeyAccessor, ValueAccessor, TupleInfoCPU>;
+
+ template <typename Values, typename References>
+ void swap(
+ references_holder<Values, References> rh1,
+ references_holder<Values, References> rh2
+ ) {
+ return std::swap(rh1.data(), rh2.data());
+ }
+
+ template <int N, typename Values, typename References>
+ auto get(references_holder<Values, References> rh) -> decltype(std::get<N>(rh.data())) {
+ return std::get<N>(rh.data());
+ }
+
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/CompositeRandomAccessorCommon.h ADDED
@@ -0,0 +1,263 @@
+ #include <utility>
+
+ #pragma once
+
+ namespace at::native {
+
+ namespace {
+
+ // operator_brackets_proxy is used in
+ // CompositeRandomAccessor in place of operator[].
+ // For some iterators, references returned by operator[]
+ // could become invalid, operator_brackets_proxy tries to
+ // resolve that by making accessor[n] to be equivalent to
+ // *(accessor + n).
+ template <typename Accessor>
+ class operator_brackets_proxy {
+ using reference = typename std::iterator_traits<Accessor>::reference;
+ using value_type = typename std::iterator_traits<Accessor>::value_type;
+
+ public:
+ C10_HOST_DEVICE
+ operator_brackets_proxy(Accessor const& accessor)
+ : accessor(accessor)
+ {}
+
+ C10_HOST_DEVICE
+ operator reference() {
+ return *accessor;
+ }
+
+ C10_HOST_DEVICE
+ reference operator*() {
+ return *accessor;
+ }
+
+ C10_HOST_DEVICE
+ operator_brackets_proxy& operator=(value_type const& val) {
+ *accessor = val;
+ return *this;
+ }
+
+ private:
+ Accessor accessor;
+ };
+
+ }
+
+ // references_holder is used as a surrogate for the
+ // references type from std::iterator_traits in CompositeRandomAccessor.
+ // It is assumed in CompositeRandomAccessor that
+ // References = tuple<Types&...>,
+ // Values = tuple<Types...> by default,
+ // but they could be anything as long as References could be
+ // cast to Values.
+ // If you plan to use it with STL, for example, you will need to
+ // define 'swap` and `get`(aka std::get) methods.
+ template <typename Values, typename References>
+ class references_holder {
+ public:
+ using values = Values;
+ using references = References;
+
+ C10_HOST_DEVICE
+ references_holder(references refs)
+ : refs{std::move(refs)}
+ {}
+
+ C10_HOST_DEVICE
+ operator references() {
+ return refs;
+ }
+
+ C10_HOST_DEVICE
+ operator values() {
+ return refs;
+ }
+
+ C10_HOST_DEVICE
+ references_holder& operator=(values vals) {
+ refs = vals;
+ return *this;
+ }
+
+ C10_HOST_DEVICE
+ references& data() {
+ return refs;
+ }
+
+ protected:
+ references refs;
+ };
+
+ // CompositeRandomAccessor is essentially a simplified version of
+ // a random access iterator over two random access iterators.
+ // TupleInfo should contain a variadic type `tuple`, and a method `tie`,
+ // which constructs a tuple of references from a variadic list of arguments.
+ template <typename KeyAccessor, typename ValueAccessor, typename TupleInfo>
+ class CompositeRandomAccessor {
+ using self_type = CompositeRandomAccessor<KeyAccessor, ValueAccessor, TupleInfo>;
+
+ using key_accessor_value_type =
+ typename std::iterator_traits<KeyAccessor>::value_type;
+ using value_accessor_value_type =
+ typename std::iterator_traits<ValueAccessor>::value_type;
+ using key_accessor_reference_type =
+ typename std::iterator_traits<KeyAccessor>::reference;
+ using value_accessor_reference_type =
+ typename std::iterator_traits<ValueAccessor>::reference;
+
+ using composite_value_type = typename TupleInfo::template tuple<
+ key_accessor_value_type,
+ value_accessor_value_type>;
+ using composite_reference = typename TupleInfo::template tuple<
+ key_accessor_reference_type,
+ value_accessor_reference_type>;
+
+ public:
+ using value_type = composite_value_type;
+ using reference = references_holder<composite_value_type, composite_reference>;
+ // Note that CompositeRandomAccessor does not hold key and values
+ // in a specific datastructure, which means that a pointer to a (key, value)
+ // is not defined. Hence we just use a pointer type of the KeyAccessor.
+ using pointer = typename std::iterator_traits<KeyAccessor>::pointer;
124
+ using difference_type = typename std::iterator_traits<KeyAccessor>::difference_type;
125
+ using iterator_category = std::random_access_iterator_tag;
126
+
127
+ C10_HOST_DEVICE
128
+ CompositeRandomAccessor() = default;
129
+
130
+ C10_HOST_DEVICE
131
+ CompositeRandomAccessor(KeyAccessor keys, ValueAccessor values)
132
+ : keys(keys), values(values)
133
+ {}
134
+
135
+ // Pointer-like operations {
136
+ C10_HOST_DEVICE
137
+ reference operator*() const {
138
+ return TupleInfo::tie(*keys, *values);
139
+ }
140
+
141
+ // operator->() is supposed to return a pointer type.
142
+ // Since CompositeRandomAccessor does not hold pointers to pairs,
143
+ // we just return a pointer to a key.
144
+ C10_HOST_DEVICE
145
+ auto* operator->() const {
146
+ return keys.operator->();
147
+ }
148
+
149
+ C10_HOST_DEVICE
150
+ reference operator[](difference_type idx) {
151
+ return operator_brackets_proxy<self_type>(
152
+ CompositeRandomAccessor(keys + idx, values + idx)
153
+ );
154
+ }
155
+ // }
156
+
157
+ // Prefix/postfix increment/decrement {
158
+ C10_HOST_DEVICE
159
+ CompositeRandomAccessor& operator++() {
160
+ ++keys;
161
+ ++values;
162
+ return *this;
163
+ }
164
+
165
+ C10_HOST_DEVICE
166
+ CompositeRandomAccessor operator++(int) {
167
+ CompositeRandomAccessor copy(*this);
168
+ ++*this;
169
+ return copy;
170
+ }
171
+
172
+ C10_HOST_DEVICE
173
+ CompositeRandomAccessor& operator--() {
174
+ --keys;
175
+ --values;
176
+ return *this;
177
+ }
178
+
179
+ C10_HOST_DEVICE
180
+ CompositeRandomAccessor operator--(int) {
181
+ CompositeRandomAccessor copy(*this);
182
+ --*this;
183
+ return copy;
184
+ }
185
+ // }
186
+
187
+ // Arithmetic operations {
188
+ C10_HOST_DEVICE
189
+ CompositeRandomAccessor& operator+=(difference_type offset) {
190
+ keys += offset;
191
+ values += offset;
192
+ return *this;
193
+ }
194
+
195
+ C10_HOST_DEVICE
196
+ CompositeRandomAccessor operator+(difference_type offset) const {
197
+ return CompositeRandomAccessor(keys + offset, values + offset);
198
+ }
199
+
200
+ C10_HOST_DEVICE
201
+ friend CompositeRandomAccessor operator+(
202
+ difference_type offset,
203
+ const CompositeRandomAccessor& accessor
204
+ ) {
205
+ return accessor + offset;
206
+ }
207
+
208
+ C10_HOST_DEVICE
209
+ CompositeRandomAccessor& operator-=(difference_type offset) {
210
+ keys -= offset;
211
+ values -= offset;
212
+ return *this;
213
+ }
214
+
215
+ C10_HOST_DEVICE
216
+ CompositeRandomAccessor operator-(difference_type offset) const {
217
+ return CompositeRandomAccessor(keys - offset, values - offset);
218
+ }
219
+
220
+ C10_HOST_DEVICE
221
+ difference_type operator-(const CompositeRandomAccessor& other) const {
222
+ return keys - other.keys;
223
+ }
224
+ // }
225
+
226
+ // Comparison operators {
227
+ C10_HOST_DEVICE
228
+ bool operator==(const CompositeRandomAccessor& other) const {
229
+ return keys == other.keys;
230
+ }
231
+
232
+ C10_HOST_DEVICE
233
+ bool operator!=(const CompositeRandomAccessor& other) const {
234
+ return keys != other.keys;
235
+ }
236
+
237
+ C10_HOST_DEVICE
238
+ bool operator<(const CompositeRandomAccessor& other) const {
239
+ return keys < other.keys;
240
+ }
241
+
242
+ C10_HOST_DEVICE
243
+ bool operator<=(const CompositeRandomAccessor& other) const {
244
+ return keys <= other.keys;
245
+ }
246
+
247
+ C10_HOST_DEVICE
248
+ bool operator>(const CompositeRandomAccessor& other) const {
249
+ return keys > other.keys;
250
+ }
251
+
252
+ C10_HOST_DEVICE
253
+ bool operator>=(const CompositeRandomAccessor& other) const {
254
+ return keys >= other.keys;
255
+ }
256
+ // }
257
+
258
+ protected:
259
+ KeyAccessor keys;
260
+ ValueAccessor values;
261
+ };
262
+
263
+ } // namespace at::native
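To make the composite accessor concrete, here is a small illustrative sketch (assuming the full PyTorch include path is available; the raw-pointer accessors and the driver below are hypothetical, not part of this header) of pairing a key array with a value array so that swapping through the composite view reorders both:

#include <c10/macros/Macros.h>
#include <ATen/native/CompositeRandomAccessor.h>
#include <iostream>

int main() {
  int keys[3] = {3, 1, 2};
  double vals[3] = {30.0, 10.0, 20.0};

  // Raw pointers model random access iterators, so they can serve as accessors.
  using Acc = at::native::CompositeRandomAccessorCPU<int*, double*>;
  Acc it(keys, vals);

  // Dereferencing yields a references_holder over std::tuple<int&, double&>;
  // swapping two of them swaps the referenced elements in both arrays at once.
  auto first = *it;
  auto last = *(it + 2);
  swap(first, last);  // found via ADL: keys[0]<->keys[2], vals[0]<->vals[2]

  std::cout << keys[0] << " " << vals[0] << std::endl;  // prints "2 20"
}

This pairing is what lets a single sorting pass over keys carry the associated values along in lockstep.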
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ConvUtils.h ADDED
@@ -0,0 +1,446 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/TensorUtils.h>
4
+ #include <ATen/detail/CUDAHooksInterface.h>
5
+ #include <ATen/native/DispatchStub.h>
6
+ #include <c10/util/env.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ namespace at::native {
10
+
11
+ using conv_depthwise2d_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
12
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
13
+ at::IntArrayRef, at::IntArrayRef, std::array<bool, 2>);
14
+ DECLARE_DISPATCH(conv_depthwise2d_backward_fn, conv_depthwise2d_backward_stub);
15
+ using conv_depthwise3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
16
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
17
+ at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
18
+ DECLARE_DISPATCH(conv_depthwise3d_backward_fn, conv_depthwise3d_backward_stub);
19
+ using cudnn_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
20
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
21
+ at::IntArrayRef, int64_t, bool, bool, bool, std::array<bool,2>);
22
+ DECLARE_DISPATCH(cudnn_convolution_backward_fn, cudnn_convolution_backward_stub);
23
+ using mps_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
24
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
25
+ at::IntArrayRef, int64_t, std::array<bool,3>);
26
+ DECLARE_DISPATCH(mps_convolution_backward_fn, mps_convolution_backward_stub);
27
+ using cudnn_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
28
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
29
+ at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool, std::array<bool,2>);
30
+ DECLARE_DISPATCH(cudnn_convolution_transpose_backward_fn, cudnn_convolution_transpose_backward_stub);
31
+ using miopen_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
32
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
33
+ at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
34
+ DECLARE_DISPATCH(miopen_convolution_backward_fn, miopen_convolution_backward_stub);
35
+ using miopen_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
36
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
37
+ at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
38
+ DECLARE_DISPATCH(miopen_convolution_transpose_backward_fn, miopen_convolution_transpose_backward_stub);
39
+ using miopen_depthwise_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
40
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
41
+ at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
42
+ DECLARE_DISPATCH(miopen_depthwise_convolution_backward_fn, miopen_depthwise_convolution_backward_stub);
43
+ using mkldnn_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
44
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
45
+ at::IntArrayRef, int64_t, std::array<bool,3>);
46
+ DECLARE_DISPATCH(mkldnn_convolution_backward_fn, mkldnn_convolution_backward_stub);
47
+ using mkldnn_convolution_transpose_fn = Tensor(*)(const Tensor&, const Tensor&, const c10::optional<Tensor>&,
48
+ IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t);
49
+ DECLARE_DISPATCH(mkldnn_convolution_transpose_fn, mkldnn_convolution_transpose_stub);
50
+ using mkldnn_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
51
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
52
+ at::IntArrayRef, at::IntArrayRef, int64_t, std::array<bool,3>);
53
+ DECLARE_DISPATCH(mkldnn_convolution_transpose_backward_fn, mkldnn_convolution_transpose_backward_stub);
54
+ using slow_conv_dilated2d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
55
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
56
+ at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
57
+ DECLARE_DISPATCH(slow_conv_dilated2d_backward_fn, slow_conv_dilated2d_backward_stub);
58
+ using slow_conv_dilated3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
59
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
60
+ at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
61
+ DECLARE_DISPATCH(slow_conv_dilated3d_backward_fn, slow_conv_dilated3d_backward_stub);
62
+ using slow_conv_transpose2d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
63
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
64
+ at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, std::array<bool,3>);
65
+ DECLARE_DISPATCH(slow_conv_transpose2d_backward_fn, slow_conv_transpose2d_backward_stub);
66
+ using slow_conv_transpose3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
67
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
68
+ at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, std::array<bool,3>);
69
+ DECLARE_DISPATCH(slow_conv_transpose3d_backward_fn, slow_conv_transpose3d_backward_stub);
70
+
71
+ namespace {
72
+ static bool cudnnv8_heuristic_mode_b = c10::utils::check_env("TORCH_CUDNN_USE_HEURISTIC_MODE_B") == true;
73
+ }
74
+
75
+ static inline bool cudnnv8_enabled_check_debug() {
76
+ static bool cudnnv8_flag = c10::utils::check_env("TORCH_CUDNN_V8_API_DISABLED") != true;
77
+ static bool cudnnv8_debug = c10::utils::check_env("TORCH_CUDNN_V8_API_DEBUG") == true;
78
+ static uint8_t cudnnv8_debugcount = 0;
79
+ if (cudnnv8_debug == 1 && cudnnv8_debugcount < 10) {
80
+ TORCH_WARN("TORCH_CUDNN_V8_DEBUG ON, V8 ON: ", cudnnv8_flag, " TORCH_CUDNN_USE_HEURISTIC_MODE B: ", cudnnv8_heuristic_mode_b);
81
+ cudnnv8_debugcount++;
82
+ }
83
+ return cudnnv8_flag == 1;
84
+ }
85
+
86
+ static inline bool cudnnv8_use_heur_mode_b() {
87
+ return cudnnv8_heuristic_mode_b;
88
+ }
89
+
90
+ // Keep in sync with py::enum_ in Module.cpp
91
+ enum class ConvBackend {
92
+ CudaDepthwise2d,
93
+ CudaDepthwise3d,
94
+ Cudnn,
95
+ CudnnTranspose,
96
+ Empty,
97
+ Miopen,
98
+ MiopenDepthwise,
99
+ MiopenTranspose,
100
+ Mkldnn,
101
+ MkldnnTranspose,
102
+ MkldnnEmpty,
103
+ NnpackSpatial,
104
+ Overrideable,
105
+ Slow2d,
106
+ Slow3d,
107
+ SlowDilated2d,
108
+ SlowDilated3d,
109
+ SlowTranspose2d,
110
+ SlowTranspose3d,
111
+ Winograd3x3Depthwise,
112
+ Xnnpack2d,
113
+ Mps,
114
+ MpsTranspose,
115
+ };
116
+
117
+ // Overload for selecting the convolution backend from the full set of convolution inputs.
118
+ // This overload is exposed to python for testing, etc.
119
+ TORCH_API ConvBackend select_conv_backend(
120
+ const Tensor& input, const Tensor& weight, const c10::optional<Tensor>& bias_opt,
121
+ SymIntArrayRef stride, SymIntArrayRef padding, SymIntArrayRef dilation,
122
+ bool transposed, SymIntArrayRef output_padding, c10::SymInt groups, const at::OptionalSymIntArrayRef bias_sizes_opt);
123
+
124
+ TORCH_API at::MemoryFormat _determine_backend_memory_format(const Tensor& input,
125
+ const Tensor& weight,
126
+ const ConvBackend backend);
127
+
128
+ // ---------------------------------------------------------------------
129
+ //
130
+ // Math
131
+ //
132
+ // ---------------------------------------------------------------------
133
+
134
+ constexpr int input_batch_size_dim = 0; // also grad_input
135
+ constexpr int input_channels_dim = 1;
136
+ constexpr int output_batch_size_dim = 0; // also grad_output
137
+ constexpr int output_channels_dim = 1;
138
+ constexpr int weight_output_channels_dim = 0;
139
+ constexpr int weight_input_channels_dim = 1;
140
+
141
+ // Often written as 2 + max_dim (extra dims for batch size and channels)
142
+ constexpr int max_dim = 3;
143
+
144
+ // ---------------------------------------------------------------------
145
+ //
146
+ // Checking
147
+ //
148
+ // ---------------------------------------------------------------------
149
+
150
+ // Used on pad, stride and dilation
151
+ static void check_args(CheckedFrom c, IntArrayRef args, size_t expected_size, const char* arg_name)
152
+ {
153
+ TORCH_CHECK(args.size() <= expected_size,
154
+ "Too many ", arg_name, " values (", args.size(), ") supplied, expecting ",
155
+ expected_size, " (while checking arguments for ", c, ")");
156
+ TORCH_CHECK(args.size() >= expected_size,
157
+ "Not enough ", arg_name, " values (", args.size(), ") supplied, expecting ",
158
+ expected_size, " (while checking arguments for ", c, ")");
159
+
160
+ auto num_negative_values = std::count_if(args.begin(), args.end(), [](int x){return x < 0;});
161
+ if (num_negative_values > 0){
162
+ std::stringstream ss;
163
+ ss << arg_name << " should be greater than zero but got (";
164
+ std::copy(args.begin(), args.end() - 1, std::ostream_iterator<int>(ss,", "));
165
+ ss << args.back() << ")" << " (while checking arguments for " << c << ")";
166
+ AT_ERROR(ss.str());
167
+ }
168
+ }
169
+
170
+
171
+ // NOTE [ Convolution checks ]
172
+ //
173
+ // NB: For many call sites, it is not strictly necessary to check all of
174
+ // these relationships (for example, for forward convolution, we compute
175
+ // the size of output ourselves, so we don't actually need to check
176
+ // output. However, writing a single function that does everything
177
+ // means we get to reuse it for both forwards and all backwards
178
+ // variants, even when the set of "real" inputs varies. The magic of
179
+ // relational computing!
180
+ //
181
+ // (There is one downside, which is that it is slightly harder to write
182
+ // error messages which are able to distinguish between real inputs
183
+ // (which the user can change) and computed inputs (which the user can
184
+ // only indirectly affect). It would be an interesting exercise to
185
+ // come up with a general framework to handle such situations.)
186
+ static void convolution_shape_check(
187
+ CheckedFrom c,
188
+ const TensorGeometryArg& input, const TensorGeometryArg& weight, const TensorGeometryArg& output,
189
+ IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups)
190
+ {
191
+ check_args(c, padding, input->dim() - 2, "padding");
192
+ check_args(c, stride, padding.size(), "stride");
193
+ check_args(c, dilation, padding.size(), "dilation");
194
+
195
+ // Input
196
+ checkDimRange(c, input, 3, 6 /* exclusive */);
197
+ checkSize_symint(c, input, input_channels_dim, weight->size(1) * groups);
198
+
199
+ // Weight
200
+ checkSameDim(c, input, weight);
201
+
202
+ // TODO: check that output->size() matches output_sizes
203
+ // TODO: check that weight matches output->sizes()
204
+ checkSameDim(c, input, output);
205
+ }
206
+
207
+ // NB: conv_output_size and conv_input_size are not bijections,
208
+ // as conv_output_size loses information; this is why conv_input_size
209
+ // takes an extra output_padding argument to resolve the ambiguity.
210
+
211
+ template <typename T>
212
+ static inline std::vector<T> _conv_output_size(
213
+ ArrayRef<T> input_size, ArrayRef<T> weight_size,
214
+ ArrayRef<T> padding, ArrayRef<T> stride, ArrayRef<T> dilation = ArrayRef<T>()
215
+ ) {
216
+ // ASSERT(input_size.size() > 2)
217
+ // ASSERT(input_size.size() == weight_size.size())
218
+ bool has_dilation = !dilation.empty();
219
+ auto dim = input_size.size();
220
+ std::vector<T> output_size(dim);
221
+ output_size[0] = input_size[input_batch_size_dim];
222
+ output_size[1] = weight_size[weight_output_channels_dim];
223
+ for (const auto d : c10::irange(2, dim)) {
224
+ auto dilation_ = has_dilation ? dilation[d - 2] : 1;
225
+ auto kernel = dilation_ * (weight_size[d] - 1) + 1;
226
+ output_size[d] = (input_size[d] + (2 * padding[d - 2]) - kernel) / stride[d - 2] + 1;
227
+ }
228
+ return output_size;
229
+ }
230
+
231
+ static inline std::vector<int64_t> conv_output_size(
232
+ IntArrayRef input_size, IntArrayRef weight_size,
233
+ IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation = IntArrayRef()
234
+ ) {
235
+ return _conv_output_size(input_size, weight_size, padding, stride, dilation);
236
+ }
237
+
238
+ static inline std::vector<c10::SymInt> conv_output_size(
239
+ SymIntArrayRef input_size, SymIntArrayRef weight_size,
240
+ SymIntArrayRef padding, SymIntArrayRef stride, SymIntArrayRef dilation = SymIntArrayRef()
241
+ ) {
242
+ return _conv_output_size(input_size, weight_size, padding, stride, dilation);
243
+ }
244
+
245
+ template <typename T>
246
+ std::vector<T> _conv_input_size(
247
+ ArrayRef<T> output_size, ArrayRef<T> weight_size,
248
+ ArrayRef<T> padding, ArrayRef<T> output_padding, ArrayRef<T> stride, ArrayRef<T> dilation, T groups
249
+ ) {
250
+ // ASSERT(output_size.size() > 2)
251
+ // ASSERT(output_size.size() == weight_size.size())
252
+ auto dim = output_size.size();
253
+ std::vector<T> input_size(dim);
254
+ input_size[0] = output_size[output_batch_size_dim];
255
+ input_size[1] = weight_size[weight_input_channels_dim] * groups;
256
+ for (const auto d : c10::irange(2, dim)) {
257
+ auto kernel = (weight_size[d] - 1) * dilation[d - 2] + 1;
258
+ input_size[d] = (output_size[d] - 1) * stride[d - 2] - (padding[d - 2] * 2) +
259
+ kernel + output_padding[d - 2];
260
+ }
261
+ return input_size;
262
+ }
263
+
264
+ static inline std::vector<c10::SymInt> conv_input_size(
265
+ SymIntArrayRef output_size, SymIntArrayRef weight_size,
266
+ SymIntArrayRef padding, SymIntArrayRef output_padding, SymIntArrayRef stride, SymIntArrayRef dilation, c10::SymInt groups
267
+ ) {
268
+ return _conv_input_size(output_size, weight_size, padding, output_padding, stride, dilation, groups);
269
+ }
270
+
271
+ static inline std::vector<int64_t> conv_input_size(
272
+ IntArrayRef output_size, IntArrayRef weight_size,
273
+ IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
274
+ ) {
275
+ return _conv_input_size(output_size, weight_size, padding, output_padding, stride, dilation, groups);
276
+ }
277
+
278
+ template <typename T>
279
+ std::vector<T> _conv_weight_size(
280
+ ArrayRef<T> input_size, ArrayRef<T> output_size,
281
+ ArrayRef<T> padding, ArrayRef<T> output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
282
+ ) {
283
+ auto dim = input_size.size();
284
+ std::vector<T> weight_size(dim);
285
+ weight_size[0] = output_size[1];
286
+ weight_size[1] = input_size[1] / groups;
287
+ for (const auto d : c10::irange(2, dim)) {
288
+ auto kernel = input_size[d] - (output_size[d] - 1) * stride[d - 2]
289
+ + padding[d - 2] * 2 - output_padding[d - 2];
290
+ weight_size[d] = (kernel - 1) / dilation[d - 2] + 1;
291
+ }
292
+ return weight_size;
293
+ }
294
+
295
+ static inline std::vector<c10::SymInt> conv_weight_size(
296
+ SymIntArrayRef input_size, SymIntArrayRef output_size,
297
+ SymIntArrayRef padding, SymIntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
298
+ ) {
299
+ return _conv_weight_size(input_size, output_size, padding, output_padding, stride, dilation, groups);
300
+ }
301
+
302
+ static inline std::vector<int64_t> conv_weight_size(
303
+ IntArrayRef input_size, IntArrayRef output_size,
304
+ IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
305
+ ) {
306
+ return _conv_weight_size(input_size, output_size, padding, output_padding, stride, dilation, groups);
307
+ }
308
+
309
+ static inline Tensor reshape_bias(int64_t dim, const Tensor& bias) {
310
+ std::vector<int64_t> shape(dim, 1);
311
+ shape[1] = -1;
312
+ return bias.reshape(shape);
313
+ }
314
+
315
+ static inline at::MemoryFormat cudnn_conv_suggest_memory_format(const at::Tensor& input, const at::Tensor& weight) {
316
+ // disable NHWC for float64 input.
317
+ if (!at::detail::getCUDAHooks().compiledWithCuDNN() ||
318
+ input.scalar_type() == at::kDouble ||
319
+ weight.scalar_type() == at::kDouble) {
320
+ return at::MemoryFormat::Contiguous;
321
+ }
322
+ long cudnn_version = at::detail::getCUDAHooks().versionCuDNN();
323
+ auto input_memory_format = input.suggest_memory_format();
324
+ auto weight_memory_format = weight.suggest_memory_format();
325
+ auto weight_ndim = weight.ndimension();
326
+
327
+ bool can_use_cudnn_channels_last_2d = (cudnn_version >= 7603) && (weight_ndim == 4) && (
328
+ (input_memory_format == at::MemoryFormat::ChannelsLast) ||
329
+ (weight_memory_format == at::MemoryFormat::ChannelsLast)
330
+ );
331
+ if (can_use_cudnn_channels_last_2d) {
332
+ return at::MemoryFormat::ChannelsLast;
333
+ }
334
+
335
+ bool can_use_cudnn_channels_last_3d = (cudnn_version >= 8005) && (weight_ndim == 5) && (
336
+ (input_memory_format == at::MemoryFormat::ChannelsLast3d) ||
337
+ (weight_memory_format == at::MemoryFormat::ChannelsLast3d)
338
+ );
339
+ if (can_use_cudnn_channels_last_3d) {
340
+ return at::MemoryFormat::ChannelsLast3d;
341
+ }
342
+
343
+ return at::MemoryFormat::Contiguous;
344
+ }
345
+
346
+ // controls whether emptyCache will be called following cudnn conv benchmarking
347
+ TORCH_API void _cudnn_set_conv_benchmark_empty_cache(bool enable);
348
+ TORCH_API bool _cudnn_get_conv_benchmark_empty_cache();
349
+
350
+
351
+ static inline bool miopen_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
352
+
353
+ // disable NHWC for float64 input.
354
+ if (!at::detail::getCUDAHooks().compiledWithMIOpen() ||
355
+ input.scalar_type() == at::kDouble ||
356
+ weight.scalar_type() == at::kDouble) {
357
+ return false;
358
+ }
359
+
360
+ bool can_use_miopen_channels_last_2d = false;
361
+ #if defined(USE_ROCM) && (ROCM_VERSION >= 40300)
362
+ // TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen
363
+ // See #64427
364
+ static c10::optional<bool> PYTORCH_MIOPEN_SUGGEST_NHWC = c10::utils::check_env("PYTORCH_MIOPEN_SUGGEST_NHWC");
365
+
366
+ auto input_memory_format = input.suggest_memory_format();
367
+ auto weight_memory_format = weight.suggest_memory_format();
368
+
369
+ can_use_miopen_channels_last_2d = PYTORCH_MIOPEN_SUGGEST_NHWC && *PYTORCH_MIOPEN_SUGGEST_NHWC && (
370
+ ( (input_memory_format == at::MemoryFormat::ChannelsLast) ||
371
+ (weight_memory_format == at::MemoryFormat::ChannelsLast) )
372
+ );
373
+ #endif
374
+
375
+ bool can_use_miopen_channels_last_3d = false;
376
+
377
+ return can_use_miopen_channels_last_2d || can_use_miopen_channels_last_3d;
378
+ }
379
+
380
+ static inline bool mkldnn_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
381
+
382
+ // disable NHWC for float64 input.
383
+ if (input.scalar_type() == at::kDouble ||
384
+ weight.scalar_type() == at::kDouble) {
385
+ return false;
386
+ }
387
+
388
+ // disable NHWC for MkldnnCPU tensor.
389
+ if (input.is_mkldnn() || weight.is_mkldnn()) {
390
+ return false;
391
+ }
392
+
393
+ auto input_memory_format = input.suggest_memory_format();
394
+ auto weight_memory_format = weight.suggest_memory_format();
395
+
396
+ bool can_use_mkldnn_channels_last_2d =
397
+ (input_memory_format == at::MemoryFormat::ChannelsLast) ||
398
+ (weight_memory_format == at::MemoryFormat::ChannelsLast);
399
+
400
+ bool can_use_mkldnn_channels_last_3d =
401
+ (input_memory_format == at::MemoryFormat::ChannelsLast3d) ||
402
+ (weight_memory_format == at::MemoryFormat::ChannelsLast3d);
403
+
404
+ return can_use_mkldnn_channels_last_2d || can_use_mkldnn_channels_last_3d;
405
+ }
406
+
407
+ static inline bool thnn_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
408
+
409
+ auto input_memory_format = input.suggest_memory_format();
410
+ auto weight_memory_format = weight.suggest_memory_format();
411
+
412
+ bool can_use_thnn_channels_last_2d = input.device().is_cpu() && (
413
+ (input_memory_format == at::MemoryFormat::ChannelsLast) || (
414
+ weight_memory_format == at::MemoryFormat::ChannelsLast));
415
+
416
+ return can_use_thnn_channels_last_2d;
417
+ }
418
+
419
+ static inline bool xpu_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
420
+
421
+ // check layout only for xpu tensor.
422
+ if (!input.is_xpu() || !weight.is_xpu()) {
423
+ return false;
424
+ }
425
+
426
+ // disable NHWC for float64 input.
427
+ if (input.scalar_type() == at::kDouble ||
428
+ weight.scalar_type() == at::kDouble) {
429
+ return false;
430
+ }
431
+
432
+ auto input_memory_format = input.suggest_memory_format();
433
+ auto weight_memory_format = weight.suggest_memory_format();
434
+
435
+ bool can_use_xpu_channels_last_2d =
436
+ (input_memory_format == at::MemoryFormat::ChannelsLast) ||
437
+ (weight_memory_format == at::MemoryFormat::ChannelsLast);
438
+
439
+ bool can_use_xpu_channels_last_3d =
440
+ (input_memory_format == at::MemoryFormat::ChannelsLast3d) ||
441
+ (weight_memory_format == at::MemoryFormat::ChannelsLast3d);
442
+
443
+ return can_use_xpu_channels_last_2d || can_use_xpu_channels_last_3d;
444
+ }
445
+
446
+ } // namespace at::native
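As a quick sanity check of the per-dimension arithmetic in _conv_output_size, here is a standalone sketch that re-implements the formula locally (it does not include this header; the shapes are made up for illustration):

#include <cstdint>
#include <iostream>

// out = (in + 2*pad - (dilation*(kernel-1) + 1)) / stride + 1, applied per spatial dim
int64_t conv_out_dim(int64_t in, int64_t kernel, int64_t pad, int64_t stride, int64_t dilation) {
  const int64_t effective_kernel = dilation * (kernel - 1) + 1;
  return (in + 2 * pad - effective_kernel) / stride + 1;
}

int main() {
  // Input 1x3x32x32, weight 16x3x3x3, padding 1, stride 2, dilation 1:
  // batch and channel dims come straight from input[0] and weight[0] (1 and 16).
  std::cout << conv_out_dim(/*in=*/32, /*kernel=*/3, /*pad=*/1, /*stride=*/2, /*dilation=*/1)
            << std::endl;  // prints 16, so the full output shape is 1x16x16x16
}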
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ConvolutionMM3d.h ADDED
@@ -0,0 +1,14 @@
+ #include <ATen/core/Tensor.h>
+
+ namespace at::native {
+
+ std::tuple<Tensor, Tensor, Tensor> slow_conv3d_backward_cpu(
+     const Tensor& grad_output,
+     const Tensor& self,
+     const Tensor& weight,
+     IntArrayRef kernel_size,
+     IntArrayRef stride,
+     IntArrayRef padding,
+     std::array<bool, 3> output_mask);
+
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Copy.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+
+ namespace at {
+
+ class Tensor;
+ struct TensorIterator;
+ class TensorBase;
+
+ namespace native {
+
+ using copy_fn = void (*)(TensorIterator&, bool non_blocking);
+
+ DECLARE_DISPATCH(copy_fn, copy_stub);
+
+ TORCH_API void copy_ignoring_overlaps(const TensorBase &dst, const TensorBase &src);
+
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Cross.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+
+ namespace at {
+ class Tensor;
+
+ namespace native {
+
+ using cross_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const int64_t d);
+
+ DECLARE_DISPATCH(cross_fn, cross_stub);
+
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/DilatedConvolutionUtils.h ADDED
@@ -0,0 +1,229 @@
1
+ #pragma once
2
+
3
+ #include <algorithm>
4
+ #include <vector>
5
+
6
+ #include <ATen/div_rtn.h>
7
+ #include <ATen/core/Tensor.h>
8
+ #include <c10/util/irange.h>
9
+
10
+ #define TORCH_CHECK_DIM_SIZE(T, DIM, DIM_SIZE, SIZE) \
11
+ TORCH_CHECK( \
12
+ T.dim() == DIM && T.size(DIM_SIZE) == SIZE, \
13
+ "Need " #T " of dimension ", \
14
+ DIM, \
15
+ " and " #T ".size[", \
16
+ DIM_SIZE, \
17
+ "] == ", \
18
+ SIZE, \
19
+ " but got input to be of shape ", \
20
+ T.sizes())
21
+
22
+ namespace at::native::internal {
23
+ namespace {
24
+ inline bool all_positive(IntArrayRef& arr) {
25
+ return std::all_of(
26
+ arr.begin(), arr.end(), [](int64_t item) { return item > 0; });
27
+ }
28
+
29
+ inline bool all_nonnegative(std::vector<int64_t>& arr) {
30
+ return std::all_of(
31
+ arr.begin(), arr.end(), [](int64_t item) { return item >= 0; });
32
+ }
33
+
34
+ } // namespace
35
+
36
+ // calculate the rear part of output tensor sizes
37
+ template <int64_t dim>
38
+ std::vector<int64_t> get_output_size(
39
+ const Tensor& input,
40
+ IntArrayRef kernel_size,
41
+ IntArrayRef stride_size,
42
+ IntArrayRef pad_size,
43
+ IntArrayRef dilation_size) {
44
+ std::vector<int64_t> sizes;
45
+ for (const auto index : c10::irange(dim)) {
46
+ sizes.push_back(
47
+ div_rtn<int64_t>(
48
+ input.size(index + input.dim() - dim) + 2 * pad_size[index] -
49
+ (dilation_size[index] * (kernel_size[index] - 1) + 1),
50
+ stride_size[index]) +
51
+ 1);
52
+ }
53
+ return sizes;
54
+ }
55
+
56
+ // calculate the sizes of output tensor
57
+ template <int64_t dim>
58
+ std::vector<int64_t> get_output_size(
59
+ const Tensor& input,
60
+ const Tensor& weight,
61
+ IntArrayRef kernel_size,
62
+ IntArrayRef stride_size,
63
+ IntArrayRef pad_size,
64
+ IntArrayRef dilation_size) {
65
+ auto output_size = get_output_size<dim>(
66
+ input, kernel_size, stride_size, pad_size, dilation_size);
67
+ output_size.insert(output_size.begin(), weight.size(0));
68
+ if (input.dim() == dim + 2) {
69
+ output_size.insert(output_size.begin(), input.size(0));
70
+ }
71
+ return output_size;
72
+ }
73
+ /*
74
+ slow_conv_dilated_shape_check - check user-input to dilated convolution
75
+ forward and backward functions.
76
+ */
77
+ template <int64_t dim>
78
+ void slow_conv_dilated_shape_check(
79
+ const Tensor& input,
80
+ const Tensor& weight,
81
+ const Tensor& bias,
82
+ const Tensor& grad_output,
83
+ IntArrayRef kernel_size,
84
+ IntArrayRef stride_size,
85
+ IntArrayRef pad_size,
86
+ IntArrayRef dilation_size) {
87
+ /*
88
+ When the following tensors are defined:
89
+
90
+ bias, grad_weight, grad_output
91
+
92
+ then these are assumed to be contiguous without checking
93
+ because of these tensors are made contiguous by calling
94
+ .contiguous() method or by resizing of zero-sized tensors in
95
+ forward/backward functions.
96
+
97
+ When grad_weight is defined then it is assumed without
98
+ checking to have the same shape as weight, see backward
99
+ functions.
100
+ */
101
+ // Check size arguments
102
+ TORCH_CHECK(
103
+ kernel_size.size() == dim,
104
+ "kernel sizes length should be ",
105
+ dim,
106
+ ", but got ",
107
+ kernel_size.size());
108
+ TORCH_CHECK(
109
+ stride_size.size() == dim,
110
+ "strides length should be ",
111
+ dim,
112
+ ", but got ",
113
+ stride_size.size());
114
+ TORCH_CHECK(
115
+ dilation_size.size() == dim,
116
+ "dilations length should be ",
117
+ dim,
118
+ ", but got ",
119
+ dilation_size.size());
120
+ TORCH_CHECK(
121
+ pad_size.size() == dim,
122
+ "pads length should be ",
123
+ dim,
124
+ ", but got ",
125
+ pad_size.size());
126
+
127
+ TORCH_CHECK(
128
+ all_positive(kernel_size),
129
+ "kernel size should be greater than zero, but got ",
130
+ kernel_size);
131
+ TORCH_CHECK(
132
+ all_positive(stride_size),
133
+ "stride should be greater than zero, but got ",
134
+ stride_size);
135
+ TORCH_CHECK(
136
+ all_positive(dilation_size),
137
+ "dilation should be greater than zero, but got ",
138
+ dilation_size);
139
+
140
+ // check input
141
+ TORCH_CHECK(input.defined(), "input must be defined");
142
+ bool is_batch = input.dim() == dim + 2;
143
+ int64_t n = (is_batch ? 2 : 1);
144
+ int64_t ndim = n + dim;
145
+ if (!is_batch) {
146
+ // input dim has to be dim + 1 if not batched
147
+ TORCH_CHECK(
148
+ input.dim() == dim + 1,
149
+ "input must be 4D or 5D tensor but got ",
150
+ input.dim(),
151
+ "D tensor");
152
+ }
153
+
154
+ // check output sizes
155
+ auto output_size = get_output_size<dim>(
156
+ input, kernel_size, stride_size, pad_size, dilation_size);
157
+
158
+ TORCH_CHECK(
159
+ all_nonnegative(output_size),
160
+ "calculated output size ",
161
+ output_size,
162
+ " is too small (all sizes must be non-negative)");
163
+
164
+ // check weight
165
+ TORCH_CHECK(weight.defined(), "weight must be defined");
166
+ TORCH_CHECK(
167
+ weight.dim() == dim + 2,
168
+ "weight must be ",
169
+ dim + 2,
170
+ "D tensor but got ",
171
+ weight.dim(),
172
+ "D tensor dim=",
173
+ dim);
174
+ TORCH_CHECK(
175
+ weight.sizes().slice(2) == kernel_size,
176
+ "weight[2:] shape ",
177
+ weight.sizes().slice(2),
178
+ " must be equal to kernel_size ",
179
+ kernel_size);
180
+
181
+ TORCH_CHECK_DIM_SIZE(input, input.dim(), (is_batch ? 1 : 0), weight.size(1));
182
+
183
+ // check bias when present
184
+ if (bias.defined()) {
185
+ TORCH_CHECK(
186
+ bias.dim() == 1,
187
+ "bias must be 1D tensor but got ",
188
+ bias.dim(),
189
+ "D tensor");
190
+ TORCH_CHECK_DIM_SIZE(bias, 1, 0, weight.size(0));
191
+ }
192
+
193
+ // check grad_output when present
194
+ if (grad_output.defined()) {
195
+ TORCH_CHECK(
196
+ grad_output.dim() == ndim,
197
+ "grad_output must be ",
198
+ ndim,
199
+ "D tensor but got ",
200
+ grad_output.dim(),
201
+ "D tensor");
202
+ if (is_batch) {
203
+ TORCH_CHECK(
204
+ grad_output.size(0) == input.size(0),
205
+ "grad_output.size(0)=",
206
+ grad_output.size(0),
207
+ " must be input.size(0)=",
208
+ input.size(0));
209
+ }
210
+ TORCH_CHECK(
211
+ grad_output.size(n - 1) == weight.size(0),
212
+ "grad_output.size(",
213
+ n - 1,
214
+ ")=",
215
+ grad_output.size(n - 1),
216
+ " must be weight.size(0)=",
217
+ weight.size(0));
218
+ TORCH_CHECK(
219
+ grad_output.sizes().slice(n) == output_size,
220
+ "grad_output[",
221
+ n,
222
+ ":] shape",
223
+ grad_output.sizes().slice(n),
224
+ " must be equal to output size ",
225
+ output_size);
226
+ }
227
+ }
228
+
229
+ } // namespace at::native::internal
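get_output_size divides with div_rtn (round toward negative infinity) rather than plain truncating division, which helps an undersized input surface as a negative computed size that the all_nonnegative check can then reject. A small standalone sketch of that rounding behaviour, re-implementing the rounding locally rather than including ATen/div_rtn.h:

#include <cstdint>
#include <iostream>

// Division that rounds toward negative infinity, like ATen's div_rtn.
int64_t div_round_to_neg_inf(int64_t x, int64_t y) {
  int64_t q = x / y;
  int64_t r = x % y;
  if (r != 0 && ((r < 0) != (y < 0))) --q;
  return q;
}

int main() {
  // 2x2 input, 5x5 kernel, no padding, stride 1:
  // numerator = 2 - 5 = -3, so the computed "output size" is div_rtn(-3, 1) + 1 = -2,
  // which fails the all_nonnegative() check with a clear error message.
  std::cout << div_round_to_neg_inf(-3, 1) + 1 << std::endl;  // -2
  // With stride 2, truncating division would give 0 here; rounding down gives -1.
  std::cout << div_round_to_neg_inf(-3, 2) + 1 << std::endl;  // -1
}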
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h ADDED
@@ -0,0 +1,315 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/DeviceType.h>
4
+ #include <c10/macros/Macros.h>
5
+
6
+ #include <atomic>
7
+ #include <utility>
8
+
9
+ // Implements instruction set specific function dispatch.
10
+ //
11
+ // Kernels that may make use of specialized instruction sets (e.g. AVX2) are
12
+ // compiled multiple times with different compiler flags (e.g. -mavx2). A
13
+ // DispatchStub contains a table of function pointers for a kernel. At runtime,
14
+ // the fastest available kernel is chosen based on the features reported by
15
+ // cpuinfo.
16
+ //
17
+ // Example:
18
+ //
19
+ // In native/MyKernel.h:
20
+ // using fn_type = void(*)(const Tensor& x);
21
+ // DECLARE_DISPATCH(fn_type, stub);
22
+ //
23
+ // In native/MyKernel.cpp
24
+ // DEFINE_DISPATCH(stub);
25
+ //
26
+ // In native/cpu/MyKernel.cpp:
27
+ // namespace {
28
+ // // use anonymous namespace so that different cpu versions won't conflict
29
+ // void kernel(const Tensor& x) { ... }
30
+ // }
31
+ // REGISTER_DISPATCH(stub, &kernel);
32
+ //
33
+ // To call:
34
+ // stub(kCPU, tensor);
35
+ //
36
+ // TODO: CPU instruction set selection should be folded into whatever
37
+ // the main dispatch mechanism is.
38
+
39
+ // ignore warnings about DispatchStub::DEFAULT, AVX, AVX2 defined elsewhere
40
+ C10_CLANG_DIAGNOSTIC_PUSH()
41
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wundefined-var-template")
42
+
43
+ namespace at::native {
44
+
45
+ enum class CPUCapability {
46
+ DEFAULT = 0,
47
+ #if defined(HAVE_VSX_CPU_DEFINITION)
48
+ VSX = 1,
49
+ #elif defined(HAVE_ZVECTOR_CPU_DEFINITION)
50
+ ZVECTOR = 1,
51
+ #else
52
+ AVX2 = 1,
53
+ AVX512 = 2,
54
+ #endif
55
+ NUM_OPTIONS
56
+ };
57
+
58
+ CPUCapability get_cpu_capability();
59
+
60
+ template <typename FnPtr, typename T>
61
+ struct DispatchStub;
62
+
63
+ /**
64
+ * The sole purpose of this class is to outline methods that don't need to be
65
+ * specialized or otherwise inlined and duplicated (by the compiler due to
66
+ * template expansion), since it causes size bloat if there are a significant
67
+ * number of specialization of the DispatchStub<> class.
68
+ */
69
+ struct TORCH_API DispatchStubImpl {
70
+ void* get_call_ptr(
71
+ c10::DeviceType device_type
72
+ , void *DEFAULT
73
+ #ifdef HAVE_AVX512_CPU_DEFINITION
74
+ , void *AVX512
75
+ #endif
76
+ #ifdef HAVE_AVX2_CPU_DEFINITION
77
+ , void *AVX2
78
+ #endif
79
+ #ifdef HAVE_VSX_CPU_DEFINITION
80
+ , void *VSX
81
+ #endif
82
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
83
+ , void *ZVECTOR
84
+ #endif
85
+ );
86
+
87
+ /**
88
+ * The CPU Dispatch actual method is chosen in decreasing order of preference by
89
+ * DispatchStubImpl::choose_cpu_impl() in case none is found by
90
+ * DispatchStubImpl::get_call_ptr() in cpu_dispatch_ptr.
91
+ */
92
+ void* choose_cpu_impl(
93
+ void *DEFAULT
94
+ #ifdef HAVE_AVX512_CPU_DEFINITION
95
+ , void *AVX512
96
+ #endif
97
+ #ifdef HAVE_AVX2_CPU_DEFINITION
98
+ , void *AVX2
99
+ #endif
100
+ #ifdef HAVE_VSX_CPU_DEFINITION
101
+ , void *VSX
102
+ #endif
103
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
104
+ , void *ZVECTOR
105
+ #endif
106
+ );
107
+
108
+ // Fixing dispatch error in Windows debug builds.
109
+ // See https://github.com/pytorch/pytorch/issues/22681 for more details.
110
+ #if defined(_MSC_VER) && defined(_DEBUG)
111
+ std::atomic<void*> cpu_dispatch_ptr;
112
+ void* cuda_dispatch_ptr;
113
+ void* hip_dispatch_ptr;
114
+ void* mps_dispatch_ptr;
115
+ void* privateuse1_dispatch_ptr;
116
+ #else
117
+ std::atomic<void*> cpu_dispatch_ptr{nullptr};
118
+ void* cuda_dispatch_ptr = nullptr;
119
+ void* hip_dispatch_ptr = nullptr;
120
+ void* mps_dispatch_ptr = nullptr;
121
+ void* privateuse1_dispatch_ptr = nullptr;
122
+ #endif
123
+ };
124
+
125
+ template <typename rT, typename T, typename... Args>
126
+ struct DispatchStub<rT (*)(Args...), T> {
127
+ using FnPtr = rT (*) (Args...);
128
+
129
+ DispatchStub() = default;
130
+ DispatchStub(const DispatchStub&) = delete;
131
+ DispatchStub& operator=(const DispatchStub&) = delete;
132
+
133
+ private:
134
+ FnPtr get_call_ptr(c10::DeviceType device_type) {
135
+ return reinterpret_cast<FnPtr>(
136
+ impl.get_call_ptr(device_type
137
+ , reinterpret_cast<void*>(DEFAULT)
138
+ #ifdef HAVE_AVX512_CPU_DEFINITION
139
+ , reinterpret_cast<void*>(AVX512)
140
+ #endif
141
+ #ifdef HAVE_AVX2_CPU_DEFINITION
142
+ , reinterpret_cast<void*>(AVX2)
143
+ #endif
144
+ #ifdef HAVE_VSX_CPU_DEFINITION
145
+ , reinterpret_cast<void*>(VSX)
146
+ #endif
147
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
148
+ , reinterpret_cast<void*>(ZVECTOR)
149
+ #endif
150
+ )
151
+ );
152
+ }
153
+
154
+ public:
155
+ template <typename... ArgTypes>
156
+ rT operator()(c10::DeviceType device_type, ArgTypes&&... args) {
157
+ FnPtr call_ptr = get_call_ptr(device_type);
158
+ return (*call_ptr)(std::forward<ArgTypes>(args)...);
159
+ }
160
+
161
+ void set_cuda_dispatch_ptr(FnPtr fn_ptr) {
162
+ impl.cuda_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
163
+ }
164
+
165
+ void set_hip_dispatch_ptr(FnPtr fn_ptr) {
166
+ impl.hip_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
167
+ }
168
+
169
+ void set_mps_dispatch_ptr(FnPtr fn_ptr) {
170
+ impl.mps_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
171
+ }
172
+
173
+ void set_privateuse1_dispatch_ptr(FnPtr fn_ptr) {
174
+ impl.privateuse1_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
175
+ }
176
+
177
+ static TORCH_API FnPtr DEFAULT;
178
+ #ifdef HAVE_AVX512_CPU_DEFINITION
179
+ static TORCH_API FnPtr AVX512;
180
+ #endif
181
+ #ifdef HAVE_AVX2_CPU_DEFINITION
182
+ static TORCH_API FnPtr AVX2;
183
+ #endif
184
+ #ifdef HAVE_VSX_CPU_DEFINITION
185
+ static TORCH_API FnPtr VSX;
186
+ #endif
187
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
188
+ static TORCH_API FnPtr ZVECTOR;
189
+ #endif
190
+ private:
191
+ DispatchStubImpl impl;
192
+ };
193
+
194
+ namespace {
195
+ template <typename DispatchStub>
196
+ struct RegisterCUDADispatch {
197
+ RegisterCUDADispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
198
+ stub.set_cuda_dispatch_ptr(value);
199
+ }
200
+ };
201
+
202
+ template <typename DispatchStub>
203
+ struct RegisterMPSDispatch {
204
+ RegisterMPSDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
205
+ stub.set_mps_dispatch_ptr(value);
206
+ }
207
+ };
208
+
209
+ template <typename DispatchStub>
210
+ struct RegisterHIPDispatch {
211
+ RegisterHIPDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
212
+ // TODO: make this point at hip_dispatch_ptr
213
+ stub.set_cuda_dispatch_ptr(value);
214
+ }
215
+ };
216
+
217
+ template <typename DispatchStub>
218
+ struct RegisterPRIVATEUSE1Dispatch {
219
+ RegisterPRIVATEUSE1Dispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
220
+ stub.set_privateuse1_dispatch_ptr(value);
221
+ }
222
+ };
223
+
224
+ } // anonymous namespace
225
+ // Compiler will complain if you put things like std::tuple<Tensor, Tensor> in
226
+ // the `fn` argument of DECLARE_DISPATCH. Some possible workarounds, e.g.,
227
+ // adding parentheses and using helper struct to get rid of the parentheses, do
228
+ // not work with MSVC. So do a `using`-declaration if you need to pass in such
229
+ // `fn`, e.g., grid_sampler_2d_backward_cpu_kernel in GridSampleKernel.h.
230
+ #define DECLARE_DISPATCH(fn, name) \
231
+ struct name : DispatchStub<fn, name> { \
232
+ name() = default; \
233
+ name(const name&) = delete; \
234
+ name& operator=(const name&) = delete; \
235
+ }; \
236
+ extern TORCH_API struct name name
237
+
238
+ #define DEFINE_DISPATCH(name) struct name name
239
+
240
+ #define REGISTER_ARCH_DISPATCH(name, arch, fn) \
241
+ template <> name::FnPtr TORCH_API DispatchStub<name::FnPtr, struct name>::arch = fn;
242
+
243
+ #ifdef HAVE_AVX512_CPU_DEFINITION
244
+ #define REGISTER_AVX512_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX512, fn)
245
+ #else
246
+ #define REGISTER_AVX512_DISPATCH(name, fn)
247
+ #endif
248
+
249
+ #ifdef HAVE_AVX2_CPU_DEFINITION
250
+ #define REGISTER_AVX2_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX2, fn)
251
+ #else
252
+ #define REGISTER_AVX2_DISPATCH(name, fn)
253
+ #endif
254
+
255
+ #ifdef HAVE_VSX_CPU_DEFINITION
256
+ #define REGISTER_VSX_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, VSX, fn)
257
+ #else
258
+ #define REGISTER_VSX_DISPATCH(name, fn)
259
+ #endif
260
+
261
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
262
+ #define REGISTER_ZVECTOR_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, ZVECTOR, fn)
263
+ #else
264
+ #define REGISTER_ZVECTOR_DISPATCH(name, fn)
265
+ #endif
266
+
267
+ // Macro to register the same kernel for all CPU arch types. This is useful
268
+ // if a kernel does not benefit from being recompiled across different arch types.
269
+ #define REGISTER_ALL_CPU_DISPATCH(name, fn) \
270
+ REGISTER_ARCH_DISPATCH(name, DEFAULT, fn) \
271
+ REGISTER_AVX512_DISPATCH(name, fn) \
272
+ REGISTER_AVX2_DISPATCH(name, fn) \
273
+ REGISTER_VSX_DISPATCH(name, fn) \
274
+ REGISTER_ZVECTOR_DISPATCH(name, fn)
275
+
276
+ #define REGISTER_NO_CPU_DISPATCH(name) \
277
+ REGISTER_ALL_CPU_DISPATCH(name, nullptr)
278
+
279
+ #define REGISTER_CUDA_DISPATCH(name, fn) \
280
+ static RegisterCUDADispatch<struct name> name ## __register(name, fn);
281
+
282
+ #define REGISTER_HIP_DISPATCH(name, fn) \
283
+ static RegisterHIPDispatch<struct name> name ## __register(name, fn);
284
+
285
+ #define REGISTER_MPS_DISPATCH(name, fn) \
286
+ static RegisterMPSDispatch<struct name> name ## __register(name, fn);
287
+
288
+ #define REGISTER_PRIVATEUSE1_DISPATCH(name, fn) \
289
+ static RegisterPRIVATEUSE1Dispatch<struct name> name ## __register(name, fn);
290
+
291
+ // NB: This macro must be used in an actual 'cu' file; if you try using
292
+ // it from a 'cpp' file it will not work!
293
+ #if defined(__CUDACC__)
294
+ #define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn)
295
+ #elif defined(__HIPCC__)
296
+ // TODO: cut this over to HIP dispatch once we stop pretending that CUDA
297
+ // is HIP in the PyTorch HIPify build.
298
+ #define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn)
299
+ // #define REGISTER_DISPATCH(name, fn) REGISTER_HIP_DISPATCH(name, fn)
300
+ #elif defined(__OBJC__) && defined(USE_MPS)
301
+ // NB: this macro must be used from a 'mm' file in order to dispatch a MPS kernel
302
+ #define REGISTER_DISPATCH(name, fn) REGISTER_MPS_DISPATCH(name, fn)
303
+ #elif defined(CPU_CAPABILITY)
304
+ // REGISTER_DISPATCH now dispatches an AVX512 kernel to nullptr but registers other dispatches.
305
+ // ALSO_REGISTER_AVX512_DISPATCH should be used for ensuring AVX512 dispatch, among others.
306
+ #ifdef CPU_CAPABILITY_AVX512
307
+ #define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, nullptr)
308
+ #else
309
+ #define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn)
310
+ #endif
311
+ #define ALSO_REGISTER_AVX512_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn)
312
+ #endif
313
+ } // namespace at::native
314
+
315
+ C10_CLANG_DIAGNOSTIC_POP()
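The registration macros above amount to filling per-backend slots of a function-pointer table during static initialization. A toy, dependency-free analogue of that mechanism (purely illustrative; names like add_stub_table are made up and this is not the real DispatchStub):

#include <array>
#include <cstdio>

enum class Device { CPU = 0, CUDA = 1 };

using add_fn = void (*)(int, int);

// One slot per device type; the REGISTER_*_DISPATCH macros play the role of
// assigning into such a table before main() runs.
std::array<add_fn, 2> add_stub_table{};

struct RegisterAdd {
  RegisterAdd(Device d, add_fn fn) { add_stub_table[static_cast<int>(d)] = fn; }
};

void add_cpu(int a, int b) { std::printf("cpu: %d\n", a + b); }
static RegisterAdd register_add_cpu(Device::CPU, &add_cpu);  // static-init registration

// The stub itself just indexes the table with the device type and forwards the args.
void add_stub(Device d, int a, int b) { add_stub_table[static_cast<int>(d)](a, b); }

int main() {
  add_stub(Device::CPU, 2, 3);  // dispatches to add_cpu, prints "cpu: 5"
}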
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/DistributionTemplates.h ADDED
@@ -0,0 +1,394 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/Dispatch_v2.h>
6
+ #include <ATen/Generator.h>
7
+ #include <ATen/ExpandUtils.h>
8
+ #include <ATen/Tensor.h>
9
+ #include <ATen/MemoryOverlap.h>
10
+ #include <ATen/NamedTensorUtils.h>
11
+ #include <ATen/native/Resize.h>
12
+ #include <ATen/native/TensorIterator.h>
13
+ #include <c10/util/Optional.h>
14
+ #include <limits>
15
+ #include <cmath>
16
+
17
+ #ifndef AT_PER_OPERATOR_HEADERS
18
+ #include <ATen/Functions.h>
19
+ #else
20
+ #include <ATen/ops/empty_like.h>
21
+ #include <ATen/ops/empty.h>
22
+ #include <ATen/ops/full.h>
23
+ #include <ATen/ops/view_as_real.h>
24
+ #endif
25
+
26
+ namespace at::native::templates {
27
+
28
+ // ==================================================== Random ========================================================
29
+
30
+ // The purpose of `update_from` and `update_to` is to find the closest valid int64_t number that can be used as actual `from`.
31
+ // The current implementation of `random_` uses uint64_t arithmetics and casts the result to the target dtype(scalar_t).
32
+ // This casting can result in generating numbers that happen to be greater or equal to `to` value. For instance:
33
+ //
34
+ // auto actual = torch::empty({3, 3}, torch::half);
35
+ // actual.random_(0, 65504);
36
+ //
37
+ // If random's uint64_t arithmetics produces 65503 as a random value after casting to torch::half it becomes 65504
38
+ // and violates the requirement that random value must be less than `to`. To resolve this issue `update_from` and `update_to`
39
+ // moves `from` to the right and `to` to the left to the next closest value that won't go outside [from, to) after casting to
40
+ // the target dtype. For `to` = 65504 it moves left for (1 << (log2(to) - 11 + 1)) = 32 and becomes 65472, which is previous
41
+ // available number for torch::half dtype.
42
+ template<typename scalar_t>
43
+ int64_t update_from(int64_t from) {
44
+ static_assert(
45
+ std::is_floating_point<scalar_t>::value ||
46
+ std::is_same<scalar_t, at::Half>::value ||
47
+ std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
48
+ const auto from_plus_1 = static_cast<int64_t>(static_cast<scalar_t>(from + 1));
49
+ if (from_plus_1 < from) {
50
+ int64_t from_ = std::abs(from + 1);
51
+ int n = 0;
52
+ while (from_ >>= 1) ++n;
53
+ // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
54
+ from = from_plus_1 + (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
55
+ }
56
+ return from;
57
+ }
58
+
59
+ template<typename scalar_t>
60
+ int64_t update_to(int64_t to) {
61
+ static_assert(
62
+ std::is_floating_point<scalar_t>::value ||
63
+ std::is_same<scalar_t, at::Half>::value ||
64
+ std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
65
+ const auto to_minus_1 = static_cast<int64_t>(static_cast<scalar_t>(to - 1));
66
+ if (to_minus_1 >= to) {
67
+ int64_t to_ = std::abs(to - 1);
68
+ int n = 0;
69
+ while (to_ >>= 1) ++n;
70
+ // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
71
+ to = to_minus_1 - (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
72
+ }
73
+ return to;
74
+ }
75
+
76
+ // Return earlier for not invoking kernel.
77
+ // See https://github.com/pytorch/pytorch/issues/103418 for more details
78
+ #define CHECK_EMPTY_AND_RETURN(tensor) \
79
+ if (tensor.numel() == 0) { \
80
+ return tensor; \
81
+ }
82
+
83
+ template<template<typename> class random_kernel, typename RNG>
84
+ at::Tensor& random_impl(at::Tensor& self, c10::optional<Generator> generator) {
85
+ CHECK_EMPTY_AND_RETURN(self);
86
+ auto iter = at::TensorIterator::borrowing_nullary_op(self);
87
+ random_kernel<RNG>()(iter, generator);
88
+ return self;
89
+ }
90
+
91
+ #define CHECK_OUT_OF_BOUNDS(var, name, min, max, dtype) \
92
+ TORCH_CHECK(var >= min && var <= max, name , " is out of bounds for ", dtype); \
93
+
94
+ #define WARN_OUT_OF_BOUNDS(var, name, digits, dtype) \
95
+ if (var < -(1LL << digits) || var > (1LL << digits)) { \
96
+ TORCH_WARN(name , " is out of bounds [-(2^", digits, "), 2^", digits, "]. ", \
97
+ "Due to precision limitations ", dtype, " can support discrete uniform distribution only within this range. ", \
98
+ "This warning will become an error in version 1.7 release, please fix the code in advance"); \
99
+ }
100
+
101
+ static void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMeta dtype) {
102
+ const auto scalar_type = typeMetaToScalarType(dtype);
103
+ if (isFloatingType(scalar_type)) {
104
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "check_random_fp_bounds", [&] {
105
+ const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
106
+ const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
107
+ CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
108
+ CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype);
109
+
110
+ constexpr auto digits = std::numeric_limits<scalar_t>::digits;
111
+ WARN_OUT_OF_BOUNDS(from, "from", digits, dtype);
112
+ WARN_OUT_OF_BOUNDS(to_inc, "to - 1", digits, dtype);
113
+ });
114
+ } else if (scalar_type == kUInt64) {
115
+ // When you do a comparison between int64_t and uint64_t, the usual
116
+ // arithmetic conversions say that the int64_t value is promoted to
117
+ // unsigned. But this conversion wraps around: if I had -1 as my int64_t,
118
+ // then it will promote to 0xFFFFFFFFFFFFFFFF in uint64_t. This is never
119
+ // the right thing to do.
120
+ CHECK_OUT_OF_BOUNDS(from, "from", 0, INT64_MAX, dtype);
121
+ CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", 0, INT64_MAX, dtype);
122
+ } else if (isIntegralType(scalar_type, /*includeBool=*/true)) {
123
+ AT_DISPATCH_V2(scalar_type, "check_random_integral_bounds", AT_WRAP([&]() {
124
+ const auto min = static_cast<int64_t>(std::numeric_limits<scalar_t>::lowest());
125
+ const auto max = static_cast<int64_t>(std::numeric_limits<scalar_t>::max());
126
+ CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
127
+ CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype);
128
+ }), AT_EXPAND(AT_INTEGRAL_TYPES), kUInt16, kUInt32, kBool);
129
+ } else {
130
+ TORCH_CHECK(false, "check_random_bounds handles only integral, floating-point and boolean types");
131
+ }
132
+ }
133
+
134
+ template<template<typename> class random_from_to_kernel, typename RNG>
135
+ at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional<int64_t> to_opt, c10::optional<Generator> generator) {
136
+ uint64_t range = 0;
137
+ auto iter = at::TensorIterator::borrowing_nullary_op(self);
138
+ if (to_opt.has_value()) {
139
+ // [from, to)
140
+ int64_t to = *to_opt;
141
+ TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to);
142
+ if (isFloatingType(iter.dtype())) {
143
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_update_from_to", [&] {
144
+ from = update_from<scalar_t>(from);
145
+ to = update_to<scalar_t>(to);
146
+ TORCH_CHECK(from < to, "random_ expects 'from' casted to dtype to be less than 'to' casted to dtype, but got from=", from, " >= to=", to);
147
+ });
148
+ }
149
+ check_from_to_in_range(from, to - 1, self.dtype());
150
+ CHECK_EMPTY_AND_RETURN(self);
151
+ range = static_cast<uint64_t>(to) - static_cast<uint64_t>(from);
152
+ random_from_to_kernel<RNG>()(iter, range, from, generator);
153
+ } else if (from != std::numeric_limits<int64_t>::lowest()) {
154
+ // [from, std::numeric_limits<int64_t>::max()]
155
+ int64_t to_inc = 0;
156
+ if (isFloatingType(iter.dtype())) {
157
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_from_to_range_calc", [&] {
158
+ constexpr int64_t scalar_t_max = static_cast<int64_t>(1) << std::numeric_limits<scalar_t>::digits;
159
+ to_inc = scalar_t_max > std::numeric_limits<int64_t>::max() ? std::numeric_limits<int64_t>::max() : static_cast<int64_t>(scalar_t_max);
160
+ from = update_from<scalar_t>(from);
161
+ TORCH_CHECK(from < to_inc, "random_ expects 'from' casted to dtype to be less than or equal to 'to_inc' casted to dtype, but got from=", from, " > to_inc=", to_inc);
162
+ });
163
+ } else if (isIntegralType(iter.dtype(), /*includeBool=*/true)) {
164
+ AT_DISPATCH_V2(self.scalar_type(), "random_from_to_range_calc", AT_WRAP([&] {
165
+ if constexpr (std::is_same_v<scalar_t, bool>) {
166
+ to_inc = static_cast<int64_t>(true);
167
+ } else {
168
+ to_inc = static_cast<int64_t>(std::numeric_limits<scalar_t>::max());
169
+ }
170
+ }), AT_EXPAND(AT_INTEGRAL_TYPES_V2), kBool);
171
+ } else {
172
+ TORCH_CHECK(false, "random_from_to_impl handles only integral, floating-point and boolean types");
173
+ }
174
+ check_from_to_in_range(from, to_inc, self.dtype());
175
+ CHECK_EMPTY_AND_RETURN(self);
176
+ range = static_cast<uint64_t>(to_inc) - static_cast<uint64_t>(from) + 1;
177
+ random_from_to_kernel<RNG>()(iter, range, from, generator);
178
+ } else {
179
+ // [std::numeric_limits<int64_t>::lowest(), std::numeric_limits<int64_t>::max()]
180
+ // range = 2^64
181
+ CHECK_EMPTY_AND_RETURN(self);
182
+ random_from_to_kernel<RNG>()(iter, generator);
183
+ }
184
+ return self;
185
+ }
186
+
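A minimal standalone sketch (plain C++, not part of the header above and not using ATen) of the precision issue that update_from/update_to guard against: an int64_t bound can round up when cast to a narrow floating dtype, which would leave the requested [from, to) range empty or inverted unless the bound is nudged.

#include <cstdint>
#include <iostream>

int main() {
  // 'to' for a hypothetical random_(from, to) call on a float tensor.
  const int64_t to = (int64_t{1} << 30) - 3;
  // Round-trip to - 1 through float (24 mantissa bits): it rounds up past 'to'.
  const int64_t to_minus_1 = static_cast<int64_t>(static_cast<float>(to - 1));
  std::cout << std::boolalpha
            << "to - 1 after float round trip: " << to_minus_1 << "\n"
            << "triggers update_to correction: " << (to_minus_1 >= to) << "\n";
  // Prints 1073741824 and true; update_to<float> then moves 'to' down to a
  // nearby exactly representable integer so that from < to still holds.
  return 0;
}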
187
+ // ==================================================== Normal ========================================================
188
+
189
+ #define CHECK_NORMAL_TENSOR_STD(std) \
190
+ do { \
191
+ TORCH_CHECK( \
192
+ !std.is_complex(), \
193
+ "normal expects standard deviation to be non-complex"); \
194
+ TORCH_CHECK( \
195
+ std.numel() == 0 || std.is_meta() || std.min().ge(0).item<bool>(), \
196
+ "normal expects all elements of std >= 0.0"); \
197
+ } while (0)
198
+
199
+ #define CHECK_NORMAL_STD(std) \
200
+ TORCH_CHECK(std >= 0.0, "normal expects std >= 0.0, but found std ", std);
201
+
202
+ template<template<typename> class normal_kernel, typename RNG>
203
+ Tensor& normal_impl_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
204
+ CHECK_NORMAL_STD(std);
205
+ CHECK_EMPTY_AND_RETURN(self);
206
+
207
+ if (self.is_complex()) {
208
+ auto float_tensor = at::view_as_real(self);
209
+ // variance for normal distribution of the real and imaginary values
210
+ // is half of the input variance
211
+ normal_kernel<RNG>()(float_tensor, mean, std/(std::sqrt(2)), gen);
212
+ } else {
213
+ normal_kernel<RNG>()(self, mean, std, gen);
214
+ }
215
+ return self;
216
+ }
217
+
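A small self-contained sketch of the complex branch above, under the assumption that std::mt19937 and std::normal_distribution stand in for ATen's RNG and normal_kernel: each component is drawn with std / sqrt(2), so the total variance E|z - mean|^2 comes out to std^2.

#include <cmath>
#include <complex>
#include <iostream>
#include <random>

int main() {
  std::mt19937 gen(0);
  const double mean = 1.0, stddev = 2.0;
  // Real and imaginary parts each get stddev / sqrt(2), as in normal_impl_.
  std::normal_distribution<double> part(mean, stddev / std::sqrt(2.0));
  double acc = 0.0;
  const int n = 200000;
  for (int i = 0; i < n; ++i) {
    const std::complex<double> z(part(gen), part(gen));
    acc += std::norm(z - std::complex<double>(mean, mean));
  }
  std::cout << "empirical E|z - mean|^2 ~ " << acc / n << "\n";  // ~ stddev^2 == 4
  return 0;
}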
218
+ template<template<typename> class normal_kernel, typename RNG>
219
+ Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, c10::optional<Generator> gen) {
220
+ CHECK_NORMAL_STD(std);
221
+ auto std_tensor = at::empty_like(output, MemoryFormat::Contiguous);
222
+ auto shape = at::infer_size(mean.sizes(), std_tensor.sizes());
223
+ at::native::resize_output(output, shape);
224
+ normal_impl_<normal_kernel, RNG>(output, 0, std, gen);
225
+ output.add_(mean);
226
+ return output;
227
+ }
228
+
229
+ template<template<typename> class normal_kernel, typename RNG>
230
+ Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, c10::optional<Generator> gen) {
231
+ CHECK_NORMAL_TENSOR_STD(std);
232
+ auto mean_tensor = at::full({}, mean, output.options());
233
+ auto shape = at::infer_size(mean_tensor.sizes(), std.sizes());
234
+ at::native::resize_output(output, shape);
235
+ normal_impl_<normal_kernel, RNG>(output, 0, 1, gen);
236
+ // CUDA NB: addcmul_out copies the tensor to be added into the output.
237
+ // The previous function here was addcmul_out(output, mean_tensor, output, std, 1);
238
+ // The third argument is not a constant reference and hence the samples in output are overwritten.
239
+ // Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std
240
+ output.mul_(std).add_(mean_tensor);
241
+ return output;
242
+ }
243
+
244
+ template<template<typename> class normal_kernel, typename RNG>
245
+ Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
246
+ CHECK_NORMAL_TENSOR_STD(std);
247
+ auto shape = at::infer_size(mean.sizes(), std.sizes());
248
+ at::native::resize_output(output, shape);
249
+ normal_impl_<normal_kernel, RNG>(output, 0, 1, gen);
250
+ // CUDA NB: addcmul_out copies the tensor to be added into the output.
251
+ // The previous function here was addcmul_out(output, mean, output, std, 1);
252
+ // The third argument is not a constant reference and hence the samples in output are overwritten.
253
+ // Consequently, the computation performed is mean + mean * std instead of mean + output * std
254
+ output.mul_(std).add_(mean);
255
+ return output;
256
+ }
257
+
258
+ template<template<typename> class normal_kernel, typename RNG>
259
+ Tensor normal_impl(const Tensor& mean, double std, c10::optional<Generator> gen) {
260
+ CHECK_NORMAL_STD(std);
261
+ Tensor ret = at::empty_like(mean, MemoryFormat::Contiguous);
262
+ normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
263
+ return ret;
264
+ }
265
+
266
+ template<template<typename> class normal_kernel, typename RNG>
267
+ Tensor normal_impl(double mean, const Tensor& std, c10::optional<Generator> gen) {
268
+ CHECK_NORMAL_TENSOR_STD(std);
269
+ Tensor ret = at::empty_like(std, MemoryFormat::Contiguous);
270
+ normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
271
+ return ret;
272
+ }
273
+
274
+ template<template<typename> class normal_kernel, typename RNG>
275
+ Tensor normal_impl(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
276
+ CHECK_NORMAL_TENSOR_STD(std);
277
+ auto shape = at::infer_size(mean.sizes(), std.sizes());
278
+ Tensor ret = at::empty(shape, mean.options(), MemoryFormat::Contiguous);
279
+ normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
280
+ return ret;
281
+ }
282
+
283
+ // ==================================================== Uniform =======================================================
284
+
285
+ template<template<typename> class uniform_kernel, typename RNG>
286
+ at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, c10::optional<Generator> generator) {
287
+ if (self.is_complex()) {
288
+ CHECK_EMPTY_AND_RETURN(self);
289
+ auto float_tensor = at::view_as_real(self);
290
+ uniform_impl_<uniform_kernel, RNG>(float_tensor, from, to, generator);
291
+ } else {
292
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "check_uniform_bounds", [&] {
293
+ const auto dtype = self.dtype();
294
+ const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
295
+ const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
296
+ CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
297
+ CHECK_OUT_OF_BOUNDS(to, "to", min, max, dtype);
298
+ TORCH_CHECK(from <= to, "uniform_ expects to return a [from, to) range, but found from=", from, " > to=", to);
299
+ TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
300
+ "uniform_ expects to-from <= std::numeric_limits<", toString(self.scalar_type()),
301
+ ">::max(), but found to=", to, " and from=", from,
302
+ " which result in to-from to exceed the limit");
303
+ from = std::min(std::max(from, min), max);
304
+ to = std::max(std::min(to, max), min);
305
+ });
306
+ CHECK_EMPTY_AND_RETURN(self);
307
+ auto iter = at::TensorIterator::borrowing_nullary_op(self);
308
+ uniform_kernel<RNG>()(iter, from, to, generator);
309
+ }
310
+ return self;
311
+ }
312
+
313
+ // ================================================== LogNormal =======================================================
314
+
315
+ template<template<typename> class log_normal_kernel, typename RNG>
316
+ at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, c10::optional<Generator> gen) {
317
+ TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std);
318
+ CHECK_EMPTY_AND_RETURN(self);
319
+ auto iter = TensorIterator::borrowing_nullary_op(self);
320
+ log_normal_kernel<RNG>()(iter, mean, std, gen);
321
+ return self;
322
+ }
323
+
324
+ // =================================================== Geometric ======================================================
325
+
326
+ template<template<typename> class geometric_kernel, typename RNG>
327
+ Tensor& geometric_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
328
+ TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p);
329
+ CHECK_EMPTY_AND_RETURN(self);
330
+ auto iter = TensorIterator::borrowing_nullary_op(self);
331
+ geometric_kernel<RNG>()(iter, p, gen);
332
+ return self;
333
+ }
334
+
335
+ // ================================================== Exponential =====================================================
336
+
337
+ template<template<typename> class exponential_kernel, typename RNG>
338
+ Tensor& exponential_impl_(Tensor& self, double lambda, c10::optional<Generator> gen) {
339
+ TORCH_CHECK(lambda > 0.0, "exponential_ expects lambda > 0.0, but found lambda=", lambda);
340
+ CHECK_EMPTY_AND_RETURN(self);
341
+ auto iter = TensorIterator::borrowing_nullary_op(self);
342
+ exponential_kernel<RNG>()(iter, lambda, gen);
343
+ return self;
344
+ }
345
+
346
+ // ==================================================== Cauchy ========================================================
347
+
348
+ template<template<typename> class cauchy_kernel, typename RNG>
349
+ Tensor& cauchy_impl_(Tensor& self, double median, double sigma, c10::optional<Generator> gen) {
350
+ // TODO: instead of variable name 'sigma', use 'gamma' or 'scale'
351
+ // the variance, squared sigma, is undefined for cauchy distribution
352
+ TORCH_CHECK(sigma > 0.0, "cauchy_ expects sigma > 0.0, but found sigma=", sigma);
353
+ TORCH_CHECK(at::isFloatingType(self.scalar_type()), "Cauchy distribution is a continuous probability distribution. dtype must be a floating point but you specified ", self.dtype());
354
+ CHECK_EMPTY_AND_RETURN(self);
355
+ auto iter = TensorIterator::borrowing_nullary_op(self);
356
+ cauchy_kernel<RNG>()(iter, median, sigma, gen);
357
+ return self;
358
+ }
359
+
360
+ // ==================================================== Bernoulli =====================================================
361
+
362
+ template<template<typename> class bernoulli_tensor_kernel, typename RNG>
363
+ Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
364
+ CHECK_EMPTY_AND_RETURN(self);
365
+ NoNamesGuard guard;
366
+ at::assert_no_internal_overlap(self);
367
+ bernoulli_tensor_kernel<RNG>()(self, p_, gen);
368
+ return self;
369
+ }
370
+
371
+ template<template<typename> class bernoulli_scalar_kernel, typename RNG>
372
+ Tensor& bernoulli_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
373
+ TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
374
+ CHECK_EMPTY_AND_RETURN(self);
375
+ at::assert_no_internal_overlap(self);
376
+ bernoulli_scalar_kernel<RNG>()(self, p, gen);
377
+ return self;
378
+ }
379
+
380
+ template<template<typename> class bernoulli_tensor_kernel, typename RNG>
381
+ Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, c10::optional<Generator> gen) {
382
+ // result.resize_as_(self) requires self to have same dtype as result, so we
383
+ // use resize_ instead.
384
+ // TODO: Fix resize_as_. See pytorch/pytorch#11665.
385
+ result.resize_(self.sizes());
386
+ bernoulli_impl_<bernoulli_tensor_kernel, RNG>(result, self, gen);
387
+ namedinference::propagate_names(result, self);
388
+ return result;
389
+ }
390
+
391
+ #undef CHECK_OUT_OF_BOUNDS
392
+ #undef WARN_OUT_OF_BOUNDS
393
+
394
+ } // namespace at::native::templates
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Distributions.h ADDED
@@ -0,0 +1,518 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/Math.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/MathConstants.h>
6
+
7
+ // ROCM hcc doesn't work well with using std:: in kernel functions
8
+ #if defined(__CUDA_ARCH__)
9
+ #include <c10/cuda/CUDAMathCompat.h>
10
+ #define compat_exp c10::cuda::compat::exp
11
+ #define compat_ceil c10::cuda::compat::ceil
12
+ #define compat_floor c10::cuda::compat::floor
13
+ #define compat_log c10::cuda::compat::log
14
+ #define compat_pow c10::cuda::compat::pow
15
+ #define compat_sqrt c10::cuda::compat::sqrt
16
+ #define compat_tan c10::cuda::compat::tan
17
+ #define compat_abs c10::cuda::compat::abs
18
+ #define compat_log1p c10::cuda::compat::log1p
19
+ #elif defined(__HIPCC__)
20
+ #include <c10/hip/HIPMathCompat.h>
21
+ #define compat_exp c10::hip::compat::exp
22
+ #define compat_ceil c10::hip::compat::ceil
23
+ #define compat_floor c10::hip::compat::floor
24
+ #define compat_log c10::hip::compat::log
25
+ #define compat_pow c10::hip::compat::pow
26
+ #define compat_sqrt c10::hip::compat::sqrt
27
+ #define compat_tan c10::hip::compat::tan
28
+ #define compat_abs c10::hip::compat::abs
29
+ #define compat_log1p c10::hip::compat::log1p
30
+ #else
31
+ #define compat_exp std::exp
32
+ #define compat_ceil std::ceil
33
+ #define compat_floor std::floor
34
+ #define compat_log std::log
35
+ #define compat_pow std::pow
36
+ #define compat_sqrt std::sqrt
37
+ #define compat_tan std::tan
38
+ #define compat_abs std::abs
39
+ #define compat_log1p std::log1p
40
+ #endif
41
+
42
+ namespace {
43
+
44
+ #if !defined(__CUDA_ARCH__) && !defined(__HIPCC__)
45
+ // we cannot use std::isnan directly due to some incompatibility of
46
+ // gcc constexpr'ing and nvcc
47
+ using std::isnan;
48
+ #endif
49
+
50
+ // Here sampler_t should be function type scalar_t(void). For gpu
51
+ // "sampler" is a device function, but since ROCM doesn't have
52
+ // equivalent to nvstd::function, we use a template type parameter to
53
+ // capture it.
54
+ template<typename scalar_t, typename sampler_t>
55
+ struct BaseSampler {
56
+ sampler_t sampler;
57
+ C10_DEVICE BaseSampler(const sampler_t& sampler): sampler(sampler) {}
58
+ C10_DEVICE scalar_t sample() {
59
+ return sampler();
60
+ }
61
+ };
62
+
63
+ // The function `sample_gamma`
64
+ // is adapted from Numpy's distributions.c implementation.
65
+ // It is MIT licensed, so here is the copyright:
66
+
67
+ /* Copyright 2005 Robert Kern ([email protected])
68
+ *
69
+ * Permission is hereby granted, free of charge, to any person obtaining a
70
+ * copy of this software and associated documentation files (the
71
+ * "Software"), to deal in the Software without restriction, including
72
+ * without limitation the rights to use, copy, modify, merge, publish,
73
+ * distribute, sublicense, and/or sell copies of the Software, and to
74
+ * permit persons to whom the Software is furnished to do so, subject to
75
+ * the following conditions:
76
+ *
77
+ * The above copyright notice and this permission notice shall be included
78
+ * in all copies or substantial portions of the Software.
79
+ *
80
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
81
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
82
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
83
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
84
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
85
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
86
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
87
+ */
88
+
89
+ template<typename scalar_t, typename accscalar_t, typename uniform_sampler_t, typename normal_sampler_t>
90
+ C10_DEVICE scalar_t sample_gamma(scalar_t alpha, BaseSampler<accscalar_t, uniform_sampler_t>& standard_uniform, BaseSampler<accscalar_t, normal_sampler_t>& standard_normal) {
91
+ accscalar_t scale = 1.0f;
92
+
93
+ // Boost alpha for higher acceptance probability.
94
+ if (alpha < 1.0f) {
95
+ if (alpha == 0.f) return 0.f;
96
+ scale *= compat_pow(1 - standard_uniform.sample(), 1.0f / alpha);
97
+ alpha += 1.0f;
98
+ }
99
+
100
+ // This implements the acceptance-rejection method of Marsaglia and Tsang (2000)
101
+ // doi:10.1145/358407.358414
102
+ const accscalar_t d = alpha - 1.0f / 3.0f;
103
+ const accscalar_t c = 1.0f / compat_sqrt(9.0f * d);
104
+ for (;;) {
105
+ accscalar_t x, y;
106
+ do {
107
+ x = standard_normal.sample();
108
+ y = 1.0f + c * x;
109
+ } while (y <= 0);
110
+ const accscalar_t v = y * y * y;
111
+ const accscalar_t u = 1 - standard_uniform.sample();
112
+ const accscalar_t xx = x * x;
113
+ if (u < 1.0f - 0.0331f * xx * xx)
114
+ return static_cast<scalar_t>(scale * d * v);
115
+ if (compat_log(u) < 0.5f * xx + d * (1.0f - v + compat_log(v)))
116
+ return static_cast<scalar_t>(scale * d * v);
117
+ }
118
+ }
119
+
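A standalone sketch of the same Marsaglia-Tsang (2000) accept-reject scheme, with std:: generators substituted for BaseSampler; sample_gamma_std is an illustrative helper, not an ATen function.

#include <cmath>
#include <iostream>
#include <random>

double sample_gamma_std(double alpha, std::mt19937& gen) {
  std::uniform_real_distribution<double> unif(0.0, 1.0);
  std::normal_distribution<double> norm(0.0, 1.0);
  double scale = 1.0;
  if (alpha < 1.0) {                      // boost alpha, fold back via scale
    if (alpha == 0.0) return 0.0;
    scale *= std::pow(1.0 - unif(gen), 1.0 / alpha);
    alpha += 1.0;
  }
  const double d = alpha - 1.0 / 3.0;
  const double c = 1.0 / std::sqrt(9.0 * d);
  for (;;) {
    double x, y;
    do { x = norm(gen); y = 1.0 + c * x; } while (y <= 0.0);
    const double v = y * y * y;
    const double u = 1.0 - unif(gen);
    const double xx = x * x;
    if (u < 1.0 - 0.0331 * xx * xx) return scale * d * v;  // fast squeeze accept
    if (std::log(u) < 0.5 * xx + d * (1.0 - v + std::log(v))) return scale * d * v;
  }
}

int main() {
  std::mt19937 gen(42);
  double acc = 0.0;
  for (int i = 0; i < 100000; ++i) acc += sample_gamma_std(3.5, gen);
  std::cout << "sample mean ~ " << acc / 100000 << "\n";  // ~ alpha == 3.5
  return 0;
}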
120
+ /* the functions stirling_approx_tail, binomial_inversion, and btrs are adapted
121
+ * from TensorFlow's random_binomial_op.cc implementation. That code is under
122
+ * copyright: 2019 The TensorFlow Authors.
123
+ *
124
+ * It was released under the Apache License, Version 2.0 (the "License"), available at:
125
+ * http://www.apache.org/licenses/LICENSE-2.0
126
+ */
127
+
128
+ template<typename scalar_t>
129
+ C10_DEVICE scalar_t stirling_approx_tail(scalar_t k) {
130
+ const static scalar_t kTailValues[] = {
131
+ 0.0810614667953272,
132
+ 0.0413406959554092,
133
+ 0.0276779256849983,
134
+ 0.02079067210376509,
135
+ 0.0166446911898211,
136
+ 0.0138761288230707,
137
+ 0.0118967099458917,
138
+ 0.0104112652619720,
139
+ 0.00925546218271273,
140
+ 0.00833056343336287
141
+ };
142
+ if (k <= 9) {
143
+ return kTailValues[static_cast<size_t>(k)];
144
+ }
145
+ scalar_t kp1sq = (k + 1) * (k + 1);
146
+ return (1.0 / 12 - (1.0 / 360 - 1.0 / 1260 / kp1sq) / kp1sq) / (k + 1);
147
+ }
148
+
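For reference, a small check (illustrative only) that the kTailValues table above is the remainder of Stirling's series for log(k!), i.e. tail(k) = lgamma(k + 1) - [(k + 0.5) * log(k + 1) - (k + 1) + 0.5 * log(2 * pi)]:

#include <cmath>
#include <iostream>

int main() {
  const double pi = 3.14159265358979323846;
  for (int k = 0; k <= 3; ++k) {
    const double log_kfact = std::lgamma(k + 1.0);
    const double stirling =
        (k + 0.5) * std::log(k + 1.0) - (k + 1.0) + 0.5 * std::log(2.0 * pi);
    std::cout << "tail(" << k << ") = " << log_kfact - stirling << "\n";
    // prints ~0.081061, 0.041341, 0.027678, 0.020791, matching kTailValues
  }
  return 0;
}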
149
+
150
+ template<typename scalar_t, typename accscalar_t, typename uniform_sampler_t>
151
+ C10_DEVICE scalar_t binomial_inversion(scalar_t count, scalar_t prob, BaseSampler<accscalar_t, uniform_sampler_t>& standard_uniform) {
152
+ accscalar_t U;
153
+ accscalar_t geom_sum = 0;
154
+ scalar_t num_geom = 0;
155
+
156
+ accscalar_t logprob = compat_log1p(-prob);
157
+
158
+ while (1) {
159
+ U = standard_uniform.sample();
160
+ accscalar_t geom = compat_ceil(compat_log(U) / logprob);
161
+ geom_sum += geom;
162
+ if (geom_sum > count) {
163
+ break;
164
+ }
165
+ num_geom = num_geom + 1;
166
+ }
167
+ return num_geom;
168
+ }
169
+
170
+ template<typename scalar_t, typename accscalar_t, typename uniform_sampler_t>
171
+ C10_DEVICE scalar_t btrs(scalar_t count, scalar_t prob, BaseSampler<accscalar_t, uniform_sampler_t>& standard_uniform) {
172
+ scalar_t k;
173
+ accscalar_t U, V, us;
174
+
175
+ // This is spq in the paper.
176
+ const accscalar_t stddev = compat_sqrt(count * prob * (1 - prob));
177
+
178
+ // Other coefficients for Transformed Rejection sampling.
179
+ const accscalar_t b = 1.15 + 2.53 * stddev;
180
+ const accscalar_t a = -0.0873 + 0.0248 * b + 0.01 * prob;
181
+ const accscalar_t c = count * prob + 0.5;
182
+ const accscalar_t v_r = 0.92 - 4.2 / b;
183
+ const accscalar_t r = prob / (1 - prob);
184
+
185
+ const accscalar_t alpha = (2.83 + 5.1 / b) * stddev;
186
+ const accscalar_t m = compat_floor((count + 1) * prob);
187
+
188
+ while (1) {
189
+ U = standard_uniform.sample() - 0.5;
190
+ V = standard_uniform.sample();
191
+
192
+ us = 0.5 - compat_abs(U);
193
+ k = static_cast<scalar_t>(compat_floor((2 * a / us + b) * U + c));
194
+
195
+ // Reject non-sensical answers.
196
+ if (k < 0 || k > count) {
197
+ continue;
198
+ }
199
+ // Region for which the box is tight, and we can return our calculated value.
200
+ // This should happen 0.86 * v_r times. In the limit as n * p is large,
201
+ // the acceptance rate converges to ~79% (and in the lower regime it is ~24%).
202
+ if (us >= 0.07 && V <= v_r) {
203
+ return k;
204
+ }
205
+
206
+ // This deviates from Hormann's BTRS algorithm, as there is a log missing.
207
+ // For all (u, v) pairs outside of the bounding box, this calculates the
208
+ // transformed-reject ratio.
209
+ V = compat_log(V * alpha / (a / (us * us) + b));
210
+ accscalar_t upperbound =
211
+ ((m + 0.5) * compat_log((m + 1) / (r * (count - m + 1))) +
212
+ (count + 1) * compat_log((count - m + 1) / (count - k + 1)) +
213
+ (k + 0.5) * compat_log(r * (count - k + 1) / (k + 1)) +
214
+ stirling_approx_tail<accscalar_t>(m) + stirling_approx_tail<accscalar_t>(count - m) -
215
+ stirling_approx_tail<accscalar_t>(k) - stirling_approx_tail<accscalar_t>(count - k));
216
+
217
+ if (V <= upperbound) {
218
+ return k;
219
+ }
220
+ }
221
+ }
222
+
223
+ template<typename scalar_t, typename accscalar_t, typename uniform_sampler_t>
224
+ C10_DEVICE scalar_t sample_binomial(scalar_t count, scalar_t prob, BaseSampler<accscalar_t, uniform_sampler_t>& standard_uniform) {
225
+ if (count <= 0.0 || prob <= 0.0) {
226
+ return 0;
227
+ } else if (prob >= 1.0) {
228
+ return count;
229
+ } else if (prob <= 0.5) {
230
+ if (count * prob >= 10.0) {
231
+ // btrs
232
+ return btrs<scalar_t, accscalar_t, uniform_sampler_t>(count, prob, standard_uniform);
233
+ } else {
234
+ // binomial inversion
235
+ return binomial_inversion<scalar_t, accscalar_t, uniform_sampler_t>(count, prob, standard_uniform);
236
+ }
237
+ } else if (prob > 0.5) {
238
+ scalar_t qprob = 1.0 - prob;
239
+ if (count * qprob >= 10.0) {
240
+ // btrs
241
+ return count - btrs<scalar_t, accscalar_t, uniform_sampler_t>(count, qprob, standard_uniform);
242
+ } else {
243
+ // count - binomial inversion
244
+ return count - binomial_inversion<scalar_t, accscalar_t, uniform_sampler_t>(count, qprob, standard_uniform);
245
+ }
246
+ } else {
247
+ // prob is nan?
248
+ return static_cast<scalar_t>(NAN);
249
+ }
250
+ }
251
+
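A minimal sketch of the dispatch rule encoded in sample_binomial above; binomial_strategy is a hypothetical helper, not part of ATen. BTRS is used when count * min(p, 1 - p) >= 10, otherwise geometric inversion, and p > 0.5 is handled by sampling with 1 - p and mirroring the result.

#include <iostream>

const char* binomial_strategy(double count, double prob) {
  const double q = prob <= 0.5 ? prob : 1.0 - prob;  // mirror large p
  return count * q >= 10.0 ? "btrs" : "inversion";
}

int main() {
  std::cout << binomial_strategy(1000.0, 0.4) << "\n";  // btrs       (n*p = 400)
  std::cout << binomial_strategy(20.0, 0.05) << "\n";   // inversion  (n*p = 1)
  std::cout << binomial_strategy(200.0, 0.99) << "\n";  // inversion  (n*(1-p) = 2)
  return 0;
}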
252
+ /*
253
+ * This function is derived from the implementation of the digamma function in the Cephes Math Library.
254
+ * See note [3-Clause BSD License for the Cephes Math Library] in ATen/native/Math.h.
255
+ */
256
+ template<typename scalar_t, typename accscalar_t>
257
+ C10_DEVICE static inline scalar_t digamma_one(scalar_t x) {
258
+ constexpr accscalar_t PSI_10 = 2.25175258906672110764;
259
+ if (x == 0) {
260
+ return INFINITY;
261
+ }
262
+ accscalar_t additional_summand = 0;
263
+ int x_is_integer = x == compat_floor(x);
264
+ if (x < 0) {
265
+ if (x_is_integer) {
266
+ return INFINITY;
267
+ }
268
+ // it is more standard to write this as recursion, but
269
+ // nvcc does not like that
270
+ additional_summand = -c10::pi<scalar_t> /
271
+ compat_tan(c10::pi<scalar_t> * x);
272
+ x = 1 - x;
273
+ }
274
+
275
+ // Push x to be >= 10
276
+ accscalar_t result = 0;
277
+ while (x < 10) {
278
+ result -= 1 / x;
279
+ x += 1;
280
+ }
281
+ if (x == 10) {
282
+ return result + PSI_10 + additional_summand;
283
+ }
284
+
285
+ // Compute asymptotic digamma
286
+ static const accscalar_t A[] = {
287
+ 8.33333333333333333333E-2,
288
+ -2.10927960927960927961E-2,
289
+ 7.57575757575757575758E-3,
290
+ -4.16666666666666666667E-3,
291
+ 3.96825396825396825397E-3,
292
+ -8.33333333333333333333E-3,
293
+ 8.33333333333333333333E-2,
294
+ };
295
+
296
+ accscalar_t y = 0;
297
+ if (x < 1.0e17f) {
298
+ accscalar_t z = 1.0 / (x * x);
299
+ y = z * polevl<accscalar_t>(z, A, 6);
300
+ }
301
+ return static_cast<scalar_t>(
302
+ result + compat_log(x) - (0.5f / x) - y + additional_summand);
303
+ }
304
+
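A compact sketch of the two-stage strategy digamma_one uses: the recurrence psi(x) = psi(x + 1) - 1/x pushes x up to at least 10, then the asymptotic Bernoulli series is evaluated with the same coefficient table A. digamma_sketch is illustrative only and skips the negative-x and PSI_10 special cases.

#include <cmath>
#include <iostream>

double digamma_sketch(double x) {
  double result = 0.0;
  while (x < 10.0) { result -= 1.0 / x; x += 1.0; }  // recurrence step
  const double A[] = {8.33333333333333333333e-2, -2.10927960927960927961e-2,
                      7.57575757575757575758e-3, -4.16666666666666666667e-3,
                      3.96825396825396825397e-3, -8.33333333333333333333e-3,
                      8.33333333333333333333e-2};
  const double z = 1.0 / (x * x);
  double y = A[0];
  for (int i = 1; i < 7; ++i) y = y * z + A[i];  // Horner form of polevl(z, A, 6)
  y *= z;
  return result + std::log(x) - 0.5 / x - y;
}

int main() {
  std::cout << digamma_sketch(1.0) << "\n";  // ~ -0.577216 (minus Euler-Mascheroni)
  return 0;
}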
305
+ // Computes the reparameterized gradient -(d/dalpha cdf(x;alpha)) / pdf(x;alpha)
306
+ // for random number x drawn from a standard Gamma distribution Gamma(alpha).
307
+ template <typename scalar_t, typename accscalar_t>
308
+ C10_HOST_DEVICE scalar_t standard_gamma_grad_one(scalar_t alpha_, scalar_t x_) {
309
+ // Use a Taylor series expansion for small x.
310
+ accscalar_t x = static_cast<accscalar_t>(x_);
311
+ accscalar_t alpha = static_cast<accscalar_t>(alpha_);
312
+ if (x < 0.8f) {
313
+ accscalar_t numer = 1;
314
+ accscalar_t denom = alpha;
315
+ auto series1 = numer / denom;
316
+ auto series2 = numer / (denom * denom);
317
+ for (int i = 1; i <= 5; ++i) {
318
+ numer *= -x / static_cast<accscalar_t>(i);
319
+ denom += 1;
320
+ series1 += numer / denom;
321
+ series2 += numer / (denom * denom);
322
+ }
323
+ const auto pow_x_alpha = compat_pow(x, alpha);
324
+ const auto gamma_pdf = compat_pow(x, alpha - 1) * compat_exp(-x);
325
+ const auto gamma_cdf = pow_x_alpha * series1;
326
+ const auto gamma_cdf_alpha =
327
+ (compat_log(x) - digamma_one<accscalar_t, accscalar_t>(alpha)) *
328
+ gamma_cdf -
329
+ pow_x_alpha * series2;
330
+ const auto result = -gamma_cdf_alpha / gamma_pdf;
331
+ return isnan(result) ? static_cast<scalar_t>( 0.f ) : static_cast<scalar_t>(result);
332
+ }
333
+
334
+ // Use a Rice saddle point expansion for large alpha.
335
+ if (alpha > 8.0f) {
336
+ if (0.9f * alpha <= x && x <= 1.1f * alpha) {
337
+ const auto numer_1 = 1 + 24 * alpha * (1 + 12 * alpha);
338
+ const auto numer_2 = 1440 * (alpha * alpha) + 6 * x * (53 - 120 * x)
339
+ - 65 * x * x / alpha + alpha * (107 + 3600 * x);
340
+ const auto denom = 1244160 * (alpha * alpha) * (alpha * alpha);
341
+ return static_cast<scalar_t>(numer_1 * numer_2 / denom);
342
+ }
343
+ const auto denom = compat_sqrt(8 * alpha);
344
+ const auto term2 = denom / (alpha - x);
345
+ const auto term3 = compat_pow(
346
+ x - alpha - alpha * compat_log(x / alpha),
347
+ static_cast<accscalar_t>(-1.5));
348
+ const auto term23 = (x < alpha) ? term2 - term3 : term2 + term3;
349
+ const auto term1 = compat_log(x / alpha) * term23 -
350
+ compat_sqrt(2 / alpha) * (alpha + x) / ((alpha - x) * (alpha - x));
351
+ const auto stirling = 1 + 1 / (12 * alpha) * (1 + 1 / (24 * alpha));
352
+ const auto numer = x * term1;
353
+ return static_cast<scalar_t>(-stirling * numer / denom);
354
+ }
355
+
356
+ // Use a bivariate rational approximation to the reparameterized gradient.
357
+ const auto u = compat_log(x / alpha);
358
+ const auto v = compat_log(alpha);
359
+ static const accscalar_t coef_uv[3][8] = {
360
+ {0.16009398, -0.094634809, 0.025146376, -0.0030648343,
361
+ 1, 0.32668115, 0.10406089, 0.0014179084},
362
+ {0.53487893, 0.1298071, 0.065735949, -0.0015649758,
363
+ 0.16639465, 0.020070113, -0.0035938915, -0.00058392623},
364
+ {0.040121004, -0.0065914022, -0.0026286047, -0.0013441777,
365
+ 0.017050642, -0.0021309326, 0.00085092367, -1.5247877e-07},
366
+ };
367
+ accscalar_t coef_v[8];
368
+ for (int i = 0; i < 8; ++ i) {
369
+ coef_v[i] = coef_uv[0][i] + u * (coef_uv[1][i] + u * coef_uv[2][i]);
370
+ }
371
+ const auto p = coef_v[0] + v * (coef_v[1] + v * (coef_v[2] + v * coef_v[3]));
372
+ const auto q = coef_v[4] + v * (coef_v[5] + v * (coef_v[6] + v * coef_v[7]));
373
+ return static_cast<scalar_t>(compat_exp(p / q));
374
+ }
375
+
376
+ // Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha.
377
+ // Assumes x is close to zero and uses a Taylor expansion.
378
+ template <typename scalar_t, typename accscalar_t>
379
+ C10_DEVICE static inline scalar_t _beta_grad_alpha_small(scalar_t x, scalar_t alpha, scalar_t beta) {
380
+ const scalar_t factor = digamma_one<scalar_t, accscalar_t>(alpha)
381
+ - digamma_one<scalar_t, accscalar_t>(alpha + beta) - compat_log(x);
382
+ scalar_t numer = 1;
383
+ scalar_t series = numer / alpha * (factor + 1 / alpha);
384
+ for (int i = 1; i <= 10; ++i) {
385
+ scalar_t casted_i = static_cast<scalar_t>(i);
386
+ numer *= (casted_i - beta) * x / casted_i;
387
+ const scalar_t denom = alpha + casted_i;
388
+ series += numer / denom * (factor + 1 / denom);
389
+ }
390
+ const scalar_t result = x * compat_pow(1 - x, -beta) * series;
391
+ return isnan(result) ? static_cast<scalar_t>( 0.f ) : result;
392
+ }
393
+
394
+ // Approximate reparameterized gradient of Beta(x,alpha,beta) wrt beta.
395
+ // Assumes x is close to zero and uses a Taylor expansion.
396
+ template <typename scalar_t, typename accscalar_t>
397
+ C10_DEVICE static inline scalar_t _beta_grad_beta_small(scalar_t x, scalar_t alpha, scalar_t beta) {
398
+ const scalar_t factor = digamma_one<scalar_t, accscalar_t>(alpha + beta) - digamma_one<scalar_t, accscalar_t>(beta);
399
+ scalar_t numer = 1, betas = 1, dbetas = 0, series = factor / alpha;
400
+ for (int i = 1; i <= 8; ++i) {
401
+ scalar_t casted_i = static_cast<scalar_t>(i);
402
+ numer *= -x / casted_i;
403
+ dbetas = dbetas * (beta - casted_i) + betas;
404
+ betas = betas * (beta - casted_i);
405
+ series += numer / (alpha + casted_i) * (dbetas + factor * betas);
406
+ }
407
+ const scalar_t result = -compat_pow(1 - x, 1 - beta) * series;
408
+ return isnan(result) ? static_cast<scalar_t>( 0.f ) : result;
409
+ }
410
+
411
+ // Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha.
412
+ // Assumes alpha and beta are both large and uses a Rice saddle point expansion.
413
+ // To ensure numerical stability, this computation is performed at higher precision.
414
+ template<typename scalar_t, typename accscalar_t>
415
+ C10_DEVICE static inline scalar_t _beta_grad_alpha_mid(accscalar_t x, accscalar_t alpha, accscalar_t beta) {
416
+ const accscalar_t total = alpha + beta;
417
+ const accscalar_t mean = alpha / total;
418
+ const accscalar_t std = compat_sqrt(alpha * beta / (total + 1)) / total;
419
+ if (mean - 0.1 * std <= x && x <= mean + 0.1 * std) {
420
+ // Avoid the singularity at x = mean.
421
+ const accscalar_t poly = 47 * x * (beta * beta) * (beta * beta) + alpha * (
422
+ (43 + 20 * (16 + 27 * beta) * x) * (beta * beta) * beta + alpha * (
423
+ 3 * (59 + 180 * beta - 90 * x) * (beta * beta) + alpha * (
424
+ (453 + 1620 * beta * (1 - x) - 455 * x) * beta + alpha * (
425
+ 8 * (1 - x) * (135 * beta - 11)))));
426
+ const accscalar_t prefactor_num = (1 + 12 * alpha) * (1 + 12 * beta) / (total * total);
427
+ const accscalar_t prefactor_den = 12960 * alpha * alpha * alpha * beta * beta * (1 + 12 * total);
428
+ return prefactor_num / (1 - x) * poly / prefactor_den;
429
+ }
430
+ const accscalar_t prefactor = -x / compat_sqrt(2 * alpha * beta / total);
431
+ const accscalar_t stirling = (1 + 1 / (12 * alpha) + 1 / (288 * alpha * alpha))
432
+ * (1 + 1 / (12 * beta) + 1 / (288 * beta * beta))
433
+ / (1 + 1 / (12 * total) + 1 / (288 * total * total));
434
+ const accscalar_t term1_num = 2 * (alpha * alpha) * (x - 1) + alpha * beta * (x - 1) - x * (beta * beta);
435
+ const accscalar_t axbx = alpha * (x - 1) + beta * x;
436
+ const accscalar_t term1_den = compat_sqrt(2 * alpha / beta) * compat_pow(total, static_cast<accscalar_t>(1.5f)) * axbx * axbx;
437
+ const accscalar_t term1 = term1_num / term1_den;
438
+ const accscalar_t term2 = 0.5f * compat_log(alpha / (total * x));
439
+ const accscalar_t term3_num = compat_sqrt(8 * alpha * beta / total);
440
+ const accscalar_t term3_den = beta * x + alpha * (x - 1);
441
+ const accscalar_t term3 = term3_num / term3_den;
442
+ const accscalar_t term4_base = beta * compat_log(beta / (total * (1 - x))) +
443
+ alpha * compat_log(alpha / (total * x));
444
+ const accscalar_t term4 = compat_pow(term4_base, static_cast<accscalar_t>(-1.5f));
445
+ const accscalar_t term1234 = term1 + term2 * (term3 + (x < mean ? term4 : -term4));
446
+ return static_cast<scalar_t>(stirling * prefactor * term1234);
447
+ }
448
+
449
+ // Computes a scaled reparameterized gradient
450
+ // -(d/dalpha cdf(x;alpha,beta)) / pdf(x;alpha,beta) / (1-x)
451
+ // for random number x drawn from a Beta distribution Beta(alpha,beta).
452
+ // This function inputs total=alpha+beta to make it easy to implement
453
+ // Dirichlet reparameterized gradients in terms of Betas.
454
+ template<typename scalar_t, typename accscalar_t>
455
+ C10_HOST_DEVICE static inline scalar_t dirichlet_grad_one(scalar_t x, scalar_t alpha, scalar_t total) {
456
+ accscalar_t x_ = static_cast<accscalar_t>(x);
457
+ accscalar_t alpha_ = static_cast<accscalar_t>(alpha);
458
+ accscalar_t total_ = static_cast<accscalar_t>(total);
459
+
460
+ const scalar_t beta = total - alpha;
461
+ const accscalar_t beta_ = total_ - alpha_;
462
+ const scalar_t boundary = total * x * (1 - x);
463
+
464
+ // Use an asymptotic approximation for x close to 0.
465
+ if (x <= 0.5f && boundary < 2.5f) {
466
+ return _beta_grad_alpha_small<scalar_t, accscalar_t>(x, alpha, beta);
467
+ }
468
+
469
+ // Use an asymptotic approximation for x close to 1.
470
+ if (x >= 0.5f && boundary < 0.75f) {
471
+ return -_beta_grad_beta_small<scalar_t, accscalar_t>(1 - x, beta, alpha);
472
+ }
473
+
474
+ // Use an asymptotic approximation when alpha and (total - alpha) are both large.
475
+ if (alpha > 6 && beta > 6) {
476
+ return _beta_grad_alpha_mid<scalar_t, accscalar_t>(x_, alpha_, beta_);
477
+ }
478
+
479
+ // Use a rational correction to an analytic approximation.
480
+ static const accscalar_t c[2][3][3][4] = {
481
+ {{{1.003668233, -0.01061107488, -0.0657888334, 0.01201642863},
482
+ {0.6336835991, -0.3557432599, 0.05486251648, -0.001465281033},
483
+ {-0.03276231906, 0.004474107445, 0.002429354597, -0.0001557569013}},
484
+ {{0.221950385, -0.3187676331, 0.01799915743, 0.01074823814},
485
+ {-0.2951249643, 0.06219954479, 0.01535556598, 0.001550077057},
486
+ {0.02155310298, 0.004170831599, 0.001292462449, 6.976601077e-05}},
487
+ {{-0.05980841433, 0.008441916499, 0.01085618172, 0.002319392565},
488
+ {0.02911413504, 0.01400243777, -0.002721828457, 0.000751041181},
489
+ {0.005900514878, -0.001936558688, -9.495446725e-06, 5.385558597e-05}}},
490
+ {{{1, -0.02924021934, -0.04438342661, 0.007285809825},
491
+ {0.6357567472, -0.3473456711, 0.05454656494, -0.002407477521},
492
+ {-0.03301322327, 0.004845219414, 0.00231480583, -0.0002307248149}},
493
+ {{0.5925320577, -0.1757678135, 0.01505928619, 0.000564515273},
494
+ {0.1014815858, -0.06589186703, 0.01272886114, -0.0007316646956},
495
+ {-0.007258481865, 0.001096195486, 0.0003934994223, -4.12701925e-05}},
496
+ {{0.06469649321, -0.0236701437, 0.002902096474, -5.896963079e-05},
497
+ {0.001925008108, -0.002869809258, 0.0008000589141, -6.063713228e-05},
498
+ {-0.0003477407336, 6.959756487e-05, 1.097287507e-05, -1.650964693e-06}}},
499
+ };
500
+ const accscalar_t u = compat_log(x_);
501
+ const accscalar_t a = compat_log(alpha_) - u;
502
+ const accscalar_t b = compat_log(total_) - a;
503
+ const accscalar_t pow_u[3] = {1, u, u * u};
504
+ const accscalar_t pow_a[3] = {1, a, a * a};
505
+ accscalar_t p = 0.0;
506
+ accscalar_t q = 0.0;
507
+ for (int i = 0; i < 3; ++i) {
508
+ for (int j = 0; j < 3; ++j) {
509
+ const accscalar_t ua = pow_u[i] * pow_a[j];
510
+ p += ua * (c[0][i][j][0] + b * (c[0][i][j][1] + b * (c[0][i][j][2] + b * c[0][i][j][3])));
511
+ q += ua * (c[1][i][j][0] + b * (c[1][i][j][1] + b * (c[1][i][j][2] + b * c[1][i][j][3])));
512
+ }
513
+ }
514
+ const accscalar_t approx = x_ * (digamma_one<scalar_t, accscalar_t>(total_) - digamma_one<scalar_t, accscalar_t>(alpha_)) / beta_;
515
+ return static_cast<scalar_t>(p / q * approx);
516
+ }
517
+
518
+ } // namespace
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Fill.h ADDED
@@ -0,0 +1,21 @@
1
+ // Functions that fill Tensors with constants. Implementations are in Fill.cpp.
2
+
3
+ #pragma once
4
+
5
+ #include <ATen/native/DispatchStub.h>
6
+
7
+ namespace c10 {
8
+ class Scalar;
9
+ }
10
+
11
+ namespace at {
12
+ class Tensor;
13
+ struct TensorIterator;
14
+
15
+ namespace native {
16
+
17
+ DECLARE_DISPATCH(void(*)(TensorIterator&, const c10::Scalar&), fill_stub);
18
+
19
+ Tensor& fill_out(Tensor& self, const Scalar& value);
20
+
21
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h ADDED
@@ -0,0 +1,371 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Device.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/ScalarType.h>
6
+ #include <ATen/core/Tensor.h>
7
+ #include <ATen/native/utils/ParamsHash.h>
8
+ #include <c10/util/Exception.h>
9
+ #include <c10/util/irange.h>
10
+
11
+ #ifndef AT_PER_OPERATOR_HEADERS
12
+ #include <ATen/NativeFunctions.h>
13
+ #else
14
+ #include <ATen/ops/result_type_native.h>
15
+ #endif
16
+
17
+ #include <unordered_map>
18
+ #include <vector>
19
+
20
+ namespace at::native {
21
+ namespace {
22
+ // Check if tensor list has either a boolean tensor or an integer tensor
23
+ inline bool has_integral_tensor(TensorList tensors, const bool includeBool) {
24
+ return std::any_of(
25
+ tensors.begin(), tensors.end(), [&includeBool](const auto& t) {
26
+ return at::isIntegralType(t.scalar_type(), includeBool);
27
+ });
28
+ }
29
+ // check if tensor list has bool tensors
30
+ inline bool has_bool_tensor(TensorList tensors) {
31
+ return std::any_of(tensors.begin(), tensors.end(), [](const auto& t) -> bool {
32
+ return t.scalar_type() == ScalarType::Bool;
33
+ });
34
+ }
35
+
36
+ // Check foreach API restrictions
37
+ // - Tensor lists must be non-empty.
38
+ // - All TensorLists and ScalarLists must have the same number of elements.
39
+ // - Corresponding tensors must have the same size.
40
+ inline void check_foreach_api_restrictions(TensorList tensors) {
41
+ TORCH_CHECK(!tensors.empty(), "Tensor list must have at least one tensor.");
42
+ }
43
+
44
+ inline void check_foreach_api_restrictions(
45
+ TensorList tensors,
46
+ ArrayRef<Scalar> scalars) {
47
+ check_foreach_api_restrictions(tensors);
48
+ TORCH_CHECK(
49
+ tensors.size() == scalars.size(),
50
+ "Tensor list must have same number of elements as scalar list.");
51
+ }
52
+
53
+ inline void check_foreach_api_restrictions(
54
+ TensorList tensors1,
55
+ TensorList tensors2) {
56
+ TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor.");
57
+ TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor.");
58
+ TORCH_CHECK(
59
+ tensors1.size() == tensors2.size(),
60
+ "Tensor lists must have the same number of tensors, got ",
61
+ tensors1.size(),
62
+ " and ",
63
+ tensors2.size());
64
+ }
65
+
66
+ inline void check_foreach_api_restrictions(
67
+ TensorList tensors1,
68
+ TensorList tensors2,
69
+ TensorList tensors3) {
70
+ TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor.");
71
+ TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor.");
72
+ TORCH_CHECK(!tensors3.empty(), "Tensor list must have at least one tensor.");
73
+ TORCH_CHECK(
74
+ tensors1.size() == tensors2.size(),
75
+ "Tensor lists must have the same number of tensors, got ",
76
+ tensors1.size(),
77
+ " and ",
78
+ tensors2.size());
79
+ TORCH_CHECK(
80
+ tensors1.size() == tensors3.size(),
81
+ "Tensor lists must have the same number of tensors, got ",
82
+ tensors1.size(),
83
+ " and ",
84
+ tensors3.size());
85
+ }
86
+
87
+ inline void check_foreach_api_restrictions(
88
+ TensorList tensors1,
89
+ TensorList tensors2,
90
+ TensorList tensors3,
91
+ ArrayRef<Scalar> scalars) {
92
+ check_foreach_api_restrictions(tensors1, tensors2, tensors3);
93
+ TORCH_CHECK(
94
+ tensors1.size() == scalars.size(),
95
+ "Tensor list must have same number of elements as scalar list, got ",
96
+ tensors1.size(),
97
+ " and ",
98
+ scalars.size());
99
+ }
100
+
101
+ // Helper function called in check_fast_path_restrictions to check whether all
102
+ // corresponding tensors (aligning in index across the tensorLists) share the
103
+ // same device and dtype.
104
+ inline bool _check_tensors_share_device_and_dtype(
105
+ ArrayRef<TensorList> tensorLists) {
106
+ const auto expected_dtype = tensorLists[0][0].dtype();
107
+ const auto expected_device = tensorLists[0][0].device();
108
+
109
+ auto is_tensor_okay = [&](const Tensor& tensor) {
110
+ return tensor.dtype() == expected_dtype &&
111
+ tensor.device() == expected_device && tensor.layout() == at::kStrided &&
112
+ tensor.is_non_overlapping_and_dense();
113
+ };
114
+
115
+ for (const auto& tensorList : tensorLists) {
116
+ for (const auto& tensor : tensorList) {
117
+ if (!is_tensor_okay(tensor)) {
118
+ return false;
119
+ }
120
+ }
121
+ }
122
+
123
+ return true;
124
+ }
125
+
126
+ // Helper function called in check_fast_path_restrictions to check if
127
+ // corresponding tensors in tensor lists have the same sizes and strides.
128
+ inline bool _check_tensors_share_sizes_and_strides(
129
+ ArrayRef<TensorList> tensorLists) {
130
+ for (const auto i : c10::irange(1, tensorLists.size())) {
131
+ for (const auto j : c10::irange(tensorLists[0].size())) {
132
+ if (tensorLists[0][j].sizes() != tensorLists[i][j].sizes() ||
133
+ tensorLists[0][j].strides() != tensorLists[i][j].strides()) {
134
+ return false;
135
+ }
136
+ }
137
+ }
138
+
139
+ return true;
140
+ }
141
+
142
+ // Helper function called in check_fast_path_restrictions to check whether
143
+ // all tensors type promote properly with the scalars in scalarList. This
144
+ // function assumes that _check_tensors_share_device_and_dtype has already been
145
+ // called so that all corresponding tensors in tensorLists have the same dtype.
146
+ // Then, it is sufficient to check the type promotion with just one tensorList.
147
+ inline bool _check_tensors_do_type_promotion_with_scalars(
148
+ TensorList tensorList,
149
+ ArrayRef<Scalar> scalarList = {},
150
+ bool does_op_promote_integer_inputs_to_float = false) {
151
+ for (const auto i : c10::irange(tensorList.size())) {
152
+ // For division, integer inputs will result in float.
153
+ if (does_op_promote_integer_inputs_to_float) {
154
+ if (at::isIntegralType(
155
+ tensorList[i].scalar_type(), /*includeBool*/ true)) {
156
+ return false;
157
+ }
158
+ }
159
+ if (!scalarList.empty()) {
160
+ const auto& scalar =
161
+ scalarList.size() == 1 ? scalarList[0] : scalarList[i];
162
+ const auto& tensor = tensorList[i];
163
+ // note(mkozuki): This check might be responsible for
164
+ // `_foreach_add(bool_tensors, bool_tensors)` being pushed to slow path.
165
+ if (tensor.scalar_type() != at::native::result_type(scalar, tensor)) {
166
+ return false;
167
+ }
168
+ }
169
+ }
170
+
171
+ return true;
172
+ }
173
+
174
+ // To go via 'fast' path, several conditions must be satisfied
175
+ // - All tensors in all lists must have the same dtype.
176
+ // - All tensors must be on the same device
177
+ // - All tensors must have strided layout
178
+ // - All tensors must be non-overlapping and dense
179
+ // - Resulting tensor must have the same dtype as the input one
180
+
181
+ // Please make sure to call check_foreach_api_restrictions before calling this
182
+ // method. There is a set of preconditions that have to be satisfied.
183
+ inline bool check_fast_path_restrictions(
184
+ ArrayRef<TensorList> tensorLists,
185
+ ArrayRef<Scalar> scalarList = {},
186
+ bool does_op_promote_integer_inputs_to_float = false) {
187
+ return _check_tensors_share_device_and_dtype(tensorLists) &&
188
+ _check_tensors_share_sizes_and_strides(tensorLists) &&
189
+ _check_tensors_do_type_promotion_with_scalars(
190
+ tensorLists[0],
191
+ scalarList,
192
+ does_op_promote_integer_inputs_to_float);
193
+ }
194
+
195
+ inline std::vector<c10::Scalar> convert_tensor_to_scalar_list(
196
+ const Tensor& scalarList_,
197
+ int64_t expect_length) {
198
+ std::vector<c10::Scalar> scalarList;
199
+ TORCH_CHECK(
200
+ scalarList_.device() == c10::kCPU,
201
+ "Expected scalars to be on CPU, got ",
202
+ scalarList_.device(),
203
+ " instead.");
204
+ TORCH_CHECK(
205
+ scalarList_.is_contiguous(), "Expected scalars to be contiguous.");
206
+ TORCH_CHECK(
207
+ scalarList_.dim() == 1,
208
+ "Expected packed scalar Tensor to be of dimension 1. Got ",
209
+ scalarList_.dim(),
210
+ " instead.");
211
+ AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
212
+ kComplexHalf,
213
+ kHalf,
214
+ kBool,
215
+ kBFloat16,
216
+ scalarList_.scalar_type(),
217
+ "convert_tensor_to_scalar_list",
218
+ [&]() {
219
+ const scalar_t* scalar_data = scalarList_.data_ptr<scalar_t>();
220
+ TORCH_CHECK(
221
+ (expect_length == scalarList_.size(0)),
222
+ "Expected length of scalars to match input of length ",
223
+ expect_length,
224
+ " but got ",
225
+ scalarList_.size(0),
226
+ " instead.");
227
+ for (int64_t i = 0; i < scalarList_.size(0); i++) {
228
+ scalarList.emplace_back(scalar_data[i]);
229
+ }
230
+ });
231
+ return scalarList;
232
+ }
233
+
234
+ inline bool can_use_fast_route(
235
+ ArrayRef<TensorList> tensorLists,
236
+ ArrayRef<Scalar> scalarList = {},
237
+ bool does_op_promote_integer_inputs_to_float = false) {
238
+ return check_fast_path_restrictions(
239
+ tensorLists, scalarList, does_op_promote_integer_inputs_to_float);
240
+ }
241
+
242
+ inline bool can_use_fast_route(
243
+ TensorList tensors1,
244
+ TensorList tensors2,
245
+ bool does_op_promote_integer_inputs_to_float = false) {
246
+ return can_use_fast_route(
247
+ {tensors1, tensors2}, {}, does_op_promote_integer_inputs_to_float);
248
+ }
249
+
250
+ using DeviceDtypeKey = std::pair<at::Device, at::ScalarType>;
251
+ using IndicesT = std::vector<size_t>;
252
+ using nested_optional_tensorvec_t =
253
+ std::vector<std::vector<c10::optional<at::Tensor>>>;
254
+ using TensorsAndIndicesT = std::pair<nested_optional_tensorvec_t, IndicesT>;
255
+ using FlatMap = std::unordered_map<
256
+ DeviceDtypeKey,
257
+ TensorsAndIndicesT,
258
+ ParamsHash<DeviceDtypeKey>>;
259
+
260
+ inline FlatMap _group_tensors_by_first_tensors_device_and_dtype(
261
+ const nested_optional_tensorvec_t& nested_tensorlist,
262
+ const bool with_indices) {
263
+ FlatMap grouped_tensors_with_indices;
264
+
265
+ TORCH_CHECK(!nested_tensorlist.empty());
266
+ TORCH_CHECK(!nested_tensorlist[0].empty());
267
+ const auto num_lists = nested_tensorlist.size();
268
+ const auto num_tensors = nested_tensorlist[0].size();
269
+
270
+ TORCH_CHECK(std::all_of(
271
+ nested_tensorlist.cbegin(),
272
+ nested_tensorlist.cend(),
273
+ [&](const auto& tensorlist) -> bool {
274
+ // note(crcrpar): Allow empty tensorlists following
275
+ // ref:
276
+ // https://github.com/pytorch/pytorch/blob/85885301fd3c6adb8b9dc3cf7afadf6945566684/torch/utils/_foreach_utils.py#L21-L24
277
+ return tensorlist.size() == num_tensors || tensorlist.size() == 0;
278
+ }));
279
+
280
+ for (const auto& tensor_index : c10::irange(num_tensors)) {
281
+ const auto key = [&]() -> DeviceDtypeKey {
282
+ const auto t = nested_tensorlist[0][tensor_index];
283
+ TORCH_CHECK(
284
+ t.has_value(),
285
+ "Tensors of the first list of nested Tensor lists are supposed to be defined but ",
286
+ "the ",
287
+ tensor_index,
288
+ "-th Tensor is not.");
289
+ return {t->device(), t->scalar_type()};
290
+ }();
291
+ TORCH_CHECK(
292
+ std::all_of(
293
+ nested_tensorlist.cbegin(),
294
+ nested_tensorlist.cend(),
295
+ [&](const auto& tensorlist) -> bool {
296
+ if (tensorlist.size() == 0) {
297
+ return true;
298
+ }
299
+ const auto& tensor = tensorlist[tensor_index];
300
+ // note(crcrpar): Currently the scope of this function is
301
+ // optimizers so there could be `state_steps` and other scalars
302
+ // whose elements are float tensors no matter what the parameter's
303
+ // dtype is.
304
+ if (!tensor.has_value()) {
305
+ return true;
306
+ } else {
307
+ const auto s = tensor->scalar_type();
308
+ const auto d = tensor->device();
309
+ // Note: `step` or `state_step` is float32 by default.
310
+ if (key.first == d) {
311
+ return key.second == s || s == at::ScalarType::Float ||
312
+ s == at::ScalarType::Double;
313
+ } else if (d.is_cpu()) {
314
+ // note(crcrpar): There are some test cases (e.g.
315
+ // TestOptim::test_adam) where state_steps are on CPU and the
316
+ // others are on CUDA. Currently a state_step Tensor has the
317
+ // dtype of float.
318
+ return s == at::ScalarType::Float ||
319
+ s == at::ScalarType::Double;
320
+ } else {
321
+ return false;
322
+ }
323
+ }
324
+ }),
325
+ "Tensors of the same index must be on the same device and the same dtype except `step` tensors that can be CPU and float32/64 notwithstanding");
326
+ if (!grouped_tensors_with_indices.count(key)) {
327
+ grouped_tensors_with_indices.insert(
328
+ {key,
329
+ TensorsAndIndicesT{
330
+ [&]() -> nested_optional_tensorvec_t {
331
+ nested_optional_tensorvec_t nested_tensorvec;
332
+ nested_tensorvec.reserve(num_lists);
333
+ for (const auto& i : c10::irange(num_lists)) {
334
+ std::vector<c10::optional<at::Tensor>> tensors;
335
+ if (!nested_tensorlist[i].empty()) {
336
+ // NB: num_tensors is the max possible length for any of
337
+ // the inner lists of tensor references. Reserving the max
338
+ // trades memory for perf. This should not have significant
339
+ // impact.
340
+ tensors.reserve(num_tensors);
341
+ }
342
+ nested_tensorvec.emplace_back(tensors);
343
+ }
344
+ return nested_tensorvec;
345
+ }(),
346
+ [&]() -> IndicesT {
347
+ if (!with_indices) {
348
+ return {};
349
+ } else {
350
+ IndicesT indices;
351
+ indices.reserve(num_tensors);
352
+ return indices;
353
+ }
354
+ }()}});
355
+ }
356
+ for (const auto& list_index : c10::irange(num_lists)) {
357
+ if (!nested_tensorlist[list_index].empty()) {
358
+ grouped_tensors_with_indices[key].first[list_index].emplace_back(
359
+ nested_tensorlist[list_index][tensor_index]);
360
+ }
361
+ }
362
+ if (with_indices) {
363
+ grouped_tensors_with_indices[key].second.emplace_back(tensor_index);
364
+ }
365
+ }
366
+
367
+ return grouped_tensors_with_indices;
368
+ }
369
+
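A simplified sketch of the grouping performed by _group_tensors_by_first_tensors_device_and_dtype above: bucket entries by a (device, dtype) key. Here the key is a pair of plain strings and tensors are just indices; this is purely illustrative and does not use ATen types.

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

using Key = std::pair<std::string, std::string>;  // (device, dtype)
struct KeyHash {
  size_t operator()(const Key& k) const {
    return std::hash<std::string>()(k.first) ^ (std::hash<std::string>()(k.second) << 1);
  }
};

int main() {
  // One "tensor list": each entry is the (device, dtype) of a parameter tensor.
  std::vector<Key> params = {{"cuda:0", "float32"}, {"cuda:0", "float16"},
                             {"cuda:0", "float32"}, {"cpu", "float32"}};
  std::unordered_map<Key, std::vector<size_t>, KeyHash> groups;
  for (size_t i = 0; i < params.size(); ++i) {
    groups[params[i]].push_back(i);  // bucket tensor indices by (device, dtype)
  }
  for (const auto& kv : groups) {
    std::cout << kv.first.first << "/" << kv.first.second << ": "
              << kv.second.size() << " tensor(s)\n";
  }
  return 0;
}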
370
+ } // namespace
371
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/FractionalMaxPooling.h ADDED
@@ -0,0 +1,80 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/TensorUtils.h>
4
+ #include <c10/util/irange.h>
5
+
6
+ namespace at::native {
7
+
8
+ template<typename scalar_t>
9
+ static inline std::vector<int> generate_intervals(
10
+ scalar_t sample,
11
+ int64_t inputSize,
12
+ int64_t outputSize,
13
+ int64_t poolSize) {
14
+ std::vector<int> sequence(outputSize);
15
+ if (outputSize > 1) {
16
+ scalar_t alpha = static_cast<scalar_t>(inputSize - poolSize) /
17
+ static_cast<scalar_t>(outputSize - 1);
18
+
19
+ for (const auto i : c10::irange(outputSize - 1)) {
20
+ sequence[i] =
21
+ static_cast<int>((i + sample) * alpha) - static_cast<int>(sample * alpha);
22
+ }
23
+ }
24
+ if (outputSize > 0) {
25
+ sequence[outputSize - 1] = inputSize - poolSize;
26
+ }
27
+ return sequence;
28
+ }
29
+
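A standalone sketch that mirrors generate_intervals above, instantiated for float, showing the pooling-window start indices produced for a given random sample; intervals is an illustrative reimplementation, not the ATen function itself.

#include <iostream>
#include <vector>

std::vector<int> intervals(float sample, int inputSize, int outputSize, int poolSize) {
  std::vector<int> seq(outputSize);
  if (outputSize > 1) {
    const float alpha = static_cast<float>(inputSize - poolSize) / (outputSize - 1);
    for (int i = 0; i < outputSize - 1; ++i) {
      seq[i] = static_cast<int>((i + sample) * alpha) - static_cast<int>(sample * alpha);
    }
  }
  if (outputSize > 0) seq[outputSize - 1] = inputSize - poolSize;  // last window flush right
  return seq;
}

int main() {
  for (int s : intervals(/*sample=*/0.3f, /*inputSize=*/10, /*outputSize=*/4, /*poolSize=*/2)) {
    std::cout << s << " ";  // prints 0 3 6 8: window starts spanning the input
  }
  std::cout << "\n";
  return 0;
}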
30
+ template <int64_t ndim>
31
+ static inline void fractional_max_pool_check_shape(
32
+ const Tensor& input,
33
+ const Tensor& randomSamples) {
34
+
35
+ TORCH_CHECK(
36
+ input.scalar_type() == randomSamples.scalar_type(),
37
+ "Expect _random_samples to have the same dtype as input");
38
+
39
+ int64_t ndimension = randomSamples.ndimension();
40
+ TORCH_CHECK(
41
+ ndimension == 3,
42
+ "Expect _random_samples to have 3 dimensions, got ", ndimension);
43
+
44
+ int64_t N = randomSamples.size(0);
45
+ int64_t C = randomSamples.size(1);
46
+ int64_t D = randomSamples.size(2);
47
+
48
+ int64_t input_batch, input_channel;
49
+ if (ndim == 2) {
50
+ // fractional_max_pool2d
51
+ if (input.ndimension() == 3) {
52
+ input_batch = 1;
53
+ input_channel = input.size(0);
54
+ } else {
55
+ input_batch = input.size(0);
56
+ input_channel = input.size(1);
57
+ }
58
+ } else {
59
+ // fractional_max_pool3d
60
+ if (input.ndimension() == 4) {
61
+ input_batch = 1;
62
+ input_channel = input.size(0);
63
+ } else {
64
+ input_batch = input.size(0);
65
+ input_channel = input.size(1);
66
+ }
67
+ }
68
+
69
+ TORCH_CHECK(
70
+ N >= input_batch,
71
+ "Expect _random_samples.size(0) no less then input batch size.");
72
+ TORCH_CHECK(
73
+ C == input_channel,
74
+ "Expect _random_samples.size(1) equals to input channel size.");
75
+ TORCH_CHECK(
76
+ D == ndim,
77
+ "Expect _random_samples.size(2) equals to ", ndim, "; got ", D, ".");
78
+ }
79
+
80
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/FunctionOfAMatrixUtils.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <cstdint>
5
+
6
+ namespace at {
7
+ struct TensorIterator;
8
+
9
+ namespace native {
10
+
11
+ using _compute_linear_combination_fn = void(*)(
12
+ TensorIterator& iter,
13
+ int64_t in_stride,
14
+ int64_t coeff_stride,
15
+ int64_t num_summations
16
+ );
17
+
18
+ DECLARE_DISPATCH(_compute_linear_combination_fn, _compute_linear_combination_stub);
19
+
20
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h ADDED
@@ -0,0 +1,298 @@
1
+ #pragma once
2
+
3
+ #include <algorithm>
4
+ #include <cmath>
5
+ #include <cstdint>
6
+ #include <utility>
7
+
8
+ #include <ATen/native/GridSamplerUtils.h>
9
+
10
+ namespace at::native {
11
+
12
+ using detail::GridSamplerInterpolation;
13
+ using detail::GridSamplerPadding;
14
+
15
+ // Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value,
16
+ // where we view each pixel as an area between (idx - 0.5) and (idx + 0.5).
17
+ // if align_corners: -1 and +1 get sent to the centers of the corner pixels
18
+ // -1 --> 0
19
+ // +1 --> (size - 1)
20
+ // scale_factor = (size - 1) / 2
21
+ // if not align_corners: -1 and +1 get sent to the image edges
22
+ // -1 --> -0.5
23
+ // +1 --> (size - 1) + 0.5 == size - 0.5
24
+ // scale_factor = size / 2
25
+ template <typename scalar_t>
26
+ static inline scalar_t grid_sampler_unnormalize(scalar_t coord, int64_t size,
27
+ bool align_corners) {
28
+ if (align_corners) {
29
+ // unnormalize coord from [-1, 1] to [0, size - 1]
30
+ return ((coord + 1) / 2) * (size - 1);
31
+ } else {
32
+ // unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
33
+ return ((coord + 1) * size - 1) / 2;
34
+ }
35
+ }
36
+
37
+ // grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize
38
+ // except that it also returns the `d output / d input` via pointer argument
39
+ // `grad_in`.
40
+ // This is useful in the backward pass of grid_sampler.
41
+ template <typename scalar_t>
42
+ static inline scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int64_t size,
43
+ bool align_corners, scalar_t *grad_in) {
44
+ if (align_corners) {
45
+ // unnormalize coord from [-1, 1] to [0, size - 1]
46
+ *grad_in = static_cast<scalar_t>(size - 1) / 2;
47
+ return ((coord + 1) / 2) * (size - 1);
48
+ } else {
49
+ // unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
50
+ *grad_in = static_cast<scalar_t>(size) / 2;
51
+ return ((coord + 1) * size - 1) / 2;
52
+ }
53
+ }
54
+
55
+ // Clips coordinates to between 0 and clip_limit - 1
56
+ template<typename scalar_t>
57
+ static inline scalar_t clip_coordinates(scalar_t in, int64_t clip_limit) {
58
+ return std::min(static_cast<scalar_t>(clip_limit - 1), std::max(in, static_cast<scalar_t>(0)));
59
+ }
60
+
61
+ // clip_coordinates_set_grad works similarly to clip_coordinates except that
62
+ // it also returns the `d output / d input` via pointer argument `grad_in`.
63
+ // This is useful in the backward pass of grid_sampler.
64
+ template<typename scalar_t>
65
+ static inline scalar_t clip_coordinates_set_grad(scalar_t in, int64_t clip_limit,
66
+ scalar_t *grad_in) {
67
+ // Note that it is important for the gradient calculation that borders
68
+ // are considered out of bounds.
69
+ if (in <= static_cast<scalar_t>(0)) {
70
+ *grad_in = static_cast<scalar_t>(0);
71
+ return static_cast<scalar_t>(0);
72
+ } else {
73
+ scalar_t max = static_cast<scalar_t>(clip_limit - 1);
74
+ if (in >= max) {
75
+ *grad_in = static_cast<scalar_t>(0);
76
+ return max;
77
+ } else {
78
+ *grad_in = static_cast<scalar_t>(1);
79
+ return in;
80
+ }
81
+ }
82
+ }
83
+
84
+ // Reflects coordinates until they fall between low and high (inclusive).
85
+ // The bounds are passed as twice their value so that half-integer values
86
+ // can be represented as ints.
87
+ template<typename scalar_t>
88
+ static inline scalar_t reflect_coordinates(scalar_t in, int64_t twice_low,
89
+ int64_t twice_high) {
90
+ if (twice_low == twice_high) {
91
+ return static_cast<scalar_t>(0);
92
+ }
93
+ scalar_t min = static_cast<scalar_t>(twice_low) / 2;
94
+ scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
95
+ in = std::fabs(in - min);
96
+ // `fmod` returns same sign as `in`, which is positive after the `fabs` above.
97
+ scalar_t extra = std::fmod(in, span);
98
+ int flips = static_cast<int>(std::floor(in / span));
99
+ if (flips % 2 == 0) {
100
+ return extra + min;
101
+ } else {
102
+ return span - extra + min;
103
+ }
104
+ }
105
+
106
+ // reflect_coordinates_set_grad works similarly to reflect_coordinates except
107
+ // that it also returns the `d output / d input` via pointer argument
108
+ // `grad_in`.
109
+ // This is useful in the backward pass of grid_sampler.
110
+ template<typename scalar_t>
111
+ static inline scalar_t reflect_coordinates_set_grad(scalar_t in, int64_t twice_low,
112
+ int64_t twice_high, scalar_t *grad_in) {
113
+ if (twice_low == twice_high) {
114
+ *grad_in = static_cast<scalar_t>(0);
115
+ return static_cast<scalar_t>(0);
116
+ }
117
+ int grad_in_mult_;
118
+ scalar_t min = static_cast<scalar_t>(twice_low) / 2;
119
+ scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
120
+ in = in - min;
121
+ if (in < static_cast<scalar_t>(0)) {
122
+ grad_in_mult_ = -1;
123
+ in = -in;
124
+ } else {
125
+ grad_in_mult_ = 1;
126
+ }
127
+ // `fmod` returns same sign as `in`, which is positive after the `if` above.
128
+ scalar_t extra = std::fmod(in, span);
129
+ int flips = static_cast<int>(std::floor(in / span));
130
+ if (flips % 2 == 0) {
131
+ *grad_in = static_cast<scalar_t>(grad_in_mult_);
132
+ return extra + min;
133
+ } else {
134
+ *grad_in = static_cast<scalar_t>(-grad_in_mult_);
135
+ return span - extra + min;
136
+ }
137
+ }
138
+
139
+ // Mapping the out-of-boundary points back into boundary
140
+ // This would only affect padding_mode=border or reflection
141
+ template<typename scalar_t>
142
+ static inline scalar_t compute_coordinates(scalar_t coord, int64_t size,
143
+ GridSamplerPadding padding_mode,
144
+ bool align_corners) {
145
+ if (padding_mode == GridSamplerPadding::Border) {
146
+ // clip coordinates to image borders
147
+ coord = clip_coordinates(coord, size);
148
+ } else if (padding_mode == GridSamplerPadding::Reflection) {
149
+ // reflect coordinates by image borders
150
+ if (align_corners) {
151
+ coord = reflect_coordinates(coord, 0, 2*(size - 1));
152
+ } else {
153
+ coord = reflect_coordinates(coord, -1, 2*size - 1);
154
+ }
155
+ // clip coordinates to image borders
156
+ coord = clip_coordinates(coord, size);
157
+ }
158
+ return coord;
159
+ }
160
+
161
+ // Computes the pixel source index value for a grid coordinate
162
+ template <typename scalar_t>
163
+ static inline scalar_t grid_sampler_compute_source_index(
164
+ scalar_t coord,
165
+ int64_t size,
166
+ GridSamplerPadding padding_mode,
167
+ bool align_corners) {
168
+ coord = grid_sampler_unnormalize(coord, size, align_corners);
169
+ coord = compute_coordinates(coord, size, padding_mode, align_corners);
170
+ return coord;
171
+ }
172
+
173
+ // grid_sampler_compute_source_index_set_grad works similarly to
174
+ // grid_sampler_compute_source_index except that it also returns the
175
+ // `d output / d input` via pointer argument `grad_in`.
176
+ // This is useful in the backward pass of grid_sampler.
177
+ template <typename scalar_t>
178
+ static inline scalar_t grid_sampler_compute_source_index_set_grad(
179
+ scalar_t coord,
180
+ int64_t size,
181
+ GridSamplerPadding padding_mode,
182
+ bool align_corners,
183
+ scalar_t *grad_in) {
184
+ scalar_t grad_clip, grad_refl;
185
+ coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in);
186
+ if (padding_mode == GridSamplerPadding::Border) {
187
+ // clip coordinates to image borders
188
+ coord = clip_coordinates_set_grad(coord, size, &grad_clip);
189
+ *grad_in = (*grad_in) * grad_clip;
190
+ } else if (padding_mode == GridSamplerPadding::Reflection) {
191
+ // reflect coordinates by image borders
192
+ if (align_corners) {
193
+ coord = reflect_coordinates_set_grad(coord, 0, 2*(size - 1), &grad_refl);
194
+ } else {
195
+ coord = reflect_coordinates_set_grad(coord, -1, 2*size - 1, &grad_refl);
196
+ }
197
+ // clip coordinates to image borders
198
+ coord = clip_coordinates_set_grad(coord, size, &grad_clip);
199
+ *grad_in = (*grad_in) * grad_refl * grad_clip;
200
+ }
201
+ return coord;
202
+ }
203
+
204
+ static inline bool within_bounds_2d(int64_t h, int64_t w, int64_t H, int64_t W) {
205
+ return h >= 0 && h < H && w >= 0 && w < W;
206
+ }
207
+
208
+ static inline bool within_bounds_3d(int64_t d, int64_t h, int64_t w, int64_t D, int64_t H, int64_t W) {
209
+ return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W;
210
+ }
211
+
212
+ template<typename scalar_t>
213
+ static inline scalar_t get_value_bounded(
214
+ scalar_t* data,
215
+ scalar_t x,
216
+ scalar_t y,
217
+ int64_t W,
218
+ int64_t H,
219
+ int64_t sW,
220
+ int64_t sH,
221
+ GridSamplerPadding padding_mode,
222
+ bool align_corners) {
223
+
224
+ x = compute_coordinates(x, W, padding_mode, align_corners);
225
+ y = compute_coordinates(y, H, padding_mode, align_corners);
226
+
227
+ int64_t ix = static_cast<int64_t>(x);
228
+ int64_t iy = static_cast<int64_t>(y);
229
+
230
+ if (within_bounds_2d(iy, ix, H, W)) {
231
+ return data[iy * sH + ix * sW];
232
+ }
233
+ return static_cast<scalar_t>(0);
234
+ }
235
+
236
+ template<typename scalar_t>
237
+ static inline void safe_add_2d(scalar_t *data, int64_t h, int64_t w,
238
+ int64_t sH, int64_t sW, int64_t H, int64_t W,
239
+ scalar_t delta) {
240
+ if (within_bounds_2d(h, w, H, W)) {
241
+ data[h * sH + w * sW] += delta;
242
+ }
243
+ }
244
+
245
+ template<typename scalar_t>
246
+ static inline void safe_add_3d(scalar_t *data, int64_t d, int64_t h, int64_t w,
247
+ int64_t sD, int64_t sH, int64_t sW,
248
+ int64_t D, int64_t H, int64_t W,
249
+ scalar_t delta) {
250
+ if (within_bounds_3d(d, h, w, D, H, W)) {
251
+ data[d * sD + h * sH + w * sW] += delta;
252
+ }
253
+ }
254
+
255
+ template<typename scalar_t>
256
+ static inline void add_value_bounded(
257
+ scalar_t* data,
258
+ scalar_t x,
259
+ scalar_t y,
260
+ int64_t W,
261
+ int64_t H,
262
+ int64_t sW,
263
+ int64_t sH,
264
+ scalar_t delta,
265
+ GridSamplerPadding padding_mode,
266
+ bool align_corners) {
267
+
268
+ x = compute_coordinates(x, W, padding_mode, align_corners);
269
+ y = compute_coordinates(y, H, padding_mode, align_corners);
270
+
271
+ int64_t ix = static_cast<int64_t>(x);
272
+ int64_t iy = static_cast<int64_t>(y);
273
+
274
+ safe_add_2d(data, iy, ix, sH, sW, H, W, delta);
275
+ }
276
+
277
+ // Calculate the differential of the cubic convolution, i.e. `d coeff / d x`
278
+ template<typename scalar_t>
279
+ static inline void get_cubic_coefficients_grad(
280
+ scalar_t coeffs[4],
281
+ scalar_t t) {
282
+
283
+ // Must be the same as forward calculation in
284
+ // aten/src/ATen/native/UpSample.h:get_cubic_upsample_coefficients
285
+ scalar_t A = -0.75;
286
+
287
+ scalar_t x;
288
+ x = -1 - t; // 1 < x = |-1 - tx| < 2
289
+ coeffs[0] = (-3 * A * x - 10 * A ) * x - 8 * A;
290
+ x = -t; // x = |0 - tx| <= 1
291
+ coeffs[1] = (-3 * (A + 2) * x - 2 * (A + 3)) * x;
292
+ x = 1 - t; // x = |1 - tx| <= 1
293
+ coeffs[2] = (3 * (A + 2) * x - 2 * (A + 3)) * x;
294
+ x = 2 - t; // 1 < x = |2 - tx| < 2
295
+ coeffs[3] = (3 * A * x - 10 * A) * x + 8 * A;
296
+ }
297
+
298
+ } // namespace at::native
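To make the coordinate convention documented above concrete, here is a small standalone sketch (independent of ATen, illustrative only) of the unnormalize step for both align_corners modes:

// --- illustrative example, not part of this commit ---
#include <cstdio>

// Mirrors grid_sampler_unnormalize above: map a grid value in [-1, 1]
// to a pixel-space coordinate for a dimension of length `size`.
double unnormalize(double coord, long size, bool align_corners) {
  return align_corners ? ((coord + 1) / 2) * (size - 1)   // -1 -> 0, +1 -> size - 1
                       : ((coord + 1) * size - 1) / 2;    // -1 -> -0.5, +1 -> size - 0.5
}

int main() {
  const long size = 4;
  for (double c : {-1.0, 0.0, 1.0}) {
    std::printf("coord %+.1f -> align_corners: %.2f, not aligned: %.2f\n",
                c, unnormalize(c, size, true), unnormalize(c, size, false));
  }
  // align_corners=true:  -1 -> 0.00, 0 -> 1.50, +1 -> 3.00
  // align_corners=false: -1 -> -0.50, 0 -> 1.50, +1 -> 3.50
}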
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/GridSamplerUtils.h ADDED
@@ -0,0 +1,109 @@
1
+ #pragma once
2
+
3
+ // See NOTE: [Tensor vs. TensorBase]
4
+ // https://github.com/pytorch/pytorch/pull/66979
5
+ #include <ATen/core/TensorBase.h>
6
+ #include <ATen/native/TensorProperties.h>
7
+ #include <ATen/native/CanUse32BitIndexMath.h>
8
+
9
+ namespace at::native {
10
+
11
+ namespace detail {
12
+
13
+ enum class GridSamplerInterpolation {Bilinear, Nearest, Bicubic};
14
+ enum class GridSamplerPadding {Zeros, Border, Reflection};
15
+
16
+ } // namespace detail
17
+
18
+ using detail::GridSamplerInterpolation;
19
+ using detail::GridSamplerPadding;
20
+
21
+ namespace {
22
+
23
+ // See NOTE [ grid_sampler Native Functions ].
24
+ void check_grid_sampler_common(
25
+ const TensorBase& input,
26
+ const TensorBase& grid
27
+ ) {
28
+ auto input_opt = input.options();
29
+ auto grid_opt = grid.options();
30
+
31
+ TORCH_CHECK(
32
+ input.defined(),
33
+ "grid_sampler(): expected input to not be undefined");
34
+ TORCH_CHECK(
35
+ grid.defined(),
36
+ "grid_sampler(): expected grid to not be undefined");
37
+ TORCH_CHECK(
38
+ input_opt.device() == grid_opt.device(),
39
+ "grid_sampler(): expected input and grid to be on same device, but input "
40
+ "is on ", input_opt.device(), " and grid is on ", grid_opt.device());
41
+ TORCH_CHECK(
42
+ input_opt.layout() == kStrided && grid_opt.layout() == kStrided,
43
+ "grid_sampler(): expected input and grid to have torch.strided layout, but "
44
+ "input has ", input_opt.layout(), " and grid has ", grid_opt.layout());
45
+ TORCH_CHECK(
46
+ input.size(0) == grid.size(0),
47
+ "grid_sampler(): expected grid and input to have same batch size, but got "
48
+ "input with sizes ", input.sizes(), " and grid with sizes ", grid.sizes());
49
+ TORCH_CHECK(
50
+ grid.size(-1) == input.dim() - 2,
51
+ "grid_sampler(): expected grid to have size ", input.dim() - 2, " in last "
52
+ "dimension, but got grid with sizes ", grid.sizes());
53
+
54
+ for (const auto i : c10::irange(2, input.dim())) {
55
+ TORCH_CHECK(input.size(i) > 0,
56
+ "grid_sampler(): expected input to have non-empty spatial dimensions, "
57
+ "but input has sizes ", input.sizes(), " with dimension ", i, " being "
58
+ "empty");
59
+ }
60
+ }
61
+
62
+ // See NOTE [ grid_sampler Native Functions ].
63
+ void check_grid_sampler_2d(
64
+ const TensorBase& input,
65
+ const TensorBase& grid
66
+ ) {
67
+ TORCH_CHECK(
68
+ input.dim() == 4 && input.dim() == grid.dim(),
69
+ "grid_sampler(): expected 4D input and grid with same number of "
70
+ "dimensions, but got input with sizes ", input.sizes(),
71
+ " and grid with sizes ", grid.sizes());
72
+ }
73
+
74
+ // See NOTE [ grid_sampler Native Functions ].
75
+ void check_grid_sampler_3d(
76
+ const TensorBase& input,
77
+ const TensorBase& grid,
78
+ int64_t interpolation_mode
79
+ ) {
80
+ TORCH_CHECK(
81
+ input.dim() == 5 && input.dim() == grid.dim(),
82
+ "grid_sampler(): expected 5D input and grid with same number of "
83
+ "dimensions, but got input with sizes ", input.sizes(),
84
+ " and grid with sizes ", grid.sizes());
85
+ TORCH_CHECK(
86
+ !(input.dim() == 5 &&
87
+ static_cast<GridSamplerInterpolation>(interpolation_mode) ==
88
+ GridSamplerInterpolation::Bicubic),
89
+ "grid_sampler(): bicubic interpolation only supports 4D input");
90
+ }
91
+
92
+ // See NOTE [ grid_sampler Native Functions ].
93
+ // cudnn does not support inputs larger than 1024.
94
+ bool cond_cudnn_grid_sampler(
95
+ const TensorBase& input,
96
+ const TensorBase& grid
97
+ ) {
98
+ return (
99
+ at::native::cudnn_is_acceptable(input) &&
100
+ at::native::cudnn_is_acceptable(grid) &&
101
+ at::native::canUse32BitIndexMath(input) &&
102
+ at::native::canUse32BitIndexMath(grid) &&
103
+ input.dim() == 4 &&
104
+ input.sym_size(1) <= 1024);
105
+ }
106
+
107
+ } // anonymous namespace
108
+
109
+ } // namespace at::native
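As a quick illustration of the shape contract enforced above (matching batch sizes, and a grid whose last dimension equals input.dim() - 2), a hypothetical standalone check over plain shape vectors:

// --- illustrative example, not part of this commit ---
#include <cstdio>
#include <vector>

// Mimics the core conditions of check_grid_sampler_common / check_grid_sampler_2d
// using plain shape vectors instead of TensorBase (illustrative only).
bool grid_sampler_2d_shapes_ok(const std::vector<long>& input, const std::vector<long>& grid) {
  return input.size() == 4 && grid.size() == 4 &&          // input: N,C,H,W  grid: N,H_out,W_out,2
         input[0] == grid[0] &&                            // same batch size
         grid[3] == static_cast<long>(input.size()) - 2;   // last grid dim == spatial rank (2)
}

int main() {
  std::printf("%d\n", grid_sampler_2d_shapes_ok({2, 3, 8, 8}, {2, 5, 5, 2}));  // 1: valid
  std::printf("%d\n", grid_sampler_2d_shapes_ok({2, 3, 8, 8}, {2, 5, 5, 3}));  // 0: wrong last dim
}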
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Histogram.h ADDED
@@ -0,0 +1,16 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+
6
+ namespace at::native {
7
+
8
+ using histogramdd_fn = void(*)(const Tensor&, const c10::optional<Tensor>&, bool, Tensor&, const TensorList&);
9
+ using histogramdd_linear_fn = void(*)(const Tensor&, const c10::optional<Tensor>&, bool, Tensor&, const TensorList&, bool);
10
+ using histogram_select_outer_bin_edges_fn = void(*)(const Tensor& input, const int64_t N, std::vector<double> &leftmost_edges, std::vector<double> &rightmost_edges);
11
+
12
+ DECLARE_DISPATCH(histogramdd_fn, histogramdd_stub);
13
+ DECLARE_DISPATCH(histogramdd_linear_fn, histogramdd_linear_stub);
14
+ DECLARE_DISPATCH(histogram_select_outer_bin_edges_fn, histogram_select_outer_bin_edges_stub);
15
+
16
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/IndexingUtils.h ADDED
@@ -0,0 +1,160 @@
1
+ #pragma once
2
+ #include <ATen/ExpandUtils.h>
3
+ #include <ATen/native/CanUse32BitIndexMath.h>
4
+ #include <ATen/native/TensorIterator.h>
5
+ #include <ATen/core/IListRef.h>
6
+ #include <c10/util/irange.h>
7
+
8
+ namespace at::native {
9
+
10
+ [[noreturn]]
11
+ static void invalid_mask(const Tensor & self, int64_t idx, const Tensor & mask, int64_t maskIdx) {
12
+ TORCH_CHECK_INDEX(false, "The shape of the mask ", mask.sizes(), " at index ", maskIdx,
13
+ " does not match the shape of the indexed tensor ", self.sizes(), " at index ", idx);
14
+ }
15
+
16
+
17
+ static C10_UNUSED std::vector<Tensor> expandTensors(const Tensor & self, IOptTensorListRef indices) {
18
+ // If indices come in as ByteTensor or BoolTensor (masks), expand them into the equivalent indexing by LongTensors
19
+ std::vector<Tensor> result;
20
+ for (const auto& index_opt : indices) {
21
+ if (!index_opt.has_value()) {
22
+ result.emplace_back();
23
+ } else {
24
+ const auto& index = *index_opt;
25
+ if (index.scalar_type() == kByte || index.scalar_type() == kBool) {
26
+ if (index.scalar_type() == kByte) {
27
+ TORCH_WARN("indexing with dtype torch.uint8 is now deprecated," \
28
+ " please use a dtype torch.bool instead.");
29
+ }
30
+ // The sizes of the ByteTensor mask or bool tensor must match the sizes of the
31
+ // corresponding dimensions in self
32
+ for (const auto j : c10::irange(index.dim())) {
33
+ int64_t srcIdx = static_cast<int64_t>(result.size() + j);
34
+ if (index.size(j) != self.size(srcIdx)) {
35
+ invalid_mask(self, srcIdx, index, j);
36
+ }
37
+ }
38
+ // Replace with nonzeros
39
+ auto nonzero = index.nonzero();
40
+ for (const auto j : c10::irange(index.dim())) {
41
+ result.emplace_back(nonzero.select(1, j));
42
+ }
43
+ } else {
44
+ result.emplace_back(index);
45
+ }
46
+ }
47
+ }
48
+ return result;
49
+ }
50
+
51
+ static C10_UNUSED void checkIndexTensorTypes(IOptTensorListRef indices, bool allow_int=false) {
52
+ for (const auto& tensor : indices) {
53
+ if (tensor.has_value() && tensor->defined()) {
54
+ auto scalarType = tensor->scalar_type();
55
+ if (allow_int) {
56
+ if (scalarType != kLong && scalarType != kByte && scalarType != kBool && scalarType != kInt) {
57
+ TORCH_CHECK_INDEX(false, "tensors used as indices must be long, int, byte or bool tensors");
58
+ }
59
+ } else {
60
+ if (scalarType != kLong && scalarType != kByte && scalarType != kBool) {
61
+ TORCH_CHECK_INDEX(false, "tensors used as indices must be long, byte or bool tensors");
62
+ }
63
+ }
64
+ }
65
+ }
66
+ }
67
+
68
+ inline torch::List<c10::optional<Tensor>> toListOfOptionalTensors(ArrayRef<Tensor> list) {
69
+ torch::List<c10::optional<Tensor>> result;
70
+ result.reserve(list.size());
71
+ for (const Tensor& a : list) {
72
+ result.push_back(a);
73
+ }
74
+ return result;
75
+ }
76
+
77
+ inline torch::List<c10::optional<Tensor>> toListOfOptionalTensors(ArrayRef<IValue> list) {
78
+ torch::List<c10::optional<Tensor>> result;
79
+ result.reserve(list.size());
80
+ for (const IValue& a : list) {
81
+ result.push_back(a.isTensor() ? c10::optional<Tensor>(a.toTensor()) : c10::optional<Tensor>());
82
+ }
83
+ return result;
84
+ }
85
+
86
+ static C10_UNUSED bool hasContiguousSubspace(TensorList tl) {
87
+ // true if all the non-null tensors are adjacent
88
+ auto isDefined = [](const Tensor & tensor){ return tensor.defined(); };
89
+ auto isNull = [](const Tensor & tensor){ return !tensor.defined(); };
90
+ auto start = std::find_if(tl.begin(), tl.end(), isDefined);
91
+ auto stop = std::find_if(tl.rbegin(), tl.rend(), isDefined);
92
+ auto it = std::find_if(start, stop.base(), isNull);
93
+ return it == stop.base();
94
+ }
95
+
96
+
97
+ // Transposes the tensor and indices together so that all the non-null indices
98
+ // index the first k dimensions of the tensor. Returns the transposed tensor
99
+ // and the reordered indices. For example:
100
+ // transposeToFront(tensor, {nullptr, a, nullptr, b})
101
+ // returns
102
+ // tensor.permute([1, 3, 0, 2]), {a, b, nullptr, nullptr}
103
+ static C10_UNUSED std::tuple<Tensor, std::vector<Tensor>>
104
+ transposeToFront(const Tensor& self, TensorList indices) {
105
+ std::vector<int64_t> dims;
106
+ std::vector<Tensor> transposedIndices;
107
+ dims.reserve(self.dim());
108
+ for (const auto i : c10::irange(self.dim())) {
109
+ if (indices[i].defined()) {
110
+ dims.push_back(i);
111
+ transposedIndices.emplace_back(indices[i]);
112
+ }
113
+ }
114
+ for (const auto i : c10::irange(self.dim())) {
115
+ if (!indices[i].defined()) {
116
+ dims.push_back(i);
117
+ transposedIndices.emplace_back();
118
+ }
119
+ }
120
+ return std::make_tuple(self.permute(dims), std::move(transposedIndices));
121
+ }
122
+
123
+ inline std::tuple<Tensor, std::vector<Tensor>, std::vector<int64_t>>
124
+ transposeToFrontAndInvPerm(const Tensor& self, TensorList indices) {
125
+ std::vector<int64_t> dims;
126
+ std::vector<int64_t> invPerm;
127
+ std::vector<Tensor> transposedIndices;
128
+ dims.reserve(self.dim());
129
+ invPerm.resize(self.dim());
130
+ for (const auto i : c10::irange(self.dim())) {
131
+ if (indices[i].defined()) {
132
+ dims.push_back(i);
133
+ transposedIndices.emplace_back(indices[i]);
134
+ }
135
+ }
136
+ for (const auto i : c10::irange(self.dim())) {
137
+ if (!indices[i].defined()) {
138
+ dims.push_back(i);
139
+ transposedIndices.emplace_back();
140
+ }
141
+ }
142
+ for (const auto i : c10::irange(self.dim())) {
143
+ invPerm[dims[i]] = i;
144
+ }
145
+ return std::make_tuple(self.permute(dims), std::move(transposedIndices), std::move(invPerm));
146
+ }
147
+
148
+ struct AdvancedIndex {
149
+ AdvancedIndex(const Tensor& src, TensorList indices);
150
+
151
+ Tensor src;
152
+ std::vector<Tensor> indices;
153
+ DimVector indexed_sizes;
154
+ DimVector indexed_strides;
155
+ int64_t dims_before;
156
+ int64_t dims_after;
157
+ };
158
+
159
+
160
+ } //namespace at::native
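The expandTensors helper above turns a byte/bool mask into per-dimension integer index tensors via nonzero(); a rough standalone sketch of that idea on a 2-D mask (plain containers instead of Tensors, helper name hypothetical):

// --- illustrative example, not part of this commit ---
#include <cstdio>
#include <utility>
#include <vector>

// For a 2-D boolean mask, collect the (row, col) coordinates of true entries;
// this is what index.nonzero().select(1, j) yields per dimension above.
std::pair<std::vector<long>, std::vector<long>>
mask_to_indices(const std::vector<std::vector<bool>>& mask) {
  std::vector<long> rows, cols;
  for (long i = 0; i < static_cast<long>(mask.size()); ++i)
    for (long j = 0; j < static_cast<long>(mask[i].size()); ++j)
      if (mask[i][j]) { rows.push_back(i); cols.push_back(j); }
  return {rows, cols};
}

int main() {
  auto [rows, cols] = mask_to_indices({{true, false}, {false, true}});
  for (size_t k = 0; k < rows.size(); ++k)
    std::printf("(%ld, %ld)\n", rows[k], cols[k]);  // (0, 0) and (1, 1)
}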
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Lerp.h ADDED
@@ -0,0 +1,46 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <ATen/OpMathType.h>
5
+ #include <ATen/TensorIterator.h>
6
+ #include <c10/core/Scalar.h>
7
+
8
+ namespace at::native {
9
+
10
+ template <typename scalar_t>
11
+ C10_HOST_DEVICE C10_ALWAYS_INLINE bool is_lerp_weight_small(scalar_t weight) {
12
+ return std::abs(weight) < scalar_t(0.5);
13
+ }
14
+ template <typename scalar_t>
15
+ C10_HOST_DEVICE C10_ALWAYS_INLINE bool is_lerp_weight_small(c10::complex<scalar_t> weight) {
16
+ // Avoid the sqrt in abs(weight)
17
+ return (weight.real() * weight.real() + weight.imag() * weight.imag()) < scalar_t(0.25);
18
+ }
19
+
20
+ template <typename scalar_t, typename weight_t>
21
+ C10_HOST_DEVICE C10_ALWAYS_INLINE scalar_t lerp(scalar_t self_, scalar_t end_, weight_t weight_) {
22
+ using opmath_t = at::opmath_type<scalar_t>;
23
+ using opmath_weight_t = at::opmath_type<weight_t>;
24
+
25
+ opmath_t self = self_;
26
+ opmath_t end = end_;
27
+ opmath_weight_t weight = weight_;
28
+
29
+ // Conditional for better numeric. This has been discussed in
30
+ // https://github.com/pytorch/pytorch/pull/18871
31
+ return is_lerp_weight_small(weight)
32
+ ? self + weight * (end - self)
33
+ : end - (end - self) * (opmath_t(1) - weight);
34
+ }
35
+
36
+ using lerp_fn_scalar = void (*)(
37
+ at::TensorIteratorBase& iter,
38
+ const Scalar& weight);
39
+
40
+ using lerp_fn_tensor = void (*)(
41
+ at::TensorIteratorBase& iter);
42
+
43
+ DECLARE_DISPATCH(lerp_fn_scalar, lerp_kernel_scalar_weight);
44
+ DECLARE_DISPATCH(lerp_fn_tensor, lerp_kernel_tensor_weight);
45
+
46
+ } // namespace at::native
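A standalone numeric sketch of the lerp formula above, including the small-weight branch that the linked PR introduced for better numerical behavior (plain doubles, illustrative only):

// --- illustrative example, not part of this commit ---
#include <cmath>
#include <cstdio>

// Mirrors lerp() above for doubles: use self + w * (end - self) when |w| < 0.5,
// otherwise the algebraically equivalent end - (end - self) * (1 - w).
double lerp_double(double self, double end, double w) {
  return std::abs(w) < 0.5 ? self + w * (end - self)
                           : end - (end - self) * (1.0 - w);
}

int main() {
  std::printf("%g\n", lerp_double(0.0, 10.0, 0.25));  // 2.5  (small-weight branch)
  std::printf("%g\n", lerp_double(0.0, 10.0, 0.75));  // 7.5  (large-weight branch)
  std::printf("%g\n", lerp_double(0.0, 10.0, 1.0));   // 10   (endpoint reproduced exactly)
}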
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/LossMulti.h ADDED
@@ -0,0 +1,72 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/AccumulateType.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/TensorUtils.h>
6
+
7
+ namespace at::native {
8
+ namespace {
9
+ static C10_UNUSED void multilabel_margin_loss_shape_check(
10
+ int64_t& nframe,
11
+ int64_t& dim,
12
+ const int64_t& ndims,
13
+ const Tensor& input,
14
+ const Tensor& target) {
15
+ TORCH_CHECK(
16
+ (ndims == 2 && input.size(1) != 0) || (ndims == 1 && input.size(0) != 0) || ndims == 0,
17
+ "Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
18
+ input.sizes());
19
+
20
+ if (ndims <= 1) {
21
+ nframe = 1;
22
+ dim = ndims == 0 ? 1 : input.size(0);
23
+ TORCH_CHECK(
24
+ target.dim() <= 1 && target.numel() == dim,
25
+ "inconsistent target size: ", target.sizes(), " for input of size: ",
26
+ input.sizes());
27
+ } else {
28
+ nframe = input.size(0);
29
+ dim = input.size(1);
30
+ TORCH_CHECK(
31
+ target.dim() == 2 && target.size(0) == nframe &&
32
+ target.size(1) == dim,
33
+ "inconsistent target size: ", target.sizes(), " for input of size: ",
34
+ input.sizes());
35
+ }
36
+ }
37
+
38
+ static C10_UNUSED void multi_margin_loss_shape_check(
39
+ int64_t& nframe,
40
+ int64_t& dim,
41
+ const int64_t& ndims,
42
+ const Tensor& input,
43
+ const Tensor& target,
44
+ const c10::optional<Tensor>& weight) {
45
+ TORCH_CHECK(
46
+ (ndims == 2 && input.size(1) != 0) || (ndims == 1 && input.size(0) != 0) || ndims == 0,
47
+ "Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
48
+ input.sizes());
49
+
50
+ if (ndims <= 1) {
51
+ nframe = 1;
52
+ dim = ndims == 0 ? 1 : input.size(0);
53
+ } else {
54
+ nframe = input.size(0);
55
+ dim = input.size(1);
56
+ }
57
+
58
+ TORCH_CHECK(
59
+ target.dim() <= 1 && target.numel() == nframe,
60
+ "inconsistent target size, expected ", nframe, " but got ",
61
+ target.sizes());
62
+ if (weight && weight->defined()) {
63
+ TORCH_CHECK(
64
+ weight->dim() <= 1 && weight->numel() == dim,
65
+ "inconsistent weight size, expected ", dim, " but got ",
66
+ weight->sizes());
67
+ }
68
+ }
69
+
70
+
71
+ } // anonymous namespace
72
+ } // namespace at::native
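To summarize the shape rules enforced above: for a 2-D input (N, C), multilabel_margin_loss expects an (N, C) target while multi_margin_loss expects a length-N target; 0-D/1-D inputs are treated as a single frame. A small standalone sketch of the nframe/dim derivation (illustrative only, helper name hypothetical):

// --- illustrative example, not part of this commit ---
#include <cstdio>
#include <vector>

// Derives (nframe, dim) from an input shape the same way the checks above do.
void frame_and_dim(const std::vector<long>& sizes, long& nframe, long& dim) {
  if (sizes.size() <= 1) {            // 0-D or 1-D input: a single frame
    nframe = 1;
    dim = sizes.empty() ? 1 : sizes[0];
  } else {                            // 2-D input: (N, C)
    nframe = sizes[0];
    dim = sizes[1];
  }
}

int main() {
  long nframe = 0, dim = 0;
  frame_and_dim({4, 10}, nframe, dim);
  std::printf("nframe=%ld dim=%ld\n", nframe, dim);  // nframe=4 dim=10
}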
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Math.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h ADDED
@@ -0,0 +1,71 @@
1
+ #pragma once
2
+
3
+ namespace at {
4
+ // views and their in-place version ops
5
+ #define TORCH_VIEW_FNS(m) \
6
+ m.impl("as_strided_", torch::CppFunction::makeFallthrough()); \
7
+ m.impl("detach", torch::CppFunction::makeFallthrough()); \
8
+ m.impl("detach_", torch::CppFunction::makeFallthrough()); \
9
+ m.impl("diagonal", torch::CppFunction::makeFallthrough()); \
10
+ m.impl("expand", torch::CppFunction::makeFallthrough()); \
11
+ m.impl("expand_as", torch::CppFunction::makeFallthrough()); \
12
+ m.impl("movedim.int", torch::CppFunction::makeFallthrough()); \
13
+ m.impl("movedim.intlist", torch::CppFunction::makeFallthrough()); \
14
+ m.impl("narrow", torch::CppFunction::makeFallthrough()); \
15
+ m.impl("permute", torch::CppFunction::makeFallthrough()); \
16
+ m.impl("select.Dimname", torch::CppFunction::makeFallthrough()); \
17
+ m.impl("select.int", torch::CppFunction::makeFallthrough()); \
18
+ m.impl("squeeze", torch::CppFunction::makeFallthrough()); \
19
+ m.impl("squeeze_", torch::CppFunction::makeFallthrough()); \
20
+ m.impl("transpose.int", torch::CppFunction::makeFallthrough()); \
21
+ m.impl("transpose.Dimname", torch::CppFunction::makeFallthrough()); \
22
+ m.impl("transpose_", torch::CppFunction::makeFallthrough()); \
23
+ m.impl("t", torch::CppFunction::makeFallthrough()); \
24
+ m.impl("t_", torch::CppFunction::makeFallthrough()); \
25
+ m.impl("real", torch::CppFunction::makeFallthrough()); \
26
+ m.impl("imag", torch::CppFunction::makeFallthrough()); \
27
+ m.impl("view_as_real", torch::CppFunction::makeFallthrough()); \
28
+ m.impl("unflatten.int", torch::CppFunction::makeFallthrough()); \
29
+ m.impl("unflatten.Dimname", torch::CppFunction::makeFallthrough()); \
30
+ m.impl("unfold", torch::CppFunction::makeFallthrough()); \
31
+ m.impl("unsqueeze", torch::CppFunction::makeFallthrough()); \
32
+ m.impl("unsqueeze_", torch::CppFunction::makeFallthrough()); \
33
+ m.impl("view_as", torch::CppFunction::makeFallthrough()); \
34
+ m.impl("unbind.int", torch::CppFunction::makeFallthrough()); \
35
+ m.impl("unbind.Dimname", torch::CppFunction::makeFallthrough()); \
36
+ m.impl("split.Tensor", torch::CppFunction::makeFallthrough()); \
37
+ m.impl("split_with_sizes", torch::CppFunction::makeFallthrough()); \
38
+ m.impl("swapaxes", torch::CppFunction::makeFallthrough()); \
39
+ m.impl("swapdims", torch::CppFunction::makeFallthrough()); \
40
+ m.impl("chunk", torch::CppFunction::makeFallthrough()); \
41
+ m.impl("reshape", torch::CppFunction::makeFallthrough()); \
42
+ m.impl("alias", torch::CppFunction::makeFallthrough()); \
43
+ m.impl("hsplit.int", torch::CppFunction::makeFallthrough()); \
44
+ m.impl("hsplit.array", torch::CppFunction::makeFallthrough()); \
45
+ m.impl("dsplit.int", torch::CppFunction::makeFallthrough()); \
46
+ m.impl("dsplit.array", torch::CppFunction::makeFallthrough()); \
47
+ m.impl("vsplit.int", torch::CppFunction::makeFallthrough()); \
48
+ m.impl("vsplit.array", torch::CppFunction::makeFallthrough()); \
49
+ m.impl("conj", torch::CppFunction::makeFallthrough()); \
50
+ m.impl("_conj", torch::CppFunction::makeFallthrough()); \
51
+ m.impl("_unsafe_view", torch::CppFunction::makeFallthrough()); \
52
+ m.impl("resize_", torch::CppFunction::makeFallthrough());
53
+
54
+ #define TENSOR_UTILITIES_AND_CONSTRUCTORS(m) \
55
+ m.impl("empty_like", torch::CppFunction::makeFallthrough()); \
56
+ m.impl("empty.memory_format", torch::CppFunction::makeFallthrough()); \
57
+ m.impl("empty.out", torch::CppFunction::makeFallthrough()); \
58
+ m.impl("empty_strided", torch::CppFunction::makeFallthrough()); \
59
+ m.impl("full_like", torch::CppFunction::makeFallthrough()); \
60
+ m.impl("stride.int", torch::CppFunction::makeFallthrough()); \
61
+ m.impl("stride.Dimname", torch::CppFunction::makeFallthrough()); \
62
+ m.impl("size.int", torch::CppFunction::makeFallthrough()); \
63
+ m.impl("size.Dimname", torch::CppFunction::makeFallthrough()); \
64
+ m.impl("is_complex", torch::CppFunction::makeFallthrough()); \
65
+ m.impl("is_floating_point", torch::CppFunction::makeFallthrough()); \
66
+ m.impl("requires_grad_", torch::CppFunction::makeFallthrough());
67
+ }
68
+
69
+ #define TORCH_VIEW_FNS_NATIVE_FN_REGISTRATION(m) \
70
+ m.impl("as_strided", torch::CppFunction::makeFallthrough()); \
71
+ m.impl("view", torch::CppFunction::makeFallthrough());
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/MathBitsFallback.h ADDED
@@ -0,0 +1,157 @@
1
+ #include <ATen/core/Tensor.h>
2
+ #include <ATen/core/dispatch/Dispatcher.h>
3
+ #include <ATen/core/op_registration/op_registration.h>
4
+ #include <ATen/native/UnaryOps.h>
5
+ #include <ATen/native/Resize.h>
6
+ #include <c10/util/irange.h>
7
+ #include <torch/library.h>
8
+
9
+ #ifndef AT_PER_OPERATOR_HEADERS
10
+ #include <ATen/Functions.h>
11
+ #else
12
+ #include <ATen/ops/clone.h>
13
+
14
+ #include <utility>
15
+ #endif
16
+
17
+ namespace at::native {
18
+ // This fallback should only be used for operations that are self inverse and have a corresponding tensor
19
+ // bit (internally implemented using DispatchKey) to maintain the state on tensor using tensor bit.
20
+ // Currently there are two tensor bits that trigger this fallback: conjugate bit and negative bit.
21
+ // Conjugate bit is set on a tensor when `.conj()` is called and neg bit is set on a tensor when `.conj().imag` is called.
22
+
23
+ // NOTE: To use this fallback, `clone` and `copy_` should fully understand and be able to correctly handle the semantic of your math bit.
24
+ struct MathOpFallback {
25
+ MathOpFallback(DispatchKey key_, string op_name_) : key(key_), op_name(std::move(op_name_)) {}
26
+ virtual bool is_bit_set(const Tensor&) = 0;
27
+ void fallback_impl(const c10::OperatorHandle& op, DispatchKeySet dispatch_keys, torch::jit::Stack* stack) {
28
+ /*
29
+ Situations to handle:
30
+ 1. Out-of-place operation. Easy: materialize all inputs and
31
+ call it a day.
32
+ 2. Inplace operation. Desugar x.add_(2) into x.conj_().add_(2).conj_().
33
+ Materialize other inputs as in (1).
34
+ 3. out= operation. Desugar add(x, 2, out=y) into y.copy_(add(x, 2))
35
+ Materialize other inputs as in (1).
36
+
37
+ It is important to be able to tell if we READ from an argument and if we
38
+ WRITE to an argument. Conservative approach is to assume that we always
39
+ READ from an argument, but in out= operations you can skip
40
+ conjugating inputs on entry that never get used. In the current schema we
41
+ can't easily tell if the operation is in in-place or out= operation.
42
+
43
+ Note:
44
+ 1. Mutable tensorlists containing tensors whose math bit set to true are disallowed.
45
+ 2. Mutable tensors with math bit set to true are unconditionally cloned to ensure
46
+ correct behavior in the case when the mutable tensor shares memory with non mutable arguments.
47
+
48
+ If we were to in-place resolve the math bit for mutable inputs, then the non-mutable inputs sharing partial or full memory
49
+ with these mutable inputs would read into wrong values in the following cases:
50
+ 1. Non mutable inputs have their math bit set to false.
51
+ 2. Math bit for mutable input(s) is resolved before the non mutable inputs (with bit set to true and sharing memory
52
+ with one or more mutable arg(s)) are cloned.
53
+ At the end, the final value of the mutable arguments from the stack are copied into the original input mutable tensor inputs.
54
+ */
55
+ const auto& arguments = op.schema().arguments();
56
+ const auto num_arguments = arguments.size();
57
+ const auto stack_start = stack->size() - num_arguments;
58
+
59
+ c10::optional<bool> is_write;
60
+ for (const auto i : c10::irange(num_arguments)) {
61
+ // Three possible states:
62
+ // 1. alias_info has no value --> out-of-place operation
63
+ // 2. alias_info does have a value, alias_info->is_write=True --> in-place or out= operation
64
+ // 3. alias_info does have a value, alias_info->is_write=False --> view operation
65
+ const AliasInfo* alias_info = arguments[i].alias_info();
66
+ if (alias_info != nullptr) {
67
+ if (is_write.has_value()) {
68
+ TORCH_CHECK(*is_write == alias_info->isWrite(),
69
+ "Unsupported operator for ", op_name, " fallback: ", op.schema().name(),
70
+ op_name, " fallback doesn't work for operators with a mix "
71
+ "mutable and non-mutable inputs that alias with outputs, "
72
+ "this must be implemented manually. "
73
+ "If you got this error on a core op, please report a bug to PyTorch.");
74
+ } else {
75
+ is_write = alias_info->isWrite();
76
+ }
77
+ }
78
+ }
79
+
80
+ if (is_write.has_value() && !*is_write) {
81
+ // We assume that view operators automatically handle the math bit
82
+ // correctly by propagating the dispatch key in key_set.
83
+ // This is not necessarily always right, so you should test these cases.
84
+ op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack);
85
+ return;
86
+ }
87
+
88
+ // Mutable inputs with math bit set to True and their clones
89
+ std::vector<std::pair<Tensor, Tensor>> mutable_inputs_with_their_clones;
90
+ for (const auto i : c10::irange(num_arguments)) {
91
+ auto& ivalue = (*stack)[stack_start + i];
92
+ if (!(ivalue.isTensor() || ivalue.isTensorList())) {
93
+ continue;
94
+ }
95
+ const auto& argument = arguments[i];
96
+ bool mut_arg = false;
97
+ if (argument.alias_info()) {
98
+ // Was already tested by is_write loop above
99
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(argument.alias_info()->isWrite());
100
+ mut_arg = true;
101
+ }
102
+ if (ivalue.isTensor()) {
103
+ if (!is_bit_set(ivalue.toTensor())) {
104
+ continue;
105
+ }
106
+ auto tensor = std::move(ivalue).toTensor();
107
+ auto resolved_tensor = at::clone(tensor);
108
+ if (mut_arg) {
109
+ TORCH_CHECK(mutable_inputs_with_their_clones.empty(), op_name, " fallback does not support operators with more than one mutable tensors with ",
110
+ op_name, "bit set to true.");
111
+ mutable_inputs_with_their_clones.emplace_back(std::move(tensor), resolved_tensor);
112
+ }
113
+ (*stack)[stack_start + i] = std::move(resolved_tensor);
114
+ } else if (ivalue.isTensorList()) {
115
+ auto tensors = std::move(ivalue).toTensorList();
116
+ for(const auto j : c10::irange(tensors.size())) {
117
+ const auto& tensor = tensors[j];
118
+ if (!is_bit_set(tensor)) {
119
+ continue;
120
+ }
121
+ TORCH_CHECK(!mut_arg, " fallback doesn't currently support mutable TensorLists with ",
122
+ op_name, " inputs. Please materialize all the ", op_name, " input tensor(s) in the mutable TensorList inputs before calling ",
123
+ op.schema().name());
124
+ tensors[j] = at::clone(tensor);
125
+ }
126
+ (*stack)[stack_start + i] = std::move(tensors);
127
+ }
128
+ }
129
+
130
+ op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack);
131
+
132
+ TORCH_INTERNAL_ASSERT(mutable_inputs_with_their_clones.size() <= 1);
133
+
134
+ for (std::pair<Tensor, Tensor> mut_tensors: mutable_inputs_with_their_clones) {
135
+ auto& mutable_input = mut_tensors.first;
136
+ auto& cloned_mutable_input = mut_tensors.second;
137
+ auto& ivalue = (*stack)[stack_start];
138
+ auto returned_output = std::move(ivalue).toTensor();
139
+
140
+ // sanity check to ensure that the tensor in stack aliases the cloned_mutable_input
141
+ TORCH_INTERNAL_ASSERT(cloned_mutable_input.is_same(returned_output));
142
+
143
+ // necessary for out= arg
144
+ at::native::resize_output(mutable_input, returned_output.sizes());
145
+
146
+ mutable_input.copy_(returned_output);
147
+ (*stack)[stack_start] = std::move(mutable_input);
148
+ }
149
+ }
150
+
151
+ virtual ~MathOpFallback() = default;
152
+
153
+ DispatchKey key;
154
+ string op_name;
155
+ };
156
+
157
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/NonSymbolicBC.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <c10/util/irange.h>
4
+ #include <ATen/core/IListRef.h>
5
+
6
+ namespace at::native {
7
+ // This file contains non-symbolic signatures for ops that we have sym-intified the signature of.
8
+ // However, in certain cases (such as static runtime), we call the native versions of the ops directly.
9
+ // In those cases, we will duplicate the signature here with non-symbolic ints, and also duplicate the C++ implementation.
10
+ TORCH_API at::Tensor reshape(const at::Tensor& self, at::IntArrayRef proposed_shape);
11
+ TORCH_API at::Tensor narrow(const at::Tensor& self, int64_t dim, int64_t start, int64_t length);
12
+ TORCH_API at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt, c10::optional<bool> is_coalesced=c10::nullopt);
13
+ TORCH_API at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor>& weight_opt, int64_t reduction, int64_t ignore_index);
14
+ TORCH_API at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor>& weight_opt, int64_t reduction, int64_t ignore_index);
15
+ // The below ops don't get a duplicated C++ implementation.
16
+ // They are backward ops, which make them very unlikely to be called directly
17
+ // by external code (at::native::trace_backward).
18
+ // They get their own declaration for BC purposes however.
19
+ TORCH_API at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1);
20
+ TORCH_API at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1);
21
+ TORCH_API at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim);
22
+ TORCH_API at::Tensor trace_backward(const at::Tensor & grad, at::IntArrayRef sizes);
23
+ TORCH_API at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index);
24
+ TORCH_API at::Tensor select(const at::Tensor& self, int64_t dim, int64_t index);
25
+ TORCH_API std::vector<Tensor> tensor_split(const Tensor& self, IntArrayRef indices, int64_t dim);
26
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+
3
+ #include <ATen/TensorIterator.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+
6
+ namespace at::native {
7
+
8
+ using renorm_scale_factor_fn = void (*) (TensorIteratorBase& iter, double maxnorm);
9
+ DECLARE_DISPATCH(renorm_scale_factor_fn, renorm_scale_factor_stub);
10
+
11
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Padding.h ADDED
@@ -0,0 +1,62 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+
6
+ namespace at::native {
7
+
8
+ using padding_fn = void (*)(const Tensor&, const Tensor&, IntArrayRef);
9
+
10
+ // reflection padding
11
+ DECLARE_DISPATCH(padding_fn, reflection_pad1d_kernel);
12
+ DECLARE_DISPATCH(padding_fn, reflection_pad1d_backward_kernel);
13
+ DECLARE_DISPATCH(padding_fn, reflection_pad2d_kernel);
14
+ DECLARE_DISPATCH(padding_fn, reflection_pad2d_backward_kernel);
15
+ DECLARE_DISPATCH(padding_fn, reflection_pad3d_kernel);
16
+ DECLARE_DISPATCH(padding_fn, reflection_pad3d_backward_kernel);
17
+
18
+ // replication padding
19
+ DECLARE_DISPATCH(padding_fn, replication_pad1d_kernel);
20
+ DECLARE_DISPATCH(padding_fn, replication_pad1d_backward_kernel);
21
+ DECLARE_DISPATCH(padding_fn, replication_pad2d_kernel);
22
+ DECLARE_DISPATCH(padding_fn, replication_pad2d_backward_kernel);
23
+ DECLARE_DISPATCH(padding_fn, replication_pad3d_kernel);
24
+ DECLARE_DISPATCH(padding_fn, replication_pad3d_backward_kernel);
25
+
26
+ namespace padding {
27
+
28
+ template <int dim>
29
+ static inline void check_valid_input(const Tensor& input, IntArrayRef padding) {
30
+
31
+ TORCH_CHECK(padding.size() == 2 * dim,
32
+ "padding size is expected to be ", 2 * dim,
33
+ ", but got: ", padding.size());
34
+
35
+ int input_dim = input.dim();
36
+
37
+ bool is_batch_mode = input_dim == (dim + 2);
38
+
39
+ bool valid_batch_mode = is_batch_mode;
40
+ bool valid_non_batch_mode = !is_batch_mode;
41
+
42
+ if (is_batch_mode) {
43
+ // allow batch size of 0-dim.
44
+ for (const auto d : c10::irange(1, input_dim)) {
45
+ valid_batch_mode = valid_batch_mode && input.size(d) != 0;
46
+ }
47
+ } else {
48
+ for (const auto d : c10::irange(0, input_dim)) {
49
+ valid_non_batch_mode = valid_non_batch_mode && input.size(d) != 0;
50
+ }
51
+ }
52
+
53
+ // allow empty batch size but not other dimensions.
54
+ TORCH_CHECK(valid_batch_mode || valid_non_batch_mode,
55
+ "Expected ", dim + 1, "D or ", dim + 2,
56
+ "D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ",
57
+ input.sizes());
58
+ }
59
+
60
+ } // namespace padding
61
+
62
+ } // at::native
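The padding argument validated above always has length 2 * dim (a left/right pair per padded dimension); for the 1-D case the padded length is simply input_len + pad_left + pad_right. A hedged standalone sketch of that arithmetic (helper name hypothetical, not part of the header):

// --- illustrative example, not part of this commit ---
#include <cstdio>
#include <vector>

// For 1-D reflection/replication padding with padding = {pad_left, pad_right},
// the padded length is input_len + pad_left + pad_right.
long padded_length_1d(long input_len, const std::vector<long>& padding) {
  if (padding.size() != 2) return -1;  // mirrors the padding.size() == 2 * dim check (dim == 1)
  return input_len + padding[0] + padding[1];
}

int main() {
  std::printf("%ld\n", padded_length_1d(8, {2, 3}));  // 13
}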
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/PixelShuffle.h ADDED
@@ -0,0 +1,47 @@
1
+ #include <ATen/core/Tensor.h>
2
+ #include <c10/util/Exception.h>
3
+
4
+ namespace at {
5
+ namespace native {
6
+
7
+ inline void check_pixel_shuffle_shapes(const Tensor& self, int64_t upscale_factor) {
8
+ TORCH_CHECK(self.dim() >= 3,
9
+ "pixel_shuffle expects input to have at least 3 dimensions, but got input with ",
10
+ self.dim(), " dimension(s)");
11
+ TORCH_CHECK(upscale_factor > 0,
12
+ "pixel_shuffle expects a positive upscale_factor, but got ",
13
+ upscale_factor);
14
+ int64_t c = self.size(-3);
15
+ int64_t upscale_factor_squared = upscale_factor * upscale_factor;
16
+ TORCH_CHECK(c % upscale_factor_squared == 0,
17
+ "pixel_shuffle expects its input's 'channel' dimension to be divisible by the square of "
18
+ "upscale_factor, but input.size(-3)=", c, " is not divisible by ", upscale_factor_squared);
19
+ }
20
+
21
+ inline void check_pixel_unshuffle_shapes(const Tensor& self, int64_t downscale_factor) {
22
+ TORCH_CHECK(
23
+ self.dim() >= 3,
24
+ "pixel_unshuffle expects input to have at least 3 dimensions, but got input with ",
25
+ self.dim(),
26
+ " dimension(s)");
27
+ TORCH_CHECK(
28
+ downscale_factor > 0,
29
+ "pixel_unshuffle expects a positive downscale_factor, but got ",
30
+ downscale_factor);
31
+ int64_t h = self.size(-2);
32
+ int64_t w = self.size(-1);
33
+ TORCH_CHECK(
34
+ h % downscale_factor == 0,
35
+ "pixel_unshuffle expects height to be divisible by downscale_factor, but input.size(-2)=",
36
+ h,
37
+ " is not divisible by ",
38
+ downscale_factor);
39
+ TORCH_CHECK(
40
+ w % downscale_factor == 0,
41
+ "pixel_unshuffle expects width to be divisible by downscale_factor, but input.size(-1)=",
42
+ w,
43
+ " is not divisible by ",
44
+ downscale_factor);
45
+ }
46
+
47
+ }} // namespace at::native
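The checks above encode the pixel_shuffle contract: the channel dimension must be divisible by upscale_factor squared, and the shuffled output has shape (..., C / r^2, H * r, W * r). A standalone sketch of that shape computation (illustrative only, helper name hypothetical):

// --- illustrative example, not part of this commit ---
#include <cstdio>
#include <vector>

// Given sizes (..., C, H, W) and upscale factor r, pixel_shuffle produces
// (..., C / (r * r), H * r, W * r), provided C is divisible by r * r.
std::vector<long> pixel_shuffle_shape(std::vector<long> sizes, long r) {
  const size_t n = sizes.size();
  long c = sizes[n - 3], h = sizes[n - 2], w = sizes[n - 1];
  if (r <= 0 || c % (r * r) != 0) return {};  // would fail check_pixel_shuffle_shapes above
  sizes[n - 3] = c / (r * r);
  sizes[n - 2] = h * r;
  sizes[n - 1] = w * r;
  return sizes;
}

int main() {
  for (long s : pixel_shuffle_shape({1, 8, 4, 4}, 2)) std::printf("%ld ", s);
  std::printf("\n");  // prints "1 2 8 8"
}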
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Pool.h ADDED
@@ -0,0 +1,340 @@
1
+ #include <ATen/core/Tensor.h>
2
+ #include <ATen/div_rtn.h>
3
+ #include <ATen/TensorUtils.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+ #include <c10/util/irange.h>
6
+
7
+ #include <utility>
8
+
9
+ #pragma once
10
+
11
+ namespace at::native {
12
+
13
+ using max_pool2d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input,
14
+ int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH);
15
+ using max_pool2d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
16
+
17
+ DECLARE_DISPATCH(max_pool2d_fn, max_pool2d_kernel);
18
+ DECLARE_DISPATCH(max_pool2d_backward_fn, max_pool2d_backward_kernel);
19
+
20
+ // average pooling has the same signature for forward and backward
21
+ using avg_pool2d_fn = void(*)(const Tensor& output, const Tensor& input, int64_t kW, int64_t kH,
22
+ int64_t dW, int64_t dH, int64_t padW, int64_t padH, bool count_include_pad, c10::optional<int64_t> divisor_override);
23
+ using avg_pool2d_backward_fn = void(*)(const Tensor& output, const Tensor& input, int kW, int kH,
24
+ int dW, int dH, int padW, int padH, bool count_include_pad, c10::optional<int64_t> divisor_override);
25
+
26
+ DECLARE_DISPATCH(avg_pool2d_fn, avg_pool2d_kernel);
27
+ DECLARE_DISPATCH(avg_pool2d_backward_fn, avg_pool2d_backward_kernel);
28
+
29
+ using max_pool3d_fn = void(*)(Tensor& output, Tensor& indices, const Tensor& input,
30
+ int kW, int kH, int kD, int dW, int dH, int dD, int pW, int pH, int pD, int dilationW, int dilationH, int dilationD);
31
+ using max_pool3d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
32
+
33
+ DECLARE_DISPATCH(max_pool3d_fn, max_pool3d_kernel);
34
+ DECLARE_DISPATCH(max_pool3d_backward_fn, max_pool3d_backward_kernel);
35
+ namespace {
36
+
37
+ template <typename dest_t, typename src_t>
38
+ static inline dest_t
39
+ safe_downcast(src_t v)
40
+ {
41
+ TORCH_CHECK(std::numeric_limits<dest_t>::min() <= v && v <= std::numeric_limits<dest_t>::max(),
42
+ "integer out of range");
43
+
44
+ return static_cast<dest_t>(v);
45
+ }
46
+
47
+ template<typename T>
48
+ static inline T pooling_output_shape_pad_lr(
49
+ T inputSize, T kernelSize, T pad_l, T pad_r, T stride, T dilation,
50
+ bool ceil_mode) {
51
+ T outputSize = div_rtn<T>(
52
+ inputSize + pad_l + pad_r - dilation * (kernelSize - 1) - 1 +
53
+ (ceil_mode ? stride - 1 : 0), stride) + 1;
54
+ if (ceil_mode) {
55
+ // ensure that the last pooling starts inside the image
56
+ // needed to avoid problems in ceil mode
57
+ if ((outputSize - 1) * stride >= inputSize + pad_l) {
58
+ --outputSize;
59
+ }
60
+ }
61
+ return outputSize;
62
+ }
63
+
64
+ template<typename T>
65
+ static inline T pooling_output_shape(
66
+ T inputSize, T kernelSize, T pad, T stride, T dilation, bool ceil_mode) {
67
+ TORCH_CHECK(stride != 0, "stride should not be zero");
68
+ TORCH_CHECK(pad >= 0,
69
+ "pad must be non-negative, but got pad: ", pad);
70
+ TORCH_CHECK(pad <= ((kernelSize - 1) * dilation + 1) / 2,
71
+ "pad should be at most half of effective kernel size, but got pad=",
72
+ pad, ", kernel_size=", kernelSize, " and dilation=", dilation)
73
+ return pooling_output_shape_pad_lr(
74
+ inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode);
75
+ }
76
+
77
+ template <typename T>
78
+ std::pair<T, T> _pooling_same_mode_padding_lr(
79
+ T inputSize, T kernelSize, T stride, T dilation) {
80
+ // NOTE: with strides, the output shape is ceil(inputSize/stride)
81
+ auto total_padding = T(dilation) * (kernelSize - 1);
82
+
83
+ // Prefer symmetric padding if possible
84
+ if (stride > 2 && (total_padding % 2 == 1)) {
85
+ // The floor in the output size calculation gives us a little wiggle room
86
+ auto wiggle_room = inputSize % stride - 1;
87
+ if (wiggle_room > 0) {
88
+ total_padding = total_padding - 1;
89
+ }
90
+ }
91
+
92
+ auto left = total_padding / 2;
93
+ return {left, total_padding - left};
94
+ }
95
+
96
+ inline std::pair<int64_t, int64_t> pooling_same_mode_padding_lr(
97
+ int64_t inputSize, int64_t kernelSize, int64_t stride, int64_t dilation) {
98
+ return _pooling_same_mode_padding_lr(inputSize, kernelSize, stride, dilation);
99
+ }
100
+
101
+ inline std::pair<c10::SymInt, c10::SymInt> pooling_same_mode_padding_lr(
102
+ c10::SymInt inputSize, c10::SymInt kernelSize, c10::SymInt stride, c10::SymInt dilation) {
103
+ return _pooling_same_mode_padding_lr(std::move(inputSize), std::move(kernelSize), std::move(stride), std::move(dilation));
104
+ }
105
+
106
+ // AveragePool2d/DilatedMaxPool2d (forward)
107
+ static inline void
108
+ pool2d_shape_check(
109
+ const Tensor& input,
110
+ int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW,
111
+ int64_t nInputPlane,
112
+ int64_t inputHeight, int64_t inputWidth,
113
+ int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format)
114
+ {
115
+ const int64_t ndim = input.ndimension();
116
+ const int64_t nOutputPlane = nInputPlane;
117
+
118
+ TORCH_CHECK(kW > 0 && kH > 0,
119
+ "kernel size should be greater than zero, but got ",
120
+ "kH: ", kH, " kW: ", kW);
121
+ TORCH_CHECK(dW > 0 && dH > 0,
122
+ "stride should be greater than zero, but got "
123
+ "dH: ", dH, " dW: ", dW);
124
+ TORCH_CHECK(dilationH > 0 && dilationW > 0,
125
+ "dilation should be greater than zero, but got ",
126
+ "dilationH: ", dilationH, " dilationW: ", dilationW);
127
+
128
+ bool valid_dims = input.size(1) != 0 && input.size(2) != 0;
129
+ if (memory_format == at::MemoryFormat::ChannelsLast){
130
+ // Expect tensor in NHWC format and allow 0-dim only for N.
131
+ TORCH_CHECK((ndim == 4 && valid_dims && input.size(3) != 0),
132
+ "Expected 4D (batch mode) tensor expected for input with channels_last layout"
133
+ " with optional 0 dim batch size for input, but got: ", input.sizes());
134
+ } else {
135
+ TORCH_CHECK((ndim == 3 && input.size(0) != 0 && valid_dims) ||
136
+ (ndim == 4 && valid_dims && input.size(3) != 0),
137
+ "Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got:",
138
+ input.sizes());
139
+ }
140
+
141
+ TORCH_CHECK(kW/2 >= padW && kH/2 >= padH,
142
+ "pad should be smaller than or equal to half of kernel size, but got ",
143
+ "padW = ", padW, ", padH = ", padH, ", kW = ", kW, ", kH = ", kH);
144
+
145
+ TORCH_CHECK(outputWidth >= 1 && outputHeight >= 1,
146
+ "Given input size: (",
147
+ nInputPlane, "x", inputHeight, "x", inputWidth, "). ",
148
+ "Calculated output size: (",
149
+ nOutputPlane, "x", outputHeight, "x", outputWidth, "). ",
150
+ "Output size is too small");
151
+ }
152
+
153
+ // DilatedMaxPool2d (backward)
154
+ static inline void
155
+ max_pool2d_backward_shape_check(
156
+ const Tensor& input,
157
+ const Tensor& gradOutput,
158
+ const Tensor& indices,
159
+ int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW,
160
+ int64_t nInputPlane,
161
+ int64_t inputHeight, int64_t inputWidth,
162
+ int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format)
163
+ {
164
+ pool2d_shape_check(
165
+ input,
166
+ kH, kW, dH, dW, padH, padW, dilationH, dilationW,
167
+ nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, memory_format);
168
+
169
+ const int64_t ndim = input.ndimension();
170
+ const int64_t nOutputPlane = nInputPlane;
171
+
172
+ check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane);
173
+ check_dim_size(gradOutput, ndim, ndim-2, outputHeight);
174
+ check_dim_size(gradOutput, ndim, ndim-1, outputWidth);
175
+
176
+ check_dim_size(indices, ndim, ndim-3, nOutputPlane);
177
+ check_dim_size(indices, ndim, ndim-2, outputHeight);
178
+ check_dim_size(indices, ndim, ndim-1, outputWidth);
179
+ }
180
+
181
+ // AveragePool2d (backward)
182
+ static inline void
183
+ avg_pool2d_backward_shape_check(
184
+ const Tensor& input,
185
+ const Tensor& gradOutput,
186
+ int64_t /*nbatch*/,
187
+ int kH, int kW, int dH, int dW, int padH, int padW,
188
+ int64_t nInputPlane,
189
+ int64_t inputHeight, int64_t inputWidth,
190
+ int64_t outputHeight, int64_t outputWidth,
191
+ MemoryFormat memory_format)
192
+ {
193
+ pool2d_shape_check(
194
+ input,
195
+ kH, kW, dH, dW, padH, padW, 1, 1,
196
+ nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
197
+ memory_format);
198
+
199
+ const int64_t ndim = input.ndimension();
200
+ const int64_t nOutputPlane = nInputPlane;
201
+
202
+ check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane);
203
+ check_dim_size(gradOutput, ndim, ndim-2, outputHeight);
204
+ check_dim_size(gradOutput, ndim, ndim-1, outputWidth);
205
+ }
206
+
207
+ // AveragePool3d/DilatedMaxPool3d (forward)
208
+ static inline void
209
+ pool3d_shape_check(
210
+ const Tensor& input,
211
+ int64_t nslices,
212
+ int kT, int kH, int kW,
213
+ int dT, int dH, int dW,
214
+ int pT, int pH, int pW,
215
+ int dilationT, int dilationH, int dilationW,
216
+ int64_t itime, int64_t iheight, int64_t iwidth,
217
+ int64_t otime, int64_t oheight, int64_t owidth,
218
+ const char *fn_name,
219
+ bool check_input_size=false)
220
+ {
221
+ const int64_t ndim = input.ndimension();
222
+
223
+ TORCH_CHECK(kT > 0 && kW > 0 && kH > 0,
224
+ "kernel size should be greater than zero, but got ",
225
+ "kT: ", kT, " kH: ", kH, " kW: ", kW);
226
+ TORCH_CHECK(dT > 0 && dW > 0 && dH > 0,
227
+ "stride should be greater than zero, but got ",
228
+ "dT: ", dT, " dH: ", dH, " dW: ", dW);
229
+ TORCH_CHECK(dilationT > 0 && dilationW > 0 && dilationH > 0,
230
+ "dilation should be greater than zero, but got ",
231
+ "dilationT: ", dilationT, " dilationH: ", dilationH, " dilationW: ", dilationW);
232
+
233
+ TORCH_CHECK(ndim == 4 || ndim == 5,
234
+ fn_name, ": Expected 4D or 5D tensor for input, but got: ", input.sizes());
235
+
236
+ for (const auto i : c10::irange(ndim)) {
237
+ if (ndim == 5 && i == 0) {
238
+ // size of batch-dim can be 0.
239
+ continue;
240
+ }
241
+ TORCH_CHECK(
242
+ input.size(i) > 0,
243
+ fn_name,
244
+ ": Expected input's non-batch dimensions to have positive length,"
245
+ " but input has a shape of ",
246
+ input.sizes(),
247
+ " and non-batch dimension ",
248
+ input.size(i),
249
+ " has length zero!")
250
+ }
251
+
252
+ if (check_input_size) { // AveragePool3d
253
+ TORCH_CHECK(itime >= kT && iheight >= kH && iwidth >= kW,
254
+ "input image ", "(T: ", itime, " H: ", iheight, " W: ", iwidth, ") smaller than ",
255
+ "kernel size ", "(kT: ", kT, " kH: ", kH, " kW: ", kW, ")");
256
+ }
257
+
258
+ TORCH_CHECK(kT/2 >= pT && kW/2 >= pW && kH/2 >= pH,
259
+ "pad should be smaller than or equal to half of kernel size, but got "
260
+ "kT: ", kT, " kW: ", kW, " kH: ", kH, " padT: ", pT, " padW: ", pW, " padH: ", pH);
261
+
262
+ TORCH_CHECK(otime >= 1 && owidth >= 1 && oheight >= 1,
263
+ "Given input size: (",
264
+ nslices,"x", itime, "x", iheight, "x", iwidth, "). ",
265
+ "Calculated output size: (",
266
+ nslices, "x", otime, "x", oheight, "x", owidth, "). ",
267
+ "Output size is too small");
268
+ }
269
+
270
+ static inline void
271
+ max_pool3d_backward_shape_check(
272
+ const Tensor& input,
273
+ const Tensor& gradOutput,
274
+ const Tensor& indices,
275
+ int64_t nslices,
276
+ int kT, int kH, int kW,
277
+ int dT, int dH, int dW,
278
+ int pT, int pH, int pW,
279
+ int dilationT, int dilationH, int dilationW,
280
+ int64_t itime, int64_t iheight, int64_t iwidth,
281
+ int64_t otime, int64_t oheight, int64_t owidth,
282
+ const char* fn_name)
283
+ {
284
+ const int64_t ndim = input.ndimension();
285
+
286
+ pool3d_shape_check(
287
+ input,
288
+ nslices,
289
+ kT, kH, kW,
290
+ dT, dH, dW,
291
+ pT, pH, pW,
292
+ dilationT, dilationH, dilationW,
293
+ itime, iheight, iwidth,
294
+ otime, oheight, owidth, fn_name);
295
+
296
+ check_dim_size(gradOutput, ndim, ndim-4, nslices);
297
+ check_dim_size(gradOutput, ndim, ndim-3, otime);
298
+ check_dim_size(gradOutput, ndim, ndim-2, oheight);
299
+ check_dim_size(gradOutput, ndim, ndim-1, owidth);
300
+
301
+ check_dim_size(indices, ndim, ndim-4, nslices);
302
+ check_dim_size(indices, ndim, ndim-3, otime);
303
+ check_dim_size(indices, ndim, ndim-2, oheight);
304
+ check_dim_size(indices, ndim, ndim-1, owidth);
305
+ }
306
+
307
+ static inline void
308
+ avg_pool3d_backward_shape_check(
309
+ const Tensor& input,
310
+ const Tensor& gradOutput,
311
+ int64_t nslices,
312
+ int kT, int kH, int kW,
313
+ int dT, int dH, int dW,
314
+ int pT, int pH, int pW,
315
+ int64_t itime, int64_t iheight, int64_t iwidth,
316
+ int64_t otime, int64_t oheight, int64_t owidth,
317
+ const char *fn_name)
318
+ {
319
+ const int64_t ndim = input.ndimension();
320
+
321
+ pool3d_shape_check(
322
+ input,
323
+ nslices,
324
+ kT, kH, kW,
325
+ dT, dH, dW,
326
+ pT, pH, pW,
327
+ 1, 1, 1,
328
+ itime, iheight, iwidth,
329
+ otime, oheight, owidth,
330
+ fn_name, true);
331
+
332
+ check_dim_size(gradOutput, ndim, ndim-4, nslices);
333
+ check_dim_size(gradOutput, ndim, ndim-3, otime);
334
+ check_dim_size(gradOutput, ndim, ndim-2, oheight);
335
+ check_dim_size(gradOutput, ndim, ndim-1, owidth);
336
+ }
337
+
338
+ } // anonymous namespace
339
+
340
+ } // namespace at::native
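
The checks in pool2d_shape_check/pool3d_shape_check assume the caller has already derived outputHeight/outputWidth (and otime/oheight/owidth) from kernel, stride, padding and dilation. Below is a minimal standalone sketch of that per-dimension arithmetic, with an illustrative helper name; the real derivation lives in upstream pooling helpers, not in the checks shown here.

#include <cstdint>
#include <cassert>

// Hypothetical standalone version of the 1-D pooling output-size formula that
// the shape checks above expect to have been applied per spatial dimension.
static int64_t pooling_output_shape_sketch(
    int64_t input_size, int64_t kernel, int64_t pad,
    int64_t stride, int64_t dilation, bool ceil_mode) {
  int64_t numerator = input_size + 2 * pad - dilation * (kernel - 1) - 1;
  int64_t out = (ceil_mode ? (numerator + stride - 1) / stride : numerator / stride) + 1;
  if (ceil_mode && (out - 1) * stride >= input_size + pad) {
    // Make sure the last pooling window starts inside the (padded) input.
    --out;
  }
  return out;
}

int main() {
  // 7x7 input, 3x3 kernel, stride 2, pad 1, dilation 1 -> 4x4 output.
  assert(pooling_output_shape_sketch(7, 3, 1, 2, 1, /*ceil_mode=*/false) == 4);
  return 0;
}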
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/RNN.h ADDED
@@ -0,0 +1,53 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+
6
+ namespace at::native {
7
+
8
+ using lstm_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&, TensorList, TensorList, bool, int64_t, double, bool, bool, bool);
9
+ using rnn_fn = void(*)(Tensor&, Tensor&, const Tensor&, const Tensor&, TensorList, bool, int64_t, double, bool, bool, bool);
10
+ using lstm_packed_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&, const Tensor&, TensorList, TensorList, bool, int64_t, double, bool, bool);
11
+ using rnn_packed_fn = void(*)(Tensor&, Tensor&, const Tensor&, const Tensor&, const Tensor&, TensorList, bool, int64_t, double, bool, bool);
12
+
13
+ DECLARE_DISPATCH(lstm_fn, lstm_cudnn_stub);
14
+ DECLARE_DISPATCH(lstm_fn, lstm_miopen_stub);
15
+ DECLARE_DISPATCH(lstm_fn, lstm_mkldnn_stub);
16
+ DECLARE_DISPATCH(rnn_fn, gru_cudnn_stub);
17
+ DECLARE_DISPATCH(rnn_fn, gru_miopen_stub);
18
+ DECLARE_DISPATCH(rnn_fn, rnn_tanh_cudnn_stub);
19
+ DECLARE_DISPATCH(rnn_fn, rnn_tanh_miopen_stub);
20
+ DECLARE_DISPATCH(rnn_fn, rnn_relu_cudnn_stub);
21
+ DECLARE_DISPATCH(rnn_fn, rnn_relu_miopen_stub);
22
+ DECLARE_DISPATCH(lstm_packed_fn, lstm_packed_cudnn_stub);
23
+ DECLARE_DISPATCH(lstm_packed_fn, lstm_packed_miopen_stub);
24
+ DECLARE_DISPATCH(rnn_packed_fn, gru_packed_cudnn_stub);
25
+ DECLARE_DISPATCH(rnn_packed_fn, gru_packed_miopen_stub);
26
+ DECLARE_DISPATCH(rnn_packed_fn, rnn_tanh_packed_cudnn_stub);
27
+ DECLARE_DISPATCH(rnn_packed_fn, rnn_tanh_packed_miopen_stub);
28
+ DECLARE_DISPATCH(rnn_packed_fn, rnn_relu_packed_cudnn_stub);
29
+ DECLARE_DISPATCH(rnn_packed_fn, rnn_relu_packed_miopen_stub);
30
+
31
+ inline void check_attributes(const Tensor& input, const TensorList& params, const TensorList& hiddens, bool check_dtype=false) {
32
+ auto input_device = input.device();
33
+ auto input_dtype = input.scalar_type();
34
+
35
+ auto check_tensors = [&](const std::string& name, const Tensor& t) {
36
+ if (!t.defined()) return;
37
+ auto t_device = t.device();
38
+ TORCH_CHECK(input_device == t_device,
39
+ "Input and ", name, " tensors are not at the same device, found input tensor at ",
40
+ input_device, " and ", name, " tensor at ", t_device);
41
+ if (check_dtype) {
42
+ auto t_dtype = t.scalar_type();
43
+ TORCH_CHECK(input_dtype == t_dtype,
44
+ "Input and ", name, " tensors are not the same dtype, found input tensor with ",
45
+ input_dtype, " and ", name, " tensor with ", t_dtype);
46
+ }
47
+ };
48
+
49
+ for (const auto& h : hiddens) check_tensors("hidden", h);
50
+ for (const auto& p : params) check_tensors("parameter", p);
51
+ }
52
+
53
+ } // namespace at::native
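
check_attributes walks the hidden and parameter tensors and requires each to share the input's device (and, optionally, dtype). A small usage sketch follows, assuming an internal build where ATen/native/RNN.h is on the include path; shapes below are illustrative only.

#include <ATen/ATen.h>
#include <ATen/native/RNN.h>

int main() {
  at::Tensor input = at::randn({5, 3, 8});      // (seq, batch, feature)
  at::Tensor hx    = at::randn({1, 3, 16});     // hidden state
  at::Tensor w_ih  = at::randn({3 * 16, 8});    // illustrative GRU-sized weights
  at::Tensor w_hh  = at::randn({3 * 16, 16});

  // Throws via TORCH_CHECK if any tensor is on a different device than `input`,
  // or (with check_dtype=true) has a different dtype.
  at::native::check_attributes(input, {w_ih, w_hh}, {hx}, /*check_dtype=*/true);
  return 0;
}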
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/RangeFactories.h ADDED
@@ -0,0 +1,12 @@
1
+ #include <ATen/native/DispatchStub.h>
2
+ #include <c10/core/Scalar.h>
3
+
4
+ namespace at {
5
+ struct TensorIterator;
6
+
7
+ namespace native {
8
+
9
+ DECLARE_DISPATCH(void(*)(TensorIterator&, const Scalar&, const Scalar&, const Scalar&), arange_stub);
10
+ DECLARE_DISPATCH(void(*)(TensorIterator&, const Scalar&, const Scalar&, int64_t), linspace_stub);
11
+
12
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ReduceAllOps.h ADDED
@@ -0,0 +1,16 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+
5
+ namespace at {
6
+ class Tensor;
7
+ }
8
+
9
+ namespace at::native {
10
+
11
+ using reduce_all_fn = void (*)(Tensor & result, const Tensor & self);
12
+ using reduce_min_max_fn = void (*)(Tensor & max_result, Tensor & min_result, const Tensor & self);
13
+ DECLARE_DISPATCH(reduce_all_fn, min_all_stub);
14
+ DECLARE_DISPATCH(reduce_all_fn, max_all_stub);
15
+
16
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOps.h ADDED
@@ -0,0 +1,56 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/Optional.h>
6
+
7
+ namespace c10 {
8
+ class Scalar;
9
+ }
10
+
11
+ namespace at {
12
+ struct TensorIterator;
13
+ class Tensor;
14
+ }
15
+
16
+ namespace at::native {
17
+
18
+ using reduce_fn = void(*)(TensorIterator &);
19
+
20
+ DECLARE_DISPATCH(reduce_fn, sum_stub);
21
+ DECLARE_DISPATCH(reduce_fn, nansum_stub);
22
+ DECLARE_DISPATCH(reduce_fn, prod_stub);
23
+ DECLARE_DISPATCH(reduce_fn, mean_stub);
24
+ DECLARE_DISPATCH(reduce_fn, and_stub);
25
+ DECLARE_DISPATCH(reduce_fn, or_stub);
26
+ DECLARE_DISPATCH(reduce_fn, min_values_stub);
27
+ DECLARE_DISPATCH(reduce_fn, max_values_stub);
28
+ DECLARE_DISPATCH(reduce_fn, argmax_stub);
29
+ DECLARE_DISPATCH(reduce_fn, argmin_stub);
30
+
31
+ using reduce_std_var_function =
32
+ void (*)(TensorIterator&, double correction, bool take_sqrt);
33
+ DECLARE_DISPATCH(reduce_std_var_function, std_var_stub);
34
+
35
+ using reduce_norm_fn =
36
+ void (*)(Tensor&, const Tensor&, const c10::Scalar&, c10::optional<int64_t>);
37
+ DECLARE_DISPATCH(reduce_norm_fn, norm_kernel);
38
+
39
+ using reduce_fn_flag = void(*)(TensorIterator &, const c10::Scalar&);
40
+ DECLARE_DISPATCH(reduce_fn_flag, norm_stub);
41
+
42
+ using structured_cum_fn = void (*)(const Tensor&, const Tensor&, int64_t);
43
+ using cum_fn = void (*)(Tensor&, const Tensor&, int64_t);
44
+ DECLARE_DISPATCH(structured_cum_fn, cumsum_stub);
45
+ DECLARE_DISPATCH(structured_cum_fn, cumprod_stub);
46
+ DECLARE_DISPATCH(cum_fn, logcumsumexp_stub);
47
+
48
+ DECLARE_DISPATCH(void (*)(const Tensor&, int64_t, bool, Tensor&, Tensor&), aminmax_stub);
49
+ DECLARE_DISPATCH(void (*)(const Tensor&, Tensor&, Tensor&), aminmax_allreduce_stub);
50
+
51
+ // Used in cuda/Normalization.cu
52
+ TORCH_API std::tuple<Tensor&,Tensor&> var_mean_out(
53
+ Tensor &result1, Tensor &result2, const Tensor &self, IntArrayRef dim,
54
+ int64_t correction, bool keepdim);
55
+
56
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ReductionType.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Scalar.h>
4
+
5
+ namespace at::native {
6
+
7
+ enum class ReductionType {MAX, MEAN, MIN, SUM, PROD};
8
+
9
+ static inline ReductionType get_reduction_enum(const c10::string_view& reduce) {
10
+ if (reduce == "max" || reduce == "amax") {
11
+ return ReductionType::MAX;
12
+ } else if (reduce == "mean") {
13
+ return ReductionType::MEAN;
14
+ } else if (reduce == "min" || reduce == "amin") {
15
+ return ReductionType::MIN;
16
+ } else if (reduce == "sum") {
17
+ return ReductionType::SUM;
18
+ } else if (reduce == "prod") {
19
+ return ReductionType::PROD;
20
+ } else {
21
+ TORCH_CHECK(false, "reduce argument must be either sum, prod, mean, amax or amin, got ", reduce);
22
+ }
23
+ }
24
+
25
+ // used for `scatter_reduce`, old options for BC.
26
+ static inline ReductionType get_operator_enum(const c10::string_view reduce, bool use_new_options) {
27
+ if (use_new_options) {
28
+ return get_reduction_enum(reduce);
29
+ } else {
30
+ if (reduce == "add") {
31
+ return ReductionType::SUM;
32
+ } else if (reduce == "multiply") {
33
+ return ReductionType::PROD;
34
+ } else {
35
+ TORCH_CHECK(false, "reduce argument must be either add or multiply.")
36
+ }
37
+ }
38
+ }
39
+
40
+ } // at::native
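
get_reduction_enum/get_operator_enum are plain string-to-enum maps, with get_operator_enum keeping the older scatter_reduce spellings ("add"/"multiply") alive for backward compatibility. A short usage sketch, under the same internal include-path assumption as above:

#include <ATen/native/ReductionType.h>
#include <cassert>

int main() {
  using at::native::ReductionType;
  assert(at::native::get_reduction_enum("amax") == ReductionType::MAX);
  assert(at::native::get_reduction_enum("mean") == ReductionType::MEAN);
  // Old scatter_reduce spelling: "add"/"multiply" when use_new_options == false.
  assert(at::native::get_operator_enum("add", /*use_new_options=*/false) == ReductionType::SUM);
  return 0;
}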
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ResizeCommon.h ADDED
@@ -0,0 +1,75 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/native/TensorFactories.h>
5
+ #include <ATen/NamedTensorUtils.h>
6
+ #include <c10/util/irange.h>
7
+
8
+ #ifndef AT_PER_OPERATOR_HEADERS
9
+ #include <ATen/NativeFunctions.h>
10
+ #else
11
+ #include <ATen/ops/empty.h>
12
+ #endif
13
+
14
+ namespace at::native {
15
+
16
+ template <typename T>
17
+ inline T storage_size_for(ArrayRef<T> size, ArrayRef<T> stride) {
18
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(size.size() == stride.size(),
19
+ "storage_size_for(size, stride) requires that size and stride ",
20
+ "have the same size as a precondition.");
21
+ T storage_size = 1;
22
+ for (const auto dim : c10::irange(size.size())) {
23
+ if (size[dim] == 0) {
24
+ storage_size = 0;
25
+ break;
26
+ }
27
+ storage_size += (size[dim] - 1) * stride[dim];
28
+ }
29
+ return storage_size;
30
+ }
31
+
32
+ inline const Tensor& resize_named_tensor_(
33
+ const Tensor& self,
34
+ IntArrayRef size,
35
+ c10::optional<MemoryFormat> optional_memory_format) {
36
+ TORCH_INTERNAL_ASSERT(self.has_names());
37
+ TORCH_CHECK(
38
+ self.sizes() == size,
39
+ "Cannot resize named tensor with resize_ or resize_as_ (tried to resize "
40
+ "Tensor",
41
+ self.names(),
42
+ " with size ",
43
+ self.sizes(),
44
+ " to ",
45
+ size,
46
+ "). This may be caused by passing a named tensor ",
47
+ "as an `out=` argument; please ensure that the sizes are the same. ");
48
+ TORCH_CHECK(
49
+ !optional_memory_format.has_value(),
50
+ "Unsupported memory format for named tensor resize ",
51
+ optional_memory_format.value());
52
+ return self;
53
+ }
54
+
55
+ // For deterministic output, fill new elements that were added after a storage
56
+ // resize with NaN or MAX_INT. `old_storage_nbytes` is the size of the storage
57
+ // before the resize happened.
58
+ inline const Tensor& fill_resize_deterministic_(const Tensor& tensor, int64_t old_storage_nbytes) {
59
+ const at::Storage& storage = tensor.unsafeGetTensorImpl()->unsafe_storage();
60
+ int64_t new_storage_nbytes = storage.nbytes();
61
+ int64_t old_storage_numel = old_storage_nbytes / tensor.itemsize();
62
+ int64_t new_storage_numel = new_storage_nbytes / tensor.itemsize();
63
+ if (new_storage_numel > old_storage_numel) {
64
+ at::Tensor tensor_view = at::empty({}, at::TensorOptions().dtype(tensor.scalar_type()).device(tensor.device()));
65
+ tensor_view.set_(
66
+ storage,
67
+ /*storage_offset=*/old_storage_numel,
68
+ /*size=*/{new_storage_numel - old_storage_numel},
69
+ /*stride=*/{1});
70
+ at::native::fill_empty_deterministic_(tensor_view);
71
+ }
72
+ return tensor;
73
+ }
74
+
75
+ } // namespace at::native
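
storage_size_for computes the smallest number of storage elements needed to address every index described by size/stride: 1 for the final element plus (size[d] - 1) * stride[d] per dimension, or 0 if any dimension is empty. For size {2, 3} with contiguous strides {3, 1} that is 1 + 1*3 + 2*1 = 6. A standalone re-derivation with a hypothetical helper name:

#include <cstdint>
#include <vector>
#include <cassert>

// Minimal storage element count for a view described by sizes/strides;
// mirrors the arithmetic of storage_size_for above.
static int64_t min_storage_numel(const std::vector<int64_t>& size,
                                 const std::vector<int64_t>& stride) {
  int64_t n = 1;  // room for the element at the largest reachable offset
  for (size_t d = 0; d < size.size(); ++d) {
    if (size[d] == 0) return 0;
    n += (size[d] - 1) * stride[d];
  }
  return n;
}

int main() {
  assert(min_storage_numel({2, 3}, {3, 1}) == 6);   // contiguous 2x3
  assert(min_storage_numel({2, 3}, {1, 2}) == 6);   // transposed layout, same storage
  assert(min_storage_numel({4, 0}, {0, 1}) == 0);   // empty tensor needs no storage
  return 0;
}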
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ScatterGatherChecks.h ADDED
@@ -0,0 +1,128 @@
1
+ #pragma once
2
+
3
+ #include <vector>
4
+ #include <ATen/core/Tensor.h>
5
+ #include <ATen/native/ReduceOpsUtils.h>
6
+ #include <c10/util/irange.h>
7
+
8
+ namespace at::native {
9
+
10
+ namespace {
11
+
12
+ // checks whether index.dtype == int64
13
+ // and self.dtype == src.dtype if src is a Tensor
14
+ static void scatter_gather_dtype_check(
15
+ const std::string& method_name,
16
+ const Tensor& self,
17
+ const Tensor& index,
18
+ const c10::optional<Tensor>& src_opt = c10::nullopt
19
+ ) {
20
+ if (index.numel() != 0) {
21
+ TORCH_CHECK(
22
+ index.scalar_type() == at::ScalarType::Long,
23
+ method_name, "(): Expected dtype int64 for index"
24
+ );
25
+ }
26
+
27
+ if (src_opt.has_value()) {
28
+ const auto& src = src_opt.value();
29
+ TORCH_CHECK(
30
+ self.scalar_type() == src.scalar_type(),
31
+ method_name, "(): Expected self.dtype to be equal to src.dtype"
32
+ );
33
+ }
34
+ }
35
+
36
+ // Used for `gather`-like methods
37
+ // Note: self means the input tensor here
38
+ // Test:
39
+ // 1. index.size(d) <= self.size(d) for all d != dim
40
+ // 2. index.dim() == self.dim()
41
+ static C10_UNUSED void gather_shape_check(const Tensor& self, int64_t dim,
42
+ const Tensor& index
43
+ ) {
44
+ auto self_dims = ensure_nonempty_dim(self.dim());
45
+ TORCH_CHECK(self_dims == ensure_nonempty_dim(index.dim()),
46
+ "Index tensor must have the same number of dimensions as input tensor"
47
+ );
48
+
49
+ for (const auto i : c10::irange(self_dims)) {
50
+ if (i != dim) {
51
+ TORCH_CHECK(
52
+ ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i),
53
+ "Size does not match at dimension ", i,
54
+ " expected index ", index.sizes(),
55
+ " to be smaller than self ", self.sizes(),
56
+ " apart from dimension ", dim
57
+ );
58
+ }
59
+ }
60
+ }
61
+
62
+ // Used for `scatter` and `scatter_add`
63
+ // Tests:
64
+ // 1. index.size(d) <= self.size(d) for all d != dim
65
+ // 2. index.size(d) <= src.size(d) for all d if src is a Tensor
66
+ // 3. index.dim() == self.dim() == src.dim()
67
+ static C10_UNUSED void scatter_shape_check(
68
+ const Tensor& self, int64_t dim, const Tensor& index,
69
+ const c10::optional<Tensor>& src_opt = c10::nullopt
70
+ ) {
71
+ if (index.numel() == 0) return;
72
+ TORCH_CHECK(
73
+ ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
74
+ "Index tensor must have the same number of dimensions as self tensor"
75
+ );
76
+
77
+ bool is_wrong_shape = false;
78
+ int64_t self_dims = ensure_nonempty_dim(self.dim());
79
+
80
+ // Check: index.size(d) <= self.size(d) for all d != dim
81
+ for (const auto d : c10::irange(self_dims)) {
82
+ int64_t index_d_size = ensure_nonempty_size(index, d);
83
+ if (d == dim) continue;
84
+ if (index_d_size > ensure_nonempty_size(self, d)) {
85
+ is_wrong_shape = true;
86
+ break;
87
+ }
88
+ }
89
+
90
+ // Check: index.size(d) <= src.size(d) for all d if src is Tensor
91
+ if (!is_wrong_shape && src_opt.has_value()) {
92
+ const auto& src = src_opt.value();
93
+ for (const auto d : c10::irange(self_dims)) {
94
+ int64_t index_d_size = ensure_nonempty_size(index, d);
95
+ if (index_d_size > ensure_nonempty_size(src, d)) {
96
+ is_wrong_shape = true;
97
+ break;
98
+ }
99
+ }
100
+ }
101
+
102
+ if (src_opt.has_value()) {
103
+ const auto& src = src_opt.value();
104
+
105
+ TORCH_CHECK(
106
+ ensure_nonempty_dim(src.dim()) == ensure_nonempty_dim(index.dim()),
107
+ "Index tensor must have the same number of dimensions as src tensor"
108
+ );
109
+
110
+ TORCH_CHECK(!is_wrong_shape,
111
+ "Expected index ", index.sizes(),
112
+ " to be smaller than self ", self.sizes(),
113
+ " apart from dimension ", dim,
114
+ " and to be smaller size than src ", src.sizes()
115
+ );
116
+ }
117
+ else {
118
+ TORCH_CHECK(!is_wrong_shape,
119
+ "Expected index ", index.sizes(),
120
+ " to be smaller than self ", self.sizes(),
121
+ " apart from dimension ", dim
122
+ );
123
+ }
124
+ }
125
+
126
+ } // anonymous namespace
127
+
128
+ } // namespace at::native
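
The checks above reduce to: index must match self (and src) in rank, and its size may not exceed the corresponding size in any dimension other than dim (for scatter's src check, in any dimension at all). A small illustration against the public at::gather API; shapes are illustrative:

#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::arange(12).reshape({3, 4});
  // OK: index has rank 2 and index.size(0) <= self.size(0); along dim=1 the
  // index size (2) may differ from self.size(1).
  at::Tensor idx_ok = at::zeros({3, 2}, at::kLong);
  at::Tensor out = at::gather(self, /*dim=*/1, idx_ok);   // shape {3, 2}

  // Would trip gather_shape_check: index.size(0) == 5 > self.size(0) == 3.
  // at::Tensor idx_bad = at::zeros({5, 2}, at::kLong);
  // at::gather(self, 1, idx_bad);  // throws
  return out.size(1) == 2 ? 0 : 1;
}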
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/SegmentReduce.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <ATen/native/ReductionType.h>
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/util/Optional.h>
7
+
8
+ namespace at {
9
+ class Tensor;
10
+
11
+ namespace native {
12
+
13
+ using segment_reduce_lengths_fn = Tensor (*)(
14
+ ReductionType,
15
+ const Tensor&,
16
+ const Tensor&,
17
+ int64_t,
18
+ const c10::optional<Scalar>&);
19
+ DECLARE_DISPATCH(segment_reduce_lengths_fn, _segment_reduce_lengths_stub);
20
+
21
+ using segment_reduce_offsets_fn = Tensor (*)(
22
+ ReductionType,
23
+ const Tensor&,
24
+ const Tensor&,
25
+ int64_t,
26
+ const c10::optional<Scalar>&);
27
+ DECLARE_DISPATCH(segment_reduce_offsets_fn, _segment_reduce_offsets_stub);
28
+
29
+ using segment_reduce_lengths_backward_fn = Tensor (*)(
30
+ const Tensor&,
31
+ const Tensor&,
32
+ const Tensor&,
33
+ ReductionType,
34
+ const Tensor&,
35
+ int64_t,
36
+ const c10::optional<Scalar>&);
37
+ DECLARE_DISPATCH(segment_reduce_lengths_backward_fn, _segment_reduce_lengths_backward_stub);
38
+
39
+ using segment_reduce_offsets_backward_fn = Tensor (*)(
40
+ const Tensor&,
41
+ const Tensor&,
42
+ const Tensor&,
43
+ ReductionType,
44
+ const Tensor&,
45
+ int64_t,
46
+ const c10::optional<Scalar>&);
47
+ DECLARE_DISPATCH(segment_reduce_offsets_backward_fn, _segment_reduce_offsets_backward_stub);
48
+
49
+ } // namespace native
50
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/SharedReduceOps.h ADDED
@@ -0,0 +1,544 @@
1
+ #pragma once
2
+ // Please note that this file is
3
+ // used across both CPU and GPU.
4
+
5
+ #include <type_traits>
6
+ #include <complex>
7
+ #include <c10/macros/Macros.h>
8
+ #include <ATen/detail/FunctionTraits.h>
9
+ #include <ATen/NumericUtils.h>
10
+ #if defined(__CUDACC__)
11
+ #include <ATen/cuda/DeviceUtils.cuh>
12
+ #include <ATen/native/cuda/DeviceSqrt.cuh>
13
+ #elif defined(__HIPCC__)
14
+ #include <ATen/hip/DeviceUtils.cuh>
15
+ #include <ATen/native/hip/DeviceSqrt.cuh>
16
+ #endif
17
+ #if defined(__CUDACC__) || defined(__HIPCC__)
18
+ #include <thrust/pair.h>
19
+ #else
20
+ #include <cmath>
21
+ #define device_sqrt std::sqrt
22
+ #endif
23
+ #if defined(__CUDACC__) || defined(__HIPCC__)
24
+ template <typename scalar_t>
25
+ inline C10_DEVICE scalar_t max_propagate_nan(scalar_t a, scalar_t b) {
26
+ #if defined(__HIPCC__)
27
+ // TODO: remove this special case for HIP when issue is fixed:
28
+ // https://github.com/ROCm-Developer-Tools/HIP/issues/2209
29
+ scalar_t max = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::max(a, b));
30
+ #else
31
+ scalar_t max = at::_isnan(b) ? b : std::max(a, b);
32
+ #endif
33
+ return max;
34
+ }
35
+ template <typename scalar_t>
36
+ inline C10_DEVICE scalar_t min_propagate_nan(scalar_t a, scalar_t b) {
37
+ #if defined(__HIPCC__)
38
+ // TODO: remove this special case for HIP when issue is fixed:
39
+ // https://github.com/ROCm-Developer-Tools/HIP/issues/2209
40
+ scalar_t min = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::min(a, b));
41
+ #else
42
+ scalar_t min = at::_isnan(b) ? b : std::min(a, b);
43
+ #endif
44
+ return min;
45
+ }
46
+ #define MAX(X, Y) max_propagate_nan(X,Y)
47
+ #define MIN(X, Y) min_propagate_nan(X,Y)
48
+ #else
49
+ #include <ATen/native/cpu/zmath.h>
50
+ #define MAX(X, Y) max_impl(X,Y)
51
+ #define MIN(X, Y) min_impl(X,Y)
52
+ #endif
53
+
54
+ // ROCM hcc doesn't work well with using std:: in kernel functions
55
+ #if defined(__CUDA_ARCH__)
56
+ #include <c10/cuda/CUDAMathCompat.h>
57
+ #define compat_pow c10::cuda::compat::pow
58
+ #elif defined(__HIPCC__)
59
+ #include <c10/hip/HIPMathCompat.h>
60
+ #define compat_pow c10::hip::compat::pow
61
+ #else
62
+ #define compat_pow std::pow
63
+ #endif
64
+
65
+ namespace at { namespace native {
66
+
67
+ namespace detail {
68
+
69
+ #if defined(__CUDACC__) || defined(__HIPCC__)
70
+ template <typename T1, typename T2> using pair = thrust::pair<T1, T2>;
71
+ #else
72
+ template <typename T1, typename T2> using pair = std::pair<T1, T2>;
73
+ #endif
74
+
75
+ } // namespace detail
76
+
77
+ template <typename scalar_t, typename index_t>
78
+ struct WelfordData {
79
+ scalar_t mean;
80
+ scalar_t m2;
81
+ index_t n;
82
+ scalar_t nf;
83
+
84
+ C10_HOST_DEVICE WelfordData() : mean(0), m2(0), n(0), nf(0) {}
85
+
86
+ C10_HOST_DEVICE WelfordData(
87
+ scalar_t mean,
88
+ scalar_t m2,
89
+ index_t n,
90
+ scalar_t nf)
91
+ : mean(mean), m2(m2), n(n), nf(nf) {}
92
+ };
93
+
94
+
95
+ template <typename scalar_t, typename acc_scalar_t, typename index_t, typename res_t>
96
+ struct WelfordOps {
97
+ acc_scalar_t correction;
98
+ bool take_sqrt;
99
+ public:
100
+ using acc_t = WelfordData<acc_scalar_t, index_t>;
101
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, index_t /*idx*/) const {
102
+ // We accumulate n in index_t to avoid cumulative rounding error, but still
103
+ // need nf for use in combine where int32 may overflow.
104
+ index_t new_n = acc.n + 1;
105
+ acc_scalar_t new_nf = static_cast<acc_scalar_t>(new_n);
106
+ acc_scalar_t delta = data - acc.mean;
107
+ acc_scalar_t new_mean = acc.mean + delta / new_nf;
108
+ acc_scalar_t new_delta = data - new_mean;
109
+ return {
110
+ new_mean,
111
+ acc.m2 + delta * new_delta,
112
+ new_n,
113
+ new_nf,
114
+ };
115
+ }
116
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
117
+ if (a.nf == 0) {
118
+ return b;
119
+ }
120
+ if (b.nf == 0) {
121
+ return a;
122
+ }
123
+ acc_scalar_t delta = b.mean - a.mean;
124
+ acc_scalar_t new_count = a.nf + b.nf;
125
+ acc_scalar_t nb_over_n = b.nf / new_count;
126
+ return {
127
+ a.mean + delta * nb_over_n,
128
+ a.m2 + b.m2 + delta * delta * a.nf * nb_over_n,
129
+ // setting acc.n as -1 since acc.n might not be able to represent the count
130
+ // correctly within its range, setting it to -1 to avoid confusion
131
+ -1,
132
+ new_count
133
+ };
134
+ }
135
+ inline C10_DEVICE res_t project(acc_t acc) const __ubsan_ignore_float_divide_by_zero__ {
136
+ const auto mean = static_cast<scalar_t>(acc.mean);
137
+ const auto divisor = acc.nf > correction ? acc.nf - correction : 0;
138
+ const auto var = acc.m2 / divisor;
139
+ res_t results(take_sqrt ? device_sqrt(var) : var, mean);
140
+ return results;
141
+ }
142
+
143
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
144
+ return acc;
145
+ }
146
+
147
+ #if defined(__CUDACC__) || defined(__HIPCC__)
148
+ inline __device__ acc_t warp_shfl_down(acc_t acc, int offset) const {
149
+ return {
150
+ WARP_SHFL_DOWN(acc.mean, offset)
151
+ , WARP_SHFL_DOWN(acc.m2, offset)
152
+ , WARP_SHFL_DOWN(acc.n, offset)
153
+ , WARP_SHFL_DOWN(acc.nf, offset)
154
+ };
155
+ }
156
+ #endif
157
+ C10_HOST_DEVICE WelfordOps(acc_scalar_t correction, bool take_sqrt)
158
+ : correction(correction), take_sqrt(take_sqrt) {}
159
+ };
160
+
161
+ template <typename scalar_t, typename acc_t=scalar_t, typename factor_t=acc_t, typename out_t = acc_t>
162
+ struct MeanOps {
163
+ factor_t factor;
164
+
165
+ inline C10_DEVICE acc_t reduce(acc_t a, scalar_t b, int64_t /*idx*/) const {
166
+ return combine(a, static_cast<acc_t>(b));
167
+ }
168
+
169
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
170
+ return a + b;
171
+ }
172
+
173
+ inline C10_DEVICE out_t project(acc_t a) const {
174
+ return a * factor;
175
+ }
176
+
177
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
178
+ return acc;
179
+ }
180
+
181
+ #if defined(__CUDACC__) || defined(__HIPCC__)
182
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t data, int offset) const {
183
+ return WARP_SHFL_DOWN(data, offset);
184
+ }
185
+ #endif
186
+
187
+ MeanOps(factor_t factor): factor(factor) {
188
+ }
189
+ };
190
+
191
+ // This accumulator template is used to calculate the minimum absolute value of
192
+ // a set of numbers.
193
+ // `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
194
+ // value. These types differ for complex number input support.
195
+ template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
196
+ struct AbsMinOps {
197
+
198
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
199
+ return MIN(acc, static_cast<acc_t>(std::abs(data)));
200
+ }
201
+
202
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
203
+ return MIN(a, b);
204
+ }
205
+
206
+ inline C10_DEVICE out_t project(acc_t a) const {
207
+ return a;
208
+ }
209
+
210
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
211
+ return acc;
212
+ }
213
+
214
+ #if defined(__CUDACC__) || defined(__HIPCC__)
215
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
216
+ return WARP_SHFL_DOWN(acc, offset);
217
+ }
218
+ #endif
219
+ };
220
+
221
+ // This accumulator template is used to calculate the maximum absolute value of
222
+ // a set of numbers.
223
+ // `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
224
+ // value. These types differ for complex number input support.
225
+ template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
226
+ struct AbsMaxOps {
227
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
228
+ return MAX(acc, static_cast<acc_t>(std::abs(data)));
229
+ }
230
+
231
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
232
+ return MAX(a, b);
233
+ }
234
+
235
+ inline C10_DEVICE out_t project(acc_t a) const {
236
+ return a;
237
+ }
238
+
239
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
240
+ return acc;
241
+ }
242
+
243
+ #if defined(__CUDACC__) || defined(__HIPCC__)
244
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
245
+ return WARP_SHFL_DOWN(acc, offset);
246
+ }
247
+ #endif
248
+ };
249
+
250
+ // This accumulator template is used to calculate the norm of the absolute value
251
+ // of a set of numbers.
252
+ // `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
253
+ // value. These types differ for complex number input support.
254
+ template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
255
+ struct NormOps {
256
+ acc_t norm_;
257
+
258
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
259
+ return acc + compat_pow(static_cast<acc_t>(std::abs(data)), norm_);
260
+ }
261
+
262
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
263
+ return a + b;
264
+ }
265
+
266
+ inline C10_DEVICE out_t project(acc_t a) const {
267
+ return compat_pow(a, static_cast<acc_t>(1.0) / norm_);
268
+ }
269
+
270
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
271
+ return acc;
272
+ }
273
+
274
+ #if defined(__CUDACC__) || defined(__HIPCC__)
275
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
276
+ return WARP_SHFL_DOWN(acc, offset);
277
+ }
278
+ #endif
279
+
280
+ NormOps(acc_t norm_): norm_(norm_) {
281
+ }
282
+ };
283
+
284
+ // This accumulator template is used to calculate the order zero norm of the
285
+ // absolute value of a set of numbers.
286
+ // `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
287
+ // value. These types differ for complex number input support.
288
+ template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
289
+ struct NormZeroOps {
290
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
291
+ return acc + (data == static_cast<scalar_t>(0) ? static_cast<acc_t>(0) : static_cast<acc_t>(1));
292
+ }
293
+
294
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
295
+ return a + b;
296
+ }
297
+
298
+ inline C10_DEVICE out_t project(acc_t a) const {
299
+ return a;
300
+ }
301
+
302
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
303
+ return acc;
304
+ }
305
+
306
+
307
+ #if defined(__CUDACC__) || defined(__HIPCC__)
308
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
309
+ return WARP_SHFL_DOWN(acc, offset);
310
+ }
311
+ #endif
312
+ };
313
+
314
+ // This accumulator template is used to calculate the order one norm of the
315
+ // absolute value of a set of numbers.
316
+ // `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
317
+ // value. These types differ for complex number input support.
318
+ template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
319
+ struct NormOneOps {
320
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
321
+ return acc + static_cast<acc_t>(std::abs(data));
322
+ }
323
+
324
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
325
+ return a + b;
326
+ }
327
+
328
+ inline C10_DEVICE out_t project(acc_t a) const {
329
+ return a;
330
+ }
331
+
332
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
333
+ return acc;
334
+ }
335
+
336
+ #if defined(__CUDACC__) || defined(__HIPCC__)
337
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
338
+ return WARP_SHFL_DOWN(acc, offset);
339
+ }
340
+ #endif
341
+ };
342
+
343
+
344
+ template<typename acc_t>
345
+ struct AbsSwitch {};
346
+
347
+ template<typename scalar_t, typename acc_t>
348
+ inline C10_DEVICE acc_t abs_if_complex(scalar_t data, AbsSwitch<acc_t>) {
349
+ return static_cast<acc_t>(data);
350
+ }
351
+
352
+ template<typename scalar_t, typename acc_t>
353
+ inline C10_DEVICE acc_t abs_if_complex(std::complex<scalar_t> data, AbsSwitch<acc_t>) {
354
+ return static_cast<acc_t>(std::abs(data));
355
+ }
356
+
357
+ template<typename scalar_t, typename acc_t>
358
+ inline C10_DEVICE acc_t abs_if_complex(c10::complex<scalar_t> data, AbsSwitch<acc_t>) {
359
+ return static_cast<acc_t>(std::abs(data));
360
+ }
361
+
362
+ // This accumulator template is used to calculate the order two norm of the
363
+ // absolute value of a set of numbers.
364
+ // `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
365
+ // value. These types differ for complex number input support.
366
+ template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
367
+ struct NormTwoOps {
368
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
369
+ acc_t data_ = abs_if_complex(data, AbsSwitch<acc_t>());
370
+ return acc + data_ * data_;
371
+ }
372
+
373
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
374
+ return a + b;
375
+ }
376
+
377
+ inline C10_DEVICE out_t project(acc_t a) const {
378
+ return device_sqrt(a);
379
+ }
380
+
381
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
382
+ return acc;
383
+ }
384
+
385
+ #if defined(__CUDACC__) || defined(__HIPCC__)
386
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
387
+ return WARP_SHFL_DOWN(acc, offset);
388
+ }
389
+ #endif
390
+ };
391
+
392
+ template <typename acc_t, typename data_t>
393
+ struct NanSumOps {
394
+ inline C10_DEVICE acc_t reduce(acc_t a, data_t b, int64_t /*idx*/) const {
395
+ return a + (at::_isnan(b) ? acc_t{0.} : acc_t{b});
396
+ }
397
+
398
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
399
+ return a + b;
400
+ }
401
+
402
+ inline C10_DEVICE data_t project(acc_t a) const {
403
+ return data_t{a};
404
+ }
405
+
406
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
407
+ return acc;
408
+ }
409
+
410
+ #if defined(__CUDACC__) || defined(__HIPCC__)
411
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t data, int offset) const {
412
+ return WARP_SHFL_DOWN(data, offset);
413
+ }
414
+ #endif
415
+ };
416
+
417
+ namespace detail {
418
+
419
+ template <typename scalar_t>
420
+ struct LessOrNan {
421
+ C10_DEVICE bool operator () (scalar_t a, scalar_t b, int64_t idx_a, int64_t idx_b) const {
422
+ // If (a == b), then choose the one with lower idx, else min(a, b)
423
+ if (at::_isnan(a)) {
424
+ if (at::_isnan(b)) {
425
+ return idx_a < idx_b;
426
+ }
427
+ return true;
428
+ }
429
+ return (a == b) ? idx_a < idx_b : (a < b);
430
+ }
431
+ };
432
+
433
+ template <typename scalar_t>
434
+ struct GreaterOrNan {
435
+ C10_DEVICE bool operator () (scalar_t a, scalar_t b, int64_t idx_a, int64_t idx_b) const {
436
+ // If (a == b), then choose the one with lower idx, else max(a, b)
437
+ if (at::_isnan(a)) {
438
+ if (at::_isnan(b)) {
439
+ return idx_a < idx_b;
440
+ }
441
+ return true;
442
+ }
443
+ return (a == b) ? idx_a < idx_b : (a > b);
444
+ }
445
+ };
446
+
447
+ template <typename comp_t>
448
+ struct MinMaxReductionOps {
449
+ using scalar_t = typename binary_function_traits<comp_t>::arg1_t;
450
+ using index_t = int64_t;
451
+ using arg_t = detail::pair<scalar_t, index_t>;
452
+
453
+ static C10_DEVICE arg_t project(arg_t arg) {
454
+ return arg;
455
+ }
456
+
457
+ static C10_DEVICE arg_t reduce(arg_t arg, scalar_t val, int64_t idx) {
458
+ return comp_t{}(arg.first, val, arg.second, idx) ? arg : arg_t(val, idx);
459
+ }
460
+
461
+ static C10_DEVICE arg_t combine(arg_t a, arg_t b) {
462
+ return comp_t{}(a.first, b.first, a.second, b.second) ? a : b;
463
+ }
464
+
465
+ static C10_DEVICE arg_t translate_idx(arg_t a, int64_t base_idx) {
466
+ return {a.first, a.second + base_idx};
467
+ }
468
+
469
+ #if defined(__CUDACC__) || defined(__HIPCC__)
470
+ static C10_DEVICE arg_t warp_shfl_down(arg_t arg, int offset) {
471
+ return arg_t(WARP_SHFL_DOWN(arg.first, offset),
472
+ WARP_SHFL_DOWN(arg.second, offset));
473
+ }
474
+ #endif
475
+ };
476
+
477
+ template <typename comp_t>
478
+ struct ArgReductionOps : public MinMaxReductionOps<comp_t> {
479
+ using typename MinMaxReductionOps<comp_t>::scalar_t;
480
+ using typename MinMaxReductionOps<comp_t>::index_t;
481
+ using typename MinMaxReductionOps<comp_t>::arg_t;
482
+
483
+ static C10_DEVICE index_t project(arg_t arg) {
484
+ return arg.second;
485
+ }
486
+ };
487
+
488
+ } // namespace detail
489
+
490
+ template <typename scalar_t>
491
+ struct ArgMaxOps :
492
+ public detail::ArgReductionOps<detail::GreaterOrNan<scalar_t>> {
493
+ };
494
+
495
+ template <typename scalar_t>
496
+ struct ArgMinOps :
497
+ public detail::ArgReductionOps<detail::LessOrNan<scalar_t>> {
498
+ };
499
+
500
+ template <typename scalar_t>
501
+ struct MinOps :
502
+ public detail::MinMaxReductionOps<detail::LessOrNan<scalar_t>> {
503
+ };
504
+
505
+ template <typename scalar_t>
506
+ struct MaxOps :
507
+ public detail::MinMaxReductionOps<detail::GreaterOrNan<scalar_t>> {
508
+ };
509
+
510
+ template <typename scalar_t, typename acc_scalar_t, typename index_t>
511
+ struct MinMaxOps {
512
+ using acc_t = detail::pair<acc_scalar_t, acc_scalar_t>;
513
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, index_t /*idx*/) const {
514
+ return combine(acc, {data, data});
515
+ }
516
+
517
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
518
+ auto min_val = (at::_isnan(a.first) || a.first < b.first) ? a.first : b.first;
519
+ auto max_val = (at::_isnan(a.second) || a.second > b.second) ? a.second : b.second;
520
+
521
+ return {min_val, max_val};
522
+ }
523
+
524
+ inline C10_DEVICE acc_t project(acc_t acc) const {
525
+ return acc;
526
+ }
527
+
528
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
529
+ return acc;
530
+ }
531
+
532
+ #if defined(__CUDACC__) || defined(__HIPCC__)
533
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
534
+ return {
535
+ WARP_SHFL_DOWN(acc.first, offset), WARP_SHFL_DOWN(acc.second, offset)
536
+ };
537
+ }
538
+ #endif
539
+ };
540
+
541
+ }} // namespace at::native
542
+
543
+ #undef MAX
544
+ #undef MIN
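
WelfordOps::reduce is the classic single-pass mean/M2 update and WelfordOps::combine is the pairwise merge (Chan et al.): with delta = mean_b - mean_a and n = n_a + n_b, the merged mean is mean_a + delta * n_b / n and the merged M2 is M2_a + M2_b + delta^2 * n_a * n_b / n. Below is a plain-C++ check of that merge rule, a sketch mirroring the functors above rather than the device code itself.

#include <cmath>
#include <cassert>

struct Welford { double mean, m2, n; };

static Welford update(Welford a, double x) {          // mirrors WelfordOps::reduce
  a.n += 1;
  double delta = x - a.mean;
  a.mean += delta / a.n;
  a.m2 += delta * (x - a.mean);
  return a;
}

static Welford combine(Welford a, Welford b) {        // mirrors WelfordOps::combine
  if (a.n == 0) return b;
  if (b.n == 0) return a;
  double delta = b.mean - a.mean, n = a.n + b.n;
  return Welford{a.mean + delta * b.n / n,
                 a.m2 + b.m2 + delta * delta * a.n * b.n / n,
                 n};
}

int main() {
  double data[] = {1, 2, 3, 4, 5, 6};
  Welford left{0, 0, 0}, right{0, 0, 0}, all{0, 0, 0};
  for (int i = 0; i < 3; ++i) left = update(left, data[i]);
  for (int i = 3; i < 6; ++i) right = update(right, data[i]);
  for (double x : data) all = update(all, x);
  Welford merged = combine(left, right);
  assert(std::abs(merged.mean - all.mean) < 1e-12);   // 3.5
  assert(std::abs(merged.m2 - all.m2) < 1e-12);       // 17.5, same sum of squared deviations
  return 0;
}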
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/SobolEngineOpsUtils.h ADDED
@@ -0,0 +1,55 @@
1
+ /// This file contains some tensor-agnostic operations to be used in the
2
+ /// core functions of the `SobolEngine`
3
+ #include <ATen/core/Tensor.h>
4
+
5
+ #ifndef AT_PER_OPERATOR_HEADERS
6
+ #include <ATen/Functions.h>
7
+ #else
8
+ #include <ATen/ops/arange.h>
9
+ #include <ATen/ops/mul.h>
10
+ #include <ATen/ops/pow.h>
11
+ #endif
12
+
13
+ namespace at::native::sobol_utils {
14
+
15
+ /// Function to return the minimum number of bits needed to represent the integer `n`
16
+ inline int64_t bit_length(const int64_t n) {
17
+ int64_t nbits, nloc;
18
+ for (nloc = n, nbits = 0; nloc > 0; nloc /= 2, nbits++);
19
+ return nbits;
20
+ }
21
+
22
+ /// Function to get the position of the rightmost zero in the bit representation of an integer
23
+ /// This value is the zero-indexed position
24
+ inline int64_t rightmost_zero(const int64_t n) {
25
+ int64_t z, i;
26
+ for (z = n, i = 0; z % 2 == 1; z /= 2, i++);
27
+ return i;
28
+ }
29
+
30
+ /// Function to get a subsequence of bits in the representation of an integer starting from
31
+ /// `pos` and of length `length`
32
+ inline int64_t bitsubseq(const int64_t n, const int64_t pos, const int64_t length) {
33
+ return (n >> pos) & ((1 << length) - 1);
34
+ }
35
+
36
+ /// Function to perform the inner product between a batched square matrix and a power of 2 vector
37
+ inline at::Tensor cdot_pow2(const at::Tensor& bmat) {
38
+ at::Tensor inter = at::arange(bmat.size(-1) - 1, -1, -1, bmat.options());
39
+ inter = at::pow(2, inter).expand_as(bmat);
40
+ return at::mul(inter, bmat).sum(-1);
41
+ }
42
+
43
+ /// All definitions below this point are data. These are constant, and should not be modified
44
+ /// without notice
45
+
46
+ constexpr int64_t MAXDIM = 21201;
47
+ constexpr int64_t MAXDEG = 18;
48
+ constexpr int64_t MAXBIT = 30;
49
+ constexpr int64_t LARGEST_NUMBER = 1 << MAXBIT;
50
+ constexpr float RECIPD = 1.0 / LARGEST_NUMBER;
51
+
52
+ extern const int64_t poly[MAXDIM];
53
+ extern const int64_t initsobolstate[MAXDIM][MAXDEG];
54
+
55
+ } // namespace at::native::sobol_utils
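
The bit helpers above can be verified by hand: bit_length(6) = 3 (0b110 needs three bits), rightmost_zero(5) = 1 (the lowest zero bit of 0b101), and bitsubseq(0b110101, 2, 3) = 0b101. Here is a standalone restatement of the same loops with no ATen dependency, assuming the semantics documented in the header:

#include <cstdint>
#include <cassert>

static int64_t bit_length(int64_t n)      { int64_t b = 0; for (; n > 0; n /= 2) ++b; return b; }
static int64_t rightmost_zero(int64_t n)  { int64_t i = 0; for (; n % 2 == 1; n /= 2) ++i; return i; }
static int64_t bitsubseq(int64_t n, int64_t pos, int64_t len) { return (n >> pos) & ((1 << len) - 1); }

int main() {
  assert(bit_length(6) == 3);
  assert(bit_length(1) == 1);
  assert(rightmost_zero(5) == 1);     // 0b101 -> lowest zero bit is bit 1
  assert(rightmost_zero(7) == 3);     // 0b111 -> bit 3
  assert(bitsubseq(0b110101, 2, 3) == 0b101);
  return 0;
}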
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/SortingUtils.h ADDED
@@ -0,0 +1,88 @@
1
+ #pragma once
2
+
3
+ #include <ATen/NumericUtils.h>
4
+ #include <ATen/native/Resize.h>
5
+ #include <c10/util/irange.h>
6
+
7
+ #ifndef AT_PER_OPERATOR_HEADERS
8
+ #include <ATen/Functions.h>
9
+ #else
10
+ #include <ATen/ops/empty.h>
11
+ #endif
12
+
13
+ namespace at::native {
14
+
15
+ // ensure we get good values and indices for kthvalue, mode
16
+ // this will always be with the reducing dim as 1-d
17
+ inline void _reduction_with_indices_allocate_or_resize_output(
18
+ Tensor& values,
19
+ Tensor& indices,
20
+ const Tensor& self,
21
+ int64_t dim_,
22
+ bool keepdim) {
23
+ int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true);
24
+ auto result_sizes = self.sizes().vec();
25
+ if (!result_sizes.empty()) {
26
+ result_sizes[dim] = 1;
27
+ }
28
+ if (values.defined()) {
29
+ TORCH_CHECK(
30
+ self.options().type_equal(values.options()),
31
+ "output values must be of same type as input");
32
+ if (!keepdim && values.dim() == self.dim() - 1) {
33
+ // unsqueeze to preserve passed in noncontiguous tensor in resize
34
+ values.unsqueeze_(dim);
35
+ }
36
+ resize_output(values, result_sizes);
37
+ } else {
38
+ values = at::empty(result_sizes, self.options());
39
+ }
40
+ if (indices.defined()) {
41
+ TORCH_CHECK(
42
+ indices.dtype() == kLong, "output indices must be of scalar type Long");
43
+ TORCH_CHECK(
44
+ indices.device() == self.device(),
45
+ "output indices must be on same device as input");
46
+ if (!keepdim && indices.dim() == self.dim() - 1) {
47
+ // unsqueeze to preserve passed in noncontiguous tensor in resize
48
+ indices.unsqueeze_(dim);
49
+ }
50
+ resize_output(indices, result_sizes);
51
+ } else {
52
+ indices = at::empty(result_sizes, self.options().dtype(kLong));
53
+ }
54
+ }
55
+
56
+ // ensure we get good values and indices for topk
57
+ inline void _allocate_or_resize_output_with_indices(
58
+ Tensor& values,
59
+ Tensor& indices,
60
+ const Tensor& self,
61
+ int64_t dim_,
62
+ int64_t k) {
63
+ int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true);
64
+ auto result_sizes = self.sizes().vec();
65
+ if (!result_sizes.empty()) {
66
+ result_sizes[dim] = k;
67
+ }
68
+ if (values.defined()) {
69
+ TORCH_CHECK(
70
+ self.options().type_equal(values.options()),
71
+ "output values must be of same type as input");
72
+ values.resize_(result_sizes);
73
+ } else {
74
+ values = at::empty(result_sizes, self.options());
75
+ }
76
+ if (indices.defined()) {
77
+ TORCH_CHECK(
78
+ indices.dtype() == kLong, "output indices must be of scalar type Long");
79
+ TORCH_CHECK(
80
+ indices.device() == self.device(),
81
+ "output indices must be on same device as input");
82
+ indices.resize_(result_sizes);
83
+ } else {
84
+ indices = at::empty(result_sizes, self.options().dtype(kLong));
85
+ }
86
+ }
87
+
88
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/SparseTensorUtils.h ADDED
@@ -0,0 +1,190 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Parallel.h>
4
+ #include <ATen/SparseTensorImpl.h>
5
+ #include <ATen/core/Tensor.h>
6
+
7
+ #ifndef AT_PER_OPERATOR_HEADERS
8
+ #include <ATen/Functions.h>
9
+ #else
10
+ #include <ATen/ops/empty.h>
11
+ #include <ATen/ops/tensor.h>
12
+ #endif
13
+
14
+ namespace at::sparse {
15
+
16
+ // Just for documentary purposes
17
+ using SparseTensor = Tensor;
18
+ using SparseType = Type;
19
+
20
+ // This is an internal utility function for getting at the SparseTensorImpl,
21
+ // so that we can write sparse tensor specific accessors for special fields
22
+ // in SparseTensor. You should only use this for writing low level
23
+ // setters/getters for SparseTensorImpl fields; otherwise, you should use
24
+ // the low level setters/getters that were implemented using this.
25
+ //
26
+ // This may be called repeatedly, so make sure it's pretty cheap.
27
+ inline SparseTensorImpl* get_sparse_impl(const SparseTensor& self) {
28
+ TORCH_INTERNAL_ASSERT(
29
+ self.is_sparse(), "_internal_get_SparseTensorImpl: not a sparse tensor");
30
+ return static_cast<SparseTensorImpl*>(self.unsafeGetTensorImpl());
31
+ }
32
+
33
+ // Takes indices and values and directly puts them into the sparse tensor, no
34
+ // copy. This used to be called THSTensor_(_move)
35
+ inline void alias_into_sparse(
36
+ const SparseTensor& self,
37
+ const Tensor& indices,
38
+ const Tensor& values) {
39
+ get_sparse_impl(self)->set_indices_and_values_unsafe(indices, values);
40
+ }
41
+
42
+ // Take indices and values and makes a (data) copy of them to put into the
43
+ // sparse indices/values. This used to be called THSTensor_(_set)
44
+ inline void copy_into_sparse(
45
+ const SparseTensor& self,
46
+ const Tensor& indices,
47
+ const Tensor& values,
48
+ bool non_blocking) {
49
+ alias_into_sparse(
50
+ self,
51
+ indices.to(self._indices().options(), non_blocking, /*copy=*/true),
52
+ values.to(self._values().options(), non_blocking, /*copy=*/true));
53
+ }
54
+
55
+ // TODO: put this into the public API
56
+ inline bool is_same_tensor(const Tensor& lhs, const Tensor& rhs) {
57
+ return lhs.unsafeGetTensorImpl() == rhs.unsafeGetTensorImpl();
58
+ }
59
+
60
+ inline bool is_same_density(const SparseTensor& self, const SparseTensor& src) {
61
+ return self.sparse_dim() == src.sparse_dim() &&
62
+ self.dense_dim() == src.dense_dim();
63
+ }
64
+
65
+ // Give us a new values tensor, with the same dimensionality
66
+ // as 'values' but with a new number of non-zero elements.
67
+ // TODO: Expose this for real in ATen, some day?
68
+ // NB: Doesn't preserve data.
69
+ inline Tensor new_values_with_size_of(const Tensor& values, int64_t nnz) {
70
+ std::vector<int64_t> size = values.sizes().vec();
71
+ size[0] = nnz;
72
+ return at::empty(size, values.options());
73
+ }
74
+
75
+ // NOTE [ Flatten Sparse Indices ]
76
+ // This helper function flattens a sparse indices tensor (a Tensor) into a 1D
77
+ // indices tensor. E.g.,
78
+ // input = [[2, 4, 0],
79
+ // [3, 1, 10]]
80
+ // full_size = [2, 12]
81
+ // output = [ 2 * 12 + 3, 4 * 12 + 1, 0 * 12 + 10 ] = [27, 49, 10]
82
+ //
83
+ // In other words, assuming that each `indices[i, :]` is a valid index to a
84
+ // tensor `t` of shape `full_size`. This returns the corresponding indices to
85
+ // the flattened tensor `t.reshape( prod(full_size[:indices.size(0)]), -1 )`.
86
+ // if forceClone is true, the result will forced to be a clone of self.
87
+ // if force_clone is true, the result will forced to be a clone of self.
88
+ TORCH_API Tensor flatten_indices(
89
+ const Tensor& indices,
90
+ IntArrayRef full_size,
91
+ bool force_clone = false);
92
+
93
+ // Flatten sparse tensor's indices from nD to 1D, similar to NOTE [ Flatten
94
+ // Sparse Indices ], except this one allows partial flatten: only flatten on
95
+ // specified dims. Note that the flatten indices might be uncoalesced if
96
+ // dims_to_flatten.size() < sparse_dim. Also if input indices is already
97
+ // coalesced, the flattened indices will also be sorted.
98
+ //
99
+ // args:
100
+ // indices: sparse tensor indices
101
+ // sizes: sparse tensor sizes
102
+ // dims_to_flatten: a list of dim index to flatten
103
+ //
104
+ // Ex1:
105
+ // indices = [[2, 4, 0],
106
+ // [3, 1, 3]]
107
+ // sizes = [2, 12]
108
+ // dims_to_flatten = [0, 1]
109
+ // new_indices = [ 2 * 12 + 3, 4 * 12 + 1, 0 * 12 + 3 ] = [27, 49, 3]
110
+ //
111
+ // Ex2:
112
+ // dims_to_flatten = [1]
113
+ // new_indices = [ 3, 1, 3 ] # uncoalesced
114
+ TORCH_API Tensor flatten_indices_by_dims(
115
+ const Tensor& indices,
116
+ const IntArrayRef& sizes,
117
+ const IntArrayRef& dims_to_flatten);
118
+
119
+ // Find the CSR representation for a row `indices` from the COO format
120
+ TORCH_API Tensor coo_to_csr(const int64_t* indices, int64_t dim, int64_t nnz);
121
+
122
+ TORCH_API Tensor zeros_like_with_indices(const Tensor& t);
123
+
124
+ template <size_t static_shape_max_len>
125
+ class TensorGeometryHolder {
126
+ using geometry_holder_t = std::array<int64_t, static_shape_max_len>;
127
+
128
+ public:
129
+ explicit TensorGeometryHolder(
130
+ IntArrayRef sizes,
131
+ IntArrayRef strides,
132
+ TensorOptions options = {}) {
133
+ std::copy(sizes.begin(), sizes.end(), t_sizes.begin());
134
+ std::copy(strides.begin(), strides.end(), t_strides.begin());
135
+ }
136
+
137
+ explicit TensorGeometryHolder(const Tensor& t)
138
+ : TensorGeometryHolder(t.sizes(), t.strides()) {}
139
+
140
+ auto operator*() const {
141
+ return std::make_tuple(t_sizes, t_strides);
142
+ }
143
+
144
+ private:
145
+ geometry_holder_t t_sizes;
146
+ geometry_holder_t t_strides;
147
+ };
148
+
149
+ template <>
150
+ class TensorGeometryHolder<0> {
151
+ using geometry_holder_t = Tensor;
152
+
153
+ public:
154
+ explicit TensorGeometryHolder(
155
+ IntArrayRef sizes,
156
+ IntArrayRef strides,
157
+ TensorOptions options) {
158
+ const int64_t t_ndims = sizes.size();
159
+ const auto cpu_options = TensorOptions(options).dtype(kLong).device(kCPU);
160
+ Tensor t_sizes_and_strides_cpu = at::empty({2, t_ndims}, cpu_options);
161
+ t_sizes_and_strides_cpu.select(0, 0).copy_(at::tensor(sizes, cpu_options));
162
+ t_sizes_and_strides_cpu.select(0, 1).copy_(
163
+ at::tensor(strides, cpu_options));
164
+ const Tensor t_sizes_and_strides =
165
+ t_sizes_and_strides_cpu.to(options.device());
166
+ t_sizes = t_sizes_and_strides.select(0, 0);
167
+ t_strides = t_sizes_and_strides.select(0, 1);
168
+ }
169
+
170
+ explicit TensorGeometryHolder(const Tensor& t)
171
+ : TensorGeometryHolder(t.sizes(), t.strides(), t.options()) {}
172
+
173
+ auto operator*() const {
174
+ return std::make_tuple(
175
+ t_sizes.template data_ptr<int64_t>(),
176
+ t_strides.template data_ptr<int64_t>());
177
+ }
178
+
179
+ private:
180
+ geometry_holder_t t_sizes;
181
+ geometry_holder_t t_strides;
182
+ };
183
+
184
+ // Return all indices of a tensor with the given shape.
185
+ //
186
+ // full_coo_indices(shape) is equivalent to
187
+ // torch.ones(shape).nonzero().transpose(-2, -1) but much faster.
188
+ TORCH_API Tensor full_coo_indices(IntArrayRef sizes, TensorOptions options);
189
+
190
+ } // namespace at::sparse
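
flatten_indices (NOTE [ Flatten Sparse Indices ] above) maps each COO column to one linear offset using row-major strides over full_size; only the declaration appears in this header. Below is a plain-C++ re-derivation of the NOTE's own example, using a hypothetical helper that operates on std::vector rather than Tensor.

#include <cstdint>
#include <vector>
#include <cassert>

// Flatten nD COO indices (sparse_dim rows, nnz columns) to 1-D offsets,
// using the same row-major arithmetic as flatten_indices.
static std::vector<int64_t> flatten_coo(const std::vector<std::vector<int64_t>>& indices,
                                        const std::vector<int64_t>& full_size) {
  const size_t sparse_dim = indices.size();
  const size_t nnz = indices[0].size();
  std::vector<int64_t> out(nnz, 0);
  for (size_t d = 0; d < sparse_dim; ++d) {
    for (size_t j = 0; j < nnz; ++j) {
      out[j] = out[j] * full_size[d] + indices[d][j];
    }
  }
  return out;
}

int main() {
  // Same example as the NOTE: indices [[2, 4, 0], [3, 1, 10]], full_size [2, 12].
  auto flat = flatten_coo({{2, 4, 0}, {3, 1, 10}}, {2, 12});
  assert((flat == std::vector<int64_t>{27, 49, 10}));
  return 0;
}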