applied-ai-018 commited on
Commit
89256fc
·
verified ·
1 Parent(s): 8733894

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Activation.h +98 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h +13 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ComplexHelper.h +97 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ConvUtils.h +446 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Fill.h +21 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h +369 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/IndexKernel.h +41 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Lerp.h +46 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebra.h +18 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebraUtils.h +624 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h +11 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/RNN.h +53 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOps.h +56 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/SharedReduceOps.h +544 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/StridedRandomAccessor.h +301 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/TensorCompare.h +49 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/TensorFactories.h +140 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/TensorIteratorDynamicCasting.h +53 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/TensorShape.h +58 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/UnfoldBackward.h +112 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/ChannelShuffleKernel.h +14 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/DepthwiseConvKernel.h +21 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/GridSamplerKernel.h +34 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/IndexKernelUtils.h +88 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/Intrinsics.h +33 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/LogAddExp.h +61 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/Loops.h +394 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/MaxUnpoolKernel.h +14 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/PixelShuffleKernel.h +14 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/Reduce.h +313 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/SampledAddmmKernel.h +12 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/SerialStackImpl.h +144 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/UpSampleKernelAVXAntialias.h +1376 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/WeightNormKernel.h +20 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/avx_mathfun.h +522 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/moments_utils.h +207 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/utils.h +184 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/zmath.h +251 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/BinaryInternal.h +48 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CUDAJitLoops.cuh +297 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CUDALoops.cuh +267 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CompositeRandomAccessor.h +35 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CuFFTPlanCache.h +532 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/DeviceSqrt.cuh +25 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/DistributionTemplates.h +671 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ForeachFunctors.cuh +681 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/GridSampler.cuh +321 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/GridSampler.h +32 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/JitLoops.cuh +187 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/KernelUtils.cuh +149 -0
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Activation.h ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <c10/util/string_view.h>
6
+
7
+ namespace c10 {
8
+ class Scalar;
9
+ }
10
+
11
+ namespace at {
12
+ struct TensorIterator;
13
+ struct TensorIteratorBase;
14
+ class TensorBase;
15
+ }
16
+
17
+ namespace at::native {
18
+
19
+ // These constants control the approximation behavior of gelu function.
20
+ enum class GeluType {
21
+ None, // Baseline Gelu
22
+ Tanh, // Tahn Gelu Approximation
23
+ END
24
+ };
25
+
26
+ static GeluType get_gelutype_enum(const c10::string_view approximate) {
27
+ if (approximate == "none") {
28
+ return GeluType::None;
29
+ } else if (approximate == "tanh") {
30
+ return GeluType::Tanh;
31
+ } else {
32
+ TORCH_CHECK(false, "approximate argument must be either none or tanh.");
33
+ }
34
+ }
35
+
36
+ static std::string gelutype_to_string(const GeluType type) {
37
+ switch(type) {
38
+ case GeluType::None: return "none";
39
+ case GeluType::Tanh: return "tanh";
40
+ default: TORCH_CHECK(false, "unknown GELU type: ", static_cast<int>(type));
41
+ }
42
+ }
43
+
44
+ using structured_activation_fn = void (*)(TensorIteratorBase&);
45
+ using structured_activation_backward_fn = void (*)(TensorIteratorBase&);
46
+
47
+ using activation_fn = void (*)(TensorIterator&);
48
+ using activation_backward_fn = void (*)(TensorIterator&);
49
+ using softplus_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
50
+ using softplus_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
51
+ using threshold_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
52
+ using hardtanh_backward_fn = void (*)(TensorIterator&, const c10::Scalar&, const c10::Scalar&);
53
+ using hardsigmoid_fn = void(*)(TensorIteratorBase&);
54
+ using hardsigmoid_backward_fn = void(*)(TensorIteratorBase&);
55
+ using hardswish_fn = void(*)(TensorIterator&);
56
+ using hardswish_backward_fn = void(*)(TensorIterator&);
57
+ using shrink_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
58
+ using softshrink_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
59
+ using shrink_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
60
+ using elu_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&, const c10::Scalar&);
61
+ using elu_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&, const c10::Scalar&, bool);
62
+ using leaky_relu_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
63
+ using leaky_relu_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
64
+ using log_sigmoid_cpu_fn = void (*)(TensorBase&, TensorBase&, const TensorBase&);
65
+ using gelu_fn = void (*)(TensorIteratorBase&, GeluType);
66
+ using gelu_backward_fn = void (*)(TensorIteratorBase&, GeluType);
67
+ using glu_jvp_fn = void (*)(TensorIteratorBase&);
68
+
69
+ DECLARE_DISPATCH(elu_fn, elu_stub);
70
+ DECLARE_DISPATCH(elu_backward_fn, elu_backward_stub);
71
+ DECLARE_DISPATCH(softplus_fn, softplus_stub);
72
+ DECLARE_DISPATCH(softplus_backward_fn, softplus_backward_stub);
73
+ DECLARE_DISPATCH(log_sigmoid_cpu_fn, log_sigmoid_cpu_stub);
74
+ DECLARE_DISPATCH(activation_backward_fn, log_sigmoid_backward_stub);
75
+ DECLARE_DISPATCH(threshold_fn, threshold_stub);
76
+ DECLARE_DISPATCH(gelu_fn, GeluKernel);
77
+ DECLARE_DISPATCH(gelu_backward_fn, GeluBackwardKernel);
78
+ DECLARE_DISPATCH(hardtanh_backward_fn, hardtanh_backward_stub);
79
+ DECLARE_DISPATCH(hardsigmoid_fn, hardsigmoid_stub);
80
+ DECLARE_DISPATCH(hardsigmoid_backward_fn, hardsigmoid_backward_stub);
81
+ DECLARE_DISPATCH(hardswish_fn, hardswish_stub);
82
+ DECLARE_DISPATCH(hardswish_backward_fn, hardswish_backward_stub);
83
+ DECLARE_DISPATCH(shrink_fn, hardshrink_stub);
84
+ DECLARE_DISPATCH(softshrink_fn, softshrink_stub);
85
+ DECLARE_DISPATCH(shrink_backward_fn, shrink_backward_stub);
86
+ DECLARE_DISPATCH(leaky_relu_fn, leaky_relu_stub);
87
+ DECLARE_DISPATCH(leaky_relu_backward_fn, leaky_relu_backward_stub);
88
+ DECLARE_DISPATCH(structured_activation_fn, glu_stub);
89
+ DECLARE_DISPATCH(activation_backward_fn, glu_backward_stub);
90
+ DECLARE_DISPATCH(glu_jvp_fn, glu_jvp_stub);
91
+ DECLARE_DISPATCH(structured_activation_fn, silu_stub);
92
+ DECLARE_DISPATCH(structured_activation_backward_fn, silu_backward_stub);
93
+ DECLARE_DISPATCH(structured_activation_fn, mish_stub);
94
+ DECLARE_DISPATCH(activation_backward_fn, mish_backward_stub);
95
+ DECLARE_DISPATCH(activation_fn, prelu_stub);
96
+ DECLARE_DISPATCH(activation_backward_fn, prelu_backward_stub);
97
+
98
+ } // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <c10/macros/Export.h>
3
+ #include <limits>
4
+
5
+ namespace at {
6
+ class TensorBase;
7
+ }
8
+
9
+ namespace at::native {
10
+
11
+ TORCH_API bool canUse32BitIndexMath(const at::TensorBase &t, int64_t max_elem=std::numeric_limits<int32_t>::max());
12
+
13
+ }
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ComplexHelper.h ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <c10/util/irange.h>
5
+
6
+ #ifndef AT_PER_OPERATOR_HEADERS
7
+ #include <ATen/NativeFunctions.h>
8
+ #else
9
+ #include <ATen/ops/view_as_real_native.h>
10
+ #include <ATen/ops/view_as_complex_native.h>
11
+
12
+ #include <utility>
13
+ #endif
14
+
15
+ // WARNING: this header contains non-inline functions and should be only
16
+ // included from ONE cpp file
17
+
18
+ namespace at::native {
19
+
20
+ // View tensor with new dtype, storage offset, sizes and strides
21
+ inline Tensor view_tensor(
22
+ const Tensor &tensor, ScalarType dtype,
23
+ c10::SymInt offset, SymIntArrayRef sizes, SymIntArrayRef strides) {
24
+ Storage storage = tensor.storage();
25
+ auto key_set = tensor.key_set().remove(DispatchKey::Conjugate);
26
+ auto new_tensor = detail::make_tensor<TensorImpl>(
27
+ c10::TensorImpl::VIEW, std::move(storage), key_set, scalarTypeToTypeMeta(dtype));
28
+ auto * impl = new_tensor.unsafeGetTensorImpl();
29
+ impl->set_sizes_and_strides(sizes, strides, offset);
30
+ return new_tensor;
31
+ }
32
+
33
+ inline SymDimVector computeStrideForViewAsReal(SymIntArrayRef oldstride) {
34
+ SymDimVector res(oldstride.size() + 1);
35
+ for (const auto i : c10::irange(oldstride.size())) {
36
+ res[i] = oldstride[i] * 2;
37
+ }
38
+ res.back() = 1;
39
+ return res;
40
+ }
41
+
42
+ inline Tensor _view_as_real_physical(const Tensor& self) {
43
+ TORCH_CHECK(self.is_complex(), "view_as_real is only supported for complex tensors");
44
+ auto old_sizes = self.sym_sizes();
45
+ SymDimVector new_sizes(old_sizes.size() + 1);
46
+ std::copy(old_sizes.begin(), old_sizes.end(), new_sizes.begin());
47
+ // last dimension will always have two elements containing the real and imag vals
48
+ new_sizes.back() = 2;
49
+ auto new_strides = computeStrideForViewAsReal(self.sym_strides());
50
+ auto new_storage_offset = self.sym_storage_offset() * 2;
51
+ const auto float_type = c10::toRealValueType(self.scalar_type());
52
+ auto real_tensor = view_tensor(self, float_type, std::move(new_storage_offset), new_sizes, new_strides);
53
+ return real_tensor;
54
+ }
55
+
56
+ // expects as input a complex tensor and returns back a tensor
57
+ // with corresponding real dtype containing the complex values
58
+ // in the last two dimensions
59
+ Tensor view_as_real(const Tensor& self) {
60
+ TORCH_CHECK(!self.is_conj(), "view_as_real doesn't work on unresolved conjugated tensors. To resolve the conjugate tensor so you can view it as real, use self.resolve_conj(); however, be warned that the resulting tensor will NOT alias the original.");
61
+ return _view_as_real_physical(self);
62
+ }
63
+
64
+ inline SymDimVector computeStrideForViewAsComplex(SymIntArrayRef oldstride) {
65
+ const int64_t dim = oldstride.size();
66
+ TORCH_CHECK(oldstride[dim-1] == 1, "Tensor must have a last dimension with stride 1");
67
+
68
+ SymDimVector res(dim - 1);
69
+ for (const auto i : c10::irange(res.size())) {
70
+ TORCH_CHECK(oldstride[i] % 2 == 0, "Tensor must have a stride divisible by 2 for all but last dimension");
71
+ res[i] = oldstride[i] / 2;
72
+ }
73
+ return res;
74
+ }
75
+
76
+ // expects as input a float or double tensor with last dimension of size 2
77
+ // and returns back a tensor with corresponding complex dtype
78
+ Tensor view_as_complex(const Tensor& self) {
79
+ TORCH_CHECK(
80
+ self.scalar_type() == kFloat || self.scalar_type() == kDouble || self.scalar_type() == kHalf,
81
+ "view_as_complex is only supported for half, float and double tensors, but got a tensor of scalar type: ", self.scalar_type());
82
+
83
+ auto old_sizes = self.sym_sizes();
84
+ TORCH_CHECK(!old_sizes.empty(), "Input tensor must have one or more dimensions");
85
+ TORCH_CHECK(old_sizes[old_sizes.size()-1] == 2, "Tensor must have a last dimension of size 2");
86
+ SymDimVector new_sizes(old_sizes.begin(), old_sizes.end() - 1);
87
+
88
+ const auto new_strides = computeStrideForViewAsComplex(self.sym_strides());
89
+ const auto complex_type = c10::toComplexType(self.scalar_type());
90
+
91
+ TORCH_CHECK(self.sym_storage_offset() % 2 == 0, "Tensor must have a storage_offset divisible by 2");
92
+ const auto new_storage_offset = self.sym_storage_offset() / 2;
93
+
94
+ return view_tensor(self, complex_type, new_storage_offset, new_sizes, new_strides);
95
+ }
96
+
97
+ } // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ConvUtils.h ADDED
@@ -0,0 +1,446 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/TensorUtils.h>
4
+ #include <ATen/detail/CUDAHooksInterface.h>
5
+ #include <ATen/native/DispatchStub.h>
6
+ #include <c10/util/env.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ namespace at::native {
10
+
11
+ using conv_depthwise2d_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
12
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
13
+ at::IntArrayRef, at::IntArrayRef, std::array<bool, 2>);
14
+ DECLARE_DISPATCH(conv_depthwise2d_backward_fn, conv_depthwise2d_backward_stub);
15
+ using conv_depthwise3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
16
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
17
+ at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
18
+ DECLARE_DISPATCH(conv_depthwise3d_backward_fn, conv_depthwise3d_backward_stub);
19
+ using cudnn_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
20
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
21
+ at::IntArrayRef, int64_t, bool, bool, bool, std::array<bool,2>);
22
+ DECLARE_DISPATCH(cudnn_convolution_backward_fn, cudnn_convolution_backward_stub);
23
+ using mps_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
24
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
25
+ at::IntArrayRef, int64_t, std::array<bool,3>);
26
+ DECLARE_DISPATCH(mps_convolution_backward_fn, mps_convolution_backward_stub);
27
+ using cudnn_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
28
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
29
+ at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool, std::array<bool,2>);
30
+ DECLARE_DISPATCH(cudnn_convolution_transpose_backward_fn, cudnn_convolution_transpose_backward_stub);
31
+ using miopen_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
32
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
33
+ at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
34
+ DECLARE_DISPATCH(miopen_convolution_backward_fn, miopen_convolution_backward_stub);
35
+ using miopen_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
36
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
37
+ at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
38
+ DECLARE_DISPATCH(miopen_convolution_transpose_backward_fn, miopen_convolution_transpose_backward_stub);
39
+ using miopen_depthwise_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
40
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
41
+ at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
42
+ DECLARE_DISPATCH(miopen_depthwise_convolution_backward_fn, miopen_depthwise_convolution_backward_stub);
43
+ using mkldnn_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
44
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
45
+ at::IntArrayRef, int64_t, std::array<bool,3>);
46
+ DECLARE_DISPATCH(mkldnn_convolution_backward_fn, mkldnn_convolution_backward_stub);
47
+ using mkldnn_convolution_transpose_fn = Tensor(*)(const Tensor&, const Tensor&, const c10::optional<Tensor>&,
48
+ IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t);
49
+ DECLARE_DISPATCH(mkldnn_convolution_transpose_fn, mkldnn_convolution_transpose_stub);
50
+ using mkldnn_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
51
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
52
+ at::IntArrayRef, at::IntArrayRef, int64_t, std::array<bool,3>);
53
+ DECLARE_DISPATCH(mkldnn_convolution_transpose_backward_fn, mkldnn_convolution_transpose_backward_stub);
54
+ using slow_conv_dilated2d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
55
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
56
+ at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
57
+ DECLARE_DISPATCH(slow_conv_dilated2d_backward_fn, slow_conv_dilated2d_backward_stub);
58
+ using slow_conv_dilated3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
59
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
60
+ at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
61
+ DECLARE_DISPATCH(slow_conv_dilated3d_backward_fn, slow_conv_dilated3d_backward_stub);
62
+ using slow_conv_transpose2d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
63
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
64
+ at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, std::array<bool,3>);
65
+ DECLARE_DISPATCH(slow_conv_transpose2d_backward_fn, slow_conv_transpose2d_backward_stub);
66
+ using slow_conv_transpose3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
67
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
68
+ at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, std::array<bool,3>);
69
+ DECLARE_DISPATCH(slow_conv_transpose3d_backward_fn, slow_conv_transpose3d_backward_stub);
70
+
71
+ namespace {
72
+ static bool cudnnv8_heuristic_mode_b = c10::utils::check_env("TORCH_CUDNN_USE_HEURISTIC_MODE_B") == true;
73
+ }
74
+
75
+ static inline bool cudnnv8_enabled_check_debug() {
76
+ static bool cudnnv8_flag = c10::utils::check_env("TORCH_CUDNN_V8_API_DISABLED") != true;
77
+ static bool cudnnv8_debug = c10::utils::check_env("TORCH_CUDNN_V8_API_DEBUG") == true;
78
+ static uint8_t cudnnv8_debugcount = 0;
79
+ if (cudnnv8_debug == 1 && cudnnv8_debugcount < 10) {
80
+ TORCH_WARN("TORCH_CUDNN_V8_DEBUG ON, V8 ON: ", cudnnv8_flag, " TORCH_CUDNN_USE_HEURISTIC_MODE B: ", cudnnv8_heuristic_mode_b);
81
+ cudnnv8_debugcount++;
82
+ }
83
+ return cudnnv8_flag == 1;
84
+ }
85
+
86
+ static inline bool cudnnv8_use_heur_mode_b() {
87
+ return cudnnv8_heuristic_mode_b;
88
+ }
89
+
90
+ // Keep in sync with py::enum_ in Module.cpp
91
+ enum class ConvBackend {
92
+ CudaDepthwise2d,
93
+ CudaDepthwise3d,
94
+ Cudnn,
95
+ CudnnTranspose,
96
+ Empty,
97
+ Miopen,
98
+ MiopenDepthwise,
99
+ MiopenTranspose,
100
+ Mkldnn,
101
+ MkldnnTranspose,
102
+ MkldnnEmpty,
103
+ NnpackSpatial,
104
+ Overrideable,
105
+ Slow2d,
106
+ Slow3d,
107
+ SlowDilated2d,
108
+ SlowDilated3d,
109
+ SlowTranspose2d,
110
+ SlowTranspose3d,
111
+ Winograd3x3Depthwise,
112
+ Xnnpack2d,
113
+ Mps,
114
+ MpsTranspose,
115
+ };
116
+
117
+ // Overload for selecting the convolution backend from the full set of convolution inputs.
118
+ // This overload is exposed to python for testing, etc.
119
+ TORCH_API ConvBackend select_conv_backend(
120
+ const Tensor& input, const Tensor& weight, const c10::optional<Tensor>& bias_opt,
121
+ SymIntArrayRef stride, SymIntArrayRef padding, SymIntArrayRef dilation,
122
+ bool transposed, SymIntArrayRef output_padding, c10::SymInt groups, const at::OptionalSymIntArrayRef bias_sizes_opt);
123
+
124
+ TORCH_API at::MemoryFormat _determine_backend_memory_format(const Tensor& input,
125
+ const Tensor& weight,
126
+ const ConvBackend backend);
127
+
128
+ // ---------------------------------------------------------------------
129
+ //
130
+ // Math
131
+ //
132
+ // ---------------------------------------------------------------------
133
+
134
+ constexpr int input_batch_size_dim = 0; // also grad_input
135
+ constexpr int input_channels_dim = 1;
136
+ constexpr int output_batch_size_dim = 0; // also grad_output
137
+ constexpr int output_channels_dim = 1;
138
+ constexpr int weight_output_channels_dim = 0;
139
+ constexpr int weight_input_channels_dim = 1;
140
+
141
+ // Often written as 2 + max_dim (extra dims for batch size and channels)
142
+ constexpr int max_dim = 3;
143
+
144
+ // ---------------------------------------------------------------------
145
+ //
146
+ // Checking
147
+ //
148
+ // ---------------------------------------------------------------------
149
+
150
+ // Used on pad, stride and dilation
151
+ static void check_args(CheckedFrom c, IntArrayRef args, size_t expected_size, const char* arg_name)
152
+ {
153
+ TORCH_CHECK(args.size() <= expected_size,
154
+ "Too many ", arg_name, " values (", args.size(), ") supplied, expecting ",
155
+ expected_size, " (while checking arguments for ", c, ")");
156
+ TORCH_CHECK(args.size() >= expected_size,
157
+ "Not enough ", arg_name, " values (", args.size(), ") supplied, expecting ",
158
+ expected_size, " (while checking arguments for ", c, ")");
159
+
160
+ auto num_negative_values = std::count_if(args.begin(), args.end(), [](int x){return x < 0;});
161
+ if (num_negative_values > 0){
162
+ std::stringstream ss;
163
+ ss << arg_name << " should be greater than zero but got (";
164
+ std::copy(args.begin(), args.end() - 1, std::ostream_iterator<int>(ss,", "));
165
+ ss << args.back() << ")" << " (while checking arguments for " << c << ")";
166
+ AT_ERROR(ss.str());
167
+ }
168
+ }
169
+
170
+
171
+ // NOTE [ Convolution checks ]
172
+ //
173
+ // NB: For many call sites, it is not strictly necessary to check all of
174
+ // these relationships (for example, for forward convolution, we compute
175
+ // the size of output ourselves, so we don't actually need to check
176
+ // output. However, writing a single function that does everything
177
+ // means we get to reuse it for both forwards and all backwards
178
+ // variants, even when the set of "real" inputs varies. The magic of
179
+ // relational computing!
180
+ //
181
+ // (There is one downside, which is that it is slightly harder to write
182
+ // error messages which are able to distinguish between real inputs
183
+ // (which the user can change) and computed inputs (which the user can
184
+ // only indirectly affect). It would be an interesting exercise to
185
+ // come up with a general framework to handle such situations.)
186
+ static void convolution_shape_check(
187
+ CheckedFrom c,
188
+ const TensorGeometryArg& input, const TensorGeometryArg& weight, const TensorGeometryArg& output,
189
+ IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups)
190
+ {
191
+ check_args(c, padding, input->dim() - 2, "padding");
192
+ check_args(c, stride, padding.size(), "stride");
193
+ check_args(c, dilation, padding.size(), "dilation");
194
+
195
+ // Input
196
+ checkDimRange(c, input, 3, 6 /* exclusive */);
197
+ checkSize_symint(c, input, input_channels_dim, weight->size(1) * groups);
198
+
199
+ // Weight
200
+ checkSameDim(c, input, weight);
201
+
202
+ // TODO: check that output->size() matches output_sizes
203
+ // TODO: check that weight matches output->sizes()
204
+ checkSameDim(c, input, output);
205
+ }
206
+
207
+ // NB: conv_output_size and conv_input_size are not bijections,
208
+ // as conv_output_size loses information; this is why conv_input_size
209
+ // takes an extra output_padding argument to resolve the ambiguity.
210
+
211
+ template <typename T>
212
+ static inline std::vector<T> _conv_output_size(
213
+ ArrayRef<T> input_size, ArrayRef<T> weight_size,
214
+ ArrayRef<T> padding, ArrayRef<T> stride, ArrayRef<T> dilation = ArrayRef<T>()
215
+ ) {
216
+ // ASSERT(input_size.size() > 2)
217
+ // ASSERT(input_size.size() == weight_size.size())
218
+ bool has_dilation = !dilation.empty();
219
+ auto dim = input_size.size();
220
+ std::vector<T> output_size(dim);
221
+ output_size[0] = input_size[input_batch_size_dim];
222
+ output_size[1] = weight_size[weight_output_channels_dim];
223
+ for (const auto d : c10::irange(2, dim)) {
224
+ auto dilation_ = has_dilation ? dilation[d - 2] : 1;
225
+ auto kernel = dilation_ * (weight_size[d] - 1) + 1;
226
+ output_size[d] = (input_size[d] + (2 * padding[d - 2]) - kernel) / stride[d - 2] + 1;
227
+ }
228
+ return output_size;
229
+ }
230
+
231
+ static inline std::vector<int64_t> conv_output_size(
232
+ IntArrayRef input_size, IntArrayRef weight_size,
233
+ IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation = IntArrayRef()
234
+ ) {
235
+ return _conv_output_size(input_size, weight_size, padding, stride, dilation);
236
+ }
237
+
238
+ static inline std::vector<c10::SymInt> conv_output_size(
239
+ SymIntArrayRef input_size, SymIntArrayRef weight_size,
240
+ SymIntArrayRef padding, SymIntArrayRef stride, SymIntArrayRef dilation = SymIntArrayRef()
241
+ ) {
242
+ return _conv_output_size(input_size, weight_size, padding, stride, dilation);
243
+ }
244
+
245
+ template <typename T>
246
+ std::vector<T> _conv_input_size(
247
+ ArrayRef<T> output_size, ArrayRef<T> weight_size,
248
+ ArrayRef<T> padding, ArrayRef<T> output_padding, ArrayRef<T> stride, ArrayRef<T> dilation, T groups
249
+ ) {
250
+ // ASSERT(output_size.size() > 2)
251
+ // ASSERT(output_size.size() == weight_size.size())
252
+ auto dim = output_size.size();
253
+ std::vector<T> input_size(dim);
254
+ input_size[0] = output_size[output_batch_size_dim];
255
+ input_size[1] = weight_size[weight_input_channels_dim] * groups;
256
+ for (const auto d : c10::irange(2, dim)) {
257
+ auto kernel = (weight_size[d] - 1) * dilation[d - 2] + 1;
258
+ input_size[d] = (output_size[d] - 1) * stride[d - 2] - (padding[d - 2] * 2) +
259
+ kernel + output_padding[d - 2];
260
+ }
261
+ return input_size;
262
+ }
263
+
264
+ static inline std::vector<c10::SymInt> conv_input_size(
265
+ SymIntArrayRef output_size, SymIntArrayRef weight_size,
266
+ SymIntArrayRef padding, SymIntArrayRef output_padding, SymIntArrayRef stride, SymIntArrayRef dilation, c10::SymInt groups
267
+ ) {
268
+ return _conv_input_size(output_size, weight_size, padding, output_padding, stride, dilation, groups);
269
+ }
270
+
271
+ static inline std::vector<int64_t> conv_input_size(
272
+ IntArrayRef output_size, IntArrayRef weight_size,
273
+ IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
274
+ ) {
275
+ return _conv_input_size(output_size, weight_size, padding, output_padding, stride, dilation, groups);
276
+ }
277
+
278
+ template <typename T>
279
+ std::vector<T> _conv_weight_size(
280
+ ArrayRef<T> input_size, ArrayRef<T> output_size,
281
+ ArrayRef<T> padding, ArrayRef<T> output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
282
+ ) {
283
+ auto dim = input_size.size();
284
+ std::vector<T> weight_size(dim);
285
+ weight_size[0] = output_size[1];
286
+ weight_size[1] = input_size[1] / groups;
287
+ for (const auto d : c10::irange(2, dim)) {
288
+ auto kernel = input_size[d] - (output_size[d] - 1) * stride[d - 2]
289
+ + padding[d - 2] * 2 - output_padding[d - 2];
290
+ weight_size[d] = (kernel - 1) / dilation[d - 2] + 1;
291
+ }
292
+ return weight_size;
293
+ }
294
+
295
+ static inline std::vector<c10::SymInt> conv_weight_size(
296
+ SymIntArrayRef input_size, SymIntArrayRef output_size,
297
+ SymIntArrayRef padding, SymIntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
298
+ ) {
299
+ return _conv_weight_size(input_size, output_size, padding, output_padding, stride, dilation, groups);
300
+ }
301
+
302
+ static inline std::vector<int64_t> conv_weight_size(
303
+ IntArrayRef input_size, IntArrayRef output_size,
304
+ IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
305
+ ) {
306
+ return _conv_weight_size(input_size, output_size, padding, output_padding, stride, dilation, groups);
307
+ }
308
+
309
+ static inline Tensor reshape_bias(int64_t dim, const Tensor& bias) {
310
+ std::vector<int64_t> shape(dim, 1);
311
+ shape[1] = -1;
312
+ return bias.reshape(shape);
313
+ }
314
+
315
+ static inline at::MemoryFormat cudnn_conv_suggest_memory_format(const at::Tensor& input, const at::Tensor& weight) {
316
+ // disable NHWC for float64 input.
317
+ if (!at::detail::getCUDAHooks().compiledWithCuDNN() ||
318
+ input.scalar_type() == at::kDouble ||
319
+ weight.scalar_type() == at::kDouble) {
320
+ return at::MemoryFormat::Contiguous;
321
+ }
322
+ long cudnn_version = at::detail::getCUDAHooks().versionCuDNN();
323
+ auto input_memory_format = input.suggest_memory_format();
324
+ auto weight_memory_format = weight.suggest_memory_format();
325
+ auto weight_ndim = weight.ndimension();
326
+
327
+ bool can_use_cudnn_channels_last_2d = (cudnn_version >= 7603) && (weight_ndim == 4) && (
328
+ (input_memory_format == at::MemoryFormat::ChannelsLast) ||
329
+ (weight_memory_format == at::MemoryFormat::ChannelsLast)
330
+ );
331
+ if (can_use_cudnn_channels_last_2d) {
332
+ return at::MemoryFormat::ChannelsLast;
333
+ }
334
+
335
+ bool can_use_cudnn_channels_last_3d = (cudnn_version >= 8005) && (weight_ndim == 5) && (
336
+ (input_memory_format == at::MemoryFormat::ChannelsLast3d) ||
337
+ (weight_memory_format == at::MemoryFormat::ChannelsLast3d)
338
+ );
339
+ if (can_use_cudnn_channels_last_3d) {
340
+ return at::MemoryFormat::ChannelsLast3d;
341
+ }
342
+
343
+ return at::MemoryFormat::Contiguous;
344
+ }
345
+
346
+ // controls whether emptyCache will be called following cudnn conv benchmarking
347
+ TORCH_API void _cudnn_set_conv_benchmark_empty_cache(bool enable);
348
+ TORCH_API bool _cudnn_get_conv_benchmark_empty_cache();
349
+
350
+
351
+ static inline bool miopen_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
352
+
353
+ // disable NHWC for float64 input.
354
+ if (!at::detail::getCUDAHooks().compiledWithMIOpen() ||
355
+ input.scalar_type() == at::kDouble ||
356
+ weight.scalar_type() == at::kDouble) {
357
+ return false;
358
+ }
359
+
360
+ bool can_use_miopen_channels_last_2d = false;
361
+ #if defined(USE_ROCM) && (ROCM_VERSION >= 40300)
362
+ // TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen
363
+ // See #64427
364
+ static c10::optional<bool> PYTORCH_MIOPEN_SUGGEST_NHWC = c10::utils::check_env("PYTORCH_MIOPEN_SUGGEST_NHWC");
365
+
366
+ auto input_memory_format = input.suggest_memory_format();
367
+ auto weight_memory_format = weight.suggest_memory_format();
368
+
369
+ can_use_miopen_channels_last_2d = PYTORCH_MIOPEN_SUGGEST_NHWC && *PYTORCH_MIOPEN_SUGGEST_NHWC && (
370
+ ( (input_memory_format == at::MemoryFormat::ChannelsLast) ||
371
+ (weight_memory_format == at::MemoryFormat::ChannelsLast) )
372
+ );
373
+ #endif
374
+
375
+ bool can_use_miopen_channels_last_3d = false;
376
+
377
+ return can_use_miopen_channels_last_2d || can_use_miopen_channels_last_3d;
378
+ }
379
+
380
+ static inline bool mkldnn_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
381
+
382
+ // disable NHWC for float64 input.
383
+ if (input.scalar_type() == at::kDouble ||
384
+ weight.scalar_type() == at::kDouble) {
385
+ return false;
386
+ }
387
+
388
+ // disable NHWC for MkldnnCPU tensor.
389
+ if (input.is_mkldnn() || weight.is_mkldnn()) {
390
+ return false;
391
+ }
392
+
393
+ auto input_memory_format = input.suggest_memory_format();
394
+ auto weight_memory_format = weight.suggest_memory_format();
395
+
396
+ bool can_use_mkldnn_channels_last_2d =
397
+ (input_memory_format == at::MemoryFormat::ChannelsLast) ||
398
+ (weight_memory_format == at::MemoryFormat::ChannelsLast);
399
+
400
+ bool can_use_mkldnn_channels_last_3d =
401
+ (input_memory_format == at::MemoryFormat::ChannelsLast3d) ||
402
+ (weight_memory_format == at::MemoryFormat::ChannelsLast3d);
403
+
404
+ return can_use_mkldnn_channels_last_2d || can_use_mkldnn_channels_last_3d;
405
+ }
406
+
407
+ static inline bool thnn_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
408
+
409
+ auto input_memory_format = input.suggest_memory_format();
410
+ auto weight_memory_format = weight.suggest_memory_format();
411
+
412
+ bool can_use_thnn_channels_last_2d = input.device().is_cpu() && (
413
+ (input_memory_format == at::MemoryFormat::ChannelsLast) || (
414
+ weight_memory_format == at::MemoryFormat::ChannelsLast));
415
+
416
+ return can_use_thnn_channels_last_2d;
417
+ }
418
+
419
+ static inline bool xpu_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
420
+
421
+ // check layout only for xpu tensor.
422
+ if (!input.is_xpu() || !weight.is_xpu()) {
423
+ return false;
424
+ }
425
+
426
+ // disable NHWC for float64 input.
427
+ if (input.scalar_type() == at::kDouble ||
428
+ weight.scalar_type() == at::kDouble) {
429
+ return false;
430
+ }
431
+
432
+ auto input_memory_format = input.suggest_memory_format();
433
+ auto weight_memory_format = weight.suggest_memory_format();
434
+
435
+ bool can_use_xpu_channels_last_2d =
436
+ (input_memory_format == at::MemoryFormat::ChannelsLast) ||
437
+ (weight_memory_format == at::MemoryFormat::ChannelsLast);
438
+
439
+ bool can_use_xpu_channels_last_3d =
440
+ (input_memory_format == at::MemoryFormat::ChannelsLast3d) ||
441
+ (weight_memory_format == at::MemoryFormat::ChannelsLast3d);
442
+
443
+ return can_use_xpu_channels_last_2d || can_use_xpu_channels_last_3d;
444
+ }
445
+
446
+ } // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Fill.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Functions that fill Tensors with constants. Implementations are in Fill.cpp.
2
+
3
+ #pragma once
4
+
5
+ #include <ATen/native/DispatchStub.h>
6
+
7
+ namespace c10 {
8
+ class Scalar;
9
+ }
10
+
11
+ namespace at {
12
+ class Tensor;
13
+ struct TensorIterator;
14
+
15
+ namespace native {
16
+
17
+ DECLARE_DISPATCH(void(*)(TensorIterator&, const c10::Scalar&), fill_stub);
18
+
19
+ Tensor& fill_out(Tensor& self, const Scalar& value);
20
+
21
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h ADDED
@@ -0,0 +1,369 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/Device.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/ScalarType.h>
6
+ #include <ATen/core/Tensor.h>
7
+ #include <ATen/native/utils/ParamsHash.h>
8
+ #include <c10/util/Exception.h>
9
+ #include <c10/util/irange.h>
10
+
11
+ #ifndef AT_PER_OPERATOR_HEADERS
12
+ #include <ATen/NativeFunctions.h>
13
+ #else
14
+ #include <ATen/ops/result_type_native.h>
15
+ #endif
16
+
17
+ #include <unordered_map>
18
+ #include <vector>
19
+
20
+ namespace at::native {
21
+ namespace {
22
+ // Check if tensor list has either a boolean tensor or a integer tensor
23
+ inline bool has_integral_tensor(TensorList tensors, const bool includeBool) {
24
+ return std::any_of(
25
+ tensors.begin(), tensors.end(), [&includeBool](const auto& t) {
26
+ return at::isIntegralType(t.scalar_type(), includeBool);
27
+ });
28
+ }
29
+ // check if tensor list has bool tensors
30
+ inline bool has_bool_tensor(TensorList tensors) {
31
+ return std::any_of(tensors.begin(), tensors.end(), [](const auto& t) -> bool {
32
+ return t.scalar_type() == ScalarType::Bool;
33
+ });
34
+ }
35
+
36
+ // Check foreach API restrictions
37
+ // - Tensor lists must be non-empty.
38
+ // - All TensorLists and ScalarLists must have the same number of elements.
39
+ // - Corresponding tensors must have the same size.
40
+ inline void check_foreach_api_restrictions(TensorList tensors) {
41
+ TORCH_CHECK(!tensors.empty(), "Tensor list must have at least one tensor.");
42
+ }
43
+
44
+ inline void check_foreach_api_restrictions(
45
+ TensorList tensors,
46
+ ArrayRef<Scalar> scalars) {
47
+ check_foreach_api_restrictions(tensors);
48
+ TORCH_CHECK(
49
+ tensors.size() == scalars.size(),
50
+ "Tensor list must have same number of elements as scalar list.");
51
+ }
52
+
53
+ inline void check_foreach_api_restrictions(
54
+ TensorList tensors1,
55
+ TensorList tensors2) {
56
+ TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor.");
57
+ TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor.");
58
+ TORCH_CHECK(
59
+ tensors1.size() == tensors2.size(),
60
+ "Tensor lists must have the same number of tensors, got ",
61
+ tensors1.size(),
62
+ " and ",
63
+ tensors2.size());
64
+ }
65
+
66
+ inline void check_foreach_api_restrictions(
67
+ TensorList tensors1,
68
+ TensorList tensors2,
69
+ TensorList tensors3) {
70
+ TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor.");
71
+ TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor.");
72
+ TORCH_CHECK(!tensors3.empty(), "Tensor list must have at least one tensor.");
73
+ TORCH_CHECK(
74
+ tensors1.size() == tensors2.size(),
75
+ "Tensor lists must have the same number of tensors, got ",
76
+ tensors1.size(),
77
+ " and ",
78
+ tensors2.size());
79
+ TORCH_CHECK(
80
+ tensors1.size() == tensors3.size(),
81
+ "Tensor lists must have the same number of tensors, got ",
82
+ tensors1.size(),
83
+ " and ",
84
+ tensors3.size());
85
+ }
86
+
87
+ inline void check_foreach_api_restrictions(
88
+ TensorList tensors1,
89
+ TensorList tensors2,
90
+ TensorList tensors3,
91
+ ArrayRef<Scalar> scalars) {
92
+ check_foreach_api_restrictions(tensors1, tensors2, tensors3);
93
+ TORCH_CHECK(
94
+ tensors1.size() == scalars.size(),
95
+ "Tensor list must have same number of elements as scalar list, got ",
96
+ tensors1.size(),
97
+ " and ",
98
+ scalars.size());
99
+ }
100
+
101
+ // Helper function called in check_fast_path_restrictions to check whether all
102
+ // corresponding tensors (aligning in index across the tensorLists) share the
103
+ // same device and dtype.
104
+ inline bool _check_tensors_share_device_and_dtype(
105
+ ArrayRef<TensorList> tensorLists) {
106
+ const auto expected_dtype = tensorLists[0][0].dtype();
107
+ const auto expected_device = tensorLists[0][0].device();
108
+
109
+ auto is_tensor_okay = [&](const Tensor& tensor) {
110
+ return tensor.dtype() == expected_dtype &&
111
+ tensor.device() == expected_device && tensor.layout() == at::kStrided &&
112
+ tensor.is_non_overlapping_and_dense();
113
+ };
114
+
115
+ for (const auto& tensorList : tensorLists) {
116
+ for (const auto& tensor : tensorList) {
117
+ if (!is_tensor_okay(tensor)) {
118
+ return false;
119
+ }
120
+ }
121
+ }
122
+
123
+ return true;
124
+ }
125
+
126
+ // Helper function called in check_fast_path_restrictions to check if
127
+ // corresponding tensors in tensor lists have the same sizes and strides.
128
+ inline bool _check_tensors_share_sizes_and_strides(
129
+ ArrayRef<TensorList> tensorLists) {
130
+ for (const auto i : c10::irange(1, tensorLists.size())) {
131
+ for (const auto j : c10::irange(tensorLists[0].size())) {
132
+ if (tensorLists[0][j].sizes() != tensorLists[i][j].sizes() ||
133
+ tensorLists[0][j].strides() != tensorLists[i][j].strides()) {
134
+ return false;
135
+ }
136
+ }
137
+ }
138
+
139
+ return true;
140
+ }
141
+
142
+ // Helper function called in check_fast_path_restrictions to check whether
143
+ // all tensors type promote properly with the scalars in scalarList. This
144
+ // function assumes that _check_tensors_share_device_and_dtype has already been
145
+ // called so that all corresponding tensors in tensorLists have the same dtype.
146
+ // Then, it is sufficient to check the type promotion with just one tensorList.
147
+ inline bool _check_tensors_do_type_promotion_with_scalars(
148
+ TensorList tensorList,
149
+ ArrayRef<Scalar> scalarList = {},
150
+ bool does_op_promote_integer_inputs_to_float = false) {
151
+ for (const auto i : c10::irange(tensorList.size())) {
152
+ // For division, integer inputs will result in float.
153
+ if (does_op_promote_integer_inputs_to_float) {
154
+ if (at::isIntegralType(
155
+ tensorList[i].scalar_type(), /*includeBool*/ true)) {
156
+ return false;
157
+ }
158
+ }
159
+ if (!scalarList.empty()) {
160
+ const auto& scalar =
161
+ scalarList.size() == 1 ? scalarList[0] : scalarList[i];
162
+ const auto& tensor = tensorList[i];
163
+ // note(mkozuki): This check might be responsible for
164
+ // `_foreach_add(bool_tensors, bool_tensors)` being pushed to slow path.
165
+ if (tensor.scalar_type() != at::native::result_type(scalar, tensor)) {
166
+ return false;
167
+ }
168
+ }
169
+ }
170
+
171
+ return true;
172
+ }
173
+
174
+ // To go via 'fast' path, several conditions must be satisfied
175
+ // - All tensors in all lists must have the same dtype.
176
+ // - All tensors must be on the same device
177
+ // - All tensors must have strided layout
178
+ // - All tensors must be non-overlapping and dense
179
+ // - Resulting tensor must have the same dtype as the input one
180
+
181
+ // Please, make sure to call check_foreach_api_restrictions before calling this
182
+ // method. There is a set of preconditions that have to be satisfied.
183
+ inline bool check_fast_path_restrictions(
184
+ ArrayRef<TensorList> tensorLists,
185
+ ArrayRef<Scalar> scalarList = {},
186
+ bool does_op_promote_integer_inputs_to_float = false) {
187
+ return _check_tensors_share_device_and_dtype(tensorLists) &&
188
+ _check_tensors_share_sizes_and_strides(tensorLists) &&
189
+ _check_tensors_do_type_promotion_with_scalars(
190
+ tensorLists[0],
191
+ scalarList,
192
+ does_op_promote_integer_inputs_to_float);
193
+ }
194
+
195
+ inline std::vector<c10::Scalar> convert_tensor_to_scalar_list(
196
+ const Tensor& scalarList_,
197
+ int64_t expect_length) {
198
+ std::vector<c10::Scalar> scalarList;
199
+ TORCH_CHECK(
200
+ scalarList_.device() == c10::kCPU,
201
+ "Expected scalars to be on CPU, got ",
202
+ scalarList_.device(),
203
+ " instead.");
204
+ TORCH_CHECK(
205
+ scalarList_.is_contiguous(), "Expected scalars to be contiguous.");
206
+ TORCH_CHECK(
207
+ scalarList_.dim() == 1,
208
+ "Expected packed scalar Tensor to be of dimension 1. Got ",
209
+ scalarList_.dim(),
210
+ " instead.");
211
+ AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
212
+ kComplexHalf,
213
+ kHalf,
214
+ kBool,
215
+ kBFloat16,
216
+ scalarList_.scalar_type(),
217
+ "convert_tensor_to_scalar_list",
218
+ [&]() {
219
+ const scalar_t* scalar_data = scalarList_.data_ptr<scalar_t>();
220
+ TORCH_CHECK(
221
+ (expect_length == scalarList_.size(0)),
222
+ "Expected length of scalars to match input of length ",
223
+ expect_length,
224
+ " but got ",
225
+ scalarList_.size(0),
226
+ " instead.");
227
+ for (int64_t i = 0; i < scalarList_.size(0); i++) {
228
+ scalarList.emplace_back(scalar_data[i]);
229
+ }
230
+ });
231
+ return scalarList;
232
+ }
233
+
234
+ inline bool can_use_fast_route(
235
+ ArrayRef<TensorList> tensorLists,
236
+ ArrayRef<Scalar> scalarList = {},
237
+ bool does_op_promote_integer_inputs_to_float = false) {
238
+ return check_fast_path_restrictions(
239
+ tensorLists, scalarList, does_op_promote_integer_inputs_to_float);
240
+ }
241
+
242
+ inline bool can_use_fast_route(
243
+ TensorList tensors1,
244
+ TensorList tensors2,
245
+ bool does_op_promote_integer_inputs_to_float = false) {
246
+ return can_use_fast_route(
247
+ {tensors1, tensors2}, {}, does_op_promote_integer_inputs_to_float);
248
+ }
249
+
250
+ using DeviceDtypeKey = std::pair<at::Device, at::ScalarType>;
251
+ using IndicesT = std::vector<int>;
252
+ using nested_optional_tensorvec_t =
253
+ std::vector<std::vector<c10::optional<at::Tensor>>>;
254
+ using TensorsAndIndicesT = std::pair<nested_optional_tensorvec_t, IndicesT>;
255
+ using FlatMap = std::unordered_map<
256
+ DeviceDtypeKey,
257
+ TensorsAndIndicesT,
258
+ ParamsHash<DeviceDtypeKey>>;
259
+
260
+ inline FlatMap _group_tensors_by_first_tensors_device_and_dtype(
261
+ const nested_optional_tensorvec_t& nested_tensorlist,
262
+ const bool with_indices) {
263
+ FlatMap grouped_tensors_with_indices;
264
+
265
+ TORCH_CHECK(!nested_tensorlist.empty());
266
+ TORCH_CHECK(!nested_tensorlist[0].empty());
267
+ const auto num_lists = nested_tensorlist.size();
268
+ const auto num_tensors = nested_tensorlist[0].size();
269
+
270
+ TORCH_CHECK(std::all_of(
271
+ nested_tensorlist.cbegin(),
272
+ nested_tensorlist.cend(),
273
+ [&](const auto& tensorlist) -> bool {
274
+ // note(crcrpar): Allow empty tensorlists following
275
+ // ref:
276
+ // https://github.com/pytorch/pytorch/blob/85885301fd3c6adb8b9dc3cf7afadf6945566684/torch/utils/_foreach_utils.py#L21-L24
277
+ return tensorlist.size() == num_tensors || tensorlist.size() == 0;
278
+ }));
279
+
280
+ for (const auto& tensor_index : c10::irange(num_tensors)) {
281
+ const auto key = [&]() -> DeviceDtypeKey {
282
+ const auto t = nested_tensorlist[0][tensor_index];
283
+ TORCH_CHECK(
284
+ t.has_value(),
285
+ "Tensors of the first list of nested Tensor lists are supposed to be defined but ",
286
+ "the ",
287
+ tensor_index,
288
+ "-th Tensor is not.");
289
+ return {t->device(), t->scalar_type()};
290
+ }();
291
+ TORCH_CHECK(
292
+ std::all_of(
293
+ nested_tensorlist.cbegin(),
294
+ nested_tensorlist.cend(),
295
+ [&](const auto& tensorlist) -> bool {
296
+ if (tensorlist.size() == 0) {
297
+ return true;
298
+ }
299
+ const auto& tensor = tensorlist[tensor_index];
300
+ // note(crcrpar): Currently the scope of this function is
301
+ // optimizers so there could be `state_steps` and other scalars
302
+ // whose elements are float tensors no matter what the parameter's
303
+ // dtype is.
304
+ if (!tensor.has_value()) {
305
+ return true;
306
+ } else {
307
+ const auto s = tensor->scalar_type();
308
+ const auto d = tensor->device();
309
+ // Note: `step` or `state_step` is float32 by default.
310
+ if (key.first == d) {
311
+ return key.second == s || s == at::ScalarType::Float;
312
+ } else if (d.is_cpu()) {
313
+ // note(crcrpar): There are some test cases (e.g.
314
+ // TestOptim::test_adam) where state_steps are on CPU and the
315
+ // others are on CUDA. Currently a state_step Tensor has the
316
+ // dtype of float.
317
+ return s == at::ScalarType::Float;
318
+ } else {
319
+ return false;
320
+ }
321
+ }
322
+ }),
323
+ "Tensors of the same index must be on the same device and the same dtype except `step` tensors that can be CPU and float32 notwithstanding");
324
+ if (!grouped_tensors_with_indices.count(key)) {
325
+ grouped_tensors_with_indices.insert(
326
+ {key,
327
+ TensorsAndIndicesT{
328
+ [&]() -> nested_optional_tensorvec_t {
329
+ nested_optional_tensorvec_t nested_tensorvec;
330
+ nested_tensorvec.reserve(num_lists);
331
+ for (const auto& i : c10::irange(num_lists)) {
332
+ std::vector<c10::optional<at::Tensor>> tensors;
333
+ if (!nested_tensorlist[i].empty()) {
334
+ // NB: num_tensors is the max possible length for any of
335
+ // the inner lists of tensor references. Reserving the max
336
+ // trades memory for perf. This should not have significant
337
+ // impact.
338
+ tensors.reserve(num_tensors);
339
+ }
340
+ nested_tensorvec.emplace_back(tensors);
341
+ }
342
+ return nested_tensorvec;
343
+ }(),
344
+ [&]() -> IndicesT {
345
+ if (!with_indices) {
346
+ return {};
347
+ } else {
348
+ IndicesT indices;
349
+ indices.reserve(num_tensors);
350
+ return indices;
351
+ }
352
+ }()}});
353
+ }
354
+ for (const auto& list_index : c10::irange(num_lists)) {
355
+ if (!nested_tensorlist[list_index].empty()) {
356
+ grouped_tensors_with_indices[key].first[list_index].emplace_back(
357
+ nested_tensorlist[list_index][tensor_index]);
358
+ }
359
+ }
360
+ if (with_indices) {
361
+ grouped_tensors_with_indices[key].second.emplace_back(tensor_index);
362
+ }
363
+ }
364
+
365
+ return grouped_tensors_with_indices;
366
+ }
367
+
368
+ } // namespace
369
+ } // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/IndexKernel.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/native/DispatchStub.h>
3
+ #include <c10/util/ArrayRef.h>
4
+
5
+ namespace at {
6
+ class Tensor;
7
+ class TensorBase;
8
+ struct TensorIterator;
9
+ struct TensorIteratorBase;
10
+ }
11
+
12
+ namespace c10 {
13
+ class Scalar;
14
+ }
15
+
16
+ namespace at::native {
17
+
18
+ using index_fn = void(*)(TensorIteratorBase &, IntArrayRef indexed_sizes, IntArrayRef indexed_strides);
19
+ using index_fill_fn = void(*)(TensorIterator & iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, const Scalar& source);
20
+ using index_copy_fn = void(*)(TensorIterator & iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride);
21
+ using index_put_fn = void(*)(TensorIterator &, IntArrayRef indexed_sizes, IntArrayRef indexed_strides, bool accumulate);
22
+ using put_fn = void(*)(TensorIterator & iter, const TensorBase& self, const bool accumulate);
23
+ using take_fn = void(*)(TensorIterator & iter, const TensorBase& input);
24
+ using flip_fn = void(*)(TensorIterator &, const bool);
25
+ using masked_fill_fn = void(*)(TensorIterator &, const Scalar& scalar);
26
+ using masked_select_fn = void(*)(TensorIterator &, int64_t orig_stride);
27
+ using masked_scatter_fn = void(*)(TensorIterator &, const TensorBase &);
28
+
29
+ DECLARE_DISPATCH(index_fn, index_stub);
30
+ DECLARE_DISPATCH(index_fill_fn, index_fill_stub);
31
+ DECLARE_DISPATCH(index_copy_fn, index_copy_stub);
32
+ DECLARE_DISPATCH(index_put_fn, index_put_stub);
33
+ DECLARE_DISPATCH(put_fn, put_stub);
34
+ DECLARE_DISPATCH(take_fn, take_stub);
35
+ DECLARE_DISPATCH(flip_fn, flip_stub);
36
+ DECLARE_DISPATCH(masked_fill_fn, masked_fill_stub);
37
+ DECLARE_DISPATCH(masked_select_fn, masked_select_serial_stub);
38
+ DECLARE_DISPATCH(masked_select_fn, masked_select_stub);
39
+ DECLARE_DISPATCH(masked_scatter_fn, masked_scatter_stub);
40
+
41
+ } // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Lerp.h ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <ATen/OpMathType.h>
5
+ #include <ATen/TensorIterator.h>
6
+ #include <c10/core/Scalar.h>
7
+
8
+ namespace at::native {
9
+
10
+ template <typename scalar_t>
11
+ C10_HOST_DEVICE C10_ALWAYS_INLINE bool is_lerp_weight_small(scalar_t weight) {
12
+ return std::abs(weight) < scalar_t(0.5);
13
+ }
14
+ template <typename scalar_t>
15
+ C10_HOST_DEVICE C10_ALWAYS_INLINE bool is_lerp_weight_small(c10::complex<scalar_t> weight) {
16
+ // Avoid the sqrt in abs(weight)
17
+ return (weight.real() * weight.real() + weight.imag() * weight.imag()) < scalar_t(0.25);
18
+ }
19
+
20
+ template <typename scalar_t, typename weight_t>
21
+ C10_HOST_DEVICE C10_ALWAYS_INLINE scalar_t lerp(scalar_t self_, scalar_t end_, weight_t weight_) {
22
+ using opmath_t = at::opmath_type<scalar_t>;
23
+ using opmath_weight_t = at::opmath_type<weight_t>;
24
+
25
+ opmath_t self = self_;
26
+ opmath_t end = end_;
27
+ opmath_weight_t weight = weight_;
28
+
29
+ // Conditional for better numeric. This has been discussed in
30
+ // https://github.com/pytorch/pytorch/pull/18871
31
+ return is_lerp_weight_small(weight)
32
+ ? self + weight * (end - self)
33
+ : end - (end - self) * (opmath_t(1) - weight);
34
+ }
35
+
36
+ using lerp_fn_scalar = void (*)(
37
+ at::TensorIteratorBase& iter,
38
+ const Scalar& weight);
39
+
40
+ using lerp_fn_tensor = void (*)(
41
+ at::TensorIteratorBase& iter);
42
+
43
+ DECLARE_DISPATCH(lerp_fn_scalar, lerp_kernel_scalar_weight);
44
+ DECLARE_DISPATCH(lerp_fn_tensor, lerp_kernel_tensor_weight);
45
+
46
+ } // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebra.h ADDED
@@ -0,0 +1,18 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+ #include <c10/util/Optional.h>
+
+ namespace c10 {
+ class Scalar;
+ }
+
+ namespace at {
+ struct TensorIterator;
+ }
+
+ namespace at::native {
+
+ using addr_fn = void (*)(TensorIterator &, const Scalar& beta, const Scalar& alpha);
+ DECLARE_DISPATCH(addr_fn, addr_stub);
+ } // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebraUtils.h ADDED
@@ -0,0 +1,624 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/util/irange.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/strides.h>
7
+ #include <ATen/core/Tensor.h>
8
+ #include <ATen/ExpandUtils.h>
9
+ #include <ATen/TensorUtils.h>
10
+ #include <ATen/native/TensorIterator.h>
11
+ #include <ATen/native/TransposeType.h>
12
+ #include <limits>
13
+ #include <type_traits>
14
+ #include <sstream>
15
+ #include <cstring>
16
+ #include <cctype>
17
+
18
+ #ifndef AT_PER_OPERATOR_HEADERS
19
+ #include <ATen/Functions.h>
20
+ #else
21
+ #include <ATen/ops/arange.h>
22
+ #include <ATen/ops/empty.h>
23
+ #include <ATen/ops/empty_like.h>
24
+ #include <ATen/ops/empty_strided.h>
25
+ #include <ATen/ops/zeros.h>
26
+ #endif
27
+
28
+ namespace at::native {
29
+
30
+ static inline c10::MaybeOwned<Tensor> expect_resolved_conj(const Tensor& tensor) {
31
+ if (tensor.is_conj()) {
32
+ return c10::MaybeOwned<Tensor>::owned(tensor.resolve_conj());
33
+ } else {
34
+ return c10::MaybeOwned<Tensor>::borrowed(tensor);
35
+ }
36
+ }
37
+
38
+ static inline DimVector batched_matrix_contiguous_strides(
39
+ const IntArrayRef sizes,
40
+ const bool f_contig = false) {
41
+ // f_contig chooses between the strides of a batch of Fortran (F-contiguous)
42
+ // and C-contiguous matrices
43
+ auto strides = c10::contiguous_strides(sizes);
44
+ auto dim = strides.size();
45
+
46
+ if (f_contig && dim >= 2) {
47
+ // Fix the strides of the last two dimensions, so that we return
48
+ // C-contiguous batches of F-contiguous matrices.
49
+ strides[dim - 1] = std::max(sizes[dim - 2], static_cast<int64_t>(1));
50
+ strides[dim - 2] = 1;
51
+ }
52
+ return strides;
53
+ }
54
+
55
+ /*
56
+ * Clones a Tensor so that the following conditions hold:
57
+ * If we think of a Tensor of having size (B, M, N), where B is any number
58
+ * of batch dimensions, then:
59
+ * - Each (M, N) matrix is in column major form
60
+ * - Let Tensor P have size (B, M, N) and Q have size (B, M', N').
61
+ * Then when laid out in memory, the M by N matrix starting at
62
+ * P.data_ptr()[B * M * N] is of the same corresponding batch as the M' by N'
63
+ * matrix starting at Q.data_ptr()[B * M' * N'].
64
+ */
65
+ static inline Tensor cloneBatchedColumnMajor(const Tensor& src) {
66
+ // If src is already in batched column major format, then
67
+ // this will be efficient (no reordering of the data will occur)
68
+ // because the first transpose will make the tensor contiguous,
69
+ // and cloning a contiguous tensor is fast.
70
+ auto result = src.mT().clone(at::MemoryFormat::Contiguous);
71
+ result.transpose_(-2, -1);
72
+ return result;
73
+ }
74
+
75
+ /*
76
+ * contig chooses between C-contig (true) and F-contig (false)
77
+ */
78
+ static inline c10::MaybeOwned<Tensor> borrow_else_clone(const bool cond, const Tensor& borrow, const Tensor& clone, const bool contig) {
79
+ return cond ? c10::MaybeOwned<Tensor>::borrowed(borrow)
80
+ : c10::MaybeOwned<Tensor>::owned(contig ? clone.clone(MemoryFormat::Contiguous)
81
+ : cloneBatchedColumnMajor(clone));
82
+ }
83
+
84
+ /*
85
+ * This method is designed to be a faster alternative to
86
+ * `cloneBatchedColumnMajor` with some additional features,
87
+ * namely:
88
+ * 1. It uses `copy` instead of `clone` which could be much faster.
89
+ * 2. `nrows` parameter used to create inputs with the number of rows larger
90
+ * than the original input, which is required for some LAPACK/MAGMA methods.
91
+ * 3. `desired_batch_size` is used to create copies with the batch size
92
+ * which is either the original batch size of the input, or its larger
93
+ * broadcasted shape.
94
+ */
95
+ static inline Tensor copyBatchedColumnMajor(const Tensor& src, int64_t nrows = -1,
96
+ at::OptionalIntArrayRef desired_batch_sizes = c10::nullopt) {
97
+ nrows = (nrows == -1) ? src.size(-2) : nrows;
98
+ auto copy_sizes = desired_batch_sizes.has_value()
99
+ ? desired_batch_sizes.value().vec()
100
+ : IntArrayRef(src.sizes().data(), src.dim() - 2).vec();
101
+ copy_sizes.insert(copy_sizes.end(), {nrows, src.size(-1)});
102
+ const auto copy_strides = batched_matrix_contiguous_strides(copy_sizes, /*f-contig*/true);
103
+ auto copy = at::empty_strided(copy_sizes, copy_strides, src.options());
104
+ copy.narrow(-2, 0, src.size(-2)).copy_(src);
105
+ return copy;
106
+ }
107
+
108
+ /*
109
+ * Given batches of matrices with arbitrary batch dim,
110
+ * computes the number of batches.
111
+ */
112
+ static inline int64_t batchCount(const Tensor& batched_matrices) {
113
+ int64_t result = 1;
114
+ for (int64_t i = 0; i < batched_matrices.ndimension() - 2; i++) {
115
+ result *= batched_matrices.size(i);
116
+ }
117
+ return result;
118
+ }
119
+
120
+ // Computes the number of elements of a matrix in a batched matrix tensor
121
+ static inline int64_t matrixStride(const Tensor& batched_matrices) {
122
+ return batched_matrices.size(-1) * batched_matrices.size(-2);
123
+ }
124
+
125
+ // Validates input shapes for operations on batches of square matrices (inverse, cholesky, symeig, eig)
126
+ static inline void checkIsMatrix(const Tensor& A, const char* const f_name, const char* const arg_name = "A") {
127
+ TORCH_CHECK(A.dim() >= 2, f_name, ": The input tensor ", arg_name, " must have at least 2 dimensions.");
128
+ }
129
+ static inline void squareCheckInputs(const Tensor& self, const char* const f_name, const char* const arg_name = "A") {
130
+ checkIsMatrix(self, f_name, arg_name);
131
+ TORCH_CHECK(self.sym_size(-1) == self.sym_size(-2),
132
+ f_name,
133
+ ": ", arg_name, " must be batches of square matrices, "
134
+ "but they are ", self.sym_size(-2), " by ", self.sym_size(-1), " matrices");
135
+ }
136
+
137
+ static inline void checkInputsSolver(const Tensor& A,
138
+ const Tensor& B,
139
+ const bool left,
140
+ const char* const f_name) {
141
+ squareCheckInputs(A, f_name, "A");
142
+ checkIsMatrix(B, f_name, "B");
143
+ TORCH_CHECK(left ? A.size(-2) == B.size(-2) : A.size(-1) == B.size(-1),
144
+ f_name, ": Incompatible shapes of A and B for the equation ",
145
+ left ? "AX = B" : "XA = B",
146
+ " (", A.size(-2), "x", A.size(-1), " and ", B.size(-2), "x", B.size(-1), ")");
147
+ }
148
+
149
+ static inline bool is_row_or_column_contiguous(const Tensor& t) {
150
+ // This could be made more general, similar to how it's checked in matmul, which would allow us to
+ // elide the copy with strides such as (6, 12, 1, 3) or (3, 1, 9), but this is quite tricky.
152
+ // We choose to be conservative for simplicity
153
+ return t.is_contiguous() || t.transpose(-2, -1).is_contiguous();
154
+ }
155
+
156
+ static inline TransposeType to_transpose_type(const bool contig, const bool conj) {
157
+ if (conj) {
158
+ if (contig) { TORCH_INTERNAL_ASSERT(false, "Invalid transpose type"); }
159
+ else { return TransposeType::ConjTranspose; }
160
+ } else {
161
+ if (contig) { return TransposeType::NoTranspose; }
162
+ else { return TransposeType::Transpose; }
163
+ }
164
+ }
165
+
166
+
167
+ // This function is designed to be used with linear algebra methods that minimize
168
+ // L(ax - b) = 0, where L is generally the identity map (`solve`, for example)
169
+ // or the L2 norm (`lstsq`).
170
+ // It is expected that `a` and `b` are contiguous tensors of column-major matrices
171
+ // (so that a.view({-1, a.size(-2), a.size(-1)}) succeeds, same for `b`),
172
+ // with the following additional properties:
173
+ //
174
+ // 1. a.dim() == b.dim()
175
+ // 2. a.shape[:-2] broadcasts over b.shape[:-2]
176
+ // 3. a.size(i) <= b.size(i) for i=0,..., a.dim() - 3 (only for batch dimensions)
177
+ //
178
+ // MAGMA/LAPACK modify tensor `a` in-place, and the main goal of this method
179
+ // is to be memory efficient, which means that if there exists an index i such that
180
+ // a.shape[i] < b.shape[i], 0 <= i <= a.dim() - 3,
181
+ // then instead of materializing copies of `a` in the broadcasted shape, we keep
182
+ // a buffer copy of `a` along with flags that check whether specific batch dimension
183
+ // indices for `a` were already accessed. If they were, we copy the data from the buffer
184
+ // into `a`. The number of copies does not exceed
185
+ // prod(max(a.shape[:-2], b.shape[:-2]) - a.shape[:-2] + 1)
186
+ // and this value is attained by tensors with non-empty batch dimensions.
187
+ //
188
+ // func_t `f` is a callable that is being supplied with
189
+ // scalar_t* a_working_ptr, scalar_t* b_working_ptr, int64_t a_linear_batch_idx.
190
+ // a_working_ptr and b_working_ptr can directly be passed to LAPACK/MAGMA routines,
191
+ // and a_linear_batch_idx is an index in the 3d representation which corresponds to
192
+ // the memory a_working_ptr points to, in other words:
193
+ // a_working_ptr == a.view({-1, a.size(-2), a.size(-1)}.select(0, a_linear_batch_idx).data_ptr<scalar_t>();
194
+ // a_linear_batch_idx is useful to store metadata related to `a`, such as, for example,
195
+ // its rank or singular values (see linalg_lstsq).
196
+ template<typename scalar_t, typename func_t>
197
+ void batch_iterator_with_broadcasting(const Tensor& a, const Tensor& b, const func_t& f) {
198
+ IntArrayRef a_batch_sizes(a.sizes().data(), a.dim() - 2);
199
+ IntArrayRef b_batch_sizes(b.sizes().data(), b.dim() - 2);
200
+
201
+ auto a_linear_batch_idx = at::arange(batchCount(a)).view(a_batch_sizes);
202
+ auto b_linear_batch_idx = at::arange(batchCount(b)).view(b_batch_sizes);
203
+
204
+ TensorIterator iter = TensorIteratorConfig()
205
+ .set_check_mem_overlap(false)
206
+ .check_all_same_dtype(false)
207
+ .resize_outputs(false)
208
+ .add_output(b_linear_batch_idx)
209
+ .add_input(a_linear_batch_idx)
210
+ .build();
211
+
212
+ auto m = a.size(-2);
213
+ auto n = a.size(-1);
214
+ auto a_3d = a.view({batchCount(a), m, n});
215
+ auto b_3d = b.view({batchCount(b), b.size(-2), b.size(-1)});
216
+
217
+ auto a_broadcasts_over_b = (a_batch_sizes != b_batch_sizes);
218
+ Tensor a_buffer, a_was_accessed, a_buffer_3d;
219
+ std::function<void(int64_t)> check_if_copy_needed_for_a
220
+ = [](int64_t /*a_curr_linear_batch_idx*/){};
221
+ if (a_broadcasts_over_b) {
222
+ a_buffer = at::empty_strided(a.sizes(), a.strides(), a.options())
223
+ .copy_(a);
224
+ a_was_accessed = at::zeros(batchCount(a), at::kBool);
225
+ a_buffer_3d = a_buffer.view({batchCount(a), m, n});
226
+ check_if_copy_needed_for_a = [&](int64_t a_curr_linear_batch_idx) {
227
+ auto* a_was_accessed_flag = a_was_accessed
228
+ .select(0, a_curr_linear_batch_idx)
229
+ .data_ptr<bool>();
230
+ if (!(*a_was_accessed_flag)) {
231
+ *a_was_accessed_flag = true;
232
+ }
233
+ else {
234
+ a_3d.select(0, a_curr_linear_batch_idx)
235
+ .copy_(a_buffer_3d.select(0, a_curr_linear_batch_idx));
236
+ }
237
+ };
238
+ }
239
+
240
+ auto loop = [&](char** data, const int64_t* strides, int64_t nelems) {
241
+ auto* b_batch_idx_ptr = data[0];
242
+ auto* a_batch_idx_ptr = data[1];
243
+
244
+ for (const auto elem C10_UNUSED : c10::irange(nelems)) {
245
+ auto b_curr_linear_batch_idx = *reinterpret_cast<int64_t*>(b_batch_idx_ptr);
246
+ auto a_curr_linear_batch_idx = *reinterpret_cast<int64_t*>(a_batch_idx_ptr);
247
+
248
+ check_if_copy_needed_for_a(a_curr_linear_batch_idx);
249
+
250
+ auto* a_working_ptr = a_3d.select(0, a_curr_linear_batch_idx)
251
+ .data_ptr<scalar_t>();
252
+ auto* b_working_ptr = b_3d.select(0, b_curr_linear_batch_idx)
253
+ .data_ptr<scalar_t>();
254
+ f(a_working_ptr, b_working_ptr, a_curr_linear_batch_idx);
255
+
256
+ b_batch_idx_ptr += strides[0];
257
+ a_batch_idx_ptr += strides[1];
258
+ }
259
+ };
260
+ iter.serial_for_each(loop, {0, batchCount(b)});
261
+ }
262
+
263
+ // Returns the epsilon value for floating types except half
264
+ static inline double _get_epsilon(const ScalarType& sc_type) {
265
+ switch (sc_type) {
266
+ case at::ScalarType::Float:
267
+ return static_cast<double>(std::numeric_limits<float>::epsilon());
268
+ case at::ScalarType::Double:
269
+ return std::numeric_limits<double>::epsilon();
270
+ default:
271
+ AT_ERROR("This function doesn't handle types other than float and double");
272
+ }
273
+ }
274
+
275
+ // Validates input shapes and devices
276
+ // for linear solve methods (solve, cholesky_solve, lu_solve, triangular_solve)
277
+ static inline void linearSolveCheckInputs(const Tensor& self, const Tensor& A, const char* name) {
278
+ TORCH_CHECK(self.device() == A.device(),
279
+ "Expected b and A to be on the same device, but found b on ",
280
+ self.device(), " and A on ", A.device(), " instead.");
281
+
282
+ TORCH_CHECK(self.scalar_type() == A.scalar_type(),
283
+ "Expected b and A to have the same dtype, but found b of type ",
284
+ self.scalar_type(), " and A of type ", A.scalar_type(), " instead.");
285
+
286
+ TORCH_CHECK(A.size(-1) == A.size(-2),
287
+ "A must be batches of square matrices, "
288
+ "but they are ", A.size(-2), " by ", A.size(-1), " matrices");
289
+
290
+ TORCH_CHECK(A.size(-1) == self.size(-2),
291
+ "Incompatible matrix sizes for ", name, ": each A "
292
+ "matrix is ", A.size(-1), " by ", A.size(-1),
293
+ " but each b matrix is ", self.size(-2), " by ", self.size(-1));
294
+ }
295
+
296
+ static inline void checkFloatingOrComplex(const Tensor& t, const char* const f_name, const bool allow_low_precision_dtypes=true) {
297
+ auto dtype = t.scalar_type();
298
+ TORCH_CHECK((at::isFloatingType(dtype) || at::isComplexType(dtype)),
299
+ f_name, ": Expected a floating point or complex tensor as input. Got ", dtype);
300
+ if (!allow_low_precision_dtypes) {
301
+ TORCH_CHECK(dtype == kFloat || dtype == kDouble || dtype == kComplexFloat || dtype == kComplexDouble,
302
+ f_name, ": Low precision dtypes not supported. Got ", dtype);
303
+ }
304
+ }
305
+
306
+
307
+ // Checks if all the Tensors in a TensorList are of the same dimensions
308
+ static inline void checkAllSameDim(TensorList tensors, int64_t dim) {
309
+ for (auto &t : tensors) {
310
+ TORCH_CHECK(t.dim() == dim, "Tensor dimension is ", t.dim(), ", expected ", dim, " instead.");
311
+ }
312
+ }
313
+
314
+ static inline std::tuple<std::vector<int64_t>, std::vector<int64_t>> _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2) {
315
+ // broadcast the batch dimensions of arg1 and arg2.
316
+ IntArrayRef arg1_batch_sizes(arg1.sizes().data(), arg1.ndimension() - 2);
317
+ IntArrayRef arg2_batch_sizes(arg2.sizes().data(), arg2.ndimension() - 2);
318
+ std::vector<int64_t> expand_batch_portion = infer_size(arg1_batch_sizes, arg2_batch_sizes);
319
+
320
+ std::vector<int64_t> arg1_expand_size({expand_batch_portion});
321
+ arg1_expand_size.insert(arg1_expand_size.end(), { arg1.size(-2), arg1.size(-1) });
322
+
323
+ std::vector<int64_t> arg2_expand_size({expand_batch_portion});
324
+ arg2_expand_size.insert(arg2_expand_size.end(), { arg2.size(-2), arg2.size(-1) });
325
+ return std::make_tuple(std::move(arg1_expand_size), std::move(arg2_expand_size));
326
+ }
327
+
328
+ static inline std::tuple<Tensor,Tensor> _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2, const char* name) {
329
+ // If there's no name we assume we don't want to check the errors
330
+ if (name != nullptr) {
331
+ linearSolveCheckInputs(arg1, arg2, name);
332
+ }
333
+
334
+ std::vector<int64_t> arg1_expand_size, arg2_expand_size;
335
+ std::tie(arg1_expand_size, arg2_expand_size) = at::native::_linalg_broadcast_batch_dims(arg1, arg2);
336
+
337
+ auto arg1_broadcasted = arg1_expand_size == arg1.sizes() ? arg1 : arg1.expand(arg1_expand_size);
338
+ auto arg2_broadcasted = arg2_expand_size == arg2.sizes() ? arg2 : arg2.expand(arg2_expand_size);
339
+ return std::make_tuple(arg1_broadcasted, arg2_broadcasted);
340
+ }
341
+
342
+ static inline std::vector<int64_t> broadcast_batch_size(const Tensor& t1, const Tensor& t2, int64_t n_batch_dims) {
343
+ IntArrayRef t1_batch_sizes(t1.sizes().data(), n_batch_dims);
344
+ IntArrayRef t2_batch_sizes(t2.sizes().data(), n_batch_dims);
345
+ auto broadcasted_batch_sizes = infer_size(t1_batch_sizes, t2_batch_sizes);
346
+ return broadcasted_batch_sizes;
347
+ }
348
+
349
+ // Return a permutation with the given axes moved to the end.
350
+ static inline Tensor _move_to_end(const Tensor& self, IntArrayRef axes) {
351
+ const std::vector<int64_t> a = axes.vec();
352
+ const int64_t ndim = self.ndimension();
353
+ std::vector<int64_t> perm;
354
+
355
+ for (const auto i : c10::irange(ndim)) {
356
+ auto it = std::find(a.begin(), a.end(), i);
357
+ if (it == a.end()) {
358
+ perm.push_back(i);
359
+ }
360
+ }
361
+ for (auto i : a) {
362
+ perm.push_back(i);
363
+ }
364
+
365
+ TORCH_CHECK((int64_t)perm.size() == ndim,
366
+ "duplicate or invalid axis in 'dim' argument for tensor with ndim==", ndim);
367
+
368
+ return self.permute(perm);
369
+ }
370
+
371
+ // parse the "mode" param in linalg_qr: return a tuple of bools (compute_q, reduced)
372
+ static inline std::tuple<bool, bool> _parse_qr_mode(c10::string_view mode) {
373
+ bool compute_q;
374
+ bool reduced;
375
+ if (mode == "reduced") {
376
+ compute_q = true;
377
+ reduced = true;
378
+ } else if (mode == "complete") {
379
+ compute_q = true;
380
+ reduced = false;
381
+ } else if (mode == "r") {
382
+ compute_q = false;
383
+ reduced = true; // this is actually irrelevant in this mode
384
+ } else {
385
+ TORCH_CHECK(false, "qr received unrecognized mode '", mode,
386
+ "' but expected one of 'reduced' (default), 'r', or 'complete'");
387
+ }
388
+ return std::make_tuple(compute_q, reduced);
389
+ }
390
+
391
+ // Function to compute sizes, strides and the extra columns for the Q matrix in the QR Decomposition
392
+ static inline std::tuple<DimVector, DimVector, int64_t> _compute_geometry_for_Q(
393
+ const Tensor& input,
394
+ bool reduced) {
395
+ int64_t m = input.size(-2), n = input.size(-1);
396
+ int64_t n_columns_q;
397
+
398
+ // We need to compute the required size of Q based on the `reduced` option
399
+ DimVector q_sizes(input.sizes());
400
+ if (!reduced && m > n) {
401
+ q_sizes[input.dim() - 1] = m;
402
+ n_columns_q = m;
403
+ } else {
404
+ q_sizes[input.dim() - 1] = n;
405
+ n_columns_q = std::min(m, n);
406
+ }
407
+ auto q_strides = batched_matrix_contiguous_strides(q_sizes, /*f-contig*/true);
408
+ return std::make_tuple(q_sizes, q_strides, n_columns_q);
409
+ }
410
+
411
+ static inline bool svd_uses_cusolver(const Tensor& A) {
412
+ // if cusolver is available, it is used unconditionally
413
+ return A.is_cuda()
414
+ && at::globalContext().hasCuSOLVER()
415
+ && at::globalContext().linalgPreferredBackend() != at::LinalgBackend::Magma;
416
+ }
417
+
418
+
419
+ // Function used instead of .to so that the original strides are retained
420
+ // .to doesn't retain strides and make the output tensor contiguous
421
+ static inline Tensor same_stride_to(const Tensor& original_tensor, const at::TensorOptions& options) {
422
+ auto strided_to = at::empty_strided(original_tensor.sizes(),
423
+ original_tensor.strides(),
424
+ options);
425
+ strided_to.copy_(original_tensor);
426
+ return strided_to;
427
+ }
428
+
429
+ // Creates a dimension permutation array that can be given to `at::permute()`, which will shift
430
+ // the two specified dimensions to the end of a tensor, without changing the order of
431
+ // the other dimensions. `dim1` will be placed at the very end, and `dim0` will be
432
+ // placed just to the left of it.
433
+ //
434
+ // For instance, given a 4-D tensor, dimensions 1 and 3 can be shifted to the end by
435
+ // calling `create_dim_backshift_permutation(1, 3, 4)`. The resulting vector will
436
+ // be `vec(0, 2, 1, 3)`.
437
+ static inline std::vector<int64_t> create_dim_backshift_permutation(int64_t dim0, int64_t dim1, int64_t ndim) {
438
+ TORCH_CHECK(
439
+ (dim0 != dim1) && (dim0 < ndim) && (dim0 >= 0) && (dim1 < ndim) && (dim1 >= 0),
440
+ "duplicate or invalid dimensions");
441
+ std::vector<int64_t> permutation(ndim);
442
+ int64_t cur_permuted_dim = 0;
443
+ for (const auto dim_ind : c10::irange(ndim)) {
444
+ if ((dim_ind != dim0) && (dim_ind != dim1)) {
445
+ permutation[cur_permuted_dim++] = dim_ind;
446
+ }
447
+ }
448
+ permutation[cur_permuted_dim++] = dim0;
449
+ permutation[cur_permuted_dim] = dim1;
450
+ return permutation;
451
+ }
452
+
453
+ // Creates a dimension permutation array that can be given to `at::permute()`, which
454
+ // will reverse a given permutation.
455
+ // The reverse permutation array is created by swapping the indices and their
456
+ // associated values from the given permutation array.
457
+ static inline std::vector<int64_t> create_reverse_permutation(std::vector<int64_t> permutation) {
458
+ int64_t ndim = permutation.size();
459
+ std::vector<int64_t> reverse_permutation(ndim);
460
+ for (const auto dim_ind : c10::irange(ndim)) {
461
+ reverse_permutation[permutation[dim_ind]] = dim_ind;
462
+ }
463
+ return reverse_permutation;
464
+ }
465
+
466
+ // Compute R-work array size for MAGMA/LAPACK cgesdd/zgesdd
467
+ // See https://github.com/Reference-LAPACK/lapack/blob/122506cd8b6ce050a200920c3d4c0b153b150fd8/SRC/cgesdd.f#L186
468
+ static inline int64_t computeLRWorkDim(const char jobz, int64_t m, int64_t n) {
469
+ auto mn = std::min(m, n);
470
+ auto mx = std::max(m, n);
471
+ if (jobz == 'N') {
472
+ #ifdef __APPLE__
473
+ // According to `vecLib.framework/Headers/clapack.h` Accelerate.framework is based on LAPACK 3.2.1
474
+ return 7 * mn;
475
+ #else
476
+ // This setting is valid for LAPACK 3.6+
477
+ return 5 * mn;
478
+ #endif
479
+ }
480
+ if (mx > 10 * mn) {
481
+ return 5 * mn * mn + 5 * mn;
482
+ }
483
+ return std::max(5 * mn * mn + 5 * mn, 2 * mx * mn + 2 * mn * mn + mn);
484
+ }
485
+
486
+ // This function checks whether the uplo argument input is valid
487
+ // Allowed strings are "u", "U", "l", "L"
488
+ static inline void checkUplo(const c10::string_view uplo) {
489
+ // To use std::toupper safely with plain chars (or signed chars), the argument should first be converted to unsigned char
490
+ char uplo_uppercase = static_cast<char>(std::toupper(static_cast<unsigned char>(uplo[0])));
491
+ TORCH_CHECK(uplo.size() == 1 && (uplo_uppercase == 'U' || uplo_uppercase == 'L'),
492
+ "Expected UPLO argument to be 'L' or 'U', but got ", uplo);
493
+ }
494
+
495
+ static inline void checkSameDevice(const std::string& fn_name, Tensor result, Tensor input, const std::string& result_name = "result") {
496
+ TORCH_CHECK(
497
+ result.device() == input.device(),
498
+ fn_name,
499
+ ": Expected ", result_name, " and input tensors to be on the same device, but got ",
500
+ result_name, " on ", result.device(), " and input on ", input.device());
501
+ }
502
+
503
+ // Check the dtype of result and input tensors (for _out variants).
504
+ // Most linear algebra functions have the same dtype for input and output
505
+ // (either floating or complex type input), so we can check whether input's dtype can be casted to result's dtype.
506
+ // According to https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
507
+ // c10::canCast is used for checking the "safe copy" dtype requirements.
508
+ static inline void checkLinalgCompatibleDtype(const std::string& fn_name, Tensor result, Tensor input, const std::string& result_name = "result") {
509
+ bool can_cast = c10::canCast(input.scalar_type(), result.scalar_type());
510
+ TORCH_CHECK(
511
+ can_cast,
512
+ fn_name,
513
+ ": Expected ", result_name, " to be safely castable from ", input.scalar_type(), " dtype, but got ",
514
+ result_name, " with dtype ", result.scalar_type());
515
+ }
516
+
517
+ // Alternatively, we can check whether the specific expected output type (result_type) can be safely casted to out tensor dtype (out_type)
518
+ static inline void checkLinalgCompatibleDtype(const std::string& fn_name, ScalarType out_type, ScalarType result_type, const std::string& out_name = "result") {
519
+ bool can_cast = c10::canCast(result_type, out_type);
520
+ TORCH_CHECK(
521
+ can_cast,
522
+ fn_name,
523
+ ": Expected ", out_name, " to be safely castable from ", result_type, " dtype, but got ",
524
+ out_name, " with dtype ", out_type);
525
+ }
526
+
527
+ static inline void checkNotComplexTolerance(const Tensor& tol, const c10::string_view f_name, const c10::string_view tol_name) {
528
+ TORCH_CHECK(!at::isComplexType(tol.scalar_type()),
529
+ f_name, ": ", tol_name, " tensor of complex type is not supported. Got ", tol.scalar_type());
530
+ }
531
+
532
+ /*
533
+ Two types of 'other' tensors are supported when solving
534
+ a system of linear equations matmul(input, x) = other:
535
+ * 1-dimensional (1D) tensor or batch of 1D tensors (vector case)
536
+ * 2-dimensional (2D) tensor or batch of 2D tensors (matrix case).
537
+ The original torch.solve supported only the matrix case, while NumPy works for both cases.
538
+ For the batched input we need to be able to distinguish them.
539
+ Let input.shape = (batch_dimensions, m, n), then 'other' is of vector type if other.shape == (batch_dimensions, m).
540
+ This rule is compatible with NumPy, see https://github.com/numpy/numpy/blob/v1.20.0/numpy/linalg/linalg.py#L384-L389
541
+ */
542
+ static inline bool linalg_solve_is_vector_rhs(const Tensor& input, const Tensor& other) {
543
+ auto expected_batched_rhs_shape = SymIntArrayRef(input.sym_sizes().data(), input.dim() - 1); // input.shape[:-1]
544
+ bool vector_case = other.dim() == 1 || (input.dim() - 1 == other.dim() && other.sym_sizes().equals(expected_batched_rhs_shape));
545
+ return vector_case;
546
+ }
547
+
548
+ /*
549
+ Computes linear indices for a tensor with original_shape to access its elements like it was a materialized broadcast tensor.
550
+ */
551
+ static inline Tensor get_linear_indices(int64_t numel, IntArrayRef original_shape, IntArrayRef broadcast_shape) {
552
+ TensorOptions options = at::TensorOptions().dtype(at::kLong).device(at::kCPU);
553
+ return at::arange(numel, options).view(original_shape).broadcast_to(broadcast_shape).contiguous();
554
+ }
555
+
556
+ class BroadcastLinearIndices {
557
+ private:
558
+ Tensor linear_indices_;
559
+ bool is_broadcasting_;
560
+
561
+ public:
562
+ BroadcastLinearIndices(
563
+ int64_t numel,
564
+ IntArrayRef original_shape,
565
+ IntArrayRef broadcast_shape) : is_broadcasting_(!original_shape.equals(broadcast_shape)) {
566
+ // The assumption is that the broadcast_shape is a materialized broadcast
567
+ // shape of the original_shape. We need to compute the linear indices
568
+ // compatible with the original_shape to access the elements in the original
569
+ // tensor corresponding to the broadcast tensor.
570
+ if (is_broadcasting_) {
571
+ linear_indices_ =
572
+ get_linear_indices(numel, original_shape, broadcast_shape);
573
+ }
574
+ }
575
+ int64_t operator()(int64_t broadcast_linear_index) {
576
+ return is_broadcasting_
577
+ ? linear_indices_.data_ptr<int64_t>()[broadcast_linear_index]
578
+ : broadcast_linear_index;
579
+ }
580
+ };
581
+
582
+ static inline bool is_blas_compatible_column_major_order(const Tensor& input) {
583
+ IntArrayRef input_strides = input.strides();
584
+ IntArrayRef input_sizes = input.sizes();
585
+ auto ndim = input.dim();
586
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2);
587
+ if (ndim > 3) {
588
+ return input.transpose(-2, -1).is_contiguous();
589
+ }
590
+ auto leading_dimension = input_strides[ndim - 1];
591
+ auto rows = input_sizes[ndim - 2];
592
+ bool batch_stride_compatible = true;
593
+ if (ndim == 3) {
594
+ auto cols = input_sizes[ndim - 1];
595
+ batch_stride_compatible =
596
+ input_strides[ndim - 3] >= leading_dimension * cols;
597
+ }
598
+ return (input_strides[ndim - 2] == 1) &&
599
+ (leading_dimension >= std::max<int64_t>(1, rows)) &&
600
+ batch_stride_compatible;
601
+ }
602
+
603
+ static inline bool is_blas_compatible_row_major_order(const Tensor& input) {
604
+ IntArrayRef input_strides = input.strides();
605
+ IntArrayRef input_sizes = input.sizes();
606
+ auto ndim = input.dim();
607
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2);
608
+ if (ndim > 3) {
609
+ return input.is_contiguous();
610
+ }
611
+ auto leading_dimension = input_strides[ndim - 2];
612
+ auto cols = input_sizes[ndim - 1];
613
+ bool batch_stride_compatible = true;
614
+ if (ndim == 3) {
615
+ auto rows = input_sizes[ndim - 2];
616
+ batch_stride_compatible =
617
+ input_strides[ndim - 3] >= leading_dimension * rows;
618
+ }
619
+ return (input_strides[ndim - 1] == 1) &&
620
+ (leading_dimension >= std::max<int64_t>(1, cols)) &&
621
+ batch_stride_compatible;
622
+ }
623
+
624
+ } // namespace at::native
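
batched_matrix_contiguous_strides() and cloneBatchedColumnMajor() above arrange data as C-contiguous batches of F-contiguous (column-major) matrices, the layout LAPACK and MAGMA expect. A minimal standalone sketch of that stride arithmetic for a (B, M, N) = (2, 3, 4) tensor; batched_f_contig_strides is an illustrative re-implementation, not a call into the header:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors the f_contig branch: compute C-contiguous strides, then rewrite the
// last two dims so each matrix is column-major while batches stay contiguous.
std::vector<int64_t> batched_f_contig_strides(const std::vector<int64_t>& sizes) {
  const size_t dim = sizes.size();
  std::vector<int64_t> strides(dim, 1);
  for (size_t i = dim - 1; i-- > 0;) {
    strides[i] = strides[i + 1] * std::max<int64_t>(sizes[i + 1], 1);
  }
  if (dim >= 2) {
    strides[dim - 1] = std::max<int64_t>(sizes[dim - 2], 1);  // column stride = number of rows
    strides[dim - 2] = 1;                                     // rows are contiguous in memory
  }
  return strides;
}

int main() {
  auto s = batched_f_contig_strides({2, 3, 4});  // (B, M, N)
  // C-contiguous strides would be (12, 4, 1); the F-contig fix yields (12, 1, 3).
  std::printf("%lld %lld %lld\n", (long long)s[0], (long long)s[1], (long long)s[2]);
}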
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <ATen/TensorIterator.h>
+ #include <ATen/native/DispatchStub.h>
+
+ namespace at::native {
+
+ using renorm_scale_factor_fn = void (*) (TensorIteratorBase& iter, double maxnorm);
+ DECLARE_DISPATCH(renorm_scale_factor_fn, renorm_scale_factor_stub);
+
+ } // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/RNN.h ADDED
@@ -0,0 +1,53 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/DispatchStub.h>
+
+ namespace at::native {
+
+ using lstm_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&, TensorList, TensorList, bool, int64_t, double, bool, bool, bool);
+ using rnn_fn = void(*)(Tensor&, Tensor&, const Tensor&, const Tensor&, TensorList, bool, int64_t, double, bool, bool, bool);
+ using lstm_packed_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&, const Tensor&, TensorList, TensorList, bool, int64_t, double, bool, bool);
+ using rnn_packed_fn = void(*)(Tensor&, Tensor&, const Tensor&, const Tensor&, const Tensor&, TensorList, bool, int64_t, double, bool, bool);
+
+ DECLARE_DISPATCH(lstm_fn, lstm_cudnn_stub);
+ DECLARE_DISPATCH(lstm_fn, lstm_miopen_stub);
+ DECLARE_DISPATCH(lstm_fn, lstm_mkldnn_stub);
+ DECLARE_DISPATCH(rnn_fn, gru_cudnn_stub);
+ DECLARE_DISPATCH(rnn_fn, gru_miopen_stub);
+ DECLARE_DISPATCH(rnn_fn, rnn_tanh_cudnn_stub);
+ DECLARE_DISPATCH(rnn_fn, rnn_tanh_miopen_stub);
+ DECLARE_DISPATCH(rnn_fn, rnn_relu_cudnn_stub);
+ DECLARE_DISPATCH(rnn_fn, rnn_relu_miopen_stub);
+ DECLARE_DISPATCH(lstm_packed_fn, lstm_packed_cudnn_stub);
+ DECLARE_DISPATCH(lstm_packed_fn, lstm_packed_miopen_stub);
+ DECLARE_DISPATCH(rnn_packed_fn, gru_packed_cudnn_stub);
+ DECLARE_DISPATCH(rnn_packed_fn, gru_packed_miopen_stub);
+ DECLARE_DISPATCH(rnn_packed_fn, rnn_tanh_packed_cudnn_stub);
+ DECLARE_DISPATCH(rnn_packed_fn, rnn_tanh_packed_miopen_stub);
+ DECLARE_DISPATCH(rnn_packed_fn, rnn_relu_packed_cudnn_stub);
+ DECLARE_DISPATCH(rnn_packed_fn, rnn_relu_packed_miopen_stub);
+
+ inline void check_attributes(const Tensor& input, const TensorList& params, const TensorList& hiddens, bool check_dtype=false) {
+   auto input_device = input.device();
+   auto input_dtype = input.scalar_type();
+
+   auto check_tensors = [&](const std::string& name, const Tensor& t) {
+     if (!t.defined()) return;
+     auto t_device = t.device();
+     TORCH_CHECK(input_device == t_device,
+                 "Input and ", name, " tensors are not at the same device, found input tensor at ",
+                 input_device, " and ", name, " tensor at ", t_device);
+     if (check_dtype) {
+       auto t_dtype = t.scalar_type();
+       TORCH_CHECK(input_dtype == t_dtype,
+                   "Input and ", name, " tensors are not the same dtype, found input tensor with ",
+                   input_dtype, " and ", name, " tensor with ", t_dtype);
+     }
+   };
+
+   for (const auto& h : hiddens) check_tensors("hidden", h);
+   for (const auto& p : params) check_tensors("parameter", p);
+ }
+
+ } // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOps.h ADDED
@@ -0,0 +1,56 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/Optional.h>
+
+ namespace c10 {
+ class Scalar;
+ }
+
+ namespace at {
+ struct TensorIterator;
+ class Tensor;
+ }
+
+ namespace at::native {
+
+ using reduce_fn = void(*)(TensorIterator &);
+
+ DECLARE_DISPATCH(reduce_fn, sum_stub);
+ DECLARE_DISPATCH(reduce_fn, nansum_stub);
+ DECLARE_DISPATCH(reduce_fn, prod_stub);
+ DECLARE_DISPATCH(reduce_fn, mean_stub);
+ DECLARE_DISPATCH(reduce_fn, and_stub);
+ DECLARE_DISPATCH(reduce_fn, or_stub);
+ DECLARE_DISPATCH(reduce_fn, min_values_stub);
+ DECLARE_DISPATCH(reduce_fn, max_values_stub);
+ DECLARE_DISPATCH(reduce_fn, argmax_stub);
+ DECLARE_DISPATCH(reduce_fn, argmin_stub);
+
+ using reduce_std_var_function =
+     void (*)(TensorIterator&, double correction, bool take_sqrt);
+ DECLARE_DISPATCH(reduce_std_var_function, std_var_stub);
+
+ using reduce_norm_fn =
+     void (*)(Tensor&, const Tensor&, const c10::Scalar&, c10::optional<int64_t>);
+ DECLARE_DISPATCH(reduce_norm_fn, norm_kernel);
+
+ using reduce_fn_flag = void(*)(TensorIterator &, const c10::Scalar&);
+ DECLARE_DISPATCH(reduce_fn_flag, norm_stub);
+
+ using structured_cum_fn = void (*)(const Tensor&, const Tensor&, int64_t);
+ using cum_fn = void (*)(Tensor&, const Tensor&, int64_t);
+ DECLARE_DISPATCH(structured_cum_fn, cumsum_stub);
+ DECLARE_DISPATCH(structured_cum_fn, cumprod_stub);
+ DECLARE_DISPATCH(cum_fn, logcumsumexp_stub);
+
+ DECLARE_DISPATCH(void (*)(const Tensor&, int64_t, bool, Tensor&, Tensor&), aminmax_stub);
+ DECLARE_DISPATCH(void (*)(const Tensor&, Tensor&, Tensor&), aminmax_allreduce_stub);
+
+ // Used in cuda/Normalization.cu
+ TORCH_API std::tuple<Tensor&,Tensor&> var_mean_out(
+     Tensor &result1, Tensor &result2, const Tensor &self, IntArrayRef dim,
+     int64_t correction, bool keepdim);
+
+ } // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/SharedReduceOps.h ADDED
@@ -0,0 +1,544 @@
1
+ #pragma once
2
+ // Please note that this file is
3
+ // used across both CPU and GPU.
4
+
5
+ #include <type_traits>
6
+ #include <complex>
7
+ #include <c10/macros/Macros.h>
8
+ #include <ATen/detail/FunctionTraits.h>
9
+ #include <ATen/NumericUtils.h>
10
+ #if defined(__CUDACC__)
11
+ #include <ATen/cuda/DeviceUtils.cuh>
12
+ #include <ATen/native/cuda/DeviceSqrt.cuh>
13
+ #elif defined(__HIPCC__)
14
+ #include <ATen/hip/DeviceUtils.cuh>
15
+ #include <ATen/native/hip/DeviceSqrt.cuh>
16
+ #endif
17
+ #if defined(__CUDACC__) || defined(__HIPCC__)
18
+ #include <thrust/pair.h>
19
+ #else
20
+ #include <cmath>
21
+ #define device_sqrt std::sqrt
22
+ #endif
23
+ #if defined(__CUDACC__) || defined(__HIPCC__)
24
+ template <typename scalar_t>
25
+ inline C10_DEVICE scalar_t max_propagate_nan(scalar_t a, scalar_t b) {
26
+ #if defined(__HIPCC__)
27
+ // TODO: remove this special case for HIP when issue is fixed:
28
+ // https://github.com/ROCm-Developer-Tools/HIP/issues/2209
29
+ scalar_t max = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::max(a, b));
30
+ #else
31
+ scalar_t max = at::_isnan(b) ? b : std::max(a, b);
32
+ #endif
33
+ return max;
34
+ }
35
+ template <typename scalar_t>
36
+ inline C10_DEVICE scalar_t min_propagate_nan(scalar_t a, scalar_t b) {
37
+ #if defined(__HIPCC__)
38
+ // TODO: remove this special case for HIP when issue is fixed:
39
+ // https://github.com/ROCm-Developer-Tools/HIP/issues/2209
40
+ scalar_t min = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::min(a, b));
41
+ #else
42
+ scalar_t min = at::_isnan(b) ? b : std::min(a, b);
43
+ #endif
44
+ return min;
45
+ }
46
+ #define MAX(X, Y) max_propagate_nan(X,Y)
47
+ #define MIN(X, Y) min_propagate_nan(X,Y)
48
+ #else
49
+ #include <ATen/native/cpu/zmath.h>
50
+ #define MAX(X, Y) max_impl(X,Y)
51
+ #define MIN(X, Y) min_impl(X,Y)
52
+ #endif
53
+
54
+ // ROCM hcc doesn't work well with using std:: in kernel functions
55
+ #if defined(__CUDA_ARCH__)
56
+ #include <c10/cuda/CUDAMathCompat.h>
57
+ #define compat_pow c10::cuda::compat::pow
58
+ #elif defined(__HIPCC__)
59
+ #include <c10/hip/HIPMathCompat.h>
60
+ #define compat_pow c10::hip::compat::pow
61
+ #else
62
+ #define compat_pow std::pow
63
+ #endif
64
+
65
+ namespace at { namespace native {
66
+
67
+ namespace detail {
68
+
69
+ #if defined(__CUDACC__) || defined(__HIPCC__)
70
+ template <typename T1, typename T2> using pair = thrust::pair<T1, T2>;
71
+ #else
72
+ template <typename T1, typename T2> using pair = std::pair<T1, T2>;
73
+ #endif
74
+
75
+ } // namespace detail
76
+
77
+ template <typename scalar_t, typename index_t>
78
+ struct WelfordData {
79
+ scalar_t mean;
80
+ scalar_t m2;
81
+ index_t n;
82
+ scalar_t nf;
83
+
84
+ C10_HOST_DEVICE WelfordData() : mean(0), m2(0), n(0), nf(0) {}
85
+
86
+ C10_HOST_DEVICE WelfordData(
87
+ scalar_t mean,
88
+ scalar_t m2,
89
+ index_t n,
90
+ scalar_t nf)
91
+ : mean(mean), m2(m2), n(n), nf(nf) {}
92
+ };
93
+
94
+
95
+ template <typename scalar_t, typename acc_scalar_t, typename index_t, typename res_t>
96
+ struct WelfordOps {
97
+ acc_scalar_t correction;
98
+ bool take_sqrt;
99
+ public:
100
+ using acc_t = WelfordData<acc_scalar_t, index_t>;
101
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, index_t /*idx*/) const {
102
+ // We accumulate n in index_t to avoid cumulative rounding error, but still
103
+ // need nf for use in combine where int32 may overflow.
104
+ index_t new_n = acc.n + 1;
105
+ acc_scalar_t new_nf = static_cast<acc_scalar_t>(new_n);
106
+ acc_scalar_t delta = data - acc.mean;
107
+ acc_scalar_t new_mean = acc.mean + delta / new_nf;
108
+ acc_scalar_t new_delta = data - new_mean;
109
+ return {
110
+ new_mean,
111
+ acc.m2 + delta * new_delta,
112
+ new_n,
113
+ new_nf,
114
+ };
115
+ }
116
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
117
+ if (a.nf == 0) {
118
+ return b;
119
+ }
120
+ if (b.nf == 0) {
121
+ return a;
122
+ }
123
+ acc_scalar_t delta = b.mean - a.mean;
124
+ acc_scalar_t new_count = a.nf + b.nf;
125
+ acc_scalar_t nb_over_n = b.nf / new_count;
126
+ return {
127
+ a.mean + delta * nb_over_n,
128
+ a.m2 + b.m2 + delta * delta * a.nf * nb_over_n,
129
+ // setting acc.n as -1 since acc.n might not be able to represent the count
130
+ // correctly within its range, setting it to -1 to avoid confusion
131
+ -1,
132
+ new_count
133
+ };
134
+ }
135
+ inline C10_DEVICE res_t project(acc_t acc) const __ubsan_ignore_float_divide_by_zero__ {
136
+ const auto mean = static_cast<scalar_t>(acc.mean);
137
+ const auto divisor = acc.nf > correction ? acc.nf - correction : 0;
138
+ const auto var = acc.m2 / divisor;
139
+ res_t results(take_sqrt ? device_sqrt(var) : var, mean);
140
+ return results;
141
+ }
142
+
143
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
144
+ return acc;
145
+ }
146
+
147
+ #if defined(__CUDACC__) || defined(__HIPCC__)
148
+ inline __device__ acc_t warp_shfl_down(acc_t acc, int offset) const {
149
+ return {
150
+ WARP_SHFL_DOWN(acc.mean, offset)
151
+ , WARP_SHFL_DOWN(acc.m2, offset)
152
+ , WARP_SHFL_DOWN(acc.n, offset)
153
+ , WARP_SHFL_DOWN(acc.nf, offset)
154
+ };
155
+ }
156
+ #endif
157
+ C10_HOST_DEVICE WelfordOps(acc_scalar_t correction, bool take_sqrt)
158
+ : correction(correction), take_sqrt(take_sqrt) {}
159
+ };
160
+
161
+ template <typename scalar_t, typename acc_t=scalar_t, typename factor_t=acc_t, typename out_t = acc_t>
162
+ struct MeanOps {
163
+ factor_t factor;
164
+
165
+ inline C10_DEVICE acc_t reduce(acc_t a, scalar_t b, int64_t /*idx*/) const {
166
+ return combine(a, static_cast<acc_t>(b));
167
+ }
168
+
169
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
170
+ return a + b;
171
+ }
172
+
173
+ inline C10_DEVICE out_t project(acc_t a) const {
174
+ return a * factor;
175
+ }
176
+
177
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
178
+ return acc;
179
+ }
180
+
181
+ #if defined(__CUDACC__) || defined(__HIPCC__)
182
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t data, int offset) const {
183
+ return WARP_SHFL_DOWN(data, offset);
184
+ }
185
+ #endif
186
+
187
+ MeanOps(factor_t factor): factor(factor) {
188
+ }
189
+ };
190
+
191
+ // This accumulator template is used to calculate the minimum absolute value of
192
+ // a set of numbers.
193
+ // `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
194
+ // value. These types differ for complex number input support.
195
+ template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
196
+ struct AbsMinOps {
197
+
198
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
199
+ return MIN(acc, static_cast<acc_t>(std::abs(data)));
200
+ }
201
+
202
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
203
+ return MIN(a, b);
204
+ }
205
+
206
+ inline C10_DEVICE out_t project(acc_t a) const {
207
+ return a;
208
+ }
209
+
210
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
211
+ return acc;
212
+ }
213
+
214
+ #if defined(__CUDACC__) || defined(__HIPCC__)
215
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
216
+ return WARP_SHFL_DOWN(acc, offset);
217
+ }
218
+ #endif
219
+ };
220
+
221
+ // This accumulator template is used to calculate the maximum absolute value of
222
+ // a set of numbers.
223
+ // `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
224
+ // value. These types differ for complex number input support.
225
+ template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
226
+ struct AbsMaxOps {
227
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
228
+ return MAX(acc, static_cast<acc_t>(std::abs(data)));
229
+ }
230
+
231
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
232
+ return MAX(a, b);
233
+ }
234
+
235
+ inline C10_DEVICE out_t project(acc_t a) const {
236
+ return a;
237
+ }
238
+
239
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
240
+ return acc;
241
+ }
242
+
243
+ #if defined(__CUDACC__) || defined(__HIPCC__)
244
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
245
+ return WARP_SHFL_DOWN(acc, offset);
246
+ }
247
+ #endif
248
+ };
249
+
250
+ // This accumulator template is used to calculate the norm of the absolute value
251
+ // of a set of numbers.
252
+ // `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
253
+ // value. These types differ for complex number input support.
254
+ template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
255
+ struct NormOps {
256
+ acc_t norm_;
257
+
258
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
259
+ return acc + compat_pow(static_cast<acc_t>(std::abs(data)), norm_);
260
+ }
261
+
262
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
263
+ return a + b;
264
+ }
265
+
266
+ inline C10_DEVICE out_t project(acc_t a) const {
267
+ return compat_pow(a, static_cast<acc_t>(1.0) / norm_);
268
+ }
269
+
270
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
271
+ return acc;
272
+ }
273
+
274
+ #if defined(__CUDACC__) || defined(__HIPCC__)
275
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
276
+ return WARP_SHFL_DOWN(acc, offset);
277
+ }
278
+ #endif
279
+
280
+ NormOps(acc_t norm_): norm_(norm_) {
281
+ }
282
+ };
283
+
284
+ // This accumulator template is used to calculate the order zero norm of the
285
+ // absolute value of a set of numbers.
286
+ // `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
287
+ // value. These types differ for complex number input support.
288
+ template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
289
+ struct NormZeroOps {
290
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
291
+ return acc + (data == static_cast<scalar_t>(0) ? static_cast<acc_t>(0) : static_cast<acc_t>(1));
292
+ }
293
+
294
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
295
+ return a + b;
296
+ }
297
+
298
+ inline C10_DEVICE out_t project(acc_t a) const {
299
+ return a;
300
+ }
301
+
302
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
303
+ return acc;
304
+ }
305
+
306
+
307
+ #if defined(__CUDACC__) || defined(__HIPCC__)
308
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
309
+ return WARP_SHFL_DOWN(acc, offset);
310
+ }
311
+ #endif
312
+ };
313
+
314
+ // This accumulator template is used to calculate the order one norm of the
315
+ // absolute value of a set of numbers.
316
+ // `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
317
+ // value. These types differ for complex number input support.
318
+ template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
319
+ struct NormOneOps {
320
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
321
+ return acc + static_cast<acc_t>(std::abs(data));
322
+ }
323
+
324
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
325
+ return a + b;
326
+ }
327
+
328
+ inline C10_DEVICE out_t project(acc_t a) const {
329
+ return a;
330
+ }
331
+
332
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
333
+ return acc;
334
+ }
335
+
336
+ #if defined(__CUDACC__) || defined(__HIPCC__)
337
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
338
+ return WARP_SHFL_DOWN(acc, offset);
339
+ }
340
+ #endif
341
+ };
342
+
343
+
344
+ template<typename acc_t>
345
+ struct AbsSwitch {};
346
+
347
+ template<typename scalar_t, typename acc_t>
348
+ inline C10_DEVICE acc_t abs_if_complex(scalar_t data, AbsSwitch<acc_t>) {
349
+ return static_cast<acc_t>(data);
350
+ }
351
+
352
+ template<typename scalar_t, typename acc_t>
353
+ inline C10_DEVICE acc_t abs_if_complex(std::complex<scalar_t> data, AbsSwitch<acc_t>) {
354
+ return static_cast<acc_t>(std::abs(data));
355
+ }
356
+
357
+ template<typename scalar_t, typename acc_t>
358
+ inline C10_DEVICE acc_t abs_if_complex(c10::complex<scalar_t> data, AbsSwitch<acc_t>) {
359
+ return static_cast<acc_t>(std::abs(data));
360
+ }
361
+
362
+ // This accumulator template is used to calculate the order two norm of the
363
+ // absolute value of a set of numbers.
364
+ // `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
365
+ // value. These types differ for complex number input support.
366
+ template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
367
+ struct NormTwoOps {
368
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
369
+ acc_t data_ = abs_if_complex(data, AbsSwitch<acc_t>());
370
+ return acc + data_ * data_;
371
+ }
372
+
373
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
374
+ return a + b;
375
+ }
376
+
377
+ inline C10_DEVICE out_t project(acc_t a) const {
378
+ return device_sqrt(a);
379
+ }
380
+
381
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
382
+ return acc;
383
+ }
384
+
385
+ #if defined(__CUDACC__) || defined(__HIPCC__)
386
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
387
+ return WARP_SHFL_DOWN(acc, offset);
388
+ }
389
+ #endif
390
+ };
391
+
392
+ template <typename acc_t, typename data_t>
393
+ struct NanSumOps {
394
+ inline C10_DEVICE acc_t reduce(acc_t a, data_t b, int64_t /*idx*/) const {
395
+ return a + (at::_isnan(b) ? acc_t{0.} : acc_t{b});
396
+ }
397
+
398
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
399
+ return a + b;
400
+ }
401
+
402
+ inline C10_DEVICE data_t project(acc_t a) const {
403
+ return data_t{a};
404
+ }
405
+
406
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
407
+ return acc;
408
+ }
409
+
410
+ #if defined(__CUDACC__) || defined(__HIPCC__)
411
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t data, int offset) const {
412
+ return WARP_SHFL_DOWN(data, offset);
413
+ }
414
+ #endif
415
+ };
416
+
417
+ namespace detail {
418
+
419
+ template <typename scalar_t>
420
+ struct LessOrNan {
421
+ C10_DEVICE bool operator () (scalar_t a, scalar_t b, int64_t idx_a, int64_t idx_b) const {
422
+ // If (a == b), then choose the one with lower idx, else min(a, b)
423
+ if (at::_isnan(a)) {
424
+ if (at::_isnan(b)) {
425
+ return idx_a < idx_b;
426
+ }
427
+ return true;
428
+ }
429
+ return (a == b) ? idx_a < idx_b : (a < b);
430
+ }
431
+ };
432
+
433
+ template <typename scalar_t>
434
+ struct GreaterOrNan {
435
+ C10_DEVICE bool operator () (scalar_t a, scalar_t b, int64_t idx_a, int64_t idx_b) const {
436
+ // If (a == b), then choose the one with lower idx, else max(a, b)
437
+ if (at::_isnan(a)) {
438
+ if (at::_isnan(b)) {
439
+ return idx_a < idx_b;
440
+ }
441
+ return true;
442
+ }
443
+ return (a == b) ? idx_a < idx_b : (a > b);
444
+ }
445
+ };
446
+
447
+ template <typename comp_t>
448
+ struct MinMaxReductionOps {
449
+ using scalar_t = typename binary_function_traits<comp_t>::arg1_t;
450
+ using index_t = int64_t;
451
+ using arg_t = detail::pair<scalar_t, index_t>;
452
+
453
+ static C10_DEVICE arg_t project(arg_t arg) {
454
+ return arg;
455
+ }
456
+
457
+ static C10_DEVICE arg_t reduce(arg_t arg, scalar_t val, int64_t idx) {
458
+ return comp_t{}(arg.first, val, arg.second, idx) ? arg : arg_t(val, idx);
459
+ }
460
+
461
+ static C10_DEVICE arg_t combine(arg_t a, arg_t b) {
462
+ return comp_t{}(a.first, b.first, a.second, b.second) ? a : b;
463
+ }
464
+
465
+ static C10_DEVICE arg_t translate_idx(arg_t a, int64_t base_idx) {
466
+ return {a.first, a.second + base_idx};
467
+ }
468
+
469
+ #if defined(__CUDACC__) || defined(__HIPCC__)
470
+ static C10_DEVICE arg_t warp_shfl_down(arg_t arg, int offset) {
471
+ return arg_t(WARP_SHFL_DOWN(arg.first, offset),
472
+ WARP_SHFL_DOWN(arg.second, offset));
473
+ }
474
+ #endif
475
+ };
476
+
477
+ template <typename comp_t>
478
+ struct ArgReductionOps : public MinMaxReductionOps<comp_t> {
479
+ using typename MinMaxReductionOps<comp_t>::scalar_t;
480
+ using typename MinMaxReductionOps<comp_t>::index_t;
481
+ using typename MinMaxReductionOps<comp_t>::arg_t;
482
+
483
+ static C10_DEVICE index_t project(arg_t arg) {
484
+ return arg.second;
485
+ }
486
+ };
487
+
488
+ } // namespace detail
489
+
490
+ template <typename scalar_t>
491
+ struct ArgMaxOps :
492
+ public detail::ArgReductionOps<detail::GreaterOrNan<scalar_t>> {
493
+ };
494
+
495
+ template <typename scalar_t>
496
+ struct ArgMinOps :
497
+ public detail::ArgReductionOps<detail::LessOrNan<scalar_t>> {
498
+ };
499
+
500
+ template <typename scalar_t>
501
+ struct MinOps :
502
+ public detail::MinMaxReductionOps<detail::LessOrNan<scalar_t>> {
503
+ };
504
+
505
+ template <typename scalar_t>
506
+ struct MaxOps :
507
+ public detail::MinMaxReductionOps<detail::GreaterOrNan<scalar_t>> {
508
+ };
509
+
510
+ template <typename scalar_t, typename acc_scalar_t, typename index_t>
511
+ struct MinMaxOps {
512
+ using acc_t = detail::pair<acc_scalar_t, acc_scalar_t>;
513
+ inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, index_t /*idx*/) const {
514
+ return combine(acc, {data, data});
515
+ }
516
+
517
+ inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
518
+ auto min_val = (at::_isnan(a.first) || a.first < b.first) ? a.first : b.first;
519
+ auto max_val = (at::_isnan(a.second) || a.second > b.second) ? a.second : b.second;
520
+
521
+ return {min_val, max_val};
522
+ }
523
+
524
+ inline C10_DEVICE acc_t project(acc_t acc) const {
525
+ return acc;
526
+ }
527
+
528
+ static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
529
+ return acc;
530
+ }
531
+
532
+ #if defined(__CUDACC__) || defined(__HIPCC__)
533
+ inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
534
+ return {
535
+ WARP_SHFL_DOWN(acc.first, offset), WARP_SHFL_DOWN(acc.second, offset)
536
+ };
537
+ }
538
+ #endif
539
+ };
540
+
541
+ }} // namespace at::native
542
+
543
+ #undef MAX
544
+ #undef MIN
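
WelfordOps above keeps a running (mean, m2, n) summary per chunk and merges partials in combine() with the standard parallel-variance update: delta = mean_b - mean_a; mean = mean_a + delta * n_b / n; m2 = m2_a + m2_b + delta^2 * n_a * n_b / n. Below is a small standalone check of that merge against a direct two-pass variance, on plain doubles rather than the templated accumulator (illustrative only):

#include <cstdio>
#include <vector>

struct Welford { double mean, m2, n; };

// Single-element update, as in WelfordOps::reduce().
Welford update(Welford a, double x) {
  a.n += 1;
  double delta = x - a.mean;
  a.mean += delta / a.n;
  a.m2 += delta * (x - a.mean);
  return a;
}

// Pairwise merge, as in WelfordOps::combine().
Welford combine(Welford a, Welford b) {
  if (a.n == 0) return b;
  if (b.n == 0) return a;
  double delta = b.mean - a.mean;
  double n = a.n + b.n;
  return {a.mean + delta * b.n / n,
          a.m2 + b.m2 + delta * delta * a.n * b.n / n,
          n};
}

int main() {
  std::vector<double> xs = {1, 2, 4, 8, 16, 32};
  Welford lo{0, 0, 0}, hi{0, 0, 0};
  for (int i = 0; i < 3; ++i) lo = update(lo, xs[i]);
  for (int i = 3; i < 6; ++i) hi = update(hi, xs[i]);
  Welford all = combine(lo, hi);
  // Population variance = m2 / n; compare with a direct two-pass computation.
  double mean = 0, var = 0;
  for (double x : xs) mean += x / xs.size();
  for (double x : xs) var += (x - mean) * (x - mean) / xs.size();
  std::printf("welford : mean=%g var=%g\n", all.mean, all.m2 / all.n);
  std::printf("two-pass: mean=%g var=%g\n", mean, var);
}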
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/StridedRandomAccessor.h ADDED
@@ -0,0 +1,301 @@
1
+ #pragma once
2
+
3
+ namespace at::native {
4
+
5
+ // (Const)StridedRandomAccessor is a
6
+ // (const) random access iterator defined over
7
+ // a strided array.
8
+
9
+ // The traits below are to introduce __restrict__
10
+ // modifier on different platforms.
11
+
12
+ template <typename T>
13
+ struct DefaultPtrTraits {
14
+ using PtrType = T*;
15
+ };
16
+
17
+ #if (defined(_WIN32) || defined(_WIN64))
18
+ #define RESTRICT __restrict
19
+ #else
20
+ #define RESTRICT __restrict__
21
+ #endif
22
+
23
+ template <typename T>
24
+ struct RestrictPtrTraits {
25
+ using PtrType = T* RESTRICT;
26
+ };
27
+
28
+ template <
29
+ typename T,
30
+ typename index_t = int64_t,
31
+ template <typename U> class PtrTraits = DefaultPtrTraits
32
+ >
33
+ class ConstStridedRandomAccessor {
34
+ public:
35
+ using difference_type = index_t;
36
+ using value_type = const T;
37
+ using pointer = const typename PtrTraits<T>::PtrType;
38
+ using reference = const value_type&;
39
+ using iterator_category = std::random_access_iterator_tag;
40
+
41
+ using PtrType = typename PtrTraits<T>::PtrType;
42
+ using index_type = index_t;
43
+
44
+ // Constructors {
45
+ C10_HOST_DEVICE
46
+ ConstStridedRandomAccessor(PtrType ptr, index_t stride)
47
+ : ptr{ptr}, stride{stride}
48
+ {}
49
+
50
+ C10_HOST_DEVICE
51
+ explicit ConstStridedRandomAccessor(PtrType ptr)
52
+ : ptr{ptr}, stride{static_cast<index_t>(1)}
53
+ {}
54
+
55
+ C10_HOST_DEVICE
56
+ ConstStridedRandomAccessor()
57
+ : ptr{nullptr}, stride{static_cast<index_t>(1)}
58
+ {}
59
+ // }
60
+
61
+ // Pointer-like operations {
62
+ C10_HOST_DEVICE
63
+ reference operator*() const {
64
+ return *ptr;
65
+ }
66
+
67
+ C10_HOST_DEVICE
68
+ const value_type* operator->() const {
69
+ return reinterpret_cast<const value_type*>(ptr);
70
+ }
71
+
72
+ C10_HOST_DEVICE
73
+ reference operator[](index_t idx) const {
74
+ return ptr[idx * stride];
75
+ }
76
+ // }
77
+
78
+ // Prefix/postfix increment/decrement {
79
+ C10_HOST_DEVICE
80
+ ConstStridedRandomAccessor& operator++() {
81
+ ptr += stride;
82
+ return *this;
83
+ }
84
+
85
+ C10_HOST_DEVICE
86
+ ConstStridedRandomAccessor operator++(int) {
87
+ ConstStridedRandomAccessor copy(*this);
88
+ ++*this;
89
+ return copy;
90
+ }
91
+
92
+ C10_HOST_DEVICE
93
+ ConstStridedRandomAccessor& operator--() {
94
+ ptr -= stride;
95
+ return *this;
96
+ }
97
+
98
+ C10_HOST_DEVICE
99
+ ConstStridedRandomAccessor operator--(int) {
100
+ ConstStridedRandomAccessor copy(*this);
101
+ --*this;
102
+ return copy;
103
+ }
104
+ // }
105
+
106
+ // Arithmetic operations {
107
+ C10_HOST_DEVICE
108
+ ConstStridedRandomAccessor& operator+=(index_t offset) {
109
+ ptr += offset * stride;
110
+ return *this;
111
+ }
112
+
113
+ C10_HOST_DEVICE
114
+ ConstStridedRandomAccessor operator+(index_t offset) const {
115
+ return ConstStridedRandomAccessor(ptr + offset * stride, stride);
116
+ }
117
+
118
+ C10_HOST_DEVICE
119
+ friend ConstStridedRandomAccessor operator+(
120
+ index_t offset,
121
+ const ConstStridedRandomAccessor& accessor
122
+ ) {
123
+ return accessor + offset;
124
+ }
125
+
126
+ C10_HOST_DEVICE
127
+ ConstStridedRandomAccessor& operator-=(index_t offset) {
128
+ ptr -= offset * stride;
129
+ return *this;
130
+ }
131
+
132
+ C10_HOST_DEVICE
133
+ ConstStridedRandomAccessor operator-(index_t offset) const {
134
+ return ConstStridedRandomAccessor(ptr - offset * stride, stride);
135
+ }
136
+
137
+ // Note that this operator is well-defined when `this` and `other`
138
+ // represent the same sequences, i.e. when
139
+ // 1. this.stride == other.stride,
140
+ // 2. |other - this| / this.stride is an Integer.
141
+ C10_HOST_DEVICE
142
+ difference_type operator-(const ConstStridedRandomAccessor& other) const {
143
+ return (ptr - other.ptr) / stride;
144
+ }
145
+ // }
146
+
147
+ // Comparison operators {
148
+ C10_HOST_DEVICE
149
+ bool operator==(const ConstStridedRandomAccessor& other) const {
150
+ return (ptr == other.ptr) && (stride == other.stride);
151
+ }
152
+
153
+ C10_HOST_DEVICE
154
+ bool operator!=(const ConstStridedRandomAccessor& other) const {
155
+ return !(*this == other);
156
+ }
157
+
158
+ C10_HOST_DEVICE
159
+ bool operator<(const ConstStridedRandomAccessor& other) const {
160
+ return ptr < other.ptr;
161
+ }
162
+
163
+ C10_HOST_DEVICE
164
+ bool operator<=(const ConstStridedRandomAccessor& other) const {
165
+ return (*this < other) || (*this == other);
166
+ }
167
+
168
+ C10_HOST_DEVICE
169
+ bool operator>(const ConstStridedRandomAccessor& other) const {
170
+ return !(*this <= other);
171
+ }
172
+
173
+ C10_HOST_DEVICE
174
+ bool operator>=(const ConstStridedRandomAccessor& other) const {
175
+ return !(*this < other);
176
+ }
177
+ // }
178
+
179
+ protected:
180
+ PtrType ptr;
181
+ index_t stride;
182
+ };
183
+
184
+ template <
185
+ typename T,
186
+ typename index_t = int64_t,
187
+ template <typename U> class PtrTraits = DefaultPtrTraits
188
+ >
189
+ class StridedRandomAccessor
190
+ : public ConstStridedRandomAccessor<T, index_t, PtrTraits> {
191
+ public:
192
+ using difference_type = index_t;
193
+ using value_type = T;
194
+ using pointer = typename PtrTraits<T>::PtrType;
195
+ using reference = value_type&;
196
+
197
+ using BaseType = ConstStridedRandomAccessor<T, index_t, PtrTraits>;
198
+ using PtrType = typename PtrTraits<T>::PtrType;
199
+
200
+ // Constructors {
201
+ C10_HOST_DEVICE
202
+ StridedRandomAccessor(PtrType ptr, index_t stride)
203
+ : BaseType(ptr, stride)
204
+ {}
205
+
206
+ C10_HOST_DEVICE
207
+ explicit StridedRandomAccessor(PtrType ptr)
208
+ : BaseType(ptr)
209
+ {}
210
+
211
+ C10_HOST_DEVICE
212
+ StridedRandomAccessor()
213
+ : BaseType()
214
+ {}
215
+ // }
216
+
217
+ // Pointer-like operations {
218
+ C10_HOST_DEVICE
219
+ reference operator*() const {
220
+ return *this->ptr;
221
+ }
222
+
223
+ C10_HOST_DEVICE
224
+ value_type* operator->() const {
225
+ return reinterpret_cast<value_type*>(this->ptr);
226
+ }
227
+
228
+ C10_HOST_DEVICE
229
+ reference operator[](index_t idx) const {
230
+ return this->ptr[idx * this->stride];
231
+ }
232
+ // }
233
+
234
+ // Prefix/postfix increment/decrement {
235
+ C10_HOST_DEVICE
236
+ StridedRandomAccessor& operator++() {
237
+ this->ptr += this->stride;
238
+ return *this;
239
+ }
240
+
241
+ C10_HOST_DEVICE
242
+ StridedRandomAccessor operator++(int) {
243
+ StridedRandomAccessor copy(*this);
244
+ ++*this;
245
+ return copy;
246
+ }
247
+
248
+ C10_HOST_DEVICE
249
+ StridedRandomAccessor& operator--() {
250
+ this->ptr -= this->stride;
251
+ return *this;
252
+ }
253
+
254
+ C10_HOST_DEVICE
255
+ StridedRandomAccessor operator--(int) {
256
+ StridedRandomAccessor copy(*this);
257
+ --*this;
258
+ return copy;
259
+ }
260
+ // }
261
+
262
+ // Arithmetic operations {
263
+ C10_HOST_DEVICE
264
+ StridedRandomAccessor& operator+=(index_t offset) {
265
+ this->ptr += offset * this->stride;
266
+ return *this;
267
+ }
268
+
269
+ C10_HOST_DEVICE
270
+ StridedRandomAccessor operator+(index_t offset) const {
271
+ return StridedRandomAccessor(this->ptr + offset * this->stride, this->stride);
272
+ }
273
+
274
+ C10_HOST_DEVICE
275
+ friend StridedRandomAccessor operator+(
276
+ index_t offset,
277
+ const StridedRandomAccessor& accessor
278
+ ) {
279
+ return accessor + offset;
280
+ }
281
+
282
+ C10_HOST_DEVICE
283
+ StridedRandomAccessor& operator-=(index_t offset) {
284
+ this->ptr -= offset * this->stride;
285
+ return *this;
286
+ }
287
+
288
+ C10_HOST_DEVICE
289
+ StridedRandomAccessor operator-(index_t offset) const {
290
+ return StridedRandomAccessor(this->ptr - offset * this->stride, this->stride);
291
+ }
292
+
293
+ // Note that here we call BaseType::operator- version
294
+ C10_HOST_DEVICE
295
+ difference_type operator-(const BaseType& other) const {
296
+ return (static_cast<const BaseType&>(*this) - other);
297
+ }
298
+ // }
299
+ };
300
+
301
+ } // namespace at::native
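Since ConstStridedRandomAccessor provides the full random-access-iterator interface (value_type, difference_type, increment/decrement, comparison), it can be handed straight to standard algorithms. A hypothetical host-side usage sketch (assumes the header above is on the include path; the function name is made up) that sums one column of a row-major matrix by giving the iterator a stride equal to the row length:

    #include <cstdint>
    #include <numeric>
    #include <ATen/native/StridedRandomAccessor.h>

    // Sum column `col` of a row-major rows x cols buffer without copying it.
    float sum_column(float* data, int64_t rows, int64_t cols, int64_t col) {
      using Iter = at::native::ConstStridedRandomAccessor<float>;
      Iter begin(data + col, /*stride=*/cols);  // stepping by `cols` walks down the column
      Iter end = begin + rows;                  // uses operator+(index_t)
      return std::accumulate(begin, end, 0.0f);
    }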
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/TensorCompare.h ADDED
@@ -0,0 +1,49 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+
5
+ namespace c10 {
6
+ class Scalar;
7
+ }
8
+
9
+ namespace at {
10
+ class Tensor;
11
+ struct TensorIterator;
12
+ struct TensorIteratorBase;
13
+ }
14
+
15
+ namespace at::native {
16
+
17
+ using reduce_minmax_fn =
18
+ void (*)(Tensor&, Tensor&, const Tensor&, int64_t, bool);
19
+ using structured_reduce_minmax_fn =
20
+ void (*)(const Tensor&, const Tensor&, const Tensor&, int64_t, bool);
21
+
22
+ DECLARE_DISPATCH(structured_reduce_minmax_fn, max_stub);
23
+ DECLARE_DISPATCH(structured_reduce_minmax_fn, min_stub);
24
+
25
+ using where_fn = void (*)(TensorIterator &);
26
+ DECLARE_DISPATCH(where_fn, where_kernel);
27
+
28
+ using is_infinity_op_fn = void (*)(TensorIteratorBase &);
29
+ DECLARE_DISPATCH(is_infinity_op_fn, isposinf_stub);
30
+ DECLARE_DISPATCH(is_infinity_op_fn, isneginf_stub);
31
+
32
+ using mode_fn = void (*)(Tensor&, Tensor&, const Tensor&, int64_t, bool);
33
+ DECLARE_DISPATCH(mode_fn, mode_stub);
34
+
35
+ using clamp_tensor_fn = void (*)(TensorIteratorBase &);
36
+ DECLARE_DISPATCH(clamp_tensor_fn, clamp_stub);
37
+
38
+ namespace detail {
39
+ enum class ClampLimits {Min, Max, MinMax};
40
+ }
41
+
42
+ DECLARE_DISPATCH(void (*)(TensorIteratorBase &, const c10::Scalar&, const c10::Scalar&), clamp_scalar_stub);
43
+ DECLARE_DISPATCH(void (*)(TensorIteratorBase &, c10::Scalar), clamp_min_scalar_stub);
44
+ DECLARE_DISPATCH(void (*)(TensorIteratorBase &, c10::Scalar), clamp_max_scalar_stub);
45
+
46
+ using isin_default_fn = void (*)(const Tensor&, const Tensor&, bool, const Tensor&);
47
+ DECLARE_DISPATCH(isin_default_fn, isin_default_stub);
48
+
49
+ } // namespace at::native
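TensorCompare.h only declares dispatch stubs; the kernels are registered elsewhere. As a reminder, wiring a stub such as where_kernel up usually spans several translation units (a hedged sketch of the DispatchStub pattern; where_kernel_impl is a made-up implementation name):

    // In a .cpp compiled once (not per CPU capability): give the stub its storage.
    DEFINE_DISPATCH(where_kernel);

    // In a kernel file under ATen/native/cpu: register the implementation.
    static void where_kernel_impl(at::TensorIterator& iter) { /* elementwise select */ }
    REGISTER_DISPATCH(where_kernel, &where_kernel_impl);

    // At the call site: dispatch on the device type carried by the iterator.
    // where_kernel(iter.device_type(), iter);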
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/TensorFactories.h ADDED
@@ -0,0 +1,140 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/EmptyTensor.h>
5
+ #include <ATen/TensorIterator.h>
6
+ #include <ATen/Dispatch.h>
7
+ #include <ATen/native/DispatchStub.h>
8
+
9
+ #ifndef AT_PER_OPERATOR_HEADERS
10
+ #include <ATen/Functions.h>
11
+ #else
12
+ #include <ATen/ops/scalar_tensor.h>
13
+ #endif
14
+
15
+ namespace at::native {
16
+ // Different combinations of row, col, and offset can lead to two cases:
17
+ //
18
+ // Case 1 - Trapezoid (Triangle as a special case): row + offset <= col
19
+ // Example A: offset > 0
20
+ // 1 1 0 0 0
21
+ // 1 1 1 0 0
22
+ // 1 1 1 1 0
23
+ // Example B: offset <= 0
24
+ // 0 0 0
25
+ // 1 0 0
26
+ // 1 1 0
27
+ // In this case, we calculate the number of elements in the first row and
28
+ // last row of the tril respectively, and then compute the tril size.
29
+ //
30
+ // Case 2 - Trapezoid + Rectangle: row + offset > col
31
+ // Example:
32
+ // 1 1 0
33
+ // 1 1 1
34
+ // 1 1 1
35
+ // In this case, we first calculate the size of top trapezoid, and then
36
+ // calculate the size of the bottom rectangle.
37
+ inline int64_t get_tril_size(int64_t row, int64_t col, int64_t offset) {
38
+ // If either dimension is 0 then there is no tril
39
+ if (row == 0 || col == 0) {
40
+ return 0;
41
+ }
42
+ // number of elements in the first row of the tril
43
+ auto m_first_row = offset > 0 ?
44
+ std::min<int64_t>(col, 1 + offset) : // upper bounded by col
45
+ row + offset > 0; // either 0 or 1
46
+ // number of elements in the last row of the tril, bounded by [0, col]
47
+ auto m_last_row = std::max<int64_t>(0, std::min<int64_t>(col, row + offset));
48
+ // number of rows, bounded by [0, row]
49
+ auto n_row_all = std::max<int64_t>(0, std::min<int64_t>(row, row + offset));
50
+ auto n_row_trapezoid = (m_last_row - m_first_row + 1);
51
+
52
+ // calculate # of elements in the top trapezoid
53
+ auto tril_size = (m_first_row + m_last_row) * n_row_trapezoid >> 1;
54
+
55
+ // calculate # of elements in the bottom rectangle if there is any
56
+ auto diff_row = n_row_all - n_row_trapezoid;
57
+ if (diff_row > 0) {
58
+ tril_size += diff_row * col;
59
+ }
60
+
61
+ return tril_size;
62
+ }
63
+
64
+ inline void check_args(
65
+ int64_t row, int64_t col, c10::optional<Layout> layout_opt) {
66
+ TORCH_CHECK(row >= 0, "row must be non-negative, got", row);
67
+ TORCH_CHECK(col >= 0, "col must be non-negative, got", col);
68
+ if (layout_opt.has_value()) {
69
+ TORCH_CHECK(
70
+ *layout_opt == at::kStrided,
71
+ "only support layout=torch.strided, got",
72
+ *layout_opt)
73
+ }
74
+ }
75
+
76
+ using at::check_size_nonnegative;
77
+
78
+ // assumes maximum value in created tensor is n-1 (e.g., torch.randperm(n))
79
+ inline void check_supported_max_int_with_precision(int64_t n, const Tensor& tensor) {
80
+ // match defined() to behavior of checks below
81
+ TORCH_CHECK(at::scalar_tensor(n>0?n-1:n, tensor.options()).defined(),
82
+ "n is too large for result tensor type: '", tensor.toString(), "'");
83
+
84
+ // Ensure sufficient precision for floating point representation.
85
+ switch (tensor.scalar_type()) {
86
+ case at::ScalarType::Half:
87
+ TORCH_CHECK(n <= (int64_t(1) << 11) + 1, "n cannot be greater than 2049 for Half type.");
88
+ break;
89
+ case at::ScalarType::Float:
90
+ TORCH_CHECK(n <= (int64_t(1) << 24) + 1, "n cannot be greater than 2^24+1 for Float type.");
91
+ break;
92
+ case at::ScalarType::Double: // Unlikely to happen, but doesn't hurt to check
93
+ TORCH_CHECK(n <= (int64_t(1) << 53) + 1, "n cannot be greater than 2^53+1 for Double type.");
94
+ break;
95
+ default:
96
+ break;
97
+ }
98
+ }
99
+
100
+ // Called by `empty*` functions when deterministic algorithms are enabled to
101
+ // fill the tensor with NaN if it is floating point or complex type, or fill
102
+ // with max value if it is integer type
103
+ inline Tensor& fill_empty_deterministic_(Tensor& tensor) {
104
+ if (tensor.is_floating_point() || tensor.is_complex()) {
105
+ AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
106
+ kBFloat16, kHalf, tensor.scalar_type(), "fill_empty_deterministic_", [&]() {
107
+ tensor.fill_(std::numeric_limits<scalar_t>::quiet_NaN());
108
+ });
109
+ } else {
110
+ AT_DISPATCH_INTEGRAL_TYPES_AND(
111
+ kBool, tensor.scalar_type(), "fill_empty_deterministic_", [&]() {
112
+ tensor.fill_(std::numeric_limits<scalar_t>::max());
113
+ });
114
+ }
115
+ return tensor;
116
+ }
117
+
118
+ // The ZeroTensor allocator ignores whatever allocation is requested and always
119
+ // gives you nullptr
120
+ struct ZeroTensorAllocator final : public at::Allocator {
121
+ ZeroTensorAllocator(at::Device device) : device_(device) {};
122
+ ~ZeroTensorAllocator() override = default;
123
+ static void deleter(void* const pointer) {
124
+ TORCH_INTERNAL_ASSERT(!pointer);
125
+ }
126
+ DataPtr allocate(const size_t /*nbytes*/) const override {
127
+ return {nullptr, nullptr, &deleter, device_};
128
+ }
129
+ DeleterFnPtr raw_deleter() const override {
130
+ return deleter;
131
+ }
132
+ at::Device device_;
133
+ };
134
+
135
+ using binary_fn = void (*)(TensorIterator&);
136
+
137
+ DECLARE_DISPATCH(binary_fn, complex_stub);
138
+ DECLARE_DISPATCH(binary_fn, polar_stub);
139
+
140
+ } // namespace at::native
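A quick arithmetic check of get_tril_size against Example A in the comment above (row = 3, col = 5, offset = 1, where the picture has 2 + 3 + 4 = 9 ones):

    m_first_row     = min(5, 1 + 1)         = 2
    m_last_row      = max(0, min(5, 3 + 1)) = 4
    n_row_all       = max(0, min(3, 3 + 1)) = 3
    n_row_trapezoid = 4 - 2 + 1             = 3
    tril_size       = (2 + 4) * 3 / 2       = 9   (diff_row = 0, so no rectangle term)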
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/TensorIteratorDynamicCasting.h ADDED
@@ -0,0 +1,53 @@
1
+ #pragma once
2
+
3
+ #include <complex>
4
+ #include <type_traits>
5
+ #include <c10/core/ScalarType.h>
6
+ #include <c10/util/C++17.h>
7
+ #include <ATen/detail/FunctionTraits.h>
8
+ #include <ATen/native/TensorIterator.h>
9
+
10
+
11
+ // This file includes utilties for dynamic_casting done by TensorIterator, see CUDALoops.cuh and Loops.h.
12
+
13
+ // dynamic_casting handles when the types expected by the iterator do not match the types of the arguments
14
+ // to the function that is being called.
15
+ // On CUDA, the cast is currently pushed down into the kernel (for performance reasons).
16
+ // On CPU, there is currently an internal assert that a dynamic_cast is not needed.
17
+
18
+ namespace at::native {
19
+
20
+ // `needs_dynamic_casting` compares the types expected by iterator
21
+ // (i.e. dtypes of the operands) with the actual type of the arguments
22
+ // (and returns) of func_t
23
+ template<typename func_t, int nargs=function_traits<func_t>::arity>
24
+ struct needs_dynamic_casting {
25
+ static bool check(TensorIteratorBase& iter) {
26
+ using traits = function_traits<func_t>;
27
+ using cpp_type = typename traits::template arg<nargs - 1>::type;
28
+ using cpp_map = c10::CppTypeToScalarType<cpp_type>;
29
+
30
+ if (iter.input_dtype(nargs-1) != cpp_map::value) {
31
+ return true;
32
+ }
33
+ return needs_dynamic_casting<func_t, nargs - 1>::check(iter);
34
+ }
35
+ };
36
+
37
+ template<typename func_t>
38
+ struct needs_dynamic_casting<func_t, 0> {
39
+ static bool check(TensorIteratorBase& iter) {
40
+ using traits = function_traits<func_t>;
41
+ using cpp_type = typename traits::result_type;
42
+
43
+ // we could assert output numbers are correct here, but checks
44
+ // (including arity) are currently pushed outside of this struct.
45
+ if constexpr (std::is_void_v<cpp_type>) {
46
+ return false;
47
+ } else {
48
+ return iter.dtype(0) != c10::CppTypeToScalarType<cpp_type>::value;
49
+ }
50
+ }
51
+ };
52
+
53
+ } //namespace at::native
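To see what the recursion does concretely: for a binary float functor, needs_dynamic_casting unrolls into one dtype comparison per input plus one for the result. A hypothetical illustration (the helper name is made up; the commented expansion is what the template recursion evaluates):

    #include <ATen/native/TensorIteratorDynamicCasting.h>

    bool float_add_needs_cast(at::TensorIteratorBase& iter) {
      auto op = [](float a, float b) -> float { return a + b; };
      // Unrolled, the check below is equivalent to:
      //   iter.input_dtype(1) != at::kFloat ||
      //   iter.input_dtype(0) != at::kFloat ||
      //   iter.dtype(0)       != at::kFloat
      return at::native::needs_dynamic_casting<decltype(op)>::check(iter);
    }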
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/TensorShape.h ADDED
@@ -0,0 +1,58 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <c10/util/irange.h>
4
+ #include <ATen/core/IListRef.h>
5
+
6
+ namespace at::native {
7
+
8
+ TORCH_API at::Tensor clone_preserve_strides(const at::Tensor& self);
9
+
10
+ inline bool cat_should_skip_tensor(const Tensor& t) {
11
+ return t.numel() == 0 && t.dim() == 1;
12
+ }
13
+
14
+ // Check to see if the shape of tensors is compatible
15
+ // for being concatenated along a given dimension.
16
+ inline void check_cat_shape_except_dim(const Tensor & first, const Tensor & second, int64_t dimension, int64_t index) {
17
+ int64_t first_dims = first.dim();
18
+ int64_t second_dims = second.dim();
19
+ TORCH_CHECK(first_dims == second_dims, "Tensors must have same number of dimensions: got ",
20
+ first_dims, " and ", second_dims);
21
+ for (const auto dim : c10::irange(first_dims)) {
22
+ if (dim == dimension) {
23
+ continue;
24
+ }
25
+ int64_t first_dim_size = first.sizes()[dim];
26
+ int64_t second_dim_size = second.sizes()[dim];
27
+ TORCH_CHECK(first_dim_size == second_dim_size, "Sizes of tensors must match except in dimension ",
28
+ dimension, ". Expected size ", static_cast<long long>(first_dim_size), " but got size ", static_cast<long long>(second_dim_size), " for tensor number ", index, " in the list.");
29
+ }
30
+ }
31
+
32
+ inline void check_cat_no_zero_dim(const MaterializedITensorListRef& tensors) {
33
+ int64_t i = 0;
34
+ for(const Tensor& t : tensors) {
35
+ TORCH_CHECK(t.dim() > 0,
36
+ "zero-dimensional tensor (at position ", i, ") cannot be concatenated");
37
+ i++;
38
+ }
39
+ }
40
+
41
+ inline int64_t get_num_splits(const Tensor& self, int64_t split_size, int64_t dim) {
42
+ TORCH_CHECK(self.dim() != 0, "split expects at least a 1-dimensional tensor");
43
+ TORCH_CHECK(split_size >= 0, "split expects split_size be non-negative, but got split_size=", split_size);
44
+ int64_t dim_size = self.size(dim);
45
+ TORCH_CHECK(split_size > 0 || dim_size == 0,
46
+ "split_size can only be 0 if dimension size is 0, "
47
+ "but got dimension size of ", dim_size);
48
+ // if split_size is 0 and dimension size is 0, there is 1 split.
49
+ int64_t num_splits = 1;
50
+ if (split_size != 0) {
51
+ // ensuring num_splits is at least 1 makes consistent the case where split_size > dim_size
52
+ // (returns a single split). We might want to error here, but keep it for BC.
53
+ num_splits = std::max<int64_t>((dim_size + split_size - 1) / split_size, 1);
54
+ }
55
+ return num_splits;
56
+ }
57
+
58
+ } // namespace at::native
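A worked example of the get_num_splits formula (purely illustrative): for a dimension of size 10 and split_size = 3,

    num_splits = max((10 + 3 - 1) / 3, 1) = max(4, 1) = 4    // chunks of sizes 3, 3, 3, 1

and split_size = 0 is only accepted when the dimension itself has size 0, in which case a single split is reported.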
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/UnfoldBackward.h ADDED
@@ -0,0 +1,112 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/TensorIterator.h>
5
+ #include <ATen/native/DispatchStub.h>
6
+ #include <ATen/native/NonEmptyUtils.h>
7
+
8
+ #ifndef AT_PER_OPERATOR_HEADERS
9
+ #include <ATen/Functions.h>
10
+ #else
11
+ #include <ATen/ops/arange.h>
12
+ #endif
13
+
14
+ namespace at::native {
15
+
16
+ using unfold_backward_fn = void (*)(
17
+ Tensor& grad_in,
18
+ const Tensor& grad,
19
+ int64_t dim,
20
+ int64_t size,
21
+ int64_t step
22
+ );
23
+
24
+ DECLARE_DISPATCH(unfold_backward_fn, unfold_backward_stub);
25
+
26
+ namespace {
27
+
28
+ // Note on naming: it is unconventional.
29
+ // grad_in does not mean that it is a gradient wrt the input,
30
+ // grad_in/grad_out is just an input/output of unfold_backward kernel.
31
+
32
+ static C10_UNUSED TensorIterator _make_unfold_backward_iter_over_grad_out(
33
+ Tensor& grad_out,
34
+ const Tensor& grad_in,
35
+ int64_t dim,
36
+ int64_t size,
37
+ int64_t step
38
+ ) {
39
+ dim = maybe_wrap_dim(dim, grad_out.dim());
40
+ // last dim stores the folds
41
+
42
+ auto grad_out_dim_size = ensure_nonempty_size(grad_out, dim);
43
+ auto grad_in_dim_size = ensure_nonempty_size(grad_in, dim);
44
+ // dictates the number of elements to iterate over
45
+ // in dimension `dim`
46
+ auto iter_dim_size = std::min(
47
+ grad_out_dim_size,
48
+ (grad_in_dim_size - 1) * step + size
49
+ );
50
+
51
+ /* prepare grad_out for TensorIterator { */
52
+ auto grad_out_strides = ensure_nonempty_vec(grad_out.strides().vec());
53
+ auto grad_out_sizes = ensure_nonempty_vec(grad_out.sizes().vec());
54
+ grad_out_sizes[dim] = iter_dim_size;
55
+ auto grad_out_restrided = grad_out.as_strided(
56
+ grad_out_sizes, grad_out_strides
57
+ );
58
+ /* } */
59
+
60
+ /* prepare grad_in for TensorIterator { */
61
+ auto grad_in_strides = ensure_nonempty_vec(grad_in.strides().vec());
62
+ auto grad_in_sizes = ensure_nonempty_vec(grad_in.sizes().vec());
63
+
64
+ // set strides for dim to 0
65
+ // and size to 1 because
66
+ // this dimension is indexed inside the kernel
67
+ grad_in_strides[dim] = 0;
68
+ grad_in_sizes[dim] = 1;
69
+
70
+ grad_in_strides.pop_back();
71
+ grad_in_sizes.pop_back();
72
+
73
+ auto grad_in_restrided = grad_in.squeeze(-1).as_strided(
74
+ grad_in_sizes, grad_in_strides
75
+ );
76
+ /* } */
77
+
78
+ // During the TensorIterator iteration we have to know
79
+ // i_dim in grad_out[i_1,...,i_dim,...i_n],
80
+ // idx_dim stores this information
81
+ /* prepare idx_dim for TensorIterator { */
82
+ auto idx_dim = at::arange(
83
+ 0, iter_dim_size, grad_in.options().dtype(at::kLong)
84
+ );
85
+
86
+ auto grad_out_dim = ensure_nonempty_dim(grad_out.dim());
87
+
88
+ auto idx_dim_strides = std::vector<int64_t>(grad_out_dim, 0);
89
+ auto idx_dim_sizes = std::vector<int64_t>(grad_out_dim, 1);
90
+
91
+ idx_dim_strides[dim] = 1;
92
+ idx_dim_sizes[dim] = iter_dim_size;
93
+
94
+ // the size of idx_dim will broadcast over the sizes determined by grad_out in the TensorIterator
95
+ auto idx_dim_restrided = idx_dim.as_strided(idx_dim_sizes, idx_dim_strides);
96
+ /* } */
97
+
98
+ auto iter = TensorIteratorConfig()
99
+ .set_check_mem_overlap(false)
100
+ .check_all_same_dtype(false)
101
+ .resize_outputs(false)
102
+ .add_owned_output(grad_out_restrided)
103
+ .add_owned_input(grad_in_restrided)
104
+ .add_owned_input(idx_dim_restrided)
105
+ .build();
106
+
107
+ return iter;
108
+ }
109
+
110
+ }
111
+
112
+ } // namespace at::native
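To make the iter_dim_size computation above concrete (an illustrative walk-through, not code from this header): suppose the original tensor had 8 elements along dim and was unfolded with size = 3 and step = 2, producing 3 windows, so grad_out has 8 elements along dim and grad_in has 3:

    grad_out_dim_size = 8
    grad_in_dim_size  = 3                        // windows start at 0, 2, 4
    iter_dim_size     = min(8, (3 - 1) * 2 + 3)  = min(8, 7) = 7

Element 7 of grad_out is covered by no window, so it receives no gradient and is excluded from the iteration.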
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/ChannelShuffleKernel.h ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+ #include <ATen/native/DispatchStub.h>
3
+ #include <cstdint>
4
+
5
+ namespace at {
6
+ class TensorBase;
7
+ }
8
+
9
+ namespace at { namespace native {
10
+
11
+ using channel_shuffle_fn = void(*)(TensorBase&, const TensorBase&, int64_t);
12
+ DECLARE_DISPATCH(channel_shuffle_fn, channel_shuffle_kernel);
13
+
14
+ }} // at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/DepthwiseConvKernel.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <c10/util/ArrayRef.h>
5
+
6
+ /*
7
+ Depthwise 3x3 Winograd convolution operator
8
+ */
9
+
10
+ namespace at {
11
+ class Tensor;
12
+
13
+ namespace native {
14
+
15
+ using convolution_depthwise3x3_winograd_fn =
16
+ Tensor (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, int64_t);
17
+
18
+ DECLARE_DISPATCH(convolution_depthwise3x3_winograd_fn, convolution_depthwise3x3_winograd_stub);
19
+
20
+ } // namespace native
21
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/GridSamplerKernel.h ADDED
@@ -0,0 +1,34 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+
5
+ #include <array>
6
+ #include <cstdint>
7
+
8
+ namespace at {
9
+ class TensorBase;
10
+ }
11
+
12
+ namespace at { namespace native {
13
+
14
+ using forward_2d_fn = void (*) (
15
+ const TensorBase &output,
16
+ const TensorBase &input,
17
+ const TensorBase &grid,
18
+ int64_t interpolation_mode,
19
+ int64_t padding_mode,
20
+ bool align_corners);
21
+ using backward_2d_fn = void (*) (
22
+ const TensorBase &grad_input,
23
+ const TensorBase &grad_grid,
24
+ const TensorBase &grad_output,
25
+ const TensorBase &input,
26
+ const TensorBase &grid,
27
+ int64_t interpolation_mode,
28
+ int64_t padding_mode,
29
+ bool align_corners,
30
+ std::array<bool, 2> output_mask);
31
+ DECLARE_DISPATCH(forward_2d_fn, grid_sampler_2d_cpu_kernel);
32
+ DECLARE_DISPATCH(backward_2d_fn, grid_sampler_2d_backward_cpu_kernel);
33
+
34
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/IndexKernelUtils.h ADDED
@@ -0,0 +1,88 @@
1
+ #pragma once
2
+ #include <ATen/native/TensorIterator.h>
3
+ #include <c10/util/irange.h>
4
+
5
+ namespace at {
6
+ namespace native {
7
+
8
+ namespace {
9
+ static bool is_constant_index(int ntensor, const int64_t* strides) {
10
+ AT_ASSERT(ntensor >= 3);
11
+ for (const auto arg : c10::irange(2, ntensor)) {
12
+ if (strides[arg] != 0) {
13
+ return false;
14
+ }
15
+ }
16
+ return true;
17
+ }
18
+
19
+
20
+ struct Indexer {
21
+ Indexer(int64_t num_indexers, char** indexers, const int64_t* indexer_strides,
22
+ IntArrayRef original_sizes, IntArrayRef original_strides)
23
+ : num_indexers(num_indexers)
24
+ , indexers(indexers)
25
+ , indexer_strides(indexer_strides)
26
+ , original_strides(original_strides.data())
27
+ , original_sizes(original_sizes.data()) {
28
+ AT_ASSERT(static_cast<int64_t>(original_strides.size()) == num_indexers);
29
+ AT_ASSERT(static_cast<int64_t>(original_sizes.size()) == num_indexers);
30
+ }
31
+
32
+ int64_t num_indexers;
33
+ char** indexers;
34
+ const int64_t* indexer_strides;
35
+ const int64_t* original_strides;
36
+ const int64_t* original_sizes;
37
+
38
+ int64_t get(int64_t idx) {
39
+ int64_t offset = 0;
40
+ for (const auto j : c10::irange(num_indexers)) {
41
+ int64_t value = *(int64_t*)&indexers[j][idx * indexer_strides[j]];
42
+ int64_t size = original_sizes[j];
43
+ TORCH_CHECK_INDEX(value >= -size && value < size,
44
+ "index ", value, " is out of bounds for dimension ", j, " with size ", size);
45
+ if (value < 0) {
46
+ value += size;
47
+ }
48
+ offset += value * original_strides[j];
49
+ }
50
+ return offset;
51
+ }
52
+ };
53
+ } // anonymous namespace
54
+
55
+ template <typename scalar_t, typename func_t>
56
+ void cpu_index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride,
57
+ const func_t& f, bool serial_execution=false)
58
+ {
59
+ int ntensor = iter.ntensors();
60
+ // When launching the parallel version of the index kernel, use a grain size smaller than internal::GRAIN_SIZE
61
+ // so that the available threads get a more balanced workload and better cache locality.
62
+ // The grain size here is chosen by the op benchmark to overcome the thread launch overhead
63
+ const int index_parallel_grain_size = 3000;
64
+ auto loop = [&](char** data, const int64_t* strides, int64_t n) {
65
+ auto indexer = Indexer(ntensor - 2, &data[2], &strides[2], index_size, index_stride);
66
+ char* dst = data[0];
67
+ char* src = data[1];
68
+ if (is_constant_index(ntensor, strides)) {
69
+ // specialization for when every element uses the same index
70
+ int64_t offset = indexer.get(0);
71
+ for (const auto i : c10::irange(n)) {
72
+ f(dst + strides[0] * i, src + strides[1] * i, offset);
73
+ }
74
+ } else {
75
+ for (const auto i : c10::irange(n)) {
76
+ int64_t offset = indexer.get(i);
77
+ f(dst + strides[0] * i, src + strides[1] * i, offset);
78
+ }
79
+ }
80
+ };
81
+ if (serial_execution) {
82
+ iter.serial_for_each(loop, {0, iter.numel()});
83
+ } else {
84
+ iter.for_each(loop, index_parallel_grain_size);
85
+ }
86
+ }
87
+ } // at
88
+ } // native
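The loop body passed to cpu_index_kernel receives raw byte pointers plus the byte offset already resolved by Indexer from the index tensors. A hedged sketch of a gather-style caller (the wrapper name is hypothetical; iter, index_size and index_stride are assumed to be set up by the advanced-indexing TensorIterator machinery):

    #include <ATen/native/cpu/IndexKernelUtils.h>

    template <typename scalar_t>
    void gather_kernel_sketch(at::TensorIteratorBase& iter,
                              at::IntArrayRef index_size,
                              at::IntArrayRef index_stride) {
      at::native::cpu_index_kernel<scalar_t>(iter, index_size, index_stride,
          [](char* dst, char* src, int64_t offset) {
            // `offset` already accounts for the index values and source strides.
            *reinterpret_cast<scalar_t*>(dst) =
                *reinterpret_cast<scalar_t*>(src + offset);
          });
    }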
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/Intrinsics.h ADDED
@@ -0,0 +1,33 @@
1
+ #pragma once
2
+
3
+ #if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
4
+ /* Clang-compatible compiler, targeting x86/x86-64 */
5
+ #include <x86intrin.h>
6
+ #elif defined(_MSC_VER)
7
+ /* Microsoft C/C++-compatible compiler */
8
+ #include <intrin.h>
9
+ #if _MSC_VER <= 1900
10
+ #define _mm256_extract_epi64(X, Y) (((uint64_t*)&X)[Y])
11
+ #endif
12
+ #elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
13
+ /* GCC-compatible compiler, targeting x86/x86-64 */
14
+ #include <x86intrin.h>
15
+ #elif defined(__GNUC__) && defined(__ARM_NEON__)
16
+ /* GCC-compatible compiler, targeting ARM with NEON */
17
+ #include <arm_neon.h>
18
+ #elif defined(__GNUC__) && defined(__IWMMXT__)
19
+ /* GCC-compatible compiler, targeting ARM with WMMX */
20
+ #include <mmintrin.h>
21
+ #elif (defined(__GNUC__) || defined(__xlC__)) && \
22
+ (defined(__VEC__) || defined(__ALTIVEC__))
23
+ /* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
24
+ #include <altivec.h>
25
+ /* We need to undef those tokens defined by <altivec.h> to avoid conflicts
26
+ with the C++ types. => Can still use __bool/__vector */
27
+ #undef bool
28
+ #undef vector
29
+ #undef pixel
30
+ #elif defined(__GNUC__) && defined(__SPE__)
31
+ /* GCC-compatible compiler, targeting PowerPC with SPE */
32
+ #include <spe.h>
33
+ #endif
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/LogAddExp.h ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/complex.h>
4
+ #include <ATen/NumericUtils.h>
5
+
6
+ namespace at { namespace native {
7
+ inline namespace CPU_CAPABILITY {
8
+
9
+ // custom min and max to be used in logcumsumexp for complex arguments
10
+ template <typename scalar_t>
11
+ std::pair<c10::complex<scalar_t>, c10::complex<scalar_t>> _logcumsumexp_minmax(c10::complex<scalar_t> x, c10::complex<scalar_t> y) {
12
+ if (at::_isnan(y)) { // either real is nan or imag is nan
13
+ return std::make_pair(y, y);
14
+ } else if (at::_isnan(x)) { // either real is nan or imag is nan
15
+ return std::make_pair(x, x);
16
+ } else {
17
+ return (x.real() < y.real()) ? std::make_pair(x, y) : std::make_pair(y, x);
18
+ }
19
+ }
20
+
21
+ template <typename scalar_t>
22
+ scalar_t _log_add_exp_helper(scalar_t x, scalar_t y) {
23
+ // Reference : https://www.tensorflow.org/api_docs/python/tf/math/cumulative_logsumexp
24
+ scalar_t min = at::_isnan(y) ? y : std::min(x, y); // std::min returns first arg if one of the args is nan
25
+ scalar_t max = at::_isnan(y) ? y : std::max(x, y); // std::max returns first arg if one of the args is nan
26
+ if (min != max || std::isfinite(min)) {
27
+ // nan will be propagated here
28
+ return std::log1p(std::exp(min - max)) + max;
29
+ } else {
30
+ // special case to correctly handle infinite cases
31
+ return x;
32
+ }
33
+ }
34
+
35
+ template <typename scalar_t>
36
+ c10::complex<scalar_t> _log_add_exp_helper(const c10::complex<scalar_t>& x, const c10::complex<scalar_t>& y) {
37
+ auto [min, max] = _logcumsumexp_minmax<scalar_t>(x, y);
38
+ auto min_real = std::real(min);
39
+ auto max_real = std::real(max);
40
+
41
+ if (at::_isnan(min)) { // either real is nan or imag is nan
42
+ // handling the "infectious" NaNs
43
+ return {std::numeric_limits<scalar_t>::quiet_NaN(), std::numeric_limits<scalar_t>::quiet_NaN()};
44
+ } else if (!std::isfinite(min_real) && (min_real == max_real)) {
45
+ if (min_real < 0) {
46
+ // handle the -inf case, the imaginary part here does not really matter as the exp(value)
47
+ // will be around 0.0 and the angle (i.e. the imaginary part) cannot be determined.
48
+ // It does not matter if we're taking the exp of this value
49
+ return min;
50
+ } else {
51
+ // handle the +inf case, we don't need the special precision for log1p for small values
52
+ // and to avoid producing nan in case of real(max) == real(min) == +inf
53
+ return std::log(std::exp(min) + std::exp(max));
54
+ }
55
+ } else {
56
+ return std::log1p(std::exp(min - max)) + max;
57
+ }
58
+ }
59
+
60
+ } // end namespace
61
+ }} //end at::native
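The real-valued branch above is the usual overflow-safe rewrite log(exp(x) + exp(y)) = max + log1p(exp(min - max)). A standalone sketch of just that trick for doubles (the header's version additionally propagates NaN and handles complex inputs; the function name here is made up):

    #include <algorithm>
    #include <cmath>

    double log_add_exp_sketch(double x, double y) {
      double mn = std::min(x, y);
      double mx = std::max(x, y);
      if (mn != mx || std::isfinite(mn)) {
        return std::log1p(std::exp(mn - mx)) + mx;  // exp argument is <= 0, never overflows
      }
      return x;  // both arguments are +inf or both are -inf
    }

    // log_add_exp_sketch(1000.0, 1000.5) ≈ 1000.974, whereas the naive
    // std::log(std::exp(1000.0) + std::exp(1000.5)) overflows to +inf.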
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/Loops.h ADDED
@@ -0,0 +1,394 @@
1
+ #pragma once
2
+
3
+ // This file provides two functions to help write elementwise kernels:
4
+ //
5
+ // cpu_kernel(TensorIterator iter, <lambda>)
6
+ // cpu_kernel_vec(TensorIterator iter, <lambda>, <vec_lambda>)
7
+ //
8
+ // Both functions may generate vectorized code. The cpu_kernel implementation
9
+ // relies on the compiler's auto-vectorization. The cpu_kernel_vec
10
+ // implementation uses x86 SIMD intrinsics when available. These functions
11
+ // are only intended to be used in the ATen/native/cpu subdirectory, since files
12
+ // in other directories are not compiled with AVX/AVX2 enabled. See README.md
13
+ // for more details.
14
+ //
15
+ // For example, to write a multiplication kernel for float:
16
+ //
17
+ // cpu_kernel(iter, [](float a, float b) { return a * b; });
18
+ //
19
+ // Or you may write:
20
+ //
21
+ // cpu_kernel_vec(iter,
22
+ // [](float a, float b) { return a * b; },
23
+ // [](Vectorized<float> a, Vectorized<float> b) { return a * b; });
24
+ //
25
+ // See BinaryOpsKernel.cpp for the complete implementation
26
+ //
27
+ //
28
+
29
+ #include <stdint.h>
30
+ #include <c10/util/C++17.h>
31
+ #include <c10/util/Load.h>
32
+ #include <c10/util/irange.h>
33
+ #include <ATen/detail/FunctionTraits.h>
34
+ #include <ATen/native/cpu/IsContiguous.h>
35
+ #include <ATen/native/TensorIterator.h>
36
+ #include <ATen/native/TensorIteratorDynamicCasting.h>
37
+ #include <ATen/cpu/vec/vec.h>
38
+
39
+ #include <utility>
40
+
41
+ namespace at { namespace native { inline namespace CPU_CAPABILITY {
42
+
43
+ using namespace vec;
44
+
45
+ template <typename traits, std::size_t... INDEX>
46
+ typename traits::ArgsTuple
47
+ dereference_impl(char* C10_RESTRICT data[], const int64_t* strides, int64_t i,
48
+ std::index_sequence<INDEX...>) {
49
+ return std::make_tuple(
50
+ c10::load<typename traits::template arg<INDEX>::type>(
51
+ data[INDEX] + i * strides[INDEX])...);
52
+ }
53
+
54
+ template <typename traits>
55
+ typename traits::ArgsTuple
56
+ dereference(char* C10_RESTRICT data[], const int64_t* strides, int64_t i) {
57
+ using Indices = std::make_index_sequence<traits::arity>;
58
+ return dereference_impl<traits>(data, strides, i, Indices{});
59
+ }
60
+
61
+ template <typename traits, std::size_t... INDEX>
62
+ typename traits::ArgsTuple
63
+ dereference_vec_impl(char* C10_RESTRICT data[],
64
+ const typename traits::result_type& opt_scalar,
65
+ size_t S,
66
+ int64_t i,
67
+ std::index_sequence<INDEX...>) {
68
+ using Vec = typename traits::result_type;
69
+ using scalar_t = typename Vec::value_type;
70
+ return std::make_tuple(
71
+ S == INDEX + 1 ?
72
+ opt_scalar :
73
+ Vec::loadu(data[INDEX] + i * sizeof(scalar_t))...);
74
+ }
75
+
76
+ template <typename traits>
77
+ typename traits::ArgsTuple
78
+ dereference_vec(char* C10_RESTRICT data[], const typename traits::result_type& opt_scalar, size_t S, int64_t i) {
79
+ using Indices = std::make_index_sequence<traits::arity>;
80
+ return dereference_vec_impl<traits>(data, opt_scalar, S, i, Indices{});
81
+ }
82
+
83
+ template <typename func_t,
84
+ typename std::enable_if<!std::is_void<typename function_traits<func_t>::result_type>::value>::type* = nullptr>
85
+ static inline void
86
+ execute_op(char* C10_RESTRICT data[], const int64_t* strides, int64_t i, int64_t n, func_t&& op) {
87
+ using traits = function_traits<func_t>;
88
+ using result_type = typename traits::result_type;
89
+ for (; i < n; i++) {
90
+ result_type* out_ptr = (result_type*)(data[0] + i * strides[0]);
91
+ *out_ptr = c10::guts::apply(std::forward<func_t>(op), dereference<traits>(
92
+ &data[1],
93
+ &strides[1],
94
+ i));
95
+ }
96
+ }
97
+
98
+ template <typename func_t,
99
+ typename std::enable_if<std::is_void<typename function_traits<func_t>::result_type>::value>::type* = nullptr>
100
+ static inline void
101
+ execute_op(char* C10_RESTRICT data[], const int64_t* strides, int64_t i, int64_t n, func_t&& op) {
102
+ using traits = function_traits<func_t>;
103
+ for (; i < n; i++) {
104
+ c10::guts::apply(std::forward<func_t>(op), dereference<traits>(
105
+ &data[0],
106
+ &strides[0],
107
+ i));
108
+ }
109
+ }
110
+
111
+ // Basic loop operation (one output, N inputs). May be auto-vectorized
112
+ // by the compiler. Supports inputs and outputs of different types.
113
+ template <typename func_t>
114
+ static inline void
115
+ basic_loop(char* C10_RESTRICT data[], const int64_t* strides_, int64_t i, int64_t n, func_t&& op) {
116
+ using traits = function_traits<func_t>;
117
+ constexpr int ntensors = traits::arity + 1;
118
+
119
+ // Copying strides to temporary array helps auto vectorization in older GCC
120
+ // versions.
121
+ int64_t strides[ntensors];
122
+ for (const auto arg : c10::irange(ntensors)) {
123
+ strides[arg] = strides_[arg];
124
+ }
125
+
126
+ execute_op(data, strides, i, n, std::forward<func_t>(op));
127
+ }
128
+
129
+ // the recursive variadic template for iterating over the returned tuple
130
+ template<class T, size_t N>
131
+ struct TupleOutput {
132
+ static void handle(char *C10_RESTRICT data[], const int64_t *strides, int64_t i,
133
+ const T &tuple) {
134
+ TupleOutput<T, N - 1>::handle(data, strides, i, tuple);
135
+
136
+ auto output = std::get<N - 1>(tuple);
137
+ using output_type = decltype(output);
138
+ output_type * out_ptr = (output_type *)(data[N - 1] + i * strides[N - 1]);
139
+ *out_ptr = output;
140
+ }
141
+ };
142
+
143
+ // Base case for the above recursive template
144
+ template<class T>
145
+ struct TupleOutput<T, 1> {
146
+ static void handle(char *C10_RESTRICT data[], const int64_t *strides, int64_t i,
147
+ const T &tuple) {
148
+ auto output = std::get<0>(tuple);
149
+ using output_type = decltype(output);
150
+ output_type* out_ptr = (output_type *)(data[0] + i * strides[0]);
151
+ *out_ptr = output;
152
+ }
153
+ };
154
+
155
+ template<class... Args>
156
+ void handle_tuple_outputs(char* C10_RESTRICT data[],
157
+ const int64_t* strides,
158
+ int64_t i,
159
+ const std::tuple<Args...> &tuple) {
160
+ TupleOutput<decltype(tuple), sizeof...(Args)>::handle(data, strides, i, tuple);
161
+ }
162
+
163
+ // Loop operation for `cpu_kernel_multiple_outputs`.
164
+ // 1. Use `c10::guts::apply` to make dynamic method invocation
165
+ // for the lambda passed in `cpu_kernel_multiple_outputs`.
166
+ // 2. Iterate over the members of the returned tuple, set the corresponding
167
+ // output tensor by the tuple member in `handle_tuple_outputs` function.
168
+ template <typename func_t>
169
+ static inline void
170
+ multiple_outputs_loop(char* C10_RESTRICT data[], const int64_t* strides_, int64_t i, int64_t n, func_t&& op) {
171
+ using traits = function_traits<func_t>;
172
+
173
+ using result_type = typename traits::result_type;
174
+ constexpr int num_outputs = std::tuple_size<result_type>::value;
175
+ constexpr int ntensors = traits::arity + num_outputs;
176
+
177
+ // Copying strides to temporary array helps auto vectorization in older GCC
178
+ // versions.
179
+ int64_t strides[ntensors];
180
+ for (const auto arg : c10::irange(ntensors)) {
181
+ strides[arg] = strides_[arg];
182
+ }
183
+
184
+ for (; i < n; i++) {
185
+ auto output = c10::guts::apply(op, dereference<traits>(
186
+ &data[num_outputs],
187
+ &strides[num_outputs],
188
+ i));
189
+ handle_tuple_outputs(data, strides, i, output);
190
+ }
191
+ }
192
+
193
+ // Explicitly vectorized loop implementation. All inputs and outputs must be
194
+ // the same type and contiguous with one exception: a single input may be
195
+ // a scalar (stride 0). Its position is indicated by the argument `S`. If `S`
196
+ // is 0, then there are no scalar inputs.
197
+ template <typename func_t, typename vec_func_t>
198
+ static inline void
199
+ vectorized_loop(char** C10_RESTRICT data_, int64_t n, int64_t S, func_t&& op, vec_func_t&& vop) {
200
+ using traits = function_traits<vec_func_t>;
201
+ using scalar_t = typename function_traits<func_t>::result_type;
202
+ using Vec = Vectorized<scalar_t>;
203
+ constexpr int ntensors = traits::arity + 1;
204
+
205
+ char* C10_RESTRICT data[ntensors];
206
+ for (const auto arg : c10::irange(ntensors)) {
207
+ data[arg] = data_[arg];
208
+ }
209
+
210
+ Vec opt_scalar = Vec(S > 0 ? *(scalar_t*)data[S] : scalar_t(0));
211
+ int64_t i = 0;
212
+ for (; i <= n - 2 * Vec::size(); i += 2 * Vec::size()) {
213
+ auto args1 = dereference_vec<traits>(&data[1], opt_scalar, S, i);
214
+ auto args2 = dereference_vec<traits>(&data[1], opt_scalar, S, i + Vec::size());
215
+ auto out1 = c10::guts::apply(std::forward<vec_func_t>(vop), std::move(args1));
216
+ auto out2 = c10::guts::apply(std::forward<vec_func_t>(vop), std::move(args2));
217
+ out1.store(data[0] + i * sizeof(scalar_t));
218
+ out2.store(data[0] + (i + Vec::size()) * sizeof(scalar_t));
219
+ }
220
+ if (i < n) {
221
+ int64_t strides[ntensors];
222
+ for (const auto arg : c10::irange(ntensors)) {
223
+ strides[arg] = (S > 0 && arg == S) ? 0 : sizeof(scalar_t);
224
+ }
225
+ basic_loop(data, strides, i, n, std::forward<func_t>(op));
226
+ }
227
+ }
228
+
229
+
230
+ template <typename traits, typename cb_t>
231
+ static inline void unroll_contiguous_scalar_checks(
232
+ const int64_t* /*strides*/,
233
+ std::index_sequence<>,
234
+ cb_t&& cb) {
235
+ cb(0);
236
+ }
237
+
238
+ template <typename traits, typename cb_t, size_t INDEX0, size_t ...INDEX>
239
+ static inline void unroll_contiguous_scalar_checks(
240
+ const int64_t* strides,
241
+ std::index_sequence<INDEX0, INDEX...>,
242
+ cb_t&& cb) {
243
+ if (is_contiguous_scalar<traits, INDEX0 + 1>(strides)) {
244
+ cb(INDEX0 + 1);
245
+ } else {
246
+ unroll_contiguous_scalar_checks<traits>(strides, std::index_sequence<INDEX...>{}, std::forward<cb_t>(cb));
247
+ }
248
+ }
249
+
250
+ template <typename op_t, typename vop_t>
251
+ struct VectorizedLoop2d {
252
+ op_t op;
253
+ vop_t vop;
254
+
255
+ using traits = function_traits<op_t>;
256
+ static constexpr int ntensors = traits::arity + 1;
257
+ using data_t = std::array<char*, ntensors>;
258
+
259
+ VectorizedLoop2d(const op_t &op, vop_t vop):
260
+ op(op), vop(std::move(vop)) {}
261
+
262
+ static void advance(data_t &data, const int64_t *outer_strides) {
263
+ for (const auto arg : c10::irange(data.size())) {
264
+ data[arg] += outer_strides[arg];
265
+ }
266
+ }
267
+
268
+ void operator()(char** base, const int64_t *strides, int64_t size0, int64_t size1) {
269
+ data_t data;
270
+ std::copy_n(base, ntensors, data.data());
271
+ const int64_t *outer_strides = &strides[ntensors];
272
+
273
+ if (is_contiguous<traits>(strides)) {
274
+ for (const auto i C10_UNUSED : c10::irange(size1)) {
275
+ vectorized_loop(data.data(), size0, 0, op, vop);
276
+ advance(data, outer_strides);
277
+ }
278
+ } else {
279
+ using Indices = std::make_index_sequence<traits::arity>;
280
+ unroll_contiguous_scalar_checks<traits>(strides, Indices{}, [&](size_t idx) {
281
+ if (idx) {
282
+ for (const auto i C10_UNUSED : c10::irange(size1)) {
283
+ vectorized_loop(data.data(), size0, idx, op, vop);
284
+ advance(data, outer_strides);
285
+ }
286
+ } else {
287
+ for (const auto i C10_UNUSED : c10::irange(size1)) {
288
+ basic_loop(data.data(), strides, 0, size0, op);
289
+ advance(data, outer_strides);
290
+ }
291
+ }
292
+ });
293
+ }
294
+ }
295
+ };
296
+
297
+ template <typename op_t, typename vop_t>
298
+ VectorizedLoop2d<op_t, vop_t> make_vectorized_loop2d(
299
+ const op_t &op, const vop_t &vop) {
300
+ return VectorizedLoop2d<op_t, vop_t>(op, vop);
301
+ }
302
+
303
+ template <typename func_t>
304
+ void cpu_kernel(TensorIteratorBase& iter, func_t&& op, int64_t grain_size = at::internal::GRAIN_SIZE) {
305
+ using traits = function_traits<func_t>;
306
+ // this could be extended to work with void return types
307
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
308
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
309
+ // dynamic casting not currently supported on CPU
310
+ TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
311
+
312
+ iter.for_each([&](char** data, const int64_t* strides, int64_t n) {
313
+ // basic loop can handle 1d slices with arbitrary strides, and 1d slices is all that
314
+ // iter.for_each is ever sending to the loop lambda
315
+ basic_loop(data, strides, 0, n, std::forward<func_t>(op));
316
+ }, grain_size);
317
+ iter.cast_outputs();
318
+ }
319
+
320
+ // This function helps write elementwise kernels that require multiple outputs.
321
+ // It follows a structure similar to that of cpu_kernel.
322
+ // Instead of `basic_loop` function, a new `multiple_outputs_loop` function is
323
+ // used to handle multiple return values.
324
+ // For now `needs_dynamic_casting` check is not added as the passed lambda (`func_t`)
325
+ // of `multiple_outputs_loop` returns `std::tuple` instead of `scalar_t`.
326
+ // The `gpu_kernel_multiple_outputs` is also implemented without this check,
327
+ // We could extend `needs_dynamic_casting` to support both `std::tuple` and
328
+ // `thrust::tuple` in the future.
329
+ template <typename func_t>
330
+ void cpu_kernel_multiple_outputs(TensorIteratorBase& iter, func_t&& op, int64_t grain_size = at::internal::GRAIN_SIZE) {
331
+ using traits = function_traits<func_t>;
332
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
333
+
334
+ iter.for_each([&](char** data, const int64_t* strides, int64_t n) {
335
+ multiple_outputs_loop(data, strides, 0, n, std::forward<func_t>(op));
336
+ }, grain_size);
337
+ iter.cast_outputs();
338
+ }
339
+
340
+ template <bool check_dynamic_cast=true, typename func_t, typename vec_func_t>
341
+ void cpu_kernel_vec(TensorIteratorBase& iter, func_t&& op, vec_func_t&& vop, int64_t grain_size = at::internal::GRAIN_SIZE) {
342
+ using traits = function_traits<func_t>;
343
+ // this could be extended to work with void return types
344
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
345
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
346
+ // dynamic casting not currently supported on CPU, but some kernels (like Fill)
347
+ // explicitly dynamic_cast, so we give the opt-out of checking.
348
+ if constexpr (check_dynamic_cast) {
349
+ TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
350
+ }
351
+
352
+ iter.for_each(make_vectorized_loop2d(op, vop), grain_size);
353
+ iter.cast_outputs();
354
+ }
355
+
356
+ template <typename func_t>
357
+ void cpu_serial_kernel(TensorIteratorBase& iter, func_t&& op, const Range& range) {
358
+ using traits = function_traits<func_t>;
359
+ constexpr bool result_void = std::is_void<typename traits::result_type>::value;
360
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity &&
361
+ ((result_void && iter.noutputs() == 0) || (!result_void && iter.noutputs() == 1)));
362
+ // dynamic casting not currently supported on CPU
363
+ TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
364
+
365
+ iter.serial_for_each([&](char** data, const int64_t* strides, int64_t n) {
366
+ basic_loop(data, strides, 0, n, std::forward<func_t>(op));
367
+ }, range);
368
+ iter.cast_outputs();
369
+ }
370
+
371
+ template <typename func_t>
372
+ void cpu_serial_kernel(TensorIteratorBase& iter, func_t&& op) {
373
+ cpu_serial_kernel(iter, op, {0, iter.numel()});
374
+ }
375
+
376
+ template <typename func_t, typename vec_func_t>
377
+ void cpu_serial_kernel_vec(TensorIteratorBase& iter, func_t&& op, vec_func_t&& vop, const Range& range) {
378
+ using traits = function_traits<func_t>;
379
+ // this could be extended to work with void return types
380
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
381
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
382
+ // dynamic casting not currently supported on CPU
383
+ TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
384
+
385
+ iter.serial_for_each(make_vectorized_loop2d(op, vop), range);
386
+ iter.cast_outputs();
387
+ }
388
+
389
+ template <typename func_t, typename vec_func_t>
390
+ void cpu_serial_kernel_vec(TensorIteratorBase& iter, func_t&& op, vec_func_t&& vop) {
391
+ cpu_serial_kernel_vec(iter, op, vop, {0, iter.numel()});
392
+ }
393
+
394
+ }}} // namespace at::native::<anonymous>
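Tying the pieces together, a typical kernel under ATen/native/cpu dispatches on dtype and hands cpu_kernel_vec a scalar lambda plus its Vectorized counterpart, in the spirit of the header comment above. A hedged sketch of that shape (the kernel name is made up; real binary kernels live in BinaryOpsKernel.cpp):

    #include <ATen/Dispatch.h>
    #include <ATen/native/TensorIterator.h>
    #include <ATen/native/cpu/Loops.h>

    // Must live under ATen/native/cpu so the vectorized path is compiled with
    // the proper SIMD flags.
    void mul_kernel_sketch(at::TensorIteratorBase& iter) {
      AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "mul_kernel_sketch", [&]() {
        at::native::cpu_kernel_vec(
            iter,
            [](scalar_t a, scalar_t b) -> scalar_t { return a * b; },
            [](at::vec::Vectorized<scalar_t> a, at::vec::Vectorized<scalar_t> b) {
              return a * b;
            });
      });
    }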
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/MaxUnpoolKernel.h ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+ #include <ATen/native/DispatchStub.h>
3
+
4
+ namespace at {
5
+ class Tensor;
6
+
7
+ namespace native {
8
+
9
+ using max_unpooling_fn = void(*)(Tensor&, const Tensor&, const Tensor&);
10
+
11
+ DECLARE_DISPATCH(max_unpooling_fn, max_unpool2d_kernel);
12
+ DECLARE_DISPATCH(max_unpooling_fn, max_unpool3d_kernel);
13
+
14
+ }} // at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/PixelShuffleKernel.h ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+ #include <ATen/native/DispatchStub.h>
3
+
4
+ namespace at {
5
+ class TensorBase;
6
+ }
7
+
8
+ namespace at { namespace native {
9
+
10
+ using pixel_shuffle_fn = void(*)(TensorBase&, const TensorBase&, int64_t);
11
+ DECLARE_DISPATCH(pixel_shuffle_fn, pixel_shuffle_kernel);
12
+ DECLARE_DISPATCH(pixel_shuffle_fn, pixel_unshuffle_kernel);
13
+
14
+ }} // at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/Reduce.h ADDED
@@ -0,0 +1,313 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/cpu/Loops.h>
4
+ #include <ATen/Parallel.h>
5
+ #include <c10/util/TypeList.h>
6
+ #include <c10/core/Scalar.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ #include <sstream>
10
+
11
+ namespace at { namespace native { inline namespace CPU_CAPABILITY {
12
+
13
+ using namespace vec;
14
+
15
+ #define VEC_LOOP_HEADER(func_t, data) \
16
+ using scalar_t = typename function_traits<func_t>::result_type; \
17
+ using Vec = Vectorized<scalar_t>; \
18
+ char* out_ptr = data[0]; \
19
+ (void) out_ptr;
20
+
21
+ // reduction that is contiguous over the input in dim 0
22
+ template <typename traits>
23
+ static inline bool is_contiguous_reduction(const int64_t* strides) {
24
+ return strides[0] == 0 &&
25
+ strides[1] == sizeof(typename traits::arg2_t);
26
+ }
27
+
28
+ // reduction that is contiguous over the input in dim 1
29
+ template <typename traits>
30
+ static inline bool is_outer_reduction(const int64_t* strides) {
31
+ return strides[0] == 0 &&
32
+ strides[2] == sizeof(typename traits::result_type) &&
33
+ strides[3] == sizeof(typename traits::arg2_t);
34
+ }
35
+
36
+ template <typename func_t, typename vec_func_t>
37
+ static inline void vectorized_reduction(char** data, int64_t n, int64_t stride,
38
+ func_t op, vec_func_t vop, bool reduce) {
39
+ VEC_LOOP_HEADER(func_t, data)
40
+ const char* in1_ptr = data[1];
41
+ Vec acc[4];
42
+ for (const auto j : c10::irange(4)) {
43
+ acc[j] = Vec::loadu(in1_ptr + j * Vec::size() * sizeof(scalar_t));
44
+ }
45
+ for (const auto i : c10::irange(1, n)) {
46
+ const char* ptr = in1_ptr + stride * i;
47
+ acc[0] = vop(acc[0], Vec::loadu(ptr + (0 * Vec::size() * sizeof(scalar_t))));
48
+ acc[1] = vop(acc[1], Vec::loadu(ptr + (1 * Vec::size() * sizeof(scalar_t))));
49
+ acc[2] = vop(acc[2], Vec::loadu(ptr + (2 * Vec::size() * sizeof(scalar_t))));
50
+ acc[3] = vop(acc[3], Vec::loadu(ptr + (3 * Vec::size() * sizeof(scalar_t))));
51
+ }
52
+ if (reduce) {
53
+ scalar_t buffer[Vec::size()];
54
+ acc[0] = vop(vop(acc[0], acc[1]), vop(acc[2], acc[3]));
55
+ acc[0].store(buffer);
56
+ for (const auto j : c10::irange(1, Vec::size())) {
57
+ buffer[0] = op(buffer[0], buffer[j]);
58
+ }
59
+ auto dst = (scalar_t*)out_ptr;
60
+ *dst = op(*dst, buffer[0]);
61
+ } else {
62
+ for (const auto j : c10::irange(4)) {
63
+ auto dst = out_ptr + j * Vec::size() * sizeof(scalar_t);
64
+ acc[j] = vop(acc[j], Vec::loadu(dst));
65
+ acc[j].store(dst);
66
+ }
67
+ }
68
+ }
69
+
70
+ template <typename F>
71
+ static inline void UNARY_OUTER_LOOP(char* data[2], const int64_t strides[2], int64_t n, F f) {
72
+ for (const auto j C10_UNUSED : c10::irange(n)) {
73
+ f();
74
+ data[0] += strides[0];
75
+ data[1] += strides[1];
76
+ }
77
+ }
78
+
79
+ // computes the reduction out = op(out, in)
80
+ template <typename func_t, typename vec_func_t>
81
+ static inline void vectorized_inner_reduction(char** data, int64_t n, func_t op, vec_func_t vop) {
82
+ VEC_LOOP_HEADER(func_t, data)
83
+ int64_t vector_stride = 4 * Vec::size() * sizeof(scalar_t);
84
+ int64_t count = n / (4 * Vec::size());
85
+ if (count > 0) {
86
+ vectorized_reduction(data, count, vector_stride, op, vop, /*reduce=*/true);
87
+ }
88
+ char* ptrs[3] = { data[0], data[0], data[1] };
89
+ int64_t strides[] = { 0, 0, sizeof(scalar_t) };
90
+ basic_loop(ptrs, strides, count * 4 * Vec::size(), n, op);
91
+ }
92
+
93
+ // computes the reduction out = op(out, in)
94
+ template <typename func_t, typename vec_func_t>
95
+ static inline void vectorized_outer_reduction(char** data, int64_t inner_stride, int64_t size0, int64_t size1, func_t op, vec_func_t vop) {
96
+ VEC_LOOP_HEADER(func_t, data)
97
+
98
+ // reduce down each column of 4 * Vec::size() elements (128 or 256 bytes)
99
+ #if defined(CPU_CAPABILITY_AVX512)
100
+ int64_t outer_stride[2] = { 256, 256 };
101
+ #else
102
+ int64_t outer_stride[2] = { 128, 128 };
103
+ #endif
104
+ UNARY_OUTER_LOOP(data, outer_stride, size1 / (4 * Vec::size()), [&] {
105
+ vectorized_reduction(data, size0, inner_stride, op, vop, /*reduce=*/false);
106
+ });
107
+
108
+ // reduce down the remaining columns
109
+ int64_t step[] = { sizeof(scalar_t), sizeof(scalar_t) };
110
+ int64_t remaining = size1 % (4 * Vec::size());
111
+ UNARY_OUTER_LOOP(data, step, remaining, [&] {
112
+ char* ptrs[3] = { data[0], data[0], data[1] };
113
+ int64_t strides[] = { 0, 0, inner_stride };
114
+ basic_loop(ptrs, strides, 0, size0, op);
115
+ });
116
+ }
117
+
118
+ template<typename traits, typename res_t>
119
+ static void set_result(const int index, const res_t result, const TensorIteratorBase &iter, const int num_outputs) {
120
+ // static_assert(std::is_same<res_t, typename traits::arg2_t>::value, "data types must match");
121
+ if (index < num_outputs) {
122
+ char *out = (char *) iter.data_ptr(index);
123
+ *(res_t *) out = result;
124
+ }
125
+ }
126
+
127
+ template<typename traits, typename res_t>
128
+ static void set_results(const res_t result, const TensorIteratorBase &iter, const int num_outputs) {
129
+ AT_ASSERT(num_outputs == 1);
130
+ set_result<traits>(0, result, iter, num_outputs);
131
+ }
132
+
133
+ template<typename traits, std::size_t i = 0, typename... tuple_t>
134
+ static inline typename std::enable_if<i == sizeof...(tuple_t), std::size_t>::type
135
+ for_each_in_tuple(const std::tuple<tuple_t...>& /*t*/, const TensorIteratorBase& /*iter*/, const int /*num_outputs*/) {
136
+ return i;
137
+ }
138
+
139
+ template<typename traits, std::size_t i = 0, typename... tuple_t>
140
+ static inline typename std::enable_if<i < sizeof...(tuple_t), std::size_t>::type
141
+ for_each_in_tuple(const std::tuple<tuple_t...>& t, const TensorIteratorBase &iter, const int num_outputs) {
142
+ if (i < (size_t)num_outputs) {
143
+ set_result<traits>(i, std::get<i>(t), iter, num_outputs);
144
+ return for_each_in_tuple<traits, i + 1, tuple_t...>(t, iter, num_outputs);
145
+ }
146
+ return i;
147
+ }
148
+
149
+ template<typename traits, typename... res_t>
150
+ static void set_results(const std::tuple<res_t...>& result, const TensorIteratorBase &iter, const int num_outputs) {
151
+ AT_ASSERT(num_outputs >= 1);
152
+ std::size_t result_size = for_each_in_tuple<traits>(result, iter, num_outputs);
153
+ AT_ASSERT((size_t)num_outputs == result_size);
154
+ }
155
+
156
+ template <typename T, typename... Args>
157
+ struct all_same : guts::conjunction<
158
+ std::is_same<T, Args>...
159
+ > {};
160
+
161
+ // data_t is the input/output data type.
162
+ // acc_t is a type that contains all the necessary data
163
+ // to continue reducing.
164
+ // index_t is a one-dimensional index
165
+ //
166
+ // ops_t is such that &ops_t::reduce, &ops_t::combine, and &ops_t::project exist and satisfy
167
+ // the following.
168
+ // reduce: (acc_t, data_t, index_t) -> acc_t adds one data point to the accumulated value.
169
+ // combine: (acc_t, acc_t) -> acc_t combines two accumulated values into one.
170
+ // project: acc_t -> out_t finishes the reduction, getting the required output.
171
+ //
172
+ // Additionally, acc_t must be default-constructible:
173
+ // acc_t {} is an identity for combine,
174
+ // and project(acc_t {}) is the value of the operation on zero elements.
175
+ //
176
+ // The point of `combine` is to support parallelization -
177
+ // the idea is to do one sequence of `reduce` calls per thread of execution,
178
+ // and then to combine them at the end with `combine`.
179
+ //
180
+ // If there is more than one output element,
181
+ // our parallelization strategy is to use one thread for each of them,
182
+ // which means that `combine` will never be called.
183
+ //
184
+ // If, on the other hand, there is only one, then we split the input into
185
+ // several pieces, reduce each separately, and then combine them.
186
+
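For illustration, here is a minimal, hypothetical sketch (not part of this header) of an ops_t satisfying the contract above for a plain sum; translate_idx is included because binary_kernel_reduce below calls it, and it is a no-op for index-independent reductions:

// Hypothetical example only: a sum reduction satisfying the ops_t contract.
struct SumOpsSketch {
  using acc_t = double;  // accumulate in double even when data_t is float
  acc_t reduce(acc_t acc, float data, int64_t /*idx*/) const {
    return acc + data;                 // fold one data point into the accumulator
  }
  acc_t combine(acc_t a, acc_t b) const {
    return a + b;                      // merge two per-thread partial results
  }
  float project(acc_t acc) const {
    return static_cast<float>(acc);    // finish: convert back to the output type
  }
  acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) const {
    return acc;                        // a sum does not depend on element indices
  }
};
// acc_t{} == 0.0 is an identity for combine, and project(acc_t{}) is the sum of zero
// elements; it would be driven as binary_kernel_reduce(iter, SumOpsSketch{}, /*init=*/0.0).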
187
+ template <typename ops_t, typename init_t>
188
+ void binary_kernel_reduce(TensorIteratorBase& iter, ops_t ops, init_t init) {
189
+ using rf_t = decltype(&ops_t::reduce);
190
+ using cf_t = decltype(&ops_t::combine);
191
+ using pf_t = decltype(&ops_t::project);
192
+ using r_traits = binary_function_traits<rf_t>;
193
+ using c_traits = binary_function_traits<cf_t>;
194
+ using p_traits = unary_function_traits<pf_t>;
195
+ using acc_t = typename p_traits::arg1_t;
196
+ using data_t = typename r_traits::arg2_t;
197
+ static_assert(
198
+ all_same<
199
+ acc_t,
200
+ init_t,
201
+ typename r_traits::arg1_t,
202
+ typename r_traits::result_type,
203
+ typename c_traits::arg1_t,
204
+ typename c_traits::arg2_t,
205
+ typename c_traits::result_type>::value,
206
+ "all accumulate types must match");
207
+ static_assert(
208
+ std::is_default_constructible<acc_t>::value,
209
+ "the accumulate type must be default-constructible"
210
+ );
211
+ const int num_outputs = iter.noutputs();
212
+ iter.foreach_reduced_elt([&ops, &init, num_outputs](TensorIteratorBase &sub_iter) {
213
+ auto reduction_body = [&ops, &sub_iter, num_outputs](acc_t acc, int64_t begin, int64_t end) -> acc_t {
214
+ int ntensors = sub_iter.ntensors();
215
+ sub_iter.serial_for_each([&acc, &ops, num_outputs, ntensors, begin](char** data, const int64_t* strides, int64_t size) {
216
+ AT_ASSERT(ntensors - num_outputs == 1);
217
+ char *in = data[ntensors - 1];
218
+ int64_t stride = strides[ntensors - 1];
219
+ for (const auto i : c10::irange(size)) {
220
+ acc = ops.reduce(acc, c10::load<data_t>(in), begin + i);
221
+ in += stride;
222
+ }
223
+ }, {begin, end});
224
+ return ops.translate_idx(acc, sub_iter.view_offsets()[0]);
225
+ };
226
+ acc_t total_acc = init;
227
+ auto numel = sub_iter.numel();
228
+ if (numel < at::internal::GRAIN_SIZE || at::get_num_threads() == 1 ||
229
+ at::in_parallel_region()) {
230
+ total_acc = reduction_body(total_acc, 0, numel);
231
+ } else {
232
+ int max_threads = at::get_num_threads();
233
+ AT_ASSERT(max_threads > 0);
234
+ static_assert(
235
+ !std::is_same<acc_t, bool>::value,
236
+ "Concurrently modifying different references into std::vector<bool> is UB."
237
+ );
238
+ std::vector<acc_t> buffer((unsigned)max_threads, init);
239
+ at::parallel_for(0, numel, internal::GRAIN_SIZE,
240
+ [&](int64_t begin, int64_t end) {
241
+ auto& acc = buffer[at::get_thread_num()];
242
+ acc = reduction_body(acc, begin, end);
243
+ }
244
+ );
245
+ for (const auto i : c10::irange(max_threads)) {
246
+ total_acc = ops.combine(total_acc, buffer[i]);
247
+ }
248
+ }
249
+ set_results<r_traits>(ops.project(total_acc), sub_iter, num_outputs);
250
+ });
251
+ }
252
+
253
+ template <typename func_t, typename vec_func_t>
254
+ void binary_kernel_reduce_vec(TensorIteratorBase& iter, func_t op, vec_func_t vop, double ident = 0) {
255
+ using traits = binary_function_traits<func_t>;
256
+ static_assert(
257
+ all_same<
258
+ typename traits::result_type,
259
+ typename traits::arg1_t,
260
+ typename traits::arg2_t>::value,
261
+ "all types must match");
262
+
263
+ iter.output_base().fill_(ident);
264
+ iter.parallel_reduce([&](char** data, const int64_t* strides, int64_t size0, int64_t size1) {
265
+ int64_t outer_strides[] = { strides[2], strides[3] };
266
+ if (is_contiguous_reduction<traits>(strides)) {
267
+ // input is contiguous in dim 0, output is reduced in dim 0
268
+ UNARY_OUTER_LOOP(data, outer_strides, size1, [&] {
269
+ vectorized_inner_reduction(data, size0, op, vop);
270
+ });
271
+ } else if (is_outer_reduction<traits>(strides)) {
272
+ // input and output are contiguous in dim 1
273
+ int64_t inner_stride = strides[1]; // stride of input in dim 0
274
+ vectorized_outer_reduction(data, inner_stride, size0, size1, op, vop);
275
+ } else {
276
+ UNARY_OUTER_LOOP(data, outer_strides, size1, [&] {
277
+ char* ptrs[3] = { data[0], data[0], data[1] };
278
+ int64_t inner_strides[3] = { strides[0], strides[0], strides[1] };
279
+ basic_loop(ptrs, inner_strides, 0, size0, op);
280
+ });
281
+ }
282
+ });
283
+ }
284
+
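As a usage illustration (a hedged sketch, not code from this file), a float sum kernel could drive binary_kernel_reduce_vec with a scalar lambda and a matching vectorized lambda; the usual dtype dispatch around it is omitted:

// Hypothetical sketch: the scalar `op` and vectorized `vop` must agree element-wise.
binary_kernel_reduce_vec(
    iter,
    [](float a, float b) -> float { return a + b; },            // scalar op
    [](vec::Vectorized<float> a, vec::Vectorized<float> b) {    // vectorized op
      return a + b;
    },
    /*ident=*/0.0);                                             // written to the output first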
285
+ // when the reduction is over the innermost dimension (dim 0 in TensorIterator)
286
+ // and the input is contiguous along that dimension, `binary_kernel_reduce_lastdim`
287
+ // can be used.
288
+ static inline bool is_reduce_lastdim(TensorIteratorBase& iter) {
289
+ return iter.num_reduce_dims() == 1 && iter.is_dim_reduced(0)
290
+ && iter.ninputs() == 1 && iter.strides(1)[0] == iter.element_size(1);
291
+ }
292
+
293
+ template <typename reduce_func_t>
294
+ void binary_kernel_reduce_lastdim(TensorIteratorBase& iter, reduce_func_t reduce_op) {
295
+ auto shape = iter.shape();
296
+ int64_t dim_size = shape[0];
297
+ int64_t grain_size = std::max((int64_t) 1, at::internal::GRAIN_SIZE / dim_size);
298
+ TensorIterator sub_iter(iter);
299
+ // create a sub-iterator to parallelize over all non-reduced dims
300
+ sub_iter.narrow(0, 0, 1);
301
+ auto loop = [&](char** data, const int64_t* strides, int64_t size) {
302
+ char* out = data[0];
303
+ char* in = data[1];
304
+ for (int64_t i = 0; i < size; ++i) {
305
+ reduce_op(out, in, dim_size);
306
+ out += strides[0];
307
+ in += strides[1];
308
+ }
309
+ };
310
+ sub_iter.for_each(loop, grain_size);
311
+ }
312
+
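For concreteness, a hedged sketch (assuming a float tensor; not code from this file) of a reduce_op that binary_kernel_reduce_lastdim could drive; out points at one output element and in at the start of the contiguous reduced dimension:

// Hypothetical sketch: sum over the contiguous last (reduced) dimension.
auto sum_lastdim = [](char* out, char* in, int64_t dim_size) {
  const float* in_f = reinterpret_cast<const float*>(in);
  float acc = 0.f;
  for (int64_t k = 0; k < dim_size; ++k) {
    acc += in_f[k];                      // contiguous input, so plain indexing suffices
  }
  *reinterpret_cast<float*>(out) = acc;  // write the single reduced value
};
// A kernel would check is_reduce_lastdim(iter) before calling
// binary_kernel_reduce_lastdim(iter, sum_lastdim).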
313
+ }}} // namespace at::native::<anonymous>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/SampledAddmmKernel.h ADDED
@@ -0,0 +1,12 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+
6
+ namespace at { namespace native {
7
+
8
+ using sampled_addmm_sparse_csr_fn = void(*)(const Tensor&, const Tensor&, const Scalar&, const Scalar&, const Tensor&);
9
+
10
+ DECLARE_DISPATCH(sampled_addmm_sparse_csr_fn, sampled_addmm_sparse_csr_stub);
11
+
12
+ }} // at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/SerialStackImpl.h ADDED
@@ -0,0 +1,144 @@
1
+ // Copyright 2004-present Facebook. All Rights Reserved.
2
+ #pragma once
3
+
4
+ #include <ATen/core/Tensor.h>
5
+
6
+ #include <ATen/MemoryOverlap.h>
7
+ #include <ATen/Parallel.h>
8
+ #include <ATen/TensorIterator.h>
9
+ #include <ATen/cpu/vec/functional.h>
10
+ #include <ATen/cpu/vec/vec.h>
11
+ #include <c10/util/irange.h>
12
+
13
+ namespace at { namespace native { namespace detail {
14
+
15
+ struct InputMeta {
16
+ void* data_ptr;
17
+ int64_t inner_size;
18
+
19
+ InputMeta(const Tensor& t, int64_t dim, int64_t inner)
20
+ : data_ptr(t.data_ptr()), inner_size(t.sizes()[dim] * inner) {}
21
+ };
22
+
23
+ // This kernel is used by two TensorList types:
24
+ // 1. stack_serial_kernel uses at::ArrayRef<Tensor>
25
+ // 2. Static runtime calls this kernel directly (csrc/jit/runtime/static/ops.cpp) with
26
+ // ProcessedNodeInputWrapper.
27
+ // When making changes, make sure that they are compatible with both types!
28
+ template <typename scalar_t, typename TensorListType>
29
+ void stack_serial_kernel_impl(Tensor& result, TensorListType tensors, int64_t dim) {
30
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
31
+ dim >= 0 && dim <= result.dim(),
32
+ "dim out of range in stack_serial_kernel_impl");
33
+ int64_t outer =
34
+ result.numel() / (result.sizes()[dim] * result.strides()[dim]);
35
+ scalar_t* result_data = result.data_ptr<scalar_t>();
36
+ int64_t ninputs = tensors.size();
37
+ std::vector<InputMeta> inputs;
38
+ inputs.reserve(ninputs);
39
+ for (const auto& tensor : tensors) {
40
+ inputs.emplace_back(tensor, dim, tensor.strides()[dim]);
41
+ }
42
+
43
+ using Vec = vec::Vectorized<scalar_t>;
44
+ scalar_t* result_ptr = result_data;
45
+ for (const auto i : c10::irange(outer)) {
46
+ for (const auto j : c10::irange(ninputs)) {
47
+ int64_t local_inner = inputs[j].inner_size;
48
+ scalar_t* input_ptr = (scalar_t*)(inputs[j].data_ptr) + i * local_inner;
49
+
50
+ if (local_inner < Vec::size()) {
51
+ for (const auto k : c10::irange(local_inner)) {
52
+ result_ptr[k] = input_ptr[k];
53
+ }
54
+ } else {
55
+ vec::map(
56
+ [](Vec x) { return x; }, result_ptr, input_ptr, local_inner);
57
+ }
58
+ result_ptr += local_inner;
59
+ }
60
+ }
61
+ }
62
+
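A hedged usage sketch (shapes and dtype are assumptions, not taken from this header): stacking two contiguous float tensors of shape {2, 3} along dim 0 into a preallocated {2, 2, 3} result:

// Hypothetical sketch: serial stack of two float tensors along dim 0.
std::vector<at::Tensor> ts = {at::rand({2, 3}), at::rand({2, 3})};
at::Tensor out = at::empty({2, 2, 3});   // {ninputs, 2, 3} for dim == 0
at::native::detail::stack_serial_kernel_impl<float, at::ArrayRef<at::Tensor>>(
    out, at::ArrayRef<at::Tensor>(ts), /*dim=*/0);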
63
+ // Checks to see whether native stack can be invoked under these conditions:
64
+ // - result and input tensors are contiguous
65
+ // - only one thread is used
66
+ // - no type promotion has to occur
67
+ // - tensors dtype is Double or Float
68
+ template <typename TensorListType>
69
+ bool can_use_native_serial_stack_impl(Tensor& result, TensorListType tensors, int64_t dim) {
70
+ TORCH_CHECK(tensors.size() > 0, "expected a non-empty list of Tensors");
71
+ const Tensor& first_tensor = tensors[0];
72
+ // stack dimension should be in range [0,firstTensor.dim())
73
+ // dim == firstTensor.dim() is a valid input, but it is handled by the default code path
74
+ // that uses unsqueeze
75
+ if (dim >= first_tensor.dim()) return false;
76
+ // Native stack doesn't apply if any tensor would be skipped (an empty 1-dim tensor).
77
+ if (first_tensor.numel() == 0 && first_tensor.dim() == 1) return false;
78
+ // there should be no type promotion
79
+ if (result.dtype() != first_tensor.dtype()) return false;
80
+
81
+ auto first_tensor_mem_format = first_tensor.suggest_memory_format();
82
+ ScalarType dtype = first_tensor.scalar_type();
83
+
84
+ if (!result.is_contiguous(first_tensor_mem_format)) {
85
+ return false;
86
+ }
87
+
88
+ // fast path only works for Double and Float
89
+ if (dtype != ScalarType::Double && dtype != ScalarType::Float) {
90
+ return false;
91
+ }
92
+
93
+ // check remainder of inputs
94
+ auto const &first_tensor_shape = first_tensor.sizes();
95
+ for (const auto i : c10::irange(1, tensors.size())) {
96
+ auto const &tensor = tensors[i];
97
+ TORCH_CHECK(tensors[i].sizes() == first_tensor.sizes(),
98
+ "stack expects each tensor to be equal size, but got ", first_tensor_shape,
99
+ " at entry 0 and ", tensor.sizes(), " at entry ", i);
100
+
101
+ // every tensor must be contiguous
102
+ // tensor sizes and strides must be the same
103
+ // there should be no type promotion
104
+ if (!tensor.is_contiguous(first_tensor_mem_format) ||
105
+ tensor.strides() != first_tensor.strides() ||
106
+ tensor.dtype() != dtype) {
107
+ return false;
108
+ }
109
+ }
110
+
111
+ // fast native stack should only be used when it is not worth using multiple threads
112
+ // or there is only one thread. Note that we aren't checking result.numel() here because
113
+ // it may not have been resized and we want to defer that cost till later.
114
+ int64_t numel_in_stack = first_tensor.numel() * tensors.size();
115
+ return numel_in_stack < at::internal::GRAIN_SIZE || at::get_num_threads() == 1;
116
+ }
117
+
118
+ template <typename TensorListType, bool should_skip_overlap_check>
119
+ struct CanUseNativeSerialStack;
120
+
121
+ template <typename TensorListType>
122
+ struct CanUseNativeSerialStack<TensorListType, false> {
123
+ static bool call(Tensor& result, TensorListType tensors, int64_t dim) {
124
+ // Inputs cannot alias the output tensor
125
+ for (const auto i : c10::irange(tensors.size())) {
126
+ auto lap = at::get_overlap_status(result, tensors[i]);
127
+ TORCH_CHECK(lap != at::MemOverlapStatus::Partial &&
128
+ lap != at::MemOverlapStatus::Full, 0,
129
+ "unsupported operation: the input tensors cannot refer to any of the "
130
+ "output memory locations. Found overlap in input tensor ", i);
131
+ }
132
+
133
+ return can_use_native_serial_stack_impl(result, tensors, dim);
134
+ }
135
+ };
136
+
137
+ template <typename TensorListType>
138
+ struct CanUseNativeSerialStack<TensorListType, true> {
139
+ static bool call(Tensor& result, TensorListType tensors, int64_t dim) {
140
+ return can_use_native_serial_stack_impl(result, tensors, dim);
141
+ }
142
+ };
143
+
144
+ }}} // namespace at::native::detail
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/UpSampleKernelAVXAntialias.h ADDED
@@ -0,0 +1,1376 @@
1
+ /*
2
+ The Python Imaging Library (PIL) is
3
+
4
+ Copyright © 1997-2011 by Secret Labs AB
5
+ Copyright © 1995-2011 by Fredrik Lundh
6
+
7
+ Pillow is the friendly PIL fork. It is
8
+
9
+ Copyright © 2010-2022 by Alex Clark and contributors
10
+
11
+ Like PIL, Pillow is licensed under the open source HPND License
12
+ */
13
+
14
+ // This code is heavily inspired from PILLOW-SIMD's implementation:
15
+ // https://github.com/uploadcare/pillow-simd/blob/simd/master/src/libImaging/Resample.c
16
+
17
+ #pragma once
18
+ #ifdef CPU_CAPABILITY_AVX2
19
+ // TODO: This file only supports AVX2. We could split the AVX kernels into
20
+ // smaller logical blocks in order to port them into the Vec.h logic. This would
21
+ // allow to support other vectorization architectures and perhaps also support
22
+ // the non-vectorized fallback (we'd need to make sure it's not slower than the
23
+ // current fallback).
24
+
25
+ #include <ATen/core/Tensor.h>
26
+ #include <ATen/cpu/vec/intrinsics.h>
27
+ #include <c10/util/irange.h>
28
+
29
+ #ifndef AT_PER_OPERATOR_HEADERS
30
+ #include <ATen/Functions.h>
31
+ #else
32
+ #include <ATen/ops/empty.h>
33
+ #endif
34
+
35
+
36
+ namespace {
37
+
38
+ static inline __m128i mm_cvtsi32_si128(const uint8_t* C10_RESTRICT ptr, bool i32_aligned) {
39
+ int32_t v;
40
+ if (i32_aligned) {
41
+ v = *(const int32_t*)ptr;
42
+ } else {
43
+ std::memcpy(&v, ptr, 4);
44
+ }
45
+ return _mm_cvtsi32_si128(v);
46
+ }
47
+
48
+ static inline __m128i mm_cvtepu8_epi32(const uint8_t* C10_RESTRICT ptr, bool i32_aligned) {
49
+ return _mm_cvtepu8_epi32(mm_cvtsi32_si128(ptr, i32_aligned));
50
+ }
51
+
52
+ static inline void _write_endline_rgb_as_uint32(
53
+ uint8_t* C10_RESTRICT output,
54
+ uint32_t data
55
+ ) {
56
+ // data is (R G B X), output is (X1 X2 X3 | R1 B1 G1 R2 ...)
57
+ // Here we explicitly set X as R1
58
+ uint8_t* data_ptr = reinterpret_cast<uint8_t*>(&data);
59
+ data_ptr[3] = output[3];
60
+ std::memcpy(output, data_ptr, 4);
61
+ }
62
+
63
+ at::Tensor unpack_rgb(const at::Tensor& packed_tensor) {
64
+ // Convert a "packed" tensor (typically RGBRGBRGB if channels_last) into
65
+ // RGBARGBARGBA format where A is hard-coded to 0. Each pixel is encoded
66
+ // into as 32 bits. This generalizes to num_channels <= 4 and also works for
67
+ // non-channels_last tensors.
68
+
69
+ const uint8_t* packed = (const uint8_t*)packed_tensor.data_ptr<uint8_t>();
70
+ auto num_pixels = packed_tensor.size(1) * packed_tensor.size(2);
71
+ auto num_channels = packed_tensor.size(0);
72
+
73
+ constexpr int rgba_size = 4;
74
+ auto unpacked_tensor = at::empty({rgba_size, packed_tensor.size(1), packed_tensor.size(2)}, at::CPU(at::kByte));
75
+ uint8_t* unpacked = (uint8_t*) unpacked_tensor.data_ptr<uint8_t>();
76
+
77
+ auto stride_i = packed_tensor.stride(2);
78
+ auto stride_j = packed_tensor.stride(0);
79
+
80
+ for (const auto i : c10::irange(num_pixels)) {
81
+ for (const auto j : c10::irange(rgba_size)) {
82
+ unpacked[rgba_size * i + j] = (j < num_channels) ? packed[stride_i * i + stride_j * j] : 0;
83
+ }
84
+ }
85
+ return unpacked_tensor;
86
+ }
87
+
88
+ void pack_rgb(
89
+ const at::Tensor& unpacked_tensor, // IN
90
+ const at::Tensor& packed_tensor // OUT
91
+ ) {
92
+ // Convert from unpacked channels last 3-channels or 4-channels tensor into original data layout.
93
+
94
+ uint8_t* unpacked = (uint8_t*)unpacked_tensor.data_ptr<uint8_t>();
95
+ uint8_t* packed = (uint8_t*)packed_tensor.data_ptr<uint8_t>();
96
+ auto num_pixels = packed_tensor.size(1) * packed_tensor.size(2);
97
+ auto num_channels = packed_tensor.size(0);
98
+
99
+ auto unpacked_increment = unpacked_tensor.size(0);
100
+ auto packed_increment = packed_tensor.stride(2);
101
+ auto packed_stride = packed_tensor.stride(0);
102
+
103
+ TORCH_INTERNAL_ASSERT(unpacked_increment == 3 || unpacked_increment == 4);
104
+
105
+ for (const auto i C10_UNUSED : c10::irange(num_pixels)) {
106
+ for (const auto j : c10::irange(num_channels)) {
107
+ packed[j * packed_stride] = unpacked[j];
108
+ }
109
+ unpacked += unpacked_increment;
110
+ packed += packed_increment;
111
+ }
112
+ }
113
+
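To make the layout conversion above concrete, a small hedged illustration (values are made up) of what unpack_rgb produces for a contiguous channels_last 1x2 RGB image; each pixel becomes one 4-byte RGBA slot with A forced to 0, and pack_rgb performs the inverse:

// Hypothetical illustration of the unpacked layout for num_channels = 3 and 2 pixels.
uint8_t packed[6]   = {10, 20, 30, 40, 50, 60};  // R0 G0 B0 R1 G1 B1 (channels_last)
uint8_t unpacked[8] = {0};
for (int i = 0; i < 2; ++i) {                    // pixels
  for (int j = 0; j < 4; ++j) {                  // RGBA slots
    unpacked[4 * i + j] = (j < 3) ? packed[3 * i + j] : 0;
  }
}
// unpacked now holds {10, 20, 30, 0, 40, 50, 60, 0}.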
114
+ void ImagingResampleHorizontalConvolution8u4x(
115
+ uint8_t* C10_RESTRICT lineOut0,
116
+ uint8_t* C10_RESTRICT lineOut1,
117
+ uint8_t* C10_RESTRICT lineOut2,
118
+ uint8_t* C10_RESTRICT lineOut3,
119
+ int64_t out_xsize,
120
+ const uint8_t* C10_RESTRICT lineIn0,
121
+ const uint8_t* C10_RESTRICT lineIn1,
122
+ const uint8_t* C10_RESTRICT lineIn2,
123
+ const uint8_t* C10_RESTRICT lineIn3,
124
+ int64_t in_xsize,
125
+ const int64_t* idx_ptr_xmin,
126
+ const int64_t* idx_ptr_size,
127
+ const int16_t* kk,
128
+ int kmax,
129
+ unsigned int coefs_precision,
130
+ int64_t num_channels,
131
+ bool is_last_line);
132
+
133
+ void ImagingResampleHorizontalConvolution8u(
134
+ uint8_t* C10_RESTRICT lineOut,
135
+ int64_t out_xsize,
136
+ const uint8_t* C10_RESTRICT lineIn,
137
+ int64_t in_xsize,
138
+ const int64_t* idx_ptr_xmin,
139
+ const int64_t* idx_ptr_size,
140
+ const int16_t* kk,
141
+ int kmax,
142
+ unsigned int coefs_precision,
143
+ int64_t num_channels,
144
+ bool is_last_line);
145
+
146
+ void ImagingResampleVerticalConvolution8u(
147
+ uint8_t* C10_RESTRICT lineOut,
148
+ const uint8_t* C10_RESTRICT lineIn,
149
+ int64_t xsize,
150
+ int64_t ids_min,
151
+ int64_t ids_size,
152
+ const int16_t* k,
153
+ unsigned int coefs_precision,
154
+ int64_t num_channels);
155
+
156
+ template<int num_channels>
157
+ void ImagingResampleHorizontal(
158
+ const at::Tensor & unpacked_output,
159
+ const at::Tensor & unpacked_input,
160
+ int ksize,
161
+ const std::vector<at::Tensor>& horiz_indices_weights,
162
+ unsigned int horiz_weights_precision) {
163
+
164
+ // Interpolation horizontal pass: we compute x-axis (image width) interpolation outputs.
165
+
166
+ // Input data is stored as
167
+ // input = [r[0], g[0], b[0], a[0], r[1], g[1], b[1], a[1], r[2], g[2], b[2], a[2], ...]
168
+ // Weights are float values computed for each output pixel and rescaled to uint16:
169
+ // weights[i] = [w[i, 0], w[i, 1], ..., w[i, K-1]]
170
+ // We want to compute the output as following:
171
+ // output = [oR[0], oG[0], oB[0], oA[0], oR[1], oG[1], oB[1], oA[1], ...]
172
+ // where
173
+ // oR[yoffset + i] = r[yoffset + xmin[i]] * w[i, 0] + ... + r[yoffset + xmin[i] + K-1] * w[i, K-1]
174
+ // oG[yoffset + i] = g[yoffset + xmin[i]] * w[i, 0] + ... + g[yoffset + xmin[i] + K-1] * w[i, K-1]
175
+ // oB[yoffset + i] = b[yoffset + xmin[i]] * w[i, 0] + ... + b[yoffset + xmin[i] + K-1] * w[i, K-1]
176
+ //
177
+
178
+ // TODO: we may want to merge that into the fallback code (currently called
179
+ // basic_loop_aa_horizontal<uint8_t>)
180
+ // Although this may not be needed if / when we port all this code to use
181
+ // Vec.h since this would potentially give us another fall-back implem
182
+
183
+ const int16_t* kk = (int16_t*)(horiz_indices_weights[3].data_ptr<double>());
184
+
185
+ auto xout = unpacked_output.size(2);
186
+ auto yout = unpacked_output.size(1);
187
+ auto xin = unpacked_input.size(2);
188
+ TORCH_INTERNAL_ASSERT(num_channels == unpacked_input.size(0));
189
+
190
+ const int64_t* idx_ptr_xmin = horiz_indices_weights[0].data_ptr<int64_t>();
191
+ const int64_t* idx_ptr_size = horiz_indices_weights[1].data_ptr<int64_t>();
192
+
193
+ uint8_t* unpacked_output_p = unpacked_output.data_ptr<uint8_t>();
194
+ const uint8_t* unpacked_input_p = unpacked_input.data_ptr<uint8_t>();
195
+
196
+ int64_t yy = 0;
197
+ auto xout_stride = xout * num_channels;
198
+ auto xin_stride = xin * num_channels;
199
+ for (; yy < yout - 3; yy += 4) {
200
+ ImagingResampleHorizontalConvolution8u4x(
201
+ unpacked_output_p + yy * xout_stride,
202
+ unpacked_output_p + (yy + 1) * xout_stride,
203
+ unpacked_output_p + (yy + 2) * xout_stride,
204
+ unpacked_output_p + (yy + 3) * xout_stride,
205
+ xout,
206
+ unpacked_input_p + yy * xin_stride,
207
+ unpacked_input_p + (yy + 1) * xin_stride,
208
+ unpacked_input_p + (yy + 2) * xin_stride,
209
+ unpacked_input_p + (yy + 3) * xin_stride,
210
+ xin,
211
+ idx_ptr_xmin,
212
+ idx_ptr_size,
213
+ kk,
214
+ ksize,
215
+ horiz_weights_precision,
216
+ num_channels,
217
+ yy + 3 == yout - 1);
218
+ }
219
+ for (; yy < yout; yy++) {
220
+ ImagingResampleHorizontalConvolution8u(
221
+ unpacked_output_p + yy * xout_stride,
222
+ xout,
223
+ unpacked_input_p + yy * xin_stride,
224
+ xin,
225
+ idx_ptr_xmin,
226
+ idx_ptr_size,
227
+ kk,
228
+ ksize,
229
+ horiz_weights_precision,
230
+ num_channels,
231
+ yy == yout - 1);
232
+ }
233
+ }
234
+
235
+ void ImagingResampleVertical(
236
+ const at::Tensor & unpacked_output,
237
+ const at::Tensor & unpacked_input,
238
+ int ksize,
239
+ const std::vector<at::Tensor>& vert_indices_weights,
240
+ unsigned int vert_weights_precision) {
241
+
242
+ // Interpolation vertical pass: we compute y-axis interpolation outputs.
243
+ // Input data is stored as
244
+ // input = [r[0], g[0], b[0], a[0], r[1], g[1], b[1], a[1], r[2], g[2], b[2], a[2], ...]
245
+ // Weights are float values computed for each output pixel and rescaled to uint16:
246
+ // weights[i] = [w[i, 0], w[i, 1], ..., w[i, K-1]]
247
+ // We want to compute the output as following:
248
+ // output = [oR[0], oG[0], oB[0], oA[0], oR[1], oG[1], oB[1], oA[1], ...]
249
+ // where
250
+ // oR[xoffset + i] = r[xoffset + ymin[i]] * w[i, 0] + ... + r[xoffset + ymin[i] + (K-1) * xsize] * w[i, K-1]
251
+ // oG[xoffset + i] = g[xoffset + ymin[i]] * w[i, 0] + ... + g[xoffset + ymin[i] + (K-1) * xsize] * w[i, K-1]
252
+ // oB[xoffset + i] = b[xoffset + ymin[i]] * w[i, 0] + ... + b[xoffset + ymin[i] + (K-1) * xsize] * w[i, K-1]
253
+
254
+ // TODO: we may want to merge that into the fallback code (currently called
255
+ // basic_loop_aa_vertical<uint8_t>)
256
+ // Although this may not be needed if / when we port all this code to use
257
+ // Vec.h since this would potentially give us another fall-back implem
258
+ const int16_t* kk = (int16_t*)(vert_indices_weights[3].data_ptr<double>());
259
+
260
+ const int64_t* idx_ptr_xmin = vert_indices_weights[0].data_ptr<int64_t>();
261
+ const int64_t* idx_ptr_size = vert_indices_weights[1].data_ptr<int64_t>();
262
+
263
+ uint8_t* unpacked_output_p = unpacked_output.data_ptr<uint8_t>();
264
+ const uint8_t* unpacked_input_p = unpacked_input.data_ptr<uint8_t>();
265
+
266
+ auto xout = unpacked_output.size(2);
267
+ auto yout = unpacked_output.size(1);
268
+ const auto num_channels = unpacked_input.size(0);
269
+ TORCH_INTERNAL_ASSERT(num_channels == unpacked_output.size(0));
270
+
271
+ auto xout_stride = xout * num_channels;
272
+ for (const auto yy : c10::irange(yout)) {
273
+ const auto* k = &kk[yy * ksize];
274
+ auto ids_min = idx_ptr_xmin[yy];
275
+ auto ids_size = idx_ptr_size[yy];
276
+ ImagingResampleVerticalConvolution8u(
277
+ unpacked_output_p + yy * xout_stride,
278
+ unpacked_input_p,
279
+ xout,
280
+ ids_min,
281
+ ids_size,
282
+ k,
283
+ vert_weights_precision,
284
+ num_channels);
285
+ }
286
+ }
287
+
288
+ // This is the only public entry point in this file. It supports bilinear or bicubic
289
+ // mode for uint8 dtype when C <= 4, with or without antialias. The
290
+ // implem is based on PIL-SIMD.
291
+ // Its equivalent implementation (fallback) for when AVX isn't supported or when
292
+ // C > 4 is separable_upsample_generic_Nd_kernel_impl() There are a bunch of
293
+ // future improvement that can be done: look for the TODOs in this file.
294
+ // For details on how the weights are computed and how the multiplications are
295
+ // run on int (instead of float weights), see
296
+ // [ Weights computation for uint8_t and multiplication trick ]
297
+ // For details on how the AVX kernels are implemented, see
298
+ // https://gist.github.com/NicolasHug/47c97d731f05eaad5694c173849b86f5
299
+ // See also [ Support for antialias=False as a subcase of antilias=True ] to
300
+ // learn more about how the antialias=False case is computed. The same holds
301
+ // here: all these kernels are general enough to handle an arbitrary number of
302
+ // weights, but when aa=False they could be optimized further.
303
+ template <typename scale_type, class F>
304
+ void upsample_avx_bilinear_bicubic_uint8(
305
+ const at::Tensor& input_,
306
+ const at::Tensor& output,
307
+ bool align_corners,
308
+ const scale_type& scales,
309
+ bool antialias) {
310
+ auto batch_size = input_.size(0);
311
+ auto num_channels = input_.size(1);
312
+ auto xin = input_.size(3);
313
+ auto yin = input_.size(2);
314
+ auto xout = output.size(3);
315
+ auto yout = output.size(2);
316
+
317
+ if (xin == xout && yin == yout) {
318
+ output.copy_(input_);
319
+ return;
320
+ }
321
+
322
+ at::Tensor input = input_;
323
+ if (!(input.is_contiguous() || input.is_contiguous(at::MemoryFormat::ChannelsLast))) {
324
+ // If input is not contiguous with memory format channels first or channels last,
325
+ // we explicitly convert the input to contiguous channels last memory format.
326
+ // This simplifies the rest of the code and let us assume that the format is only contiguous channels first or channels last,
327
+ // Most tensors going through this `if` block won't need to go through unpacking, but those having C < 3 may
328
+ // have to (this means 2 copies are made). We could avoid the extra copy by handling non-contiguous input
329
+ // directly within unpack_rgb() and pack_rgb(), but initial attempts showed that this is fairly complex.
330
+ input = input.contiguous(at::MemoryFormat::ChannelsLast);
331
+ }
332
+
333
+ auto need_horizontal = xout != xin;
334
+ auto need_vertical = yout != yin;
335
+
336
+ int ksize_horiz, ksize_vert;
337
+ std::vector<at::Tensor> horiz_indices_weights, vert_indices_weights;
338
+ unsigned int horiz_weights_precision, vert_weights_precision;
339
+
340
+ bool skip_unpacking = (num_channels == 3 || num_channels == 4) && input.is_contiguous(at::MemoryFormat::ChannelsLast);
341
+ bool skip_packing = (num_channels == 3 || num_channels == 4) && output.is_contiguous(at::MemoryFormat::ChannelsLast);
342
+
343
+ if (need_horizontal) {
344
+ int interp_dim = 3;
345
+ auto stride = (skip_unpacking) ? num_channels : 4;
346
+ std::tie(horiz_indices_weights, ksize_horiz, horiz_weights_precision) =
347
+ F::compute_indices_int16_weights_aa(
348
+ /*input_size=*/xin,
349
+ /*output_size=*/xout,
350
+ /*stride=*/stride,
351
+ /*ndims=*/4,
352
+ /*reshape_dim=*/interp_dim,
353
+ /*align_corners=*/align_corners,
354
+ /*opt_scale=*/scales[interp_dim - 2],
355
+ /*antialias=*/antialias,
356
+ /*align_i32=*/true);
357
+ }
358
+
359
+ if (need_vertical) {
360
+ int interp_dim = 2;
361
+ auto stride = (skip_unpacking) ? num_channels * xout : 4 * xout;
362
+ std::tie(vert_indices_weights, ksize_vert, vert_weights_precision) =
363
+ F::compute_indices_int16_weights_aa(
364
+ /*input_size=*/yin,
365
+ /*output_size=*/yout,
366
+ /*stride=*/stride,
367
+ /*ndims=*/4,
368
+ /*reshape_dim=*/interp_dim,
369
+ /*align_corners=*/align_corners,
370
+ /*opt_scale=*/scales[interp_dim - 2],
371
+ /*antialias=*/antialias,
372
+ /*align_i32=*/true);
373
+ }
374
+
375
+ at::Tensor buffer_horiz, buffer_vert;
376
+ // Minor optimization: we can avoid allocating an extra buffer if we're performing
377
+ // horizontal-only or vertical-only interpolation, and if the tensor doesn't
378
+ // need repacking
379
+ if (need_horizontal && (need_vertical || !skip_packing)) {
380
+ auto c = (skip_unpacking) ? num_channels : 4;
381
+ buffer_horiz = at::empty({c, yin, xout}, input.options());
382
+ }
383
+ if (need_vertical && !skip_packing) {
384
+ auto c = (skip_unpacking) ? num_channels : 4;
385
+ buffer_vert = at::empty({c, yout, xout}, input.options());
386
+ }
387
+
388
+ for (const auto i : c10::irange(batch_size)) {
389
+
390
+ at::Tensor unpacked_input = (skip_unpacking) ? input[i] : unpack_rgb(input[i]);
391
+ at::Tensor unpacked_output;
392
+
393
+ if (need_horizontal) {
394
+ at::Tensor unpacked_output_temp = (need_vertical || !skip_packing) ? buffer_horiz : output[i];
395
+
396
+ if (skip_unpacking && num_channels == 3) {
397
+ ImagingResampleHorizontal<3>(
398
+ unpacked_output_temp,
399
+ unpacked_input,
400
+ ksize_horiz,
401
+ horiz_indices_weights,
402
+ horiz_weights_precision);
403
+ } else {
404
+ ImagingResampleHorizontal<4>(
405
+ unpacked_output_temp,
406
+ unpacked_input,
407
+ ksize_horiz,
408
+ horiz_indices_weights,
409
+ horiz_weights_precision);
410
+ }
411
+ unpacked_output = unpacked_input = unpacked_output_temp;
412
+ }
413
+ if (need_vertical) {
414
+ unpacked_output = (skip_packing) ? output[i] : buffer_vert;
415
+
416
+ ImagingResampleVertical(
417
+ unpacked_output,
418
+ unpacked_input,
419
+ ksize_vert,
420
+ vert_indices_weights,
421
+ vert_weights_precision
422
+ );
423
+ }
424
+
425
+ TORCH_INTERNAL_ASSERT(unpacked_output.defined());
426
+
427
+ if (!skip_packing) {
428
+ pack_rgb(unpacked_output, output[i]);
429
+ }
430
+ }
431
+ }
432
+
433
+ void ImagingResampleHorizontalConvolution8u4x(
434
+ uint8_t* C10_RESTRICT lineOut0,
435
+ uint8_t* C10_RESTRICT lineOut1,
436
+ uint8_t* C10_RESTRICT lineOut2,
437
+ uint8_t* C10_RESTRICT lineOut3,
438
+ int64_t out_xsize,
439
+ const uint8_t* C10_RESTRICT lineIn0,
440
+ const uint8_t* C10_RESTRICT lineIn1,
441
+ const uint8_t* C10_RESTRICT lineIn2,
442
+ const uint8_t* C10_RESTRICT lineIn3,
443
+ int64_t in_xsize,
444
+ const int64_t* idx_ptr_xmin,
445
+ const int64_t* idx_ptr_size,
446
+ const int16_t* kk,
447
+ int kmax,
448
+ unsigned int coefs_precision,
449
+ int64_t num_channels,
450
+ bool is_last_line) {
451
+
452
+ // Interpolation horizontal pass processing together 4 vertical lines.
453
+ // - Input data format is RGBA or RGB with R,G,B,A being uint8. In case of RGBA
454
+ // we can encode 4 values as a single uint32 value.
455
+ // - We split the size of weight vector for a given output index as a sum:
456
+ // ids_size = num_blocks_4 * 4 + num_blocks_2 * 2 + num_blocks_1.
457
+ // - We load and process 4 weights values in a loop ("block 4") then we process 2 weights values
458
+ // in another loop ("block 2") and finally we process 1 weights value in the final loop ("block 1").
459
+
460
+ // Define shuffling masks (low/high) for num_channels 4 and 3
461
+ // Mask low casts lower half of each lane to epi16 and reorder RGBARGBA -> RRGGBBAA:
462
+ // [r1 g1 b1 a1 r2 g2 b2 a2 ... | R1 G1 B1 A1 R2 G2 B2 A2 ... ] ->
463
+ // [r1 0 r2 0 g1 0 g2 0 b1 0 b2 0 a1 0 a2 0 | R1 0 R2 0 G1 0 G2 0 B1 0 B2 0 A1 0 A2 0]
464
+ // Mask high casts upper half of each lane to epi16 and reorder RGBARGBA -> RRGGBBAA::
465
+ // [ ... r3 g3 b3 a3 r4 g4 b4 a4 | ... R3 G3 B3 A3 R4 G4 B4 A4 ] ->
466
+ // [r3 0 r4 0 g3 0 g4 0 b3 0 b4 0 a3 0 a4 0 | R3 0 R4 0 G3 0 G4 0 B3 0 B4 0 A3 0 A4 0]
467
+
468
+ const auto mask_low_c4 = _mm256_set_epi8(
469
+ -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0,
470
+ -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0);
471
+ const auto mask_high_c4 = _mm256_set_epi8(
472
+ -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8,
473
+ -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8);
474
+ const auto mask_low_c3 = _mm256_set_epi8(
475
+ -1, -1, -1, -1, -1, 5, -1, 2, -1, 4, -1, 1, -1, 3, -1, 0,
476
+ -1, -1, -1, -1, -1, 5, -1, 2, -1, 4, -1, 1, -1, 3, -1, 0);
477
+ const auto mask_high_c3 = _mm256_set_epi8(
478
+ -1, -1, -1, -1, -1, 11, -1, 8, -1, 10, -1, 7, -1, 9, -1, 6,
479
+ -1, -1, -1, -1, -1, 11, -1, 8, -1, 10, -1, 7, -1, 9, -1, 6);
480
+
481
+ const auto mask_low = (num_channels == 3) ? mask_low_c3 : mask_low_c4;
482
+ const auto mask_high = (num_channels == 3) ? mask_high_c3 : mask_high_c4;
483
+
484
+ const auto stride = num_channels * sizeof(uint8_t);
485
+
486
+ TORCH_INTERNAL_ASSERT(stride == 3 || stride == 4);
487
+
488
+ // out_xsize = output width, out_x = output x index
489
+ // ids_min is the input offset index corresponding to out_x
490
+ // ids_size is the interpolation size for out_x
491
+
492
+ // Let's precompute ids_size limits for block 4 and block 2.
493
+ //
494
+ // In block 4 (4 means we process 4 weight values together), we read input data
495
+ // with _mm_loadu_si128, i.e. 16 bytes, per one line:
496
+ // lineIn0 + stride * (i + ids_min) + 16 <= lineIn0 + stride * (ids_size + ids_min)
497
+ // --> i <= ids_size - 16.0 / stride
498
+ // Strict boundary:
499
+ // --> i < ids_size + 1 - int(ceil(16.0 / stride)) = ids_size - b4_delta
500
+ // Soft boundary for reading inside the buffer except its boundaries:
501
+ // --> i < ids_size + 1 - int(16.0 / stride) = ids_size - b4_delta_soft
502
+ // RGBA: b4_delta = b4_delta_soft = 3
503
+ // RGB : b4_delta = 5
504
+ // RGB : b4_delta_soft = 4
505
+ const auto b4_delta = (stride == 4) ? 3 : ((is_last_line) ? 5 : 4);
506
+
507
+ // In block 2 (2 means we process 2 weights values together), we read input data
508
+ // with _mm_loadl_epi64, i.e. 8 bytes, per one line:
509
+ // lineIn0 + stride * (i + ids_min) + 8 <= lineIn0 + stride * (ids_size + ids_min)
510
+ // --> i <= ids_size - 8.0 / stride
511
+ // Strict boundary:
512
+ // --> i < ids_size + 1 - int(ceil(8.0 / stride)) = ids_size - b2_delta
513
+ // Soft boundary for reading inside the buffer except its boundaries:
514
+ // --> i < ids_size + 1 - int(8.0 / stride) = ids_size - b2_delta_soft
515
+ // RGBA: b2_delta = b2_delta_soft = 1
516
+ // RGB : b2_delta = 2
517
+ // RGB : b2_delta_soft = 1
518
+ const auto b2_delta = (stride == 4) ? 1 : ((is_last_line) ? 2 : 1);
519
+
520
+ const auto max_out_x_strided = out_xsize * stride;
521
+ const auto max_in_x_strided = in_xsize * stride;
522
+
523
+ const auto zero = _mm256_setzero_si256();
524
+ const auto initial = _mm256_set1_epi32(1 << (coefs_precision - 1));
525
+
526
+ for (const auto out_x : c10::irange(out_xsize)) {
527
+ const auto ids_min = idx_ptr_xmin[out_x];
528
+ const auto ids_size = idx_ptr_size[out_x];
529
+ const auto * k = &kk[out_x * kmax];
530
+ int64_t i = 0;
531
+
532
+ auto sss0 = initial;
533
+ auto sss1 = initial;
534
+
535
+ const auto * lineIn0_min = lineIn0 + ids_min;
536
+ const auto * lineIn1_min = lineIn1 + ids_min;
537
+ const auto * lineIn2_min = lineIn2 + ids_min;
538
+ const auto * lineIn3_min = lineIn3 + ids_min;
539
+
540
+ // block 4
541
+ for (; i < ids_size - b4_delta; i += 4) {
542
+ // Load 4 values from weight vector
543
+ // mmk0 = [wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ...]
544
+ // mmk1 = [wl_2 wh_2 wl_3 wh_3 wl_2 wh_2 wl_3 wh_3 ...]
545
+ const auto mmk0 = _mm256_set1_epi32(*(int32_t*)&k[i]);
546
+ const auto mmk1 = _mm256_set1_epi32(*(int32_t*)&k[i + 2]);
547
+
548
+ // RGBA: Load 8 pixels (4 per line) from input lines 0 and 1:
549
+ // source = [
550
+ // r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3
551
+ // R0 G0 B0 A0 R1 G1 B1 A1 R2 G2 B2 A2 R3 G3 B3 A3
552
+ // ]
553
+ // RGB: Load 10 pixels (5 per line)
554
+ // source = [
555
+ // r0 g0 b0 r1 g1 b1 r2 g2 b2 r3 g3 b3 r4 g4 b4 r5
556
+ // R0 G0 B0 R1 G1 B1 R2 G2 B2 R3 G3 B3 R4 G4 B4 R5
557
+ // ]
558
+ auto source = _mm256_inserti128_si256(_mm256_castsi128_si256(
559
+ _mm_loadu_si128((__m128i *) (lineIn0_min + stride * i))),
560
+ _mm_loadu_si128((__m128i *) (lineIn1_min + stride * i)), 1);
561
+
562
+ // Apply mask_low:
563
+ // RGBA:
564
+ // [r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 a0 0 a1 0 | R0 0 R1 0 G0 0 G1 0 B0 0 B1 0 A0 0 A1 0]
565
+ // RGB:
566
+ // [r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 0 0 0 0 | R0 0 R1 0 G0 0 G1 0 B0 0 B1 0 0 0 0 0]
567
+ auto pix1 = _mm256_shuffle_epi8(source, mask_low);
568
+ // Compute output value as C += w0 * C0 + w1 * C1 for each channel in 32-bit precision
569
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix1, mmk0));
570
+
571
+ // Apply mask_high:
572
+ // RGBA:
573
+ // [r2 0 r3 0 g2 0 g3 0 b2 0 b3 0 a2 0 a3 0 | R2 0 R3 0 G2 0 G3 0 B2 0 B3 0 A2 0 A3 0]
574
+ // RGB:
575
+ // [r2 0 r3 0 g2 0 g3 0 b2 0 b3 0 0 0 0 0 | R2 0 R3 0 G2 0 G3 0 B2 0 B3 0 0 0 0 0]
576
+ auto pix2 = _mm256_shuffle_epi8(source, mask_high);
577
+ // Compute output value as C += w2 * C2 + w3 * C3 for each channel in 32-bit precision
578
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix2, mmk1));
579
+
580
+ // Same as above to next lines 2 and 3:
581
+ auto source2 = _mm256_inserti128_si256(_mm256_castsi128_si256(
582
+ _mm_loadu_si128((__m128i *) (lineIn2_min + stride * i))),
583
+ _mm_loadu_si128((__m128i *) (lineIn3_min + stride * i)), 1);
584
+ auto pix3 = _mm256_shuffle_epi8(source2, mask_low);
585
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix3, mmk0));
586
+ auto pix4 = _mm256_shuffle_epi8(source2, mask_high);
587
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix4, mmk1));
588
+ }
589
+
590
+ // block 2
591
+ for (; i < ids_size - b2_delta; i += 2) {
592
+ // Load 2 values from weight vector
593
+ // mmk = [wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ...]
594
+ const auto mmk = _mm256_set1_epi32(*(int32_t*)&k[i]);
595
+
596
+ // Load 4 pixels (2 per line) from input lines 0 and 1:
597
+ // RGBA: source1 = [
598
+ // r0 g0 b0 a0 r1 g1 b1 a1 0 0 0 0 0 0 0 0
599
+ // R0 G0 B0 A0 R1 G1 B1 A1 0 0 0 0 0 0 0 0
600
+ // ]
601
+ // RGB: source1 = [
602
+ // r0 g0 b0 r1 g1 b1 r2 0 0 0 0 0 0 0 0
603
+ // R0 G0 B0 R1 G1 B1 R2 0 0 0 0 0 0 0 0
604
+ // ]
605
+ auto source1 = _mm256_inserti128_si256(_mm256_castsi128_si256(
606
+ _mm_loadl_epi64((__m128i *) (lineIn0_min + stride * i))),
607
+ _mm_loadl_epi64((__m128i *) (lineIn1_min + stride * i)), 1);
608
+ // Apply mask_low:
609
+ // RGBA:
610
+ // [r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 a0 0 a1 0 | R0 0 R1 0 G0 0 G1 0 B0 0 B1 0 A0 0 A1 0]
611
+ // RGB:
612
+ // [r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 0 0 0 0 | R0 0 R1 0 G0 0 G1 0 B0 0 B1 0 0 0 0 0]
613
+ auto pix1 = _mm256_shuffle_epi8(source1, mask_low);
614
+ // Compute output value as C += w0 * C0 + w1 * C1 for each channel in 32-bit precision
615
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix1, mmk));
616
+
617
+ // Same as above for lines 2 and 3:
618
+ auto source2 = _mm256_inserti128_si256(_mm256_castsi128_si256(
619
+ _mm_loadl_epi64((__m128i *) (lineIn2_min + stride * i))),
620
+ _mm_loadl_epi64((__m128i *) (lineIn3_min + stride * i)), 1);
621
+ auto pix2 = _mm256_shuffle_epi8(source2, mask_low);
622
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix2, mmk));
623
+ }
624
+
625
+ // block 1
626
+ const auto i32_aligned = num_channels == 4;
627
+ for (; i < ids_size - 1; i++) {
628
+ // Load 1 value from weight vector
629
+ // mmk = [wl_0 wh_0 0 0 wl_0 wh_0 0 0 ...]
630
+ const auto mmk = _mm256_set1_epi32(k[i]);
631
+
632
+ // Load 2 pixels (one per line) from input lines 0 and 1:
633
+ // RGBA: pix1 = [
634
+ // r0 0 0 0 g0 0 0 0 b0 0 0 0 a0 0 0 0
635
+ // R0 0 0 0 G0 0 0 0 B0 0 0 0 A0 0 0 0
636
+ // ]
637
+ // RGB: pix1 = [
638
+ // r0 0 0 0 g0 0 0 0 b0 0 0 0 r1 0 0 0
639
+ // R0 0 0 0 G0 0 0 0 B0 0 0 0 R1 0 0 0
640
+ // ]
641
+ auto pix1 = _mm256_inserti128_si256(_mm256_castsi128_si256(
642
+ mm_cvtepu8_epi32(lineIn0_min + stride * i, i32_aligned)),
643
+ mm_cvtepu8_epi32(lineIn1_min + stride * i, i32_aligned), 1);
644
+ // Compute output value as C += w0 * C0 for each channel in 32-bit precision
645
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix1, mmk));
646
+
647
+ // Same as above for lines 2 and 3
648
+ auto pix2 = _mm256_inserti128_si256(_mm256_castsi128_si256(
649
+ mm_cvtepu8_epi32(lineIn2_min + stride * i, i32_aligned)),
650
+ mm_cvtepu8_epi32(lineIn3_min + stride * i, i32_aligned), 1);
651
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix2, mmk));
652
+ }
653
+
654
+ if (i == ids_size - 1) {
655
+ // last element
656
+ auto mmk = _mm256_set1_epi32(k[i]);
657
+ // For num_channels == 3 (3 bytes = one pixel) we tolerate to read 4 bytes
658
+ // lines 0, 1 and 2 wont go out of allocated memory bounds
659
+ auto pix = _mm256_inserti128_si256(_mm256_castsi128_si256(
660
+ mm_cvtepu8_epi32(lineIn0_min + stride * i, i32_aligned)),
661
+ mm_cvtepu8_epi32(lineIn1_min + stride * i, i32_aligned), 1);
662
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk));
663
+
664
+ auto p0 = mm_cvtepu8_epi32(lineIn2_min + stride * i, i32_aligned);
665
+ __m128i p1;
666
+ if (num_channels == 3 && C10_UNLIKELY(is_last_line && ids_min + stride * i + 4 >= max_in_x_strided)) {
667
+ uint8_t input[4];
668
+ std::memcpy(input, lineIn3_min + stride * i, 3);
669
+ p1 = mm_cvtepu8_epi32(input, true);
670
+ } else {
671
+ p1 = mm_cvtepu8_epi32(lineIn3_min + stride * i, i32_aligned);
672
+ }
673
+ auto pix2 = _mm256_inserti128_si256(_mm256_castsi128_si256(p0), p1, 1);
674
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix2, mmk));
675
+ }
676
+
677
+ // Convert fixed point values back to integers (truncating)
678
+ sss0 = _mm256_srai_epi32(sss0, coefs_precision);
679
+ sss1 = _mm256_srai_epi32(sss1, coefs_precision);
680
+ // Convert packed signed 32-bit integers to packed 16-bit integers using signed saturation
681
+ // (a a a a b b b b c c c c d d d d) -> (a a b b c c d d 0 0 0 0 0 0 0 0)
682
+ sss0 = _mm256_packs_epi32(sss0, zero);
683
+ sss1 = _mm256_packs_epi32(sss1, zero);
684
+ // Convert packed signed 16-bit integers to packed 8-bit integers using unsigned saturation
685
+ // (a a b b c c d d) -> (a b c d 0 0 0 0)
686
+ sss0 = _mm256_packus_epi16(sss0, zero);
687
+ sss1 = _mm256_packus_epi16(sss1, zero);
688
+
689
+ // Write the output into single uint32
690
+ // (a b c d) -> x_uint32
691
+ auto o0 = _mm_cvtsi128_si32(_mm256_castsi256_si128(sss0));
692
+ auto o1 = _mm_cvtsi128_si32(_mm256_extracti128_si256(sss0, 1));
693
+ auto o2 = _mm_cvtsi128_si32(_mm256_castsi256_si128(sss1));
694
+ auto o3 = _mm_cvtsi128_si32(_mm256_extracti128_si256(sss1, 1));
695
+
696
+ const auto out_x_strided = stride * out_x;
697
+
698
+ if (num_channels == 3 && C10_UNLIKELY(out_x_strided + 4 >= max_out_x_strided)) {
699
+ // Memcpy 4-bytes is faster than 3-bytes and this is a boundary case when we want to write
700
+ // 4 bytes (R G B | X) to the output buffer (X1 X2 X3 | R1).
701
+ // The 4th byte in the register (X) has a garbage value and 4th byte in the output buffer (R1) has a correct
702
+ // value which was preveiously computed by another line. In other words, it means that we can not overwrite
703
+ // it by simply writing 4 bytes from the register to the output. We'll do the following:
704
+ // v----------|
705
+ // Output = [... X1 X2 X3 | R1 G1 B1 R2 ...]
706
+ // First, we write R1 value to the 4th byte of (R G B | X) -> (R G B | R1)
707
+ // Second, we write 4 bytes from the register to the output: (X1 X2 X3 | R1) -> (R G B | R1)
708
+ // Output = [... R G B | R1 G1 B1 R2 ...]
709
+
710
+ _write_endline_rgb_as_uint32(lineOut0 + out_x_strided, o0);
711
+ _write_endline_rgb_as_uint32(lineOut1 + out_x_strided, o1);
712
+ _write_endline_rgb_as_uint32(lineOut2 + out_x_strided, o2);
713
+
714
+ if (C10_UNLIKELY(is_last_line)) {
715
+ // When we handle the last line, we can not access the next 4 bytes
716
+ // as they are out of memory bounds.
717
+ std::memcpy(lineOut3 + out_x_strided, (uint8_t *) &o3, num_channels);
718
+ } else {
719
+ _write_endline_rgb_as_uint32(lineOut3 + out_x_strided, o3);
720
+ }
721
+ } else if (num_channels == 3) {
722
+ // Memcpy 4-bytes is faster than 3-bytes and here
723
+ // we simply write 4 bytes (... R G B X 0 0 0 0 0 ...) where X is a garbage value
724
+ // that we will overwrite on the next iteration: (... R G B R G B X 0 0 ...)
725
+ std::memcpy(lineOut0 + out_x_strided, (uint8_t *) &o0, 4);
726
+ std::memcpy(lineOut1 + out_x_strided, (uint8_t *) &o1, 4);
727
+ std::memcpy(lineOut2 + out_x_strided, (uint8_t *) &o2, 4);
728
+ std::memcpy(lineOut3 + out_x_strided, (uint8_t *) &o3, 4);
729
+ } else {
730
+ // num_channels = 4 -> lineOutX + out_x_strided should be uint32 aligned
731
+ *(uint32_t *)(lineOut0 + out_x_strided) = o0;
732
+ *(uint32_t *)(lineOut1 + out_x_strided) = o1;
733
+ *(uint32_t *)(lineOut2 + out_x_strided) = o2;
734
+ *(uint32_t *)(lineOut3 + out_x_strided) = o3;
735
+ }
736
+ }
737
+ }
738
+
739
+ void ImagingResampleHorizontalConvolution8u(
740
+ uint8_t* C10_RESTRICT lineOut,
741
+ int64_t out_xsize,
742
+ const uint8_t* C10_RESTRICT lineIn,
743
+ int64_t in_xsize,
744
+ const int64_t* idx_ptr_xmin,
745
+ const int64_t* idx_ptr_size,
746
+ const int16_t* kk,
747
+ int kmax,
748
+ unsigned int coefs_precision,
749
+ int64_t num_channels,
750
+ bool is_last_line) {
751
+
752
+ // Interpolation horizontal pass processing only one vertical line.
753
+ // - Input data format is RGBA or RGB with R,G,B,A being uint8. In case of RGBA
754
+ // we can encode 4 values as a single uint32 value.
755
+ // - We split the size of weight vector for a given output index as a sum:
756
+ // ids_size = num_blocks_8 * 8 + num_blocks_4 * 4 + num_blocks_2 * 2 + num_blocks_1
757
+ // - We load and process 8 weights values in a loop ("block 8") then 4 weights and 2 weights values in
758
+ // in another loops ("block 4" and "block 2") and finally we process 1 weight value in the final loop ("block 1").
759
+
760
+ // Define various shuffling masks
761
+ const auto kmask_low = _mm256_set_epi8(
762
+ 11, 10, 9, 8, 11, 10, 9, 8, 11, 10, 9, 8, 11, 10, 9, 8,
763
+ 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0);
764
+ const auto kmask_high = _mm256_set_epi8(
765
+ 15, 14, 13, 12, 15, 14, 13, 12, 15, 14, 13, 12, 15, 14, 13, 12,
766
+ 7, 6, 5, 4, 7, 6, 5, 4, 7, 6, 5, 4, 7, 6, 5, 4);
767
+ const auto kmask_hl = _mm256_set_epi8(
768
+ 7, 6, 5, 4, 7, 6, 5, 4, 7, 6, 5, 4, 7, 6, 5, 4,
769
+ 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0);
770
+
771
+ const auto mask_low_c4 = _mm256_set_epi8(
772
+ -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0,
773
+ -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0);
774
+ const auto mask_high_c4 = _mm256_set_epi8(
775
+ -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8,
776
+ -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8);
777
+ const auto mask_low_c3 = _mm256_set_epi8(
778
+ -1, -1, -1, -1, -1, 5, -1, 2, -1, 4, -1, 1, -1, 3, -1, 0,
779
+ -1, -1, -1, -1, -1, 5, -1, 2, -1, 4, -1, 1, -1, 3, -1, 0);
780
+ const auto mask_high_c3 = _mm256_set_epi8(
781
+ -1, -1, -1, -1, -1, 11, -1, 8, -1, 10, -1, 7, -1, 9, -1, 6,
782
+ -1, -1, -1, -1, -1, 11, -1, 8, -1, 10, -1, 7, -1, 9, -1, 6);
783
+ const auto mask_hl_c3 = _mm256_set_epi8(
784
+ -1, -1, -1, -1, -1, 11, -1, 8, -1, 10, -1, 7, -1, 9, -1, 6,
785
+ -1, -1, -1, -1, -1, 5, -1, 2, -1, 4, -1, 1, -1, 3, -1, 0);
786
+ const auto mask_hl_c4 = _mm256_set_epi8(
787
+ -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8,
788
+ -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0);
789
+
790
+ const auto mask_low128_c3 = _mm_set_epi8(
791
+ -1, -1, -1, -1, -1, 5, -1, 2, -1, 4, -1, 1, -1, 3, -1, 0);
792
+ const auto mask_low128_c4 = _mm_set_epi8(
793
+ -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0);
794
+
795
+ const auto mask_low = (num_channels == 3) ? mask_low_c3 : mask_low_c4;
796
+ const auto mask_high = (num_channels == 3) ? mask_high_c3 : mask_high_c4;
797
+ const auto mask_hl = (num_channels == 3) ? mask_hl_c3 : mask_hl_c4;
798
+ const auto mask_low128 = (num_channels == 3) ? mask_low128_c3 : mask_low128_c4;
799
+
800
+ // out_xsize = output width, out_x = output x index
801
+ // ids_min is the input offset index corresponding to out_x
802
+ // ids_size is the interpolation size for out_x
803
+
804
+ const auto stride = num_channels * sizeof(uint8_t);
805
+ const auto zero = _mm_setzero_si128();
806
+
807
+ TORCH_INTERNAL_ASSERT(stride == 3 || stride == 4);
808
+
809
+ // Let's precompute ids_size limits for block 8, block 4 and block 2
810
+ //
811
+ // In block 8 (8 means we process 8 weight values together), we read at
812
+ // most 32 bytes input data (16 + 16 bytes for RGBA and 12 + 16 bytes for RGB)
813
+ // lineIn + stride * (i + ids_min) + 32 <= lineIn + stride * (ids_size + ids_min)
814
+ // --> i <= ids_size - 32.0 / stride
815
+ // Strict boundary:
816
+ // --> i < ids_size + 1 - int(ceil(32.0 / stride)) = ids_size - b8_delta
817
+ // Soft boundary for reading inside the buffer except its boundaries:
818
+ // --> i < ids_size + 1 - int(32.0 / stride) = ids_size - b8_delta_soft
819
+ // RGBA: b8_delta = b8_delta_soft = 7
820
+ // RGB : b8_delta = 10
821
+ // RGB : b8_delta_soft = 9
822
+ const auto b8_delta = (stride == 4) ? 7 : ((is_last_line) ? 10 : 9);
823
+
824
+ // In block 4 (4 means we process 4 weight values together), we read
825
+ // 16 bytes of input data.
826
+ // lineIn + stride * (i + ids_min) + 16 <= lineIn0 + stride * (ids_size + ids_min)
827
+ // --> i <= ids_size - 16.0 / stride
828
+ // Strict boundary:
829
+ // --> i < ids_size + 1 - int(ceil(16.0 / stride)) = ids_size - b4_delta
830
+ // Soft boundary for reading inside the buffer except its boundaries:
831
+ // --> i < ids_size + 1 - int(16.0 / stride) = ids_size - b4_delta_soft
832
+ // RGBA: b4_delta = b4_delta_soft = 3
833
+ // RGB : b4_delta = 5
834
+ // RGB : b4_delta_soft = 4
835
+ const auto b4_delta = (stride == 4) ? 3 : ((is_last_line) ? 5 : 4);
836
+
837
+ // In block 2 (2 means we process 2 weight values together), we read
838
+ // 8 bytes of input data.
839
+ // lineIn0 + stride * (i + ids_min) + 8 <= lineIn0 + stride * (ids_size + ids_min)
840
+ // --> i <= ids_size - 8.0 / stride
841
+ // Strict boundary:
842
+ // --> i < ids_size + 1 - int(ceil(8.0 / stride)) = ids_size - b2_delta
843
+ // Soft boundary for reading inside the buffer except its boundaries:
844
+ // --> i < ids_size + 1 - int(8.0 / stride) = ids_size - b2_delta_soft
845
+ // RGBA: b2_delta = b2_delta_soft = 1
846
+ // RGB : b2_delta = 2
847
+ // RGB : b2_delta_soft = 1
848
+ const auto b2_delta = (stride == 4) ? 1 : ((is_last_line) ? 2 : 1);
849
+
850
+ const auto max_out_x_strided = out_xsize * stride;
851
+ const auto max_in_x_strided = in_xsize * stride;
852
+
853
+ for (const auto out_x : c10::irange(out_xsize)) {
854
+ __m128i sss;
855
+ const auto ids_min = idx_ptr_xmin[out_x];
856
+ const auto ids_size = idx_ptr_size[out_x];
857
+ const auto * k = &kk[out_x * kmax];
858
+ int64_t i = 0;
859
+
860
+ const auto * lineIn_min = lineIn + ids_min;
861
+
862
+ if (ids_size < 8) {
863
+ sss = _mm_set1_epi32(1 << (coefs_precision - 1));
864
+ } else {
865
+ // Lower part will be added to higher, use only half of the error
866
+ auto sss256 = _mm256_set1_epi32(1 << (coefs_precision - 2));
867
+
868
+ // block 8
869
+ for (; i < ids_size - b8_delta; i += 8) {
870
+ // Load 8 values from weight vector
871
+ auto tmp = _mm_loadu_si128((__m128i*)&k[i]);
872
+ // ksource = [
873
+ // wl_0 wh_0 wl_1 wh_1 wl_2 wh_2 wl_3 wh_3 wl_4 wh_4 wl_5 wh_5 wl_6 wh_6 wl_7 wh_7
874
+ // wl_0 wh_0 wl_1 wh_1 wl_2 wh_2 wl_3 wh_3 wl_4 wh_4 wl_5 wh_5 wl_6 wh_6 wl_7 wh_7
875
+ // ]
876
+ auto ksource = _mm256_insertf128_si256(_mm256_castsi128_si256(tmp), tmp, 1);
877
+
878
+ // RGBA: Load 8 pixels from input:
879
+ // source = [
880
+ // r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3
881
+ // r4 g4 b4 a4 r5 g5 b5 a5 r6 g6 b6 a6 r7 g7 b7 a7
882
+ // ]
883
+ // RGB: Load 10 pixels from input (however we can process only 8 pixels):
884
+ // source = [
885
+ // r0 g0 b0 r1 g1 b1 r2 g2 b2 r3 g3 b3 r4 g4 b4 r5
886
+ // r4 g4 b4 r5 g5 b5 r6 g6 b6 r7 g7 b7 r8 g8 b8 r9
887
+ // ]
888
+ auto source = _mm256_inserti128_si256(_mm256_castsi128_si256(
889
+ _mm_loadu_si128((__m128i *) (lineIn_min + stride * i))),
890
+ _mm_loadu_si128((__m128i *) (lineIn_min + stride * (i + 4))), 1);
891
+
892
+ // Extract lower part of each lane, cast to epi16 and reorder RGBARGBA -> RRGGBBAA
893
+ // RGBA: pix1 = [
894
+ // r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 a0 0 a1 0
895
+ // r4 0 r5 0 g4 0 g5 0 b4 0 b5 0 a4 0 a5 0
896
+ // ]
897
+ // RGB: pix1 = [
898
+ // r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 0 0 0 0
899
+ // r4 0 r5 0 g4 0 g5 0 b4 0 b5 0 0 0 0 0
900
+ // ]
901
+ auto pix1 = _mm256_shuffle_epi8(source, mask_low);
902
+ // mmk1 = [
903
+ // wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ... ...
904
+ // wl_4 wh_4 wl_5 wh_5 wl_4 wh_4 wl_5 wh_5 ... ...
905
+ // ]
906
+ auto mmk1 = _mm256_shuffle_epi8(ksource, kmask_low);
907
+ // Compute output value as
908
+ // C += w0 * C0 + w1 * C1
909
+ // C += w4 * C4 + w5 * C5 for each channel in 32-bit precision
910
+ sss256 = _mm256_add_epi32(sss256, _mm256_madd_epi16(pix1, mmk1));
911
+
912
+ // Same as above for higher part of each lane
913
+ auto pix2 = _mm256_shuffle_epi8(source, mask_high);
914
+ auto mmk2 = _mm256_shuffle_epi8(ksource, kmask_high);
915
+ // Compute output value as
916
+ // C += w2 * C2 + w3 * C3
917
+ // C += w6 * C6 + w7 * C7 for each channel in 32-bit precision
918
+ sss256 = _mm256_add_epi32(sss256, _mm256_madd_epi16(pix2, mmk2));
919
+ }
920
+
921
+ // block 4
922
+ for (; i < ids_size - b4_delta; i += 4) {
923
+ // Load 4 values from weight vector
924
+ auto tmp = _mm_loadl_epi64((__m128i *) &k[i]);
925
+ // ksource = [
926
+ // wl_0 wh_0 wl_1 wh_1 wl_2 wh_2 wl_3 wh_3 0 0 0 0 0 0 0 0
927
+ // wl_0 wh_0 wl_1 wh_1 wl_2 wh_2 wl_3 wh_3 0 0 0 0 0 0 0 0
928
+ // ]
929
+ auto ksource = _mm256_insertf128_si256(_mm256_castsi128_si256(tmp), tmp, 1);
930
+
931
+ // Load pixels from input line
932
+ tmp = _mm_loadu_si128((__m128i *) (lineIn_min + stride * i));
933
+ // RGBA: source = [
934
+ // r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3
935
+ // r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3
936
+ // ]
937
+ // RGB: source = [
938
+ // r0 g0 b0 r1 g1 b1 r2 g2 b2 r3 g3 b3 r4 g4 b4 r5
939
+ // r0 g0 b0 r1 g1 b1 r2 g2 b2 r3 g3 b3 r4 g4 b4 r5
940
+ // ]
941
+ auto source = _mm256_insertf128_si256(_mm256_castsi128_si256(tmp), tmp, 1);
942
+
943
+ // Cast source to epi16 and reorder RGBARGBA -> RRGGBBAA
944
+ // RGBA: pix = [
945
+ // r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 a0 0 a1 0
946
+ // r2 0 r3 0 g2 0 g3 0 b2 0 b3 0 a2 0 a3 0
947
+ // ]
948
+ // RGB: pix = [
949
+ // r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 0 0 0 0
950
+ // r2 0 r3 0 g2 0 g3 0 b2 0 b3 0 0 0 0 0
951
+ // ]
952
+ auto pix = _mm256_shuffle_epi8(source, mask_hl);
953
+ // mmk = [
954
+ // wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ... ...
955
+ // wl_2 wh_2 wl_3 wh_3 wl_2 wh_2 wl_3 wh_3 ... ...
956
+ // ]
957
+ auto mmk = _mm256_shuffle_epi8(ksource, kmask_hl);
958
+ // Compute output value as
959
+ // C += w0 * C0 + w1 * C1
960
+ // C += w2 * C2 + w3 * C3 for each channel in 32-bit precision
961
+ sss256 = _mm256_add_epi32(sss256, _mm256_madd_epi16(pix, mmk));
962
+ }
963
+
964
+ // Sum results between the lanes
965
+ sss = _mm_add_epi32(
966
+ _mm256_extracti128_si256(sss256, 0),
967
+ _mm256_extracti128_si256(sss256, 1));
968
+ }
969
+
970
+ // block 2
971
+ for (; i < ids_size - b2_delta; i += 2) {
972
+ // Load 2 values from weight vector
973
+ // mmk = [wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ...]
974
+ auto mmk = _mm_set1_epi32(*(int32_t*)&k[i]);
975
+ // Load pixels from input line
976
+ // RGBA: source = [
977
+ // r0 g0 b0 a0 r1 g1 b1 a1 0 0 0 0 0 0 0 0
978
+ // ]
979
+ // RGB: source = [
980
+ // r0 g0 b0 r1 g1 b1 r2 g2 0 0 0 0 0 0 0 0
981
+ // ]
982
+ auto source = _mm_loadl_epi64((__m128i *) (lineIn_min + stride * i));
983
+ // Cast source to epi16 and reorder RGBARGBA -> RRGGBBAA
984
+ auto pix = _mm_shuffle_epi8(source, mask_low128);
985
+ // Compute output value as C += w0 * C0 + w1 * C1 for each channel in 32-bit precision
986
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
987
+ }
988
+
989
+ // block 1
990
+ const auto i32_aligned = num_channels == 4;
991
+ for (; i < ids_size - 1; i++) {
992
+ // Load 1 value from weight vector
993
+ // mmk = [wl_0 wh_0 0 0 wl_0 wh_0 0 0 ...]
994
+ auto mmk = _mm_set1_epi32(k[i]);
995
+ // Load one pixel from input line
996
+ // RGBA: pix = [
997
+ // r0 0 0 0 g0 0 0 0 b0 0 0 0 a0 0 0 0
998
+ // ]
999
+ // RGB: pix = [
1000
+ // r0 0 0 0 g0 0 0 0 b0 0 0 0 r1 0 0 0
1001
+ // ]
1002
+ auto pix = mm_cvtepu8_epi32(lineIn_min + stride * i, i32_aligned);
1003
+ // Compute output value as C += w0 * C0 for each channel in 32-bit precision
1004
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
1005
+ }
1006
+
1007
+ if (i == ids_size - 1) {
1008
+ // last element
1009
+ auto mmk = _mm_set1_epi32(k[i]);
1010
+ __m128i pix;
1011
+ auto p = lineIn_min + stride * i;
1012
+ if (num_channels == 3 && C10_UNLIKELY(is_last_line && ids_min + stride * i + 4 >= max_in_x_strided)) {
1013
+ uint8_t input[4];
1014
+ std::memcpy(input, p, 3);
1015
+ pix = mm_cvtepu8_epi32(input, true);
1016
+ } else {
1017
+ pix = mm_cvtepu8_epi32(p, i32_aligned);
1018
+ }
1019
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
1020
+ }
1021
+
1022
+ // Convert fixed point values back to integers (truncating)
1023
+ sss = _mm_srai_epi32(sss, coefs_precision);
1024
+ // Convert packed signed 32-bit integers to packed 16-bit integers using signed saturation
1025
+ // (a a a a b b b b c c c c d d d d) -> (a a b b c c d d 0 0 0 0 0 0 0 0)
1026
+ sss = _mm_packs_epi32(sss, zero);
1027
+ // Convert packed signed 16-bit integers to packed 8-bit integers using unsigned saturation
1028
+ // (a a b b c c d d) -> (a b c d 0 0 0 0)
1029
+ sss = _mm_packus_epi16(sss, zero);
1030
+ // Write the output into single uint32
1031
+ // (a b c d) -> x_uint32
1032
+ auto o = _mm_cvtsi128_si32(sss);
1033
+ const auto out_x_strided = stride * out_x;
1034
+ if (num_channels == 3 && C10_UNLIKELY(out_x_strided + 4 >= max_out_x_strided)) {
1035
+ if (C10_UNLIKELY(is_last_line)) {
1036
+ // When we handle the last line, we cannot access the next 4 bytes
1037
+ // as they are out of memory bounds.
1038
+ std::memcpy(lineOut + out_x_strided, (uint8_t *) &o, 3);
1039
+ } else {
1040
+ // A 4-byte memcpy is faster than a 3-byte one, and this is a boundary case where we want to write
1041
+ // 4 bytes (R G B | X) to the output buffer (X1 X2 X3 | R1).
1042
+ // The 4th byte in the register (X) has a garbage value and the 4th byte in the output buffer (R1) has a correct
1043
+ // value which was previously computed by another line. In other words, it means that we cannot overwrite
1044
+ // it by simply writing 4 bytes from the register to the output. We'll do the following:
1045
+ // v----------|
1046
+ // Output = [... X1 X2 X3 | R1 G1 B1 R2 ...]
1047
+ // First, we write R1 value to the 4th byte of (R G B | X) -> (R G B | R1)
1048
+ // Second, we write 4 bytes from the register to the output: (X1 X2 X3 | R1) -> (R G B | R1)
1049
+ // Output = [... R G B | R1 G1 B1 R2 ...]
1050
+ _write_endline_rgb_as_uint32(lineOut + out_x_strided, o);
1051
+ }
1052
+ } else if (num_channels == 3) {
1053
+ // A 4-byte memcpy is faster than a 3-byte one, and here
1054
+ // we simply write 4 bytes (... R G B X 0 0 0 0 0 ...) where X is a garbage value
1055
+ // that we will overwrite on the next iteration: (... R G B R G B X 0 0 ...)
1056
+ std::memcpy(lineOut + out_x_strided, (uint8_t *) &o, 4);
1057
+ } else {
1058
+ // num_channels = 4 -> lineOut + out_x_strided should be uint32 aligned
1059
+ *(uint32_t *)(lineOut + out_x_strided) = o;
1060
+ }
1061
+ }
1062
+ }
1063
+
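For reference, a hedged scalar sketch (my own, not part of the file) of what the horizontal pass above computes for one output pixel; it mirrors the rounding-constant init, the 32-bit accumulation, the arithmetic shift, and the packs/packus saturation:

    #include <algorithm>
    #include <cstdint>

    // Scalar reference for one output pixel of the horizontal pass (sketch only).
    inline void horizontal_pixel_ref(
        uint8_t* out, const uint8_t* lineIn, const int16_t* k,
        int64_t ids_min, int64_t ids_size,
        unsigned int coefs_precision, int64_t num_channels) {
      for (int64_t c = 0; c < num_channels; c++) {
        int32_t acc = 1 << (coefs_precision - 1);            // rounding constant
        for (int64_t i = 0; i < ids_size; i++) {
          acc += int32_t(k[i]) * lineIn[num_channels * (ids_min + i) + c];
        }
        acc >>= coefs_precision;                             // back from fixed point
        out[c] = (uint8_t)std::clamp(acc, 0, 255);           // packs/packus saturation
      }
    }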
1064
+ void ImagingResampleVerticalConvolution8u(
1065
+ uint8_t* C10_RESTRICT lineOut,
1066
+ const uint8_t* C10_RESTRICT lineIn,
1067
+ int64_t xsize,
1068
+ int64_t ids_min,
1069
+ int64_t ids_size,
1070
+ const int16_t* k,
1071
+ unsigned int coefs_precision,
1072
+ int64_t num_channels) {
1073
+
1074
+ // Interpolation vertical pass processing one line.
1075
+ // - We process x-axis data with blocks of 8, 2 and 1
1076
+ // - We split the size of the weight vector for a given output index as a sum: K = n * 2 + m.
1077
+
1078
+ // xsize = output width, which also equals the input width
1079
+ // ids_size = interpolation size
1080
+ // ids_min = input y start index
1081
+ const auto stride = num_channels * sizeof(uint8_t);
1082
+
1083
+ TORCH_INTERNAL_ASSERT(stride == 3 || stride == 4);
1084
+
1085
+ const int64_t data_size = xsize * stride;
1086
+ const int64_t data_stride = stride;
1087
+ constexpr auto vec_size = 256 / 8;
1088
+
1089
+ const auto initial = _mm_set1_epi32(1 << (coefs_precision - 1));
1090
+ const auto initial_256 = _mm256_set1_epi32(1 << (coefs_precision - 1));
1091
+ const auto zero = _mm_setzero_si128();
1092
+ const auto zero_256 = _mm256_setzero_si256();
1093
+
1094
+ int64_t j = 0;
1095
+ // block 8
1096
+ const auto b8_usable_vec_stride = (vec_size / data_stride) * data_stride;
1097
+ for (; j < data_size - vec_size; j += b8_usable_vec_stride) {
1098
+ auto sss0 = initial_256;
1099
+ auto sss1 = initial_256;
1100
+ auto sss2 = initial_256;
1101
+ auto sss3 = initial_256;
1102
+ int64_t i = 0;
1103
+ const auto * lineIn_min = lineIn + j + ids_min;
1104
+
1105
+ for (; i < ids_size - 1; i += 2) {
1106
+ // Load 2 values from weight vector
1107
+ auto mmk = _mm256_set1_epi32(*(int32_t*)&k[i]);
1108
+
1109
+ // RGBA: Load 8 pixels per line
1110
+ // source1 = [
1111
+ // r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3
1112
+ // r4 g4 b4 a4 r5 g5 b5 a5 r6 g6 b6 a6 r7 g7 b7 a7
1113
+ // ]
1114
+ // RGB: Load 10 pixels per line (however we can process only 8 pixels):
1115
+ // source1 = [
1116
+ // r0 g0 b0 r1 g1 b1 r2 g2 b2 r3 g3 b3 r4 g4 b4 r5
1117
+ // r4 g4 b4 r5 g5 b5 r6 g6 b6 r7 g7 b7 r8 g8 b8 r9
1118
+ // ]
1119
+ auto source1 =
1120
+ _mm256_loadu_si256((__m256i*)(lineIn_min + data_size * i));
1121
+ auto source2 =
1122
+ _mm256_loadu_si256((__m256i*)(lineIn_min + data_size * (i + 1)));
1123
+
1124
+ // Interleave source1 and source2 from the low half of each 128-bit lane
1125
+ // and cast the result to epi16
1126
+ // RGBA: pix1 = [
1127
+ // r0 0 R0 0 g0 0 G0 0 b0 0 B0 0 a0 0 A0 0
1128
+ // r1 0 R1 0 g1 0 G1 0 b1 0 B1 0 a1 0 A1 0
1129
+ // ]
1130
+ // RGB: pix1 = [
1131
+ // r0 0 R0 0 g0 0 G0 0 b0 0 B0 0 0 0 0 0
1132
+ // r1 0 R1 0 g1 0 G1 0 b1 0 B1 0 0 0 0 0
1133
+ // ]
1134
+ auto source_lo = _mm256_unpacklo_epi8(source1, source2);
1135
+ auto pix1 = _mm256_unpacklo_epi8(source_lo, zero_256);
1136
+ // Compute output value as
1137
+ // C += w0 * c0 + w1 * C0
1138
+ // C += w0 * c1 + w1 * C1 for each channel in 32-bit precision
1139
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix1, mmk));
1140
+
1141
+ // RGBA: pix2 = [
1142
+ // r2 0 R2 0 g2 0 G2 0 b2 0 B2 0 a2 0 A2 0
1143
+ // r3 0 R3 0 g3 0 G3 0 b3 0 B3 0 a3 0 A3 0
1144
+ // ]
1145
+ // RGB: pix2 = [
1146
+ // r2 0 R2 0 g2 0 G2 0 b2 0 B2 0 0 0 0 0
1147
+ // r3 0 R3 0 g3 0 G3 0 b3 0 B3 0 0 0 0 0
1148
+ // ]
1149
+ auto pix2 = _mm256_unpackhi_epi8(source_lo, zero_256);
1150
+ // Compute output value as
1151
+ // C += w0 * c2 + w1 * C2
1152
+ // C += w0 * c3 + w1 * C3 for each channel in 32-bit precision
1153
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix2, mmk));
1154
+
1155
+ // Same as above for the high half of each 128-bit lane
1156
+ auto source_hi = _mm256_unpackhi_epi8(source1, source2);
1157
+ auto pix3 = _mm256_unpacklo_epi8(source_hi, zero_256);
1158
+ sss2 = _mm256_add_epi32(sss2, _mm256_madd_epi16(pix3, mmk));
1159
+ auto pix4 = _mm256_unpackhi_epi8(source_hi, zero_256);
1160
+ sss3 = _mm256_add_epi32(sss3, _mm256_madd_epi16(pix4, mmk));
1161
+ }
1162
+ // Same processing as above but with a single weight value
1163
+ for (; i < ids_size; i += 1) {
1164
+ auto mmk = _mm256_set1_epi32(k[i]);
1165
+
1166
+ auto source1 = _mm256_loadu_si256((__m256i*)(lineIn_min + i * data_size));
1167
+
1168
+ auto source_lo = _mm256_unpacklo_epi8(source1, zero_256);
1169
+ auto pix1 = _mm256_unpacklo_epi8(source_lo, zero_256);
1170
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix1, mmk));
1171
+ auto pix2 = _mm256_unpackhi_epi8(source_lo, zero_256);
1172
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix2, mmk));
1173
+
1174
+ auto source_hi = _mm256_unpackhi_epi8(source1, zero_256);
1175
+ auto pix3 = _mm256_unpacklo_epi8(source_hi, _mm256_setzero_si256());
1176
+ sss2 = _mm256_add_epi32(sss2, _mm256_madd_epi16(pix3, mmk));
1177
+ auto pix4 = _mm256_unpackhi_epi8(source_hi, _mm256_setzero_si256());
1178
+ sss3 = _mm256_add_epi32(sss3, _mm256_madd_epi16(pix4, mmk));
1179
+ }
1180
+ // Convert fixed point values back to integers (truncating)
1181
+ sss0 = _mm256_srai_epi32(sss0, coefs_precision);
1182
+ sss1 = _mm256_srai_epi32(sss1, coefs_precision);
1183
+ sss2 = _mm256_srai_epi32(sss2, coefs_precision);
1184
+ sss3 = _mm256_srai_epi32(sss3, coefs_precision);
1185
+ // Convert packed signed 32-bit integers to packed 16-bit integers using signed saturation
1186
+ // (a a a a b b b b c c c c d d d d) -> (a a b b c c d d)
1187
+ sss0 = _mm256_packs_epi32(sss0, sss1);
1188
+ sss2 = _mm256_packs_epi32(sss2, sss3);
1189
+ // Convert packed signed 16-bit integers to packed 8-bit integers using unsigned saturation
1190
+ // (a a b b c c d d) -> (a b c d)
1191
+ sss0 = _mm256_packus_epi16(sss0, sss2);
1192
+
1193
+ // Stores 32 bytes
1194
+ _mm256_storeu_si256((__m256i*)(lineOut + j), sss0);
1195
+ }
1196
+
1197
+ // TODO: Do we also need block 4 ???
1198
+ // block 2
1199
+ const auto b2_usable_vec_stride = (8 / data_stride) * data_stride;
1200
+ for (; j < data_size - vec_size / 4; j += b2_usable_vec_stride) {
1201
+ auto sss0 = initial;
1202
+ auto sss1 = initial;
1203
+ int64_t i = 0;
1204
+ const auto * lineIn_min = lineIn + j + ids_min;
1205
+
1206
+ for (; i < ids_size - 1; i += 2) {
1207
+ // Load 2 values from weight vector
1208
+ // mmk = [wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ... ]
1209
+ auto mmk = _mm_set1_epi32(*(int32_t*)&k[i]);
1210
+
1211
+ // Load 2 pixels per line
1212
+ // RGBA: source1 = [
1213
+ // r0 g0 b0 a0 r1 g1 b1 a1 0 0 0 0 0 0 0 0
1214
+ // ]
1215
+ // RGB: source1 = [
1216
+ // r0 g0 b0 r1 g1 b1 r2 g2 0 0 0 0 0 0 0 0
1217
+ // ]
1218
+ auto source1 = _mm_loadl_epi64((__m128i *) (lineIn_min + i * data_size));
1219
+ auto source2 = _mm_loadl_epi64((__m128i *) (lineIn_min + (i + 1) * data_size));
1220
+ // Interleave source1 and source2 and cast the result to epi16
1221
+ // RGBA: pix = [
1222
+ // r0 0 R0 0 g0 0 G0 0 b0 0 B0 0 a0 0 A0 0
1223
+ // ]
1224
+ // RGB: pix = [
1225
+ // r0 0 R0 0 g0 0 G0 0 b0 0 B0 0 0 0 0 0
1226
+ // ]
1227
+ auto source = _mm_unpacklo_epi8(source1, source2);
1228
+ auto pix = _mm_unpacklo_epi8(source, zero);
1229
+ // Compute output value as C += w0 * c0 + w1 * C0 for each channel in 32-bit precision
1230
+ sss0 = _mm_add_epi32(sss0, _mm_madd_epi16(pix, mmk));
1231
+ // RGBA: pix = [
1232
+ // r1 0 R1 0 g1 0 G1 0 b1 0 B1 0 a1 0 A1 0
1233
+ // ]
1234
+ // RGB: pix = [
1235
+ // r1 0 R1 0 g1 0 G1 0 b1 0 B1 0 0 0 0 0
1236
+ // ]
1237
+ pix = _mm_unpackhi_epi8(source, zero);
1238
+ // Compute output value as C += w0 * c1 + w1 * C1 for each channel in 32-bit precision
1239
+ sss1 = _mm_add_epi32(sss1, _mm_madd_epi16(pix, mmk));
1240
+ }
1241
+ // Same processing as above but with a single weight value
1242
+ for (; i < ids_size; i += 1) {
1243
+ auto mmk = _mm_set1_epi32(k[i]);
1244
+
1245
+ auto source1 = _mm_loadl_epi64((__m128i*) (lineIn_min + i * data_size));
1246
+
1247
+ auto source = _mm_unpacklo_epi8(source1, zero);
1248
+ auto pix1 = _mm_unpacklo_epi8(source, zero);
1249
+ sss0 = _mm_add_epi32(sss0, _mm_madd_epi16(pix1, mmk));
1250
+ auto pix2 = _mm_unpackhi_epi8(source, zero);
1251
+ sss1 = _mm_add_epi32(sss1, _mm_madd_epi16(pix2, mmk));
1252
+ }
1253
+ // Convert fixed point values back to integers (truncating)
1254
+ sss0 = _mm_srai_epi32(sss0, coefs_precision);
1255
+ sss1 = _mm_srai_epi32(sss1, coefs_precision);
1256
+ // Convert packed signed 32-bit integers to packed 16-bit integers using signed saturation
1257
+ // (a a a a b b b b c c c c d d d d) -> (a a b b c c d d)
1258
+ sss0 = _mm_packs_epi32(sss0, sss1);
1259
+ // Convert packed signed 16-bit integers to packed 8-bit integers using unsigned saturation
1260
+ // (a a b b c c d d) -> (a b c d)
1261
+ sss0 = _mm_packus_epi16(sss0, sss0);
1262
+ // Store 2 pixels to the output
1263
+ _mm_storel_epi64((__m128i*)(lineOut + j), sss0);
1264
+ }
1265
+
1266
+ // block 1
1267
+ const auto b1_usable_vec_stride = (4 / data_stride) * data_stride;
1268
+ const auto i32_aligned = num_channels == 4;
1269
+ for (; j < data_size - 4; j += b1_usable_vec_stride) {
1270
+ auto sss = initial;
1271
+ int64_t i = 0;
1272
+ const auto * lineIn_min = lineIn + j + ids_min;
1273
+
1274
+ for (; i < ids_size - 1; i += 2) {
1275
+ // Load 2 values from weight vector
1276
+ // mmk = [wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ... ]
1277
+ auto mmk = _mm_set1_epi32(*(int32_t*)&k[i]);
1278
+
1279
+ // Load one pixel per line
1280
+ // RGBA: source1 = [
1281
+ // r0 g0 b0 a0 0 0 0 0 0 0 0 0 0 0 0 0
1282
+ // ]
1283
+ // RGB: source1 = [
1284
+ // r0 g0 b0 r1 0 0 0 0 0 0 0 0 0 0 0 0
1285
+ // ]
1286
+ auto source1 = mm_cvtsi32_si128(lineIn_min + i * data_size, i32_aligned);
1287
+ auto source2 = mm_cvtsi32_si128(lineIn_min + (i + 1) * data_size, i32_aligned);
1288
+
1289
+ // Interleave source1 and source2 and cast the result to epi16
1290
+ // RGBA: pix = [
1291
+ // r0 0 R0 0 g0 0 G0 0 b0 0 B0 0 a0 0 A0 0
1292
+ // ]
1293
+ // RGB: pix = [
1294
+ // r0 0 R0 0 g0 0 G0 0 b0 0 B0 0 0 0 0 0
1295
+ // ]
1296
+ auto source = _mm_unpacklo_epi8(source1, source2);
1297
+ auto pix = _mm_unpacklo_epi8(source, zero);
1298
+ // Compute output value as C += w0 * c0 + w1 * C0 for each channel in 32-bit precision
1299
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
1300
+ }
1301
+
1302
+ for (; i < ids_size; i++) {
1303
+ auto mmk = _mm_set1_epi32(k[i]);
1304
+ auto pix = mm_cvtepu8_epi32(lineIn_min + i * data_size, i32_aligned);
1305
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
1306
+ }
1307
+ sss = _mm_srai_epi32(sss, coefs_precision);
1308
+ sss = _mm_packs_epi32(sss, zero);
1309
+ sss = _mm_packus_epi16(sss, zero);
1310
+
1311
+ auto o = _mm_cvtsi128_si32(sss);
1312
+
1313
+ // Here we write 4 bytes to the output even if num_channels < 4, e.g. o = {r,g,b,X} for num_channels=3
1314
+ // It is OK to write the 4th byte (e.g. X) since on the next step we will overwrite it with new data.
1315
+ // We also won't go out of bounds of the lineOut memory allocation.
1316
+ std::memcpy(lineOut + j, (uint8_t *) &o, 4);
1317
+ }
1318
+
1319
+ for (; j < data_size; j += data_stride) {
1320
+ auto sss = initial;
1321
+ int64_t i = 0;
1322
+ const auto * lineIn_min = lineIn + j + ids_min;
1323
+ // For RGBA we could use (ids_size - 1) as a tighter limit, but for RGB we could then read outside the memory boundary
1324
+ // for the last remaining line
1325
+ for (; i < ids_size - 2; i += 2) {
1326
+ // Load two coefficients at once
1327
+ auto mmk = _mm_set1_epi32(*(int32_t*)&k[i]);
1328
+
1329
+ // Load 2 lines
1330
+ auto source1 = mm_cvtsi32_si128(lineIn_min + i * data_size, i32_aligned);
1331
+ auto source2 = mm_cvtsi32_si128(lineIn_min + (i + 1) * data_size, i32_aligned);
1332
+
1333
+ auto source = _mm_unpacklo_epi8(source1, source2);
1334
+ auto pix = _mm_unpacklo_epi8(source, zero);
1335
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
1336
+ }
1337
+
1338
+ // Same processing as above but with a single weight value
1339
+ for (; i < ids_size; i++) {
1340
+ auto mmk = _mm_set1_epi32(k[i]);
1341
+
1342
+ const uint8_t * p = lineIn_min + i * data_size;
1343
+ __m128i pix;
1344
+ // There is not much perf gain in using a more detailed condition like
1345
+ // num_channels == 3 && ids_min + j + data_size * i + 4 >= in_max_size
1346
+ // const int64_t in_max_size = data_size * in_ysize;
1347
+ if (num_channels == 3) {
1348
+ uint8_t input[4];
1349
+ std::memcpy(input, p, 3);
1350
+ pix = mm_cvtepu8_epi32(input, true);
1351
+ } else {
1352
+ pix = mm_cvtepu8_epi32(p, true);
1353
+ }
1354
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
1355
+ }
1356
+
1357
+ // Convert fixed point values back to integers (truncating)
1358
+ sss = _mm_srai_epi32(sss, coefs_precision);
1359
+ // Convert packed signed 32-bit integers to packed 16-bit integers using signed saturation
1360
+ // (a a a a b b b b c c c c d d d d) -> (a a b b c c d d)
1361
+ sss = _mm_packs_epi32(sss, zero);
1362
+ // Convert packed signed 16-bit integers to packed 8-bit integers using unsigned saturation
1363
+ // (a a b b c c d d) -> (a b c d)
1364
+ sss = _mm_packus_epi16(sss, zero);
1365
+ // Store one pixel to the output
1366
+ auto o = _mm_cvtsi128_si32(sss);
1367
+ if (num_channels == 3 && C10_UNLIKELY(j + 4 >= data_size)) {
1368
+ std::memcpy(lineOut + j, (uint8_t *) &o, 3);
1369
+ } else {
1370
+ std::memcpy(lineOut + j, (uint8_t *) &o, 4);
1371
+ }
1372
+ }
1373
+ }
1374
+
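And a matching scalar sketch (again my own, not part of the file) for one output byte of the vertical pass above; ids_min is treated here as the byte offset of the first contributing row, exactly as in the addressing used by the SIMD code:

    #include <algorithm>
    #include <cstdint>

    // Scalar reference for one output byte of the vertical pass (sketch only).
    // data_size = xsize * num_channels is the row stride in bytes.
    inline uint8_t vertical_byte_ref(
        const uint8_t* lineIn, int64_t j, int64_t data_size,
        int64_t ids_min, int64_t ids_size,
        const int16_t* k, unsigned int coefs_precision) {
      int32_t acc = 1 << (coefs_precision - 1);
      for (int64_t i = 0; i < ids_size; i++) {
        acc += int32_t(k[i]) * lineIn[j + ids_min + i * data_size];
      }
      return (uint8_t)std::clamp(acc >> coefs_precision, 0, 255);
    }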
1375
+ } // anonymous namespace
1376
+ #endif // CPU_CAPABILITY_AVX2
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/WeightNormKernel.h ADDED
@@ -0,0 +1,20 @@
1
+ #pragma once
2
+ #include <ATen/native/DispatchStub.h>
3
+ #include <cstdint>
4
+
5
+ namespace at {
6
+ class TensorBase;
7
+ }
8
+
9
+ namespace at { namespace native {
10
+
11
+ using weight_norm_fn = void(*)(
12
+ TensorBase&, TensorBase&, const TensorBase&, const TensorBase&, int64_t);
13
+ using weight_norm_backward_fn = void(*)(
14
+ TensorBase&, TensorBase&, const TensorBase&, const TensorBase&,
15
+ const TensorBase&, const TensorBase&, int64_t);
16
+
17
+ DECLARE_DISPATCH(weight_norm_fn, weight_norm_stub);
18
+ DECLARE_DISPATCH(weight_norm_backward_fn, weight_norm_backward_stub);
19
+
20
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/avx_mathfun.h ADDED
@@ -0,0 +1,522 @@
1
+ #pragma once
2
+ /*
3
+ AVX implementation of sin, cos, sincos, exp and log
4
+
5
+ Based on "sse_mathfun.h", by Julien Pommier
6
+ http://gruntthepeon.free.fr/ssemath/
7
+
8
+ Copyright (C) 2012 Giovanni Garberoglio
9
+ Interdisciplinary Laboratory for Computational Science (LISC)
10
+ Fondazione Bruno Kessler and University of Trento
11
+ via Sommarive, 18
12
+ I-38123 Trento (Italy)
13
+
14
+ This software is provided 'as-is', without any express or implied
15
+ warranty. In no event will the authors be held liable for any damages
16
+ arising from the use of this software.
17
+
18
+ Permission is granted to anyone to use this software for any purpose,
19
+ including commercial applications, and to alter it and redistribute it
20
+ freely, subject to the following restrictions:
21
+
22
+ 1. The origin of this software must not be misrepresented; you must not
23
+ claim that you wrote the original software. If you use this software
24
+ in a product, an acknowledgment in the product documentation would be
25
+ appreciated but is not required.
26
+ 2. Altered source versions must be plainly marked as such, and must not be
27
+ misrepresented as being the original software.
28
+ 3. This notice may not be removed or altered from any source distribution.
29
+
30
+ (this is the zlib license)
31
+ */
32
+
33
+ #include <ATen/native/cpu/Intrinsics.h>
34
+
35
+ /* The original source of this file has been modified. */
36
+ #if defined(CPU_CAPABILITY_AVX2)
37
+
38
+ #if defined(__GNUC__)
39
+ # define ALIGN32_BEG __attribute__((aligned(32)))
40
+ #elif defined(_WIN32)
41
+ # define ALIGN32_BEG __declspec(align(32))
42
+ #endif
43
+
44
+ typedef __m256 v8sf; // vector of 8 float (avx2)
45
+ typedef __m256i v8si; // vector of 8 int (avx2)
46
+
47
+ /* declare some AVX constants -- why can't I figure a better way to do that? */
48
+ #define _PS256_CONST(Name, Val) \
49
+ static const ALIGN32_BEG float _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
50
+ #define _PI32_CONST256(Name, Val) \
51
+ static const ALIGN32_BEG int _pi32_256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
52
+ #define _PS256_CONST_TYPE(Name, Type, Val) \
53
+ static const ALIGN32_BEG Type _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
54
+
55
+ _PS256_CONST(1 , 1.0f);
56
+ _PS256_CONST(0p5, 0.5f);
57
+ /* the smallest non denormalized float number */
58
+ _PS256_CONST_TYPE(min_norm_pos, int, 0x00800000);
59
+ _PS256_CONST_TYPE(mant_mask, int, 0x7f800000);
60
+ _PS256_CONST_TYPE(inv_mant_mask, int, ~0x7f800000);
61
+
62
+ _PS256_CONST_TYPE(sign_mask, int, (int)0x80000000);
63
+ _PS256_CONST_TYPE(inv_sign_mask, int, ~0x80000000);
64
+
65
+ _PI32_CONST256(0, 0);
66
+ _PI32_CONST256(1, 1);
67
+ _PI32_CONST256(inv1, ~1);
68
+ _PI32_CONST256(2, 2);
69
+ _PI32_CONST256(4, 4);
70
+ _PI32_CONST256(0x7f, 0x7f);
71
+
72
+ _PS256_CONST(cephes_SQRTHF, 0.707106781186547524);
73
+ _PS256_CONST(cephes_log_p0, 7.0376836292E-2);
74
+ _PS256_CONST(cephes_log_p1, - 1.1514610310E-1);
75
+ _PS256_CONST(cephes_log_p2, 1.1676998740E-1);
76
+ _PS256_CONST(cephes_log_p3, - 1.2420140846E-1);
77
+ _PS256_CONST(cephes_log_p4, + 1.4249322787E-1);
78
+ _PS256_CONST(cephes_log_p5, - 1.6668057665E-1);
79
+ _PS256_CONST(cephes_log_p6, + 2.0000714765E-1);
80
+ _PS256_CONST(cephes_log_p7, - 2.4999993993E-1);
81
+ _PS256_CONST(cephes_log_p8, + 3.3333331174E-1);
82
+ _PS256_CONST(cephes_log_q1, -2.12194440e-4);
83
+ _PS256_CONST(cephes_log_q2, 0.693359375);
84
+
85
+
86
+ /* natural logarithm computed for 8 simultaneous float
87
+ return NaN for x <= 0
88
+ */
89
+ inline v8sf log256_ps(v8sf x) {
90
+ v8si imm0;
91
+ v8sf one = *(v8sf*)_ps256_1;
92
+
93
+ //v8sf invalid_mask = _mm256_cmple_ps(x, _mm256_setzero_ps());
94
+ v8sf invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_LE_OS);
95
+
96
+ x = _mm256_max_ps(x, *(v8sf*)_ps256_min_norm_pos); /* cut off denormalized stuff */
97
+
98
+ // can be done with AVX2
99
+ imm0 = _mm256_srli_epi32(_mm256_castps_si256(x), 23);
100
+
101
+ /* keep only the fractional part */
102
+ x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_mant_mask);
103
+ x = _mm256_or_ps(x, *(v8sf*)_ps256_0p5);
104
+
105
+ // this is again another AVX2 instruction
106
+ imm0 = _mm256_sub_epi32(imm0, *(v8si*)_pi32_256_0x7f);
107
+ v8sf e = _mm256_cvtepi32_ps(imm0);
108
+
109
+ e = _mm256_add_ps(e, one);
110
+
111
+ /* part2:
112
+ if( x < SQRTHF ) {
113
+ e -= 1;
114
+ x = x + x - 1.0;
115
+ } else { x = x - 1.0; }
116
+ */
117
+ //v8sf mask = _mm256_cmplt_ps(x, *(v8sf*)_ps256_cephes_SQRTHF);
118
+ v8sf mask = _mm256_cmp_ps(x, *(v8sf*)_ps256_cephes_SQRTHF, _CMP_LT_OS);
119
+ v8sf tmp = _mm256_and_ps(x, mask);
120
+ x = _mm256_sub_ps(x, one);
121
+ e = _mm256_sub_ps(e, _mm256_and_ps(one, mask));
122
+ x = _mm256_add_ps(x, tmp);
123
+
124
+ v8sf z = _mm256_mul_ps(x,x);
125
+
126
+ v8sf y = *(v8sf*)_ps256_cephes_log_p0;
127
+ y = _mm256_mul_ps(y, x);
128
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p1);
129
+ y = _mm256_mul_ps(y, x);
130
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p2);
131
+ y = _mm256_mul_ps(y, x);
132
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p3);
133
+ y = _mm256_mul_ps(y, x);
134
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p4);
135
+ y = _mm256_mul_ps(y, x);
136
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p5);
137
+ y = _mm256_mul_ps(y, x);
138
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p6);
139
+ y = _mm256_mul_ps(y, x);
140
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p7);
141
+ y = _mm256_mul_ps(y, x);
142
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p8);
143
+ y = _mm256_mul_ps(y, x);
144
+
145
+ y = _mm256_mul_ps(y, z);
146
+
147
+ tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q1);
148
+ y = _mm256_add_ps(y, tmp);
149
+
150
+
151
+ tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
152
+ y = _mm256_sub_ps(y, tmp);
153
+
154
+ tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q2);
155
+ x = _mm256_add_ps(x, y);
156
+ x = _mm256_add_ps(x, tmp);
157
+ x = _mm256_or_ps(x, invalid_mask); // negative arg will be NAN
158
+ return x;
159
+ }
160
+
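A minimal usage sketch for the function above (assumes an AVX2 build and that this header is included; the wrapper name is mine):

    // out[i] becomes approximately logf(in[i]); inputs <= 0 produce NaN.
    inline void log8_example(const float* in, float* out) {
      v8sf x = _mm256_loadu_ps(in);        // load 8 contiguous floats
      _mm256_storeu_ps(out, log256_ps(x)); // vectorized natural log
    }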
161
+ _PS256_CONST(exp_hi, 88.3762626647949f);
162
+ _PS256_CONST(exp_lo, -88.3762626647949f);
163
+
164
+ _PS256_CONST(cephes_LOG2EF, 1.44269504088896341);
165
+ _PS256_CONST(cephes_exp_C1, 0.693359375);
166
+ _PS256_CONST(cephes_exp_C2, -2.12194440e-4);
167
+
168
+ _PS256_CONST(cephes_exp_p0, 1.9875691500E-4);
169
+ _PS256_CONST(cephes_exp_p1, 1.3981999507E-3);
170
+ _PS256_CONST(cephes_exp_p2, 8.3334519073E-3);
171
+ _PS256_CONST(cephes_exp_p3, 4.1665795894E-2);
172
+ _PS256_CONST(cephes_exp_p4, 1.6666665459E-1);
173
+ _PS256_CONST(cephes_exp_p5, 5.0000001201E-1);
174
+
175
+ inline v8sf exp256_ps(v8sf x) {
176
+ v8sf tmp = _mm256_setzero_ps(), fx;
177
+ v8si imm0;
178
+ v8sf one = *(v8sf*)_ps256_1;
179
+
180
+ x = _mm256_min_ps(x, *(v8sf*)_ps256_exp_hi);
181
+ x = _mm256_max_ps(x, *(v8sf*)_ps256_exp_lo);
182
+
183
+ /* express exp(x) as exp(g + n*log(2)) */
184
+ fx = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_LOG2EF);
185
+ fx = _mm256_add_ps(fx, *(v8sf*)_ps256_0p5);
186
+
187
+ /* how to perform a floorf with SSE: just below */
188
+ //imm0 = _mm256_cvttps_epi32(fx);
189
+ //tmp = _mm256_cvtepi32_ps(imm0);
190
+
191
+ tmp = _mm256_floor_ps(fx);
192
+
193
+ /* if greater, subtract 1 */
194
+ //v8sf mask = _mm256_cmpgt_ps(tmp, fx);
195
+ v8sf mask = _mm256_cmp_ps(tmp, fx, _CMP_GT_OS);
196
+ mask = _mm256_and_ps(mask, one);
197
+ fx = _mm256_sub_ps(tmp, mask);
198
+
199
+ tmp = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C1);
200
+ v8sf z = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C2);
201
+ x = _mm256_sub_ps(x, tmp);
202
+ x = _mm256_sub_ps(x, z);
203
+
204
+ z = _mm256_mul_ps(x,x);
205
+
206
+ v8sf y = *(v8sf*)_ps256_cephes_exp_p0;
207
+ y = _mm256_mul_ps(y, x);
208
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p1);
209
+ y = _mm256_mul_ps(y, x);
210
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p2);
211
+ y = _mm256_mul_ps(y, x);
212
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p3);
213
+ y = _mm256_mul_ps(y, x);
214
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p4);
215
+ y = _mm256_mul_ps(y, x);
216
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p5);
217
+ y = _mm256_mul_ps(y, z);
218
+ y = _mm256_add_ps(y, x);
219
+ y = _mm256_add_ps(y, one);
220
+
221
+ /* build 2^n */
222
+ imm0 = _mm256_cvttps_epi32(fx);
223
+ // another two AVX2 instructions
224
+ imm0 = _mm256_add_epi32(imm0, *(v8si*)_pi32_256_0x7f);
225
+ imm0 = _mm256_slli_epi32(imm0, 23);
226
+ v8sf pow2n = _mm256_castsi256_ps(imm0);
227
+ y = _mm256_mul_ps(y, pow2n);
228
+ return y;
229
+ }
230
+
231
+ _PS256_CONST(minus_cephes_DP1, -0.78515625);
232
+ _PS256_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
233
+ _PS256_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
234
+ _PS256_CONST(sincof_p0, -1.9515295891E-4);
235
+ _PS256_CONST(sincof_p1, 8.3321608736E-3);
236
+ _PS256_CONST(sincof_p2, -1.6666654611E-1);
237
+ _PS256_CONST(coscof_p0, 2.443315711809948E-005);
238
+ _PS256_CONST(coscof_p1, -1.388731625493765E-003);
239
+ _PS256_CONST(coscof_p2, 4.166664568298827E-002);
240
+ _PS256_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI
241
+
242
+
243
+ /* evaluation of 8 sines at once using AVX intrinsics
244
+
245
+ The code is the exact rewriting of the cephes sinf function.
246
+ Precision is excellent as long as x < 8192 (I did not bother to
247
+ take into account the special handling they have for greater values
248
+ -- it does not return garbage for arguments over 8192, though, but
249
+ the extra precision is missing).
250
+
251
+ Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
252
+ surprising but correct result.
253
+
254
+ */
255
+ inline v8sf sin256_ps(v8sf x) { // any x
256
+ v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, sign_bit, y;
257
+ v8si imm0, imm2;
258
+
259
+ sign_bit = x;
260
+ /* take the absolute value */
261
+ x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
262
+ /* extract the sign bit (upper one) */
263
+ sign_bit = _mm256_and_ps(sign_bit, *(v8sf*)_ps256_sign_mask);
264
+
265
+ /* scale by 4/Pi */
266
+ y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
267
+
268
+ /*
269
+ Here we start a series of integer operations, which are in the
270
+ realm of AVX2.
271
+ If we don't have AVX, let's perform them using SSE2 directives
272
+ */
273
+
274
+ /* store the integer part of y in mm0 */
275
+ imm2 = _mm256_cvttps_epi32(y);
276
+ /* j=(j+1) & (~1) (see the cephes sources) */
277
+ // another two AVX2 instruction
278
+ imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
279
+ imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
280
+ y = _mm256_cvtepi32_ps(imm2);
281
+
282
+ /* get the swap sign flag */
283
+ imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
284
+ imm0 = _mm256_slli_epi32(imm0, 29);
285
+ /* get the polynom selection mask
286
+ there is one polynom for 0 <= x <= Pi/4
287
+ and another one for Pi/4<x<=Pi/2
288
+
289
+ Both branches will be computed.
290
+ */
291
+ imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
292
+ imm2 = _mm256_cmpeq_epi32(imm2,*(v8si*)_pi32_256_0);
293
+
294
+ v8sf swap_sign_bit = _mm256_castsi256_ps(imm0);
295
+ v8sf poly_mask = _mm256_castsi256_ps(imm2);
296
+ sign_bit = _mm256_xor_ps(sign_bit, swap_sign_bit);
297
+
298
+ /* The magic pass: "Extended precision modular arithmetic"
299
+ x = ((x - y * DP1) - y * DP2) - y * DP3; */
300
+ xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
301
+ xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
302
+ xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
303
+ xmm1 = _mm256_mul_ps(y, xmm1);
304
+ xmm2 = _mm256_mul_ps(y, xmm2);
305
+ xmm3 = _mm256_mul_ps(y, xmm3);
306
+ x = _mm256_add_ps(x, xmm1);
307
+ x = _mm256_add_ps(x, xmm2);
308
+ x = _mm256_add_ps(x, xmm3);
309
+
310
+ /* Evaluate the first polynom (0 <= x <= Pi/4) */
311
+ y = *(v8sf*)_ps256_coscof_p0;
312
+ v8sf z = _mm256_mul_ps(x,x);
313
+
314
+ y = _mm256_mul_ps(y, z);
315
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
316
+ y = _mm256_mul_ps(y, z);
317
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
318
+ y = _mm256_mul_ps(y, z);
319
+ y = _mm256_mul_ps(y, z);
320
+ v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
321
+ y = _mm256_sub_ps(y, tmp);
322
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
323
+
324
+ /* Evaluate the second polynom (Pi/4 <= x <= 0) */
325
+
326
+ v8sf y2 = *(v8sf*)_ps256_sincof_p0;
327
+ y2 = _mm256_mul_ps(y2, z);
328
+ y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
329
+ y2 = _mm256_mul_ps(y2, z);
330
+ y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
331
+ y2 = _mm256_mul_ps(y2, z);
332
+ y2 = _mm256_mul_ps(y2, x);
333
+ y2 = _mm256_add_ps(y2, x);
334
+
335
+ /* select the correct result from the two polynoms */
336
+ xmm3 = poly_mask;
337
+ y2 = _mm256_and_ps(xmm3, y2); //, xmm3);
338
+ y = _mm256_andnot_ps(xmm3, y);
339
+ y = _mm256_add_ps(y,y2);
340
+ /* update the sign */
341
+ y = _mm256_xor_ps(y, sign_bit);
342
+
343
+ return y;
344
+ }
345
+
346
+ /* almost the same as sin_ps */
347
+ inline v8sf cos256_ps(v8sf x) { // any x
348
+ v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, y;
349
+ v8si imm0, imm2;
350
+
351
+ /* take the absolute value */
352
+ x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
353
+
354
+ /* scale by 4/Pi */
355
+ y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
356
+
357
+ /* store the integer part of y in mm0 */
358
+ imm2 = _mm256_cvttps_epi32(y);
359
+ /* j=(j+1) & (~1) (see the cephes sources) */
360
+ imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
361
+ imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
362
+ y = _mm256_cvtepi32_ps(imm2);
363
+ imm2 = _mm256_sub_epi32(imm2, *(v8si*)_pi32_256_2);
364
+
365
+ /* get the swap sign flag */
366
+ imm0 = _mm256_andnot_si256(imm2, *(v8si*)_pi32_256_4);
367
+ imm0 = _mm256_slli_epi32(imm0, 29);
368
+ /* get the polynom selection mask */
369
+ imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
370
+ imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
371
+
372
+ v8sf sign_bit = _mm256_castsi256_ps(imm0);
373
+ v8sf poly_mask = _mm256_castsi256_ps(imm2);
374
+
375
+ /* The magic pass: "Extended precision modular arithmetic"
376
+ x = ((x - y * DP1) - y * DP2) - y * DP3; */
377
+ xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
378
+ xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
379
+ xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
380
+ xmm1 = _mm256_mul_ps(y, xmm1);
381
+ xmm2 = _mm256_mul_ps(y, xmm2);
382
+ xmm3 = _mm256_mul_ps(y, xmm3);
383
+ x = _mm256_add_ps(x, xmm1);
384
+ x = _mm256_add_ps(x, xmm2);
385
+ x = _mm256_add_ps(x, xmm3);
386
+
387
+ /* Evaluate the first polynom (0 <= x <= Pi/4) */
388
+ y = *(v8sf*)_ps256_coscof_p0;
389
+ v8sf z = _mm256_mul_ps(x,x);
390
+
391
+ y = _mm256_mul_ps(y, z);
392
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
393
+ y = _mm256_mul_ps(y, z);
394
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
395
+ y = _mm256_mul_ps(y, z);
396
+ y = _mm256_mul_ps(y, z);
397
+ v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
398
+ y = _mm256_sub_ps(y, tmp);
399
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
400
+
401
+ /* Evaluate the second polynom (Pi/4 <= x <= 0) */
402
+
403
+ v8sf y2 = *(v8sf*)_ps256_sincof_p0;
404
+ y2 = _mm256_mul_ps(y2, z);
405
+ y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
406
+ y2 = _mm256_mul_ps(y2, z);
407
+ y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
408
+ y2 = _mm256_mul_ps(y2, z);
409
+ y2 = _mm256_mul_ps(y2, x);
410
+ y2 = _mm256_add_ps(y2, x);
411
+
412
+ /* select the correct result from the two polynoms */
413
+ xmm3 = poly_mask;
414
+ y2 = _mm256_and_ps(xmm3, y2); //, xmm3);
415
+ y = _mm256_andnot_ps(xmm3, y);
416
+ y = _mm256_add_ps(y,y2);
417
+ /* update the sign */
418
+ y = _mm256_xor_ps(y, sign_bit);
419
+
420
+ return y;
421
+ }
422
+
423
+ /* since sin256_ps and cos256_ps are almost identical, sincos256_ps could replace both of them..
424
+ it is almost as fast, and gives you a free cosine with your sine */
425
+ inline void sincos256_ps(v8sf x, v8sf *s, v8sf *c) {
426
+
427
+ v8sf xmm1, xmm2, xmm3 = _mm256_setzero_ps(), sign_bit_sin, y;
428
+ v8si imm0, imm2, imm4;
429
+
430
+ sign_bit_sin = x;
431
+ /* take the absolute value */
432
+ x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
433
+ /* extract the sign bit (upper one) */
434
+ sign_bit_sin = _mm256_and_ps(sign_bit_sin, *(v8sf*)_ps256_sign_mask);
435
+
436
+ /* scale by 4/Pi */
437
+ y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
438
+
439
+ /* store the integer part of y in imm2 */
440
+ imm2 = _mm256_cvttps_epi32(y);
441
+
442
+ /* j=(j+1) & (~1) (see the cephes sources) */
443
+ imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
444
+ imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
445
+
446
+ y = _mm256_cvtepi32_ps(imm2);
447
+ imm4 = imm2;
448
+
449
+ /* get the swap sign flag for the sine */
450
+ imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
451
+ imm0 = _mm256_slli_epi32(imm0, 29);
452
+ //v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);
453
+
454
+ /* get the polynom selection mask for the sine*/
455
+ imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
456
+ imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
457
+ //v8sf poly_mask = _mm256_castsi256_ps(imm2);
458
+
459
+ v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);
460
+ v8sf poly_mask = _mm256_castsi256_ps(imm2);
461
+
462
+ /* The magic pass: "Extended precision modular arithmetic"
463
+ x = ((x - y * DP1) - y * DP2) - y * DP3; */
464
+ xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
465
+ xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
466
+ xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
467
+ xmm1 = _mm256_mul_ps(y, xmm1);
468
+ xmm2 = _mm256_mul_ps(y, xmm2);
469
+ xmm3 = _mm256_mul_ps(y, xmm3);
470
+ x = _mm256_add_ps(x, xmm1);
471
+ x = _mm256_add_ps(x, xmm2);
472
+ x = _mm256_add_ps(x, xmm3);
473
+
474
+ imm4 = _mm256_sub_epi32(imm4, *(v8si*)_pi32_256_2);
475
+ imm4 = _mm256_andnot_si256(imm4, *(v8si*)_pi32_256_4);
476
+ imm4 = _mm256_slli_epi32(imm4, 29);
477
+
478
+ v8sf sign_bit_cos = _mm256_castsi256_ps(imm4);
479
+
480
+ sign_bit_sin = _mm256_xor_ps(sign_bit_sin, swap_sign_bit_sin);
481
+
482
+ /* Evaluate the first polynom (0 <= x <= Pi/4) */
483
+ v8sf z = _mm256_mul_ps(x,x);
484
+ y = *(v8sf*)_ps256_coscof_p0;
485
+
486
+ y = _mm256_mul_ps(y, z);
487
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
488
+ y = _mm256_mul_ps(y, z);
489
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
490
+ y = _mm256_mul_ps(y, z);
491
+ y = _mm256_mul_ps(y, z);
492
+ v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
493
+ y = _mm256_sub_ps(y, tmp);
494
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
495
+
496
+ /* Evaluate the second polynom (Pi/4 <= x <= 0) */
497
+
498
+ v8sf y2 = *(v8sf*)_ps256_sincof_p0;
499
+ y2 = _mm256_mul_ps(y2, z);
500
+ y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
501
+ y2 = _mm256_mul_ps(y2, z);
502
+ y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
503
+ y2 = _mm256_mul_ps(y2, z);
504
+ y2 = _mm256_mul_ps(y2, x);
505
+ y2 = _mm256_add_ps(y2, x);
506
+
507
+ /* select the correct result from the two polynoms */
508
+ xmm3 = poly_mask;
509
+ v8sf ysin2 = _mm256_and_ps(xmm3, y2);
510
+ v8sf ysin1 = _mm256_andnot_ps(xmm3, y);
511
+ y2 = _mm256_sub_ps(y2,ysin2);
512
+ y = _mm256_sub_ps(y, ysin1);
513
+
514
+ xmm1 = _mm256_add_ps(ysin1,ysin2);
515
+ xmm2 = _mm256_add_ps(y,y2);
516
+
517
+ /* update the sign */
518
+ *s = _mm256_xor_ps(xmm1, sign_bit_sin);
519
+ *c = _mm256_xor_ps(xmm2, sign_bit_cos);
520
+ }
521
+
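A usage sketch for sincos256_ps (assumes an AVX2 build and this header; the wrapper name is mine), showing how one call yields both the sine and the "free" cosine:

    inline void sincos8_example(const float* in, float* sin_out, float* cos_out) {
      v8sf s, c;
      sincos256_ps(_mm256_loadu_ps(in), &s, &c);
      _mm256_storeu_ps(sin_out, s);
      _mm256_storeu_ps(cos_out, c);
    }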
522
+ #endif // CPU_CAPABILITY_AVX2
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/moments_utils.h ADDED
@@ -0,0 +1,207 @@
1
+ #pragma once
2
+
3
+ #include <array>
4
+ #include <cstring>
5
+ #include <numeric>
6
+ #include <utility>
7
+ #include <vector>
8
+
9
+ #include <ATen/Parallel.h>
10
+ #include <ATen/OpMathType.h>
11
+ #include <ATen/cpu/vec/vec.h>
12
+ #include <ATen/native/cpu/utils.h>
13
+ #include <c10/util/SmallVector.h>
14
+ #include <c10/util/irange.h>
15
+
16
+ namespace at {
17
+ namespace native {
18
+ inline namespace CPU_CAPABILITY {
19
+
20
+ template<typename T> using opmath_t = at::opmath_type<T>;
21
+
22
+ constexpr int64_t kChunkSize = 16;
23
+
24
+ template <typename T>
25
+ void AddMoments(
26
+ int64_t m0_add,
27
+ const T& m1_add,
28
+ const T& m2_add,
29
+ int64_t& m0,
30
+ T& m1,
31
+ T& m2) {
32
+ const int64_t n = m0 + m0_add;
33
+ const T c = n == 0 ? static_cast<T>(0) : static_cast<T>(m0_add) / static_cast<T>(n);
34
+ const T delta = m1_add - m1;
35
+ m1 += c * delta;
36
+ m2 += m2_add + delta * delta * c * static_cast<T>(m0);
37
+ m0 = n;
38
+ }
39
+
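A standalone numeric check (a sketch of my own that mirrors the merge formula in AddMoments above): combining the moments of {1, 2} with the moments of {3, 4, 5} must give the moments of {1, 2, 3, 4, 5}.

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    int main() {
      int64_t m0 = 2;     double m1 = 1.5, m2 = 0.5;         // count, mean, M2 of {1, 2}
      int64_t m0_add = 3; double m1_add = 4.0, m2_add = 2.0;  // count, mean, M2 of {3, 4, 5}
      const int64_t n = m0 + m0_add;
      const double c = double(m0_add) / double(n);
      const double delta = m1_add - m1;
      m1 += c * delta;
      m2 += m2_add + delta * delta * c * double(m0);
      m0 = n;
      assert(m0 == 5);
      assert(std::abs(m1 - 3.0) < 1e-12);   // mean of 1..5
      assert(std::abs(m2 - 10.0) < 1e-12);  // sum of squared deviations of 1..5
      return 0;
    }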
40
+ template <typename T>
41
+ C10_ALWAYS_INLINE void AddMomentsVec(
42
+ int64_t m0_add,
43
+ const vec::Vectorized<T>& m1_add,
44
+ const vec::Vectorized<T>& m2_add,
45
+ int64_t& m0,
46
+ vec::Vectorized<T>& m1,
47
+ vec::Vectorized<T>& m2) {
48
+ using Vec = vec::Vectorized<T>;
49
+ const int64_t n = m0 + m0_add;
50
+ const T c = n == 0 ? static_cast<T>(0) : static_cast<T>(m0_add) / static_cast<T>(n);
51
+ const Vec c_vec(c);
52
+ const Vec delta = m1_add - m1;
53
+ m1 += c_vec * delta;
54
+ m2 += m2_add + delta * delta * c_vec * Vec(static_cast<T>(m0));
55
+ m0 = n;
56
+ }
57
+
58
+ template <typename T>
59
+ inline typename std::enable_if<std::is_same<T, opmath_t<T>>::value, void>::type
60
+ UpdateMomentsVec(
61
+ int64_t m0,
62
+ const T* X_ptr,
63
+ const std::array<vec::Vectorized<opmath_t<T>>, kChunkSize>& c_vecs,
64
+ int64_t& m0_stk0,
65
+ vec::Vectorized<opmath_t<T>>& m1_stk0,
66
+ vec::Vectorized<opmath_t<T>>& m2_stk0) {
67
+ using Vec = vec::Vectorized<opmath_t<T>>;
68
+ Vec m1_vec(0);
69
+ Vec m2_vec(0);
70
+ for (const auto j : c10::irange(m0)) {
71
+ const Vec x_vec = Vec::loadu(X_ptr + j * Vec::size());
72
+ const Vec delta_vec = x_vec - m1_vec;
73
+ m1_vec += delta_vec * c_vecs[j];
74
+ m2_vec += delta_vec * (x_vec - m1_vec);
75
+ }
76
+ AddMomentsVec(m0, m1_vec, m2_vec, m0_stk0, m1_stk0, m2_stk0);
77
+ }
78
+
79
+ // each bfloat16 vector will be converted to two float vectors,
80
+ // and accumulated successively on m1_stk0/m2_stk0.
81
+ template <typename T>
82
+ inline typename std::enable_if<!std::is_same<T, at::opmath_type<T>>::value, void>::type
83
+ UpdateMomentsVec(
84
+ int64_t m0,
85
+ const T* X_ptr,
86
+ const std::array<vec::Vectorized<at::opmath_type<T>>, kChunkSize>& c_vecs,
87
+ int64_t& m0_stk0,
88
+ vec::Vectorized<at::opmath_type<T>>& m1_stk0,
89
+ vec::Vectorized<at::opmath_type<T>>& m2_stk0) {
90
+ using Vec = vec::Vectorized<T>;
91
+ using fVec = vec::Vectorized<at::opmath_type<T>>;
92
+ fVec m1_fvec0(0), m1_fvec1(0);
93
+ fVec m2_fvec0(0), m2_fvec1(0);
94
+ for (const auto j : c10::irange(m0)) {
95
+ const Vec x_bvec = Vec::loadu(X_ptr + j * Vec::size());
96
+ fVec x_fvec0, x_fvec1;
97
+ std::tie(x_fvec0, x_fvec1) = convert_to_float<T>(x_bvec);
98
+ const fVec delta_fvec0 = x_fvec0 - m1_fvec0;
99
+ const fVec delta_fvec1 = x_fvec1 - m1_fvec1;
100
+ m1_fvec0 += delta_fvec0 * c_vecs[j];
101
+ m1_fvec1 += delta_fvec1 * c_vecs[j];
102
+ m2_fvec0 += delta_fvec0 * (x_fvec0 - m1_fvec0);
103
+ m2_fvec1 += delta_fvec1 * (x_fvec1 - m1_fvec1);
104
+ }
105
+ AddMomentsVec(m0, m1_fvec0, m2_fvec0, m0_stk0, m1_stk0, m2_stk0);
106
+ AddMomentsVec(m0, m1_fvec1, m2_fvec1, m0_stk0, m1_stk0, m2_stk0);
107
+ }
108
+
109
+ // Compute rowwise moments with the Welford algorithm and cascade summation to improve
110
+ // numerical stability.
111
+ // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
112
+ // https://en.wikipedia.org/wiki/Pairwise_summation
113
+ template <typename T, int64_t kMaxDepth>
114
+ std::pair<opmath_t<T>, opmath_t<T>> RowwiseMomentsImpl(const T* X, int64_t N, int64_t ddof = 0) {
115
+ using math_t = opmath_t<T>;
116
+
117
+ constexpr int64_t kVecSize = vec::Vectorized<T>::size();
118
+ constexpr int64_t kAccVecSize = vec::Vectorized<math_t>::size();
119
+ const int64_t n = N / kVecSize;
120
+ const int64_t m = divup(n, kChunkSize);
121
+ const int64_t depth = utils::CeilLog2(m);
122
+
123
+ using Vec = vec::Vectorized<math_t>;
124
+ const Vec kZeroVec(math_t(0));
125
+ c10::SmallVector<int64_t, kMaxDepth> m0_stk(depth, 0);
126
+ c10::SmallVector<Vec, kMaxDepth> m1_stk(depth, kZeroVec);
127
+ c10::SmallVector<Vec, kMaxDepth> m2_stk(depth, kZeroVec);
128
+
129
+ for (const auto i : c10::irange(m)) {
130
+ const T* X_ptr = X + i * kChunkSize * kVecSize;
131
+ const int64_t m0 = std::min(kChunkSize, n - i * kChunkSize);
132
+ static std::array<Vec, kChunkSize> c_vecs = ([]() {
133
+ std::array<Vec, kChunkSize> result;
134
+ for (const auto i : c10::irange(kChunkSize)) {
135
+ result[i] = Vec(math_t(1) / static_cast<math_t>(i + 1));
136
+ }
137
+ return result;
138
+ })();
139
+ UpdateMomentsVec(m0, X_ptr, c_vecs, m0_stk[0], m1_stk[0], m2_stk[0]);
140
+
141
+ int64_t mask = i + 1;
142
+ for (int64_t j = 1; j < depth && (mask & 1) == 0; ++j) {
143
+ AddMomentsVec(
144
+ m0_stk[j - 1],
145
+ m1_stk[j - 1],
146
+ m2_stk[j - 1],
147
+ m0_stk[j],
148
+ m1_stk[j],
149
+ m2_stk[j]);
150
+ m0_stk[j - 1] = 0;
151
+ m1_stk[j - 1] = kZeroVec;
152
+ m2_stk[j - 1] = kZeroVec;
153
+ mask >>= 1;
154
+ }
155
+ }
156
+ for (const auto i : c10::irange(1, depth)) {
157
+ AddMomentsVec(
158
+ m0_stk[i], m1_stk[i], m2_stk[i], m0_stk[0], m1_stk[0], m2_stk[0]);
159
+ }
160
+
161
+ std::array<math_t, kAccVecSize> m1_arr{};
162
+ std::array<math_t, kAccVecSize> m2_arr{};
163
+ m1_stk[0].store(m1_arr.data());
164
+ m2_stk[0].store(m2_arr.data());
165
+
166
+ int64_t m0 = 0;
167
+ math_t m1 = 0;
168
+ math_t m2 = 0;
169
+ for (int64_t i = n * kVecSize; i < N; ++i) {
170
+ math_t x = static_cast<math_t>(X[i]);
171
+ const math_t delta = x - m1;
172
+ ++m0;
173
+ m1 += delta / static_cast<math_t>(m0);
174
+ m2 += delta * (x - m1);
175
+ }
176
+ // for BFloat16, each vector in m1_arr/m2_arr holds 2*n accumulated results
177
+ int64_t m0_add = n * kVecSize / kAccVecSize;
178
+ for (const auto i : c10::irange(kAccVecSize)) {
179
+ AddMoments(m0_add, m1_arr[i], m2_arr[i], m0, m1, m2);
180
+ }
181
+
182
+ return std::make_pair(m1, m2 / static_cast<math_t>(N - ddof));
183
+ }
184
+
185
+ template <typename T>
186
+ std::pair<opmath_t<T>, opmath_t<T>> RowwiseMoments(const T* X, int64_t N, int64_t ddof = 0) {
187
+ using Vec = vec::Vectorized<T>;
188
+ constexpr int64_t kVecSize = Vec::size();
189
+ const int64_t n = N / kVecSize;
190
+ const int64_t m = divup(n, kChunkSize);
191
+ const int64_t depth = utils::CeilLog2(m);
192
+ if (depth <= 4) {
193
+ return RowwiseMomentsImpl<T, 4>(X, N, ddof);
194
+ } else if (depth <= 8) {
195
+ return RowwiseMomentsImpl<T, 8>(X, N, ddof);
196
+ } else if (depth <= 16) {
197
+ return RowwiseMomentsImpl<T, 16>(X, N, ddof);
198
+ } else if (depth <= 32) {
199
+ return RowwiseMomentsImpl<T, 32>(X, N, ddof);
200
+ } else {
201
+ return RowwiseMomentsImpl<T, 64>(X, N, ddof);
202
+ }
203
+ }
204
+
205
+ } // namespace CPU_CAPABILITY
206
+ } // namespace native
207
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/utils.h ADDED
@@ -0,0 +1,184 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Parallel.h>
4
+ #include <ATen/cpu/vec/vec.h>
5
+ #include <c10/util/llvmMathExtras.h>
6
+
7
+ #ifdef USE_FBGEMM
8
+ #include <fbgemm/Fbgemm.h>
9
+ #endif
10
+
11
+ namespace at {
12
+ namespace native {
13
+
14
+ inline namespace CPU_CAPABILITY {
15
+
16
+ template <typename T>
17
+ inline T data_index_init(T offset) {
18
+ return offset;
19
+ }
20
+
21
+ template <typename T, typename... Args>
22
+ inline T data_index_init(T offset, T& x, const T& X, Args&&... args) {
23
+ offset = data_index_init(offset, std::forward<Args>(args)...);
24
+ x = offset % X;
25
+ return offset / X;
26
+ }
27
+
28
+ inline bool data_index_step() {
29
+ return true;
30
+ }
31
+
32
+ template <typename T, typename... Args>
33
+ inline bool data_index_step(T& x, const T& X, Args&&... args) {
34
+ if (data_index_step(std::forward<Args>(args)...)) {
35
+ x = ((x + 1) == X) ? 0 : (x + 1);
36
+ return x == 0;
37
+ }
38
+ return false;
39
+ }
40
+
41
+ // Helper struct for bfloat16 vectorization
42
+ // Useful when you need float as immediate dtype or accumulate dtype
43
+ using namespace vec;
44
+ struct Vec2 {
45
+ Vectorized<float> val0, val1;
46
+ Vec2(Vectorized<float> v0, Vectorized<float> v1) : val0(v0), val1(v1) {}
47
+ Vec2(float v) : val0(v), val1(v) {}
48
+ static Vec2 loadu(const BFloat16* ptr) {
49
+ Vectorized<float> v0, v1;
50
+ std::tie(v0, v1) = convert_bfloat16_float(Vectorized<BFloat16>::loadu(ptr));
51
+ return {v0, v1};
52
+ }
53
+ static Vec2 loadu(const float* ptr) {
54
+ return {Vectorized<float>::loadu(ptr), Vectorized<float>::loadu(ptr + Vectorized<float>::size())};
55
+ }
56
+ void store(BFloat16* ptr) const {
57
+ Vectorized<BFloat16> val = convert_float_bfloat16(val0, val1);
58
+ val.store(ptr);
59
+ }
60
+ void store(float* ptr) const {
61
+ val0.store(ptr);
62
+ val1.store(ptr + Vectorized<float>::size());
63
+ }
64
+ };
65
+ inline Vec2 operator+(const Vec2& a, const Vec2& b) { return {a.val0 + b.val0, a.val1 + b.val1}; }
66
+ inline Vec2 operator*(const Vec2& a, const Vec2& b) { return {a.val0 * b.val0, a.val1 * b.val1}; }
67
+ inline Vec2 operator-(const Vec2& a, const Vec2& b) { return {a.val0 - b.val0, a.val1 - b.val1}; }
68
+ inline Vec2 operator/(const Vec2& a, const Vec2& b) { return {a.val0 / b.val0, a.val1 / b.val1}; }
69
+ inline Vec2 maximum(const Vec2& a, const Vec2& b) { return {vec::maximum(a.val0, b.val0), vec::maximum(a.val1, b.val1)}; }
70
+ inline Vec2 minimum(const Vec2& a, const Vec2& b) { return {vec::minimum(a.val0, b.val0), vec::minimum(a.val1, b.val1)}; }
71
+
72
+ template <typename scalar_t> struct VectorizedType { using type = Vectorized<scalar_t>; };
73
+ template <> struct VectorizedType<BFloat16> { using type = Vec2; };
74
+ template <typename scalar_t> using VecType = typename VectorizedType<scalar_t>::type;
75
+
76
+ // Helper for mixed data type parameter Vec::load
77
+ inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const BFloat16* ptr) {
78
+ return convert_bfloat16_float(Vectorized<BFloat16>::loadu(ptr));
79
+ }
80
+
81
+ inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const Half* ptr) {
82
+ return convert_half_float(Vectorized<Half>::loadu(ptr));
83
+ }
84
+
85
+ inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const float* ptr) {
86
+ using Vec = Vectorized<float>;
87
+ return std::make_tuple(Vec::loadu(ptr), Vec::loadu(ptr + Vec::size()));
88
+ }
89
+
90
+ inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const BFloat16* ptr, int64_t count) {
91
+ return convert_bfloat16_float(Vectorized<BFloat16>::loadu(ptr, count));
92
+ }
93
+
94
+ inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const Half* ptr, int64_t count) {
95
+ return convert_half_float(Vectorized<Half>::loadu(ptr, count));
96
+ }
97
+
98
+ inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const float* ptr, int64_t count) {
99
+ using Vec = Vectorized<float>;
100
+ if (count > Vec::size()) {
101
+ return std::make_tuple(Vec::loadu(ptr), Vec::loadu(ptr + Vec::size(), count - Vec::size()));
102
+ } else {
103
+ return std::make_tuple(Vec::loadu(ptr, count), Vec(0));
104
+ }
105
+ }
106
+
107
+ } // namespace
108
+
109
+ namespace utils {
110
+
111
+ template <typename T>
112
+ T CeilLog2(const T& x) {
113
+ if (x <= 2) {
114
+ return 1;
115
+ }
116
+ // Last set bit is floor(log2(x)), floor + 1 is ceil
117
+ // except when x is an exact powers of 2, so subtract 1 first
118
+ return static_cast<T>(llvm::findLastSet(static_cast<uint64_t>(x) - 1)) + 1;
119
+ }
120
+
121
+ // matrix transpose:
122
+ // src has shape of M by N, with leading dimension of ld_src
123
+ // dst has shape of N by M, with leading dimension of ld_dst
124
+ template <typename T>
125
+ inline void transpose(int64_t M, int64_t N, const T* src, int64_t ld_src, T* dst, int64_t ld_dst) {
126
+ for (int64_t j = 0; j < N; j++) {
127
+ for (int64_t i = 0; i < M; i++) {
128
+ dst[j * ld_dst + i] = src[i * ld_src + j];
129
+ }
130
+ }
131
+ }
132
+
133
+ #ifdef USE_FBGEMM
134
+ template <>
135
+ inline void transpose<float>(int64_t M, int64_t N, const float* src, int64_t ld_src, float* dst, int64_t ld_dst) {
136
+ TORCH_CHECK(fbgemm::fbgemmSupportedCPU(), "Your CPU does not support FBGEMM.");
137
+ fbgemm::transpose_simd<float>(M, N, src, ld_src, dst, ld_dst);
138
+ }
139
+ #endif
140
+
141
+ template <typename index_t, typename F>
142
+ inline void parallel_sparse_csr(
143
+ const TensorAccessor<index_t, 1>& crow_acc,
144
+ const int64_t M,
145
+ const int64_t nnz,
146
+ const F& f) {
147
+ TORCH_CHECK(crow_acc.size(0) == M + 1);
148
+
149
+ // directly parallel on `M` may lead to load imbalance,
150
+ // statically determine thread partition here to average payload
151
+ // for each thread.
152
+ int num_threads = at::get_num_threads();
153
+ std::vector<int64_t> thread_splits(num_threads + 1, M);
154
+
155
+ int64_t thread_averge_payload = std::max((int64_t)1, divup(nnz, num_threads));
156
+
157
+ thread_splits[0] = 0;
158
+ int64_t sum = 0;
159
+ int64_t t = 1;
160
+ for (const auto m : c10::irange(M)) {
161
+ int64_t row_start = crow_acc[m];
162
+ int64_t row_end = crow_acc[m + 1];
163
+ sum += row_end - row_start;
164
+ if (sum > t * thread_averge_payload) {
165
+ thread_splits[t] = m;
166
+ t++;
167
+ }
168
+ }
169
+ // need to restore the last index,
170
+ // due to rounding error when calculating `thread_averge_payload`.
171
+ thread_splits[num_threads] = M;
172
+
173
+ at::parallel_for(0, num_threads, 1, [&](int64_t cbegin, int64_t cend) {
174
+ int tid = at::get_thread_num();
175
+ int64_t begin = thread_splits[tid];
176
+ int64_t end = thread_splits[tid + 1];
177
+ f(begin, end);
178
+ });
179
+ }
180
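To make the balancing step concrete, here is a stand-alone sketch of the same split computation on a toy row-pointer array (plain arrays, two threads; not part of the header):

inline void parallel_sparse_csr_split_example() {
  // 4 rows holding 1, 1, 1 and 5 nonzeros (nnz = 8), split across 2 threads.
  const int64_t crow[5] = {0, 1, 2, 3, 8};
  const int64_t M = 4, num_threads = 2;
  const int64_t average = (crow[M] + num_threads - 1) / num_threads;  // divup(nnz, threads) = 4
  int64_t splits[3] = {0, M, M};
  int64_t sum = 0, t = 1;
  for (int64_t m = 0; m < M; ++m) {
    sum += crow[m + 1] - crow[m];
    if (sum > t * average) { splits[t++] = m; }  // triggers once, at m = 3
  }
  splits[num_threads] = M;
  // splits == {0, 3, 4}: thread 0 -> rows [0, 3) with 3 nonzeros, thread 1 -> row 3 with 5,
  // noticeably better balanced than handing each thread 2 rows (2 vs. 6 nonzeros).
  (void)splits;
}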
+
181
+ } // namespace utils
182
+
183
+ } // namespace native
184
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cpu/zmath.h ADDED
@@ -0,0 +1,251 @@
1
+ #pragma once
2
+
3
+ // Complex number math operations that act as no-ops for other dtypes.
4
+ #include <c10/util/complex.h>
5
+ #include <c10/util/math_compat.h>
6
+ #include <c10/util/MathConstants.h>
7
+ #include <ATen/NumericUtils.h>
8
+
9
+ namespace at { namespace native {
10
+ inline namespace CPU_CAPABILITY {
11
+
12
+ template <typename SCALAR_TYPE, typename VALUE_TYPE=SCALAR_TYPE>
13
+ inline VALUE_TYPE zabs (SCALAR_TYPE z) {
14
+ return z;
15
+ }
16
+
17
+ template<>
18
+ inline c10::complex<float> zabs <c10::complex<float>> (c10::complex<float> z) {
19
+ return c10::complex<float>(std::abs(z));
20
+ }
21
+
22
+ template<>
23
+ inline float zabs <c10::complex<float>, float> (c10::complex<float> z) {
24
+ return std::abs(z);
25
+ }
26
+
27
+ template<>
28
+ inline c10::complex<double> zabs <c10::complex<double>> (c10::complex<double> z) {
29
+ return c10::complex<double>(std::abs(z));
30
+ }
31
+
32
+ template<>
33
+ inline double zabs <c10::complex<double>, double> (c10::complex<double> z) {
34
+ return std::abs(z);
35
+ }
36
+
37
+ // This overload corresponds to non-complex dtypes.
38
+ // The function is consistent with its NumPy equivalent
39
+ // for non-complex dtypes where `pi` is returned for
40
+ // negative real numbers and `0` is returned for 0 or positive
41
+ // real numbers.
42
+ // Note: `nan` is propagated.
43
+ template <typename SCALAR_TYPE, typename VALUE_TYPE=SCALAR_TYPE>
44
+ inline VALUE_TYPE angle_impl (SCALAR_TYPE z) {
45
+ if (at::_isnan(z)) {
46
+ return z;
47
+ }
48
+ return z < 0 ? c10::pi<double> : 0;
49
+ }
50
+
51
+ template<>
52
+ inline c10::complex<float> angle_impl <c10::complex<float>> (c10::complex<float> z) {
53
+ return c10::complex<float>(std::arg(z), 0.0);
54
+ }
55
+
56
+ template<>
57
+ inline float angle_impl <c10::complex<float>, float> (c10::complex<float> z) {
58
+ return std::arg(z);
59
+ }
60
+
61
+ template<>
62
+ inline c10::complex<double> angle_impl <c10::complex<double>> (c10::complex<double> z) {
63
+ return c10::complex<double>(std::arg(z), 0.0);
64
+ }
65
+
66
+ template<>
67
+ inline double angle_impl <c10::complex<double>, double> (c10::complex<double> z) {
68
+ return std::arg(z);
69
+ }
70
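A few concrete values for the overloads above, written as a sketch that relies only on what this header already pulls in; NaN inputs simply propagate unchanged.

inline bool angle_examples() {
  const double arg_i = angle_impl<c10::complex<double>, double>(c10::complex<double>(0.0, 1.0));
  return angle_impl(3.0) == 0.0                // non-negative real -> 0
      && angle_impl(-3.0) == c10::pi<double>   // negative real -> pi
      && (arg_i > 1.570 && arg_i < 1.572);     // complex input uses std::arg, so arg(i) == pi / 2
}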
+
71
+ template <typename SCALAR_TYPE, typename VALUE_TYPE=SCALAR_TYPE>
72
+ constexpr VALUE_TYPE real_impl (SCALAR_TYPE z) {
73
+ return z; //No-Op
74
+ }
75
+
76
+ template<>
77
+ constexpr c10::complex<float> real_impl <c10::complex<float>> (c10::complex<float> z) {
78
+ return c10::complex<float>(z.real(), 0.0);
79
+ }
80
+
81
+ template<>
82
+ constexpr float real_impl <c10::complex<float>, float> (c10::complex<float> z) {
83
+ return z.real();
84
+ }
85
+
86
+ template<>
87
+ constexpr c10::complex<double> real_impl <c10::complex<double>> (c10::complex<double> z) {
88
+ return c10::complex<double>(z.real(), 0.0);
89
+ }
90
+
91
+ template<>
92
+ constexpr double real_impl <c10::complex<double>, double> (c10::complex<double> z) {
93
+ return z.real();
94
+ }
95
+
96
+ template <typename SCALAR_TYPE, typename VALUE_TYPE=SCALAR_TYPE>
97
+ constexpr VALUE_TYPE imag_impl (SCALAR_TYPE /*z*/) {
98
+ return 0;
99
+ }
100
+
101
+ template<>
102
+ constexpr c10::complex<float> imag_impl <c10::complex<float>> (c10::complex<float> z) {
103
+ return c10::complex<float>(z.imag(), 0.0);
104
+ }
105
+
106
+ template<>
107
+ constexpr float imag_impl <c10::complex<float>, float> (c10::complex<float> z) {
108
+ return z.imag();
109
+ }
110
+
111
+ template<>
112
+ constexpr c10::complex<double> imag_impl <c10::complex<double>> (c10::complex<double> z) {
113
+ return c10::complex<double>(z.imag(), 0.0);
114
+ }
115
+
116
+ template<>
117
+ constexpr double imag_impl <c10::complex<double>, double> (c10::complex<double> z) {
118
+ return z.imag();
119
+ }
120
+
121
+ template <typename TYPE>
122
+ inline TYPE conj_impl (TYPE z) {
123
+ return z; //No-Op
124
+ }
125
+
126
+ template<>
127
+ inline c10::complex<at::Half> conj_impl <c10::complex<at::Half>> (c10::complex<at::Half> z) {
128
+ return c10::complex<at::Half>{z.real(), -z.imag()};
129
+ }
130
+
131
+ template<>
132
+ inline c10::complex<float> conj_impl <c10::complex<float>> (c10::complex<float> z) {
133
+ return c10::complex<float>(z.real(), -z.imag());
134
+ }
135
+
136
+ template<>
137
+ inline c10::complex<double> conj_impl <c10::complex<double>> (c10::complex<double> z) {
138
+ return c10::complex<double>(z.real(), -z.imag());
139
+ }
140
+
141
+ template <typename TYPE>
142
+ inline TYPE ceil_impl (TYPE z) {
143
+ return std::ceil(z);
144
+ }
145
+
146
+ template <>
147
+ inline c10::complex<float> ceil_impl (c10::complex<float> z) {
148
+ return c10::complex<float>(std::ceil(z.real()), std::ceil(z.imag()));
149
+ }
150
+
151
+ template <>
152
+ inline c10::complex<double> ceil_impl (c10::complex<double> z) {
153
+ return c10::complex<double>(std::ceil(z.real()), std::ceil(z.imag()));
154
+ }
155
+
156
+ template<typename T>
157
+ inline c10::complex<T> sgn_impl (c10::complex<T> z) {
158
+ if (z == c10::complex<T>(0, 0)) {
159
+ return c10::complex<T>(0, 0);
160
+ } else {
161
+ return z / zabs(z);
162
+ }
163
+ }
164
+
165
+ template <typename TYPE>
166
+ inline TYPE floor_impl (TYPE z) {
167
+ return std::floor(z);
168
+ }
169
+
170
+ template <>
171
+ inline c10::complex<float> floor_impl (c10::complex<float> z) {
172
+ return c10::complex<float>(std::floor(z.real()), std::floor(z.imag()));
173
+ }
174
+
175
+ template <>
176
+ inline c10::complex<double> floor_impl (c10::complex<double> z) {
177
+ return c10::complex<double>(std::floor(z.real()), std::floor(z.imag()));
178
+ }
179
+
180
+ template <typename TYPE>
181
+ inline TYPE round_impl (TYPE z) {
182
+ return std::nearbyint(z);
183
+ }
184
+
185
+ template <>
186
+ inline c10::complex<float> round_impl (c10::complex<float> z) {
187
+ return c10::complex<float>(std::nearbyint(z.real()), std::nearbyint(z.imag()));
188
+ }
189
+
190
+ template <>
191
+ inline c10::complex<double> round_impl (c10::complex<double> z) {
192
+ return c10::complex<double>(std::nearbyint(z.real()), std::nearbyint(z.imag()));
193
+ }
194
+
195
+ template <typename TYPE>
196
+ inline TYPE trunc_impl (TYPE z) {
197
+ return std::trunc(z);
198
+ }
199
+
200
+ template <>
201
+ inline c10::complex<float> trunc_impl (c10::complex<float> z) {
202
+ return c10::complex<float>(std::trunc(z.real()), std::trunc(z.imag()));
203
+ }
204
+
205
+ template <>
206
+ inline c10::complex<double> trunc_impl (c10::complex<double> z) {
207
+ return c10::complex<double>(std::trunc(z.real()), std::trunc(z.imag()));
208
+ }
209
+
210
+ template <typename TYPE, std::enable_if_t<!c10::is_complex<TYPE>::value, int> = 0>
211
+ inline TYPE max_impl (TYPE a, TYPE b) {
212
+ if (_isnan<TYPE>(a) || _isnan<TYPE>(b)) {
213
+ return std::numeric_limits<TYPE>::quiet_NaN();
214
+ } else {
215
+ return std::max(a, b);
216
+ }
217
+ }
218
+
219
+ template <typename TYPE, std::enable_if_t<c10::is_complex<TYPE>::value, int> = 0>
220
+ inline TYPE max_impl (TYPE a, TYPE b) {
221
+ if (_isnan<TYPE>(a)) {
222
+ return a;
223
+ } else if (_isnan<TYPE>(b)) {
224
+ return b;
225
+ } else {
226
+ return std::abs(a) > std::abs(b) ? a : b;
227
+ }
228
+ }
229
+
230
+ template <typename TYPE, std::enable_if_t<!c10::is_complex<TYPE>::value, int> = 0>
231
+ inline TYPE min_impl (TYPE a, TYPE b) {
232
+ if (_isnan<TYPE>(a) || _isnan<TYPE>(b)) {
233
+ return std::numeric_limits<TYPE>::quiet_NaN();
234
+ } else {
235
+ return std::min(a, b);
236
+ }
237
+ }
238
+
239
+ template <typename TYPE, std::enable_if_t<c10::is_complex<TYPE>::value, int> = 0>
240
+ inline TYPE min_impl (TYPE a, TYPE b) {
241
+ if (_isnan<TYPE>(a)) {
242
+ return a;
243
+ } else if (_isnan<TYPE>(b)) {
244
+ return b;
245
+ } else {
246
+ return std::abs(a) < std::abs(b) ? a : b;
247
+ }
248
+ }
249
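Spelling out the complex behaviour above: the comparison is by magnitude, and a NaN in either operand is returned as-is. A small illustrative check (a sketch, not part of the header):

inline bool complex_minmax_examples() {
  using cd = c10::complex<double>;
  return max_impl(cd(3, 4), cd(1, 1)) == cd(3, 4)    // |3 + 4i| = 5 beats |1 + i|
      && min_impl(cd(3, 4), cd(1, 1)) == cd(1, 1)
      && min_impl(cd(0, -2), cd(1, 0)) == cd(1, 0);  // |-2i| = 2, |1| = 1
}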
+
250
+ } // end namespace
251
+ }} //end at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/BinaryInternal.h ADDED
@@ -0,0 +1,48 @@
1
+ // DON'T include this except from Binary*.cu files. It should not leak into
2
+ // headers.
3
+ #pragma once
4
+ #define TORCH_ASSERT_NO_OPERATORS
5
+ #include <ATen/AccumulateType.h>
6
+ #include <ATen/Dispatch.h>
7
+ #include <ATen/native/BinaryOps.h>
8
+ #include <ATen/native/DispatchStub.h>
9
+ #include <ATen/native/TensorIterator.h>
10
+ #include <c10/cuda/CUDAGuard.h>
11
+ #include <c10/cuda/CUDAMathCompat.h>
12
+ #include <c10/util/TypeSafeSignMath.h>
13
+ #include <ATen/native/cuda/JitLoops.cuh>
14
+ #include <ATen/native/cuda/Loops.cuh>
15
+
16
+ #include <type_traits>
17
+
18
+ namespace at {
19
+ namespace native {
20
+ namespace binary_internal {
21
+
22
+ template <typename scalar_t>
23
+ struct DivFunctor {
24
+ __device__ scalar_t operator()(scalar_t a, scalar_t b) const {
25
+ return a / b;
26
+ }
27
+ };
28
+
29
+ template <typename T>
30
+ struct MulFunctor {
31
+ __device__ T operator()(T a, T b) const {
32
+ return a * b;
33
+ }
34
+ };
35
+
36
+ // Workaround for the error: '*' in boolean context, suggest '&&' instead
37
+ // [-Werror=int-in-bool-context]
38
+ template <>
39
+ struct MulFunctor<bool> {
40
+ __device__ bool operator()(bool a, bool b) const {
41
+ return a && b;
42
+ }
43
+ };
44
+ void div_true_kernel_cuda(TensorIteratorBase& iter);
45
+ void div_trunc_kernel_cuda(TensorIteratorBase& iter);
46
+ } // namespace binary_internal
47
+ } // namespace native
48
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CUDAJitLoops.cuh ADDED
@@ -0,0 +1,297 @@
1
+ #pragma once
2
+ #include <ATen/jit_macros.h>
3
+
4
+ // Jiterator functions are guarded behind this macro
5
+ #if AT_USE_JITERATOR()
6
+
7
+ #include <ATen/OpMathType.h>
8
+ #include <ATen/TensorIterator.h>
9
+ #include <ATen/core/Array.h>
10
+ #include <ATen/cuda/CUDAContext.h>
11
+ #include <ATen/cuda/detail/OffsetCalculator.cuh>
12
+ #include <ATen/native/cuda/jit_utils.h>
13
+ #include <ATen/native/cuda/MemoryAccess.cuh>
14
+ #include <ATen/native/cuda/thread_constants.h>
15
+
16
+ #include <ATen/native/cuda/Loops.cuh>
17
+
18
+ #include <c10/macros/Macros.h>
19
+ #include <c10/core/ScalarType.h>
20
+ #include <c10/util/SmallBuffer.h>
21
+ #include <c10/util/C++17.h>
22
+
23
+ #include <initializer_list>
24
+ #include <type_traits>
25
+ #include <tuple>
26
+ #include <mutex>
27
+
28
+ namespace at {
29
+ namespace native {
30
+
31
+ template <typename Tuple, std::size_t... I>
32
+ constexpr auto tuple_to_array_helper(Tuple& t, std::index_sequence<I...> seq) {
33
+ constexpr auto size = seq.size();
34
+ (void)t; // warning : unused parameter when tuple is empty.
35
+ return std::array<void*, size>{static_cast<void*>(&std::get<I>(t))...};
36
+ }
37
+
38
+ // Helper function convert tuple to std::array<void*, N>
39
+ // for passing the arguments to CUDA Kernel
40
+ // NOTE: We capture tuple by reference,
41
+ // so the pointers in returned array are only valid
42
+ // till tuple is alive.
43
+ template <typename ...Args>
44
+ constexpr auto tuple_to_array(std::tuple<Args...>& extra_args) {
45
+ constexpr auto tuple_size = sizeof...(Args);
46
+ return tuple_to_array_helper(extra_args, std::make_index_sequence<tuple_size>{});
47
+ }
48
+
49
+ struct JittedVecKernelCache {
50
+ // Different kernels are compiled depending on what we're vectorizing up to (1, 2 or 4 elements)
51
+ at::cuda::jit::NvrtcFunction vec1;
52
+ at::cuda::jit::NvrtcFunction vec2;
53
+ at::cuda::jit::NvrtcFunction vec4;
54
+ };
55
+
56
+ struct JittedKernelVariantCache {
57
+ JittedVecKernelCache vec;
58
+ at::cuda::jit::NvrtcFunction noncontiguous;
59
+ at::cuda::jit::NvrtcFunction dynamic_contiguous;
60
+ at::cuda::jit::NvrtcFunction dynamic_noncontiguous;
61
+ };
62
+
63
+ inline c10::SmallBuffer<void*, 64> pack_kernel_args(
64
+ std::initializer_list<void*> args,
65
+ c10::ArrayRef<void*> extra_args) {
66
+ c10::SmallBuffer<void*, 64> ret(args.size() + extra_args.size());
67
+ std::copy(args.begin(), args.end(), ret.data());
68
+ std::copy(extra_args.begin(), extra_args.end(), ret.data() + args.size());
69
+ return ret;
70
+ }
71
+
72
+ template<typename array_t,
73
+ typename inp_calc_t,
74
+ typename out_calc_t,
75
+ typename loader_t,
76
+ typename storer_t>
77
+ void launch_jitted_unrolled_kernel(
78
+ std::mutex &jiterator_mutex,
79
+ at::cuda::jit::NvrtcFunction &fn_cache,
80
+ const at::cuda::jit::KernelDescriptor &desc,
81
+ int64_t N,
82
+ array_t data,
83
+ inp_calc_t ic,
84
+ out_calc_t oc,
85
+ loader_t l,
86
+ storer_t s,
87
+ bool contiguous,
88
+ at::cuda::jit::BinaryFuncVariant scalar_pos,
89
+ void* scalar_val,
90
+ c10::ArrayRef<void*> extra_args) {
91
+
92
+ TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
93
+ //casting result to int is always safe, intermediate is int64 and won't overflow
94
+ const uint32_t grid = (N + block_work_size() - 1) / block_work_size();
95
+
96
+ if (!fn_cache.function) {
97
+ const std::lock_guard<std::mutex> lock{jiterator_mutex};
98
+ if (!fn_cache.function) {
99
+ constexpr bool dynamic_casting = !std::is_same<decltype(l), memory::LoadWithoutCast>() ||
100
+ !std::is_same<decltype(s), memory::StoreWithoutCast>();
101
+ auto code = at::cuda::jit::generate_code(
102
+ desc, contiguous, dynamic_casting, scalar_pos);
103
+ fn_cache = at::cuda::jit::jit_pwise_function(code, desc.name);
104
+ }
105
+ }
106
+
107
+ auto args = pack_kernel_args({&N, &data, &ic, &oc, &l, &s, scalar_val}, extra_args);
108
+ at::cuda::jit::launch_jitted_pwise_function(fn_cache, args.data(), {grid, 1u, 1u},
109
+ {num_threads(), 1u, 1u});
110
+ }
111
+
112
+ template<int arity, typename array_t>
113
+ void launch_jitted_vectorized_kernel(
114
+ std::mutex &jiterator_mutex, JittedVecKernelCache &fn_cache,
115
+ const at::cuda::jit::KernelDescriptor &desc, int64_t N, array_t data,
116
+ at::cuda::jit::BinaryFuncVariant scalar_pos,
117
+ void *scalar_val, c10::ArrayRef<void*> extra_args) {
118
+ TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
119
+ // N is still int64_t for the computation, but it's always safe to cast result to int
120
+ const uint32_t grid = (N + block_work_size() - 1) / block_work_size();
121
+ const int vec_size = at::cuda::jit::can_vectorize_up_to(
122
+ desc, c10::ArrayRef<char*>(data.data, data.size()));
123
+
124
+ // Different kernels are compiled depending on what we're vectorizing up to (1, 2 or 4 elements)
125
+ // fn_ptr is set to the appropriate function based on the vec size and GPU used
126
+ at::cuda::jit::NvrtcFunction* fn_ptr;
127
+ if (vec_size == 4) {
128
+ fn_ptr = &fn_cache.vec4;
129
+ } else if (vec_size == 2) {
130
+ fn_ptr = &fn_cache.vec2;
131
+ } else if (vec_size ==1) {
132
+ fn_ptr = &fn_cache.vec1;
133
+ } else {
134
+ TORCH_INTERNAL_ASSERT(false, "unexpected vec_size for jitter vectorized kernel");
135
+ }
136
+
137
+ bool vectorized = vec_size > 1;
138
+
139
+ if (!fn_ptr->function) {
140
+ const std::lock_guard<std::mutex> lock{jiterator_mutex};
141
+ if (!fn_ptr->function) { // cache miss!
142
+
143
+ // Generates program
144
+ auto code = at::cuda::jit::generate_code(
145
+ desc, /*contiguous=*/true, /*dynamic_casting=*/false,
146
+ scalar_pos, vectorized, vec_size);
147
+ std::string kernel_name = vectorized ? desc.name + "_vectorized" + std::to_string(vec_size) : desc.name;
148
+
149
+ // Acquires the program
150
+ *fn_ptr = at::cuda::jit::jit_pwise_function(code, kernel_name);
151
+ }
152
+ }
153
+
154
+ if (vectorized) {
155
+ auto args = pack_kernel_args({&N, &data, scalar_val}, extra_args);
156
+ at::cuda::jit::launch_jitted_pwise_function(
157
+ *fn_ptr, args.data(), {grid, 1u, 1u}, {num_threads(), 1u, 1u});
158
+ } else {
159
+ // NVCC complains about unused variables l and s.
160
+ // These should be false positives in most cases, so we suppress the warnings.
161
+ #pragma nv_diagnostic push
162
+ #pragma nv_diag_suppress 177
163
+ auto ic = TrivialOffsetCalculator<arity>();
164
+ auto oc = TrivialOffsetCalculator<1>();
165
+ auto l = memory::LoadWithoutCast();
166
+ auto s = memory::StoreWithoutCast();
167
+
168
+ auto args = pack_kernel_args(
169
+ {&N, &data, &ic, &oc, &l, &s, scalar_val}, extra_args);
170
+ at::cuda::jit::launch_jitted_pwise_function(
171
+ *fn_ptr, args.data(), {grid, 1u, 1u}, {num_threads(), 1u, 1u});
172
+ #pragma nv_diagnostic pop
173
+ }
174
+ }
175
+
176
+ template <int arity>
177
+ void jitted_gpu_kernel_generic(
178
+ std::mutex &jiterator_mutex,
179
+ JittedKernelVariantCache &cache,
180
+ const at::cuda::jit::KernelDescriptor &desc,
181
+ at::cuda::jit::BinaryFuncVariant scalar_pos,
182
+ c10::ArrayRef<void*> extra_args,
183
+ TensorIteratorBase& iter,
184
+ const bool dynamic_casting,
185
+ void *scalar_val) {
186
+ TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing());
187
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == arity);
188
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
189
+
190
+ constexpr int ntensors = arity + 1;
191
+ at::detail::Array<char*, ntensors> data;
192
+ for (auto i : c10::irange(ntensors)) {
193
+ data[i] = (char*)iter.data_ptr(i);
194
+ }
195
+
196
+ int64_t numel = iter.numel();
197
+ bool contiguous = iter.is_contiguous();
198
+
199
+ // Decides which of 4 kernel types to launch
200
+ // Variations are:
201
+ // - Case 1: no dynamic casting and contiguous
202
+ // - Case 2: no dynamic casting and noncontiguous
203
+ // - Case 3: dynamic casting and contiguous
204
+ // - Case 4: dynamic casting and noncontiguous
205
+ // These cases align with the non-jitted CUDALoops.cuh cases in gpu_kernel_impl
206
+
207
+ if (!dynamic_casting) {
208
+ if (contiguous) {
209
+ // Case 1: no dynamic casting and contiguous
210
+ launch_jitted_vectorized_kernel<arity>(
211
+ jiterator_mutex, cache.vec, desc,
212
+ numel, data, scalar_pos, scalar_val, extra_args);
213
+ return;
214
+ }
215
+
216
+ // Case 2: no dynamic casting and noncontiguous
217
+ auto input_offset_calculator = make_input_offset_calculator<arity>(iter);
218
+ auto output_offset_calculator = make_output_offset_calculator(iter);
219
+ auto loader = memory::LoadWithoutCast();
220
+ auto storer = memory::StoreWithoutCast();
221
+ launch_jitted_unrolled_kernel(
222
+ jiterator_mutex, cache.noncontiguous, desc, numel, data,
223
+ input_offset_calculator, output_offset_calculator, loader,
224
+ storer, contiguous, scalar_pos, scalar_val, extra_args);
225
+ return;
226
+ }
227
+
228
+ // Cases 3 and 4 are handled below
229
+ // Both require construction of a storer (this asserts 1 output) and one or more loaders
230
+
231
+ // Creates store cast to output (the zeroth tensor in TensorIterator)
232
+ auto storer = memory::StoreWithCast<1>(iter);
233
+
234
+ // Creates load casts from inputs (note offset indexing into the iterators 1...n tensors)
235
+ auto loader = memory::LoadWithCast<arity>(iter);
236
+
237
+ if (contiguous) {
238
+ // Case 3: dynamic casting and contiguous
239
+ auto input_offset_calculator = TrivialOffsetCalculator<arity>();
240
+ auto output_offset_calculator = TrivialOffsetCalculator<1>();
241
+ launch_jitted_unrolled_kernel(
242
+ jiterator_mutex, cache.dynamic_contiguous, desc, numel, data, input_offset_calculator,
243
+ output_offset_calculator, loader, storer, contiguous, scalar_pos, scalar_val, extra_args);
244
+ return;
245
+ }
246
+
247
+ // Case 4: dynamic casting and noncontiguous
248
+ auto input_offset_calculator = make_input_offset_calculator<arity>(iter);
249
+ auto output_offset_calculator = make_output_offset_calculator(iter);
250
+ launch_jitted_unrolled_kernel(
251
+ jiterator_mutex, cache.dynamic_noncontiguous, desc, numel, data, input_offset_calculator,
252
+ output_offset_calculator, loader, storer, contiguous, scalar_pos, scalar_val, extra_args);
253
+ }
254
+
255
+ // NOTE: static to reduce chances of name collision.
256
+ template <
257
+ char const* name,
258
+ typename result_type,
259
+ typename f_inputs_type,
260
+ int arity,
261
+ at::cuda::jit::BinaryFuncVariant scalar_pos =
262
+ at::cuda::jit::BinaryFuncVariant::NoScalar,
263
+ typename... ExtraArgs>
264
+ static void jitted_gpu_kernel_impl(
265
+ TensorIteratorBase& iter,
266
+ const std::string &f,
267
+ const bool dynamic_casting,
268
+ at::opmath_type<f_inputs_type> scalar_val,
269
+ std::tuple<ExtraArgs...> extra_args) {
270
+
271
+ // TODO: Memory use can probably be optimized by re-using kernels across GPUs with
272
+ // the same compute capability
273
+ static std::mutex jiterator_mutex;
274
+ static std::vector<JittedKernelVariantCache> device_caches(c10::cuda::device_count());
275
+
276
+ constexpr int nInputs = arity;
277
+ constexpr int nOutputs = 1; // TODO: Support more than 1 output
278
+ static const auto desc = at::cuda::jit::make_kernel_descriptor<
279
+ result_type, f_inputs_type, ExtraArgs...>(name, f, nInputs, nOutputs);
280
+
281
+ auto &cache = device_caches[iter.device().index()];
282
+ auto extra_args_array = tuple_to_array(extra_args);
283
+ return jitted_gpu_kernel_generic<arity>(
284
+ jiterator_mutex,
285
+ cache,
286
+ desc,
287
+ scalar_pos,
288
+ extra_args_array,
289
+ iter,
290
+ dynamic_casting,
291
+ &scalar_val
292
+ );
293
+ }
294
+
295
+ }} // at::native
296
+
297
+ #endif // AT_USE_JITERATOR()
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CUDALoops.cuh ADDED
@@ -0,0 +1,267 @@
1
+ #pragma once
2
+
3
+ // This file provides two functions to help write GPU elementwise kernels:
4
+ //
5
+ // gpu_kernel(TensorIterator iter, <lambda>)
6
+ // gpu_kernel_with_scalars(TensorIterator iter, <lambda>)
7
+ //
8
+ // The gpu_kernel_with_scalars generates specializations that support a
9
+ // single scalar CPU argument, such as from `cuda_tensor + 5`. The CPU scalar
10
+ // is lifted to a kernel parameter instead of copying to device memory.
11
+ // This should be used in conjunction with TensorIterator::allow_cpu_scalars_,
12
+ // which is the default for TensorIterator::binary_op. Otherwise, all inputs
13
+ // and the output must be on the GPU.
14
+ //
15
+ // For example, to write a reciprocal kernel for GPU float Tensors:
16
+ //
17
+ // gpu_kernel(iter, []GPU_LAMBDA(float a) {
18
+ // return 1.0f / a;
19
+ // });
20
+ //
21
+ // To write a multiplication kernel for GPU float Tensors where one argument
22
+ // may be a CPU scalar:
23
+ //
24
+ // gpu_kernel_with_scalars(iter, []GPU_LAMBDA(float a, float b) {
25
+ // return a * b;
26
+ // });
27
+ //
28
+ // See BinaryOpsKernel.cu for the complete implementation
29
+ //
30
+
31
+ #include <type_traits>
32
+ #include <tuple>
33
+ #include <iostream>
34
+
35
+ #include <ATen/cuda/CUDAContext.h>
36
+ #include <ATen/core/Array.h>
37
+ #include <ATen/detail/FunctionTraits.h>
38
+ #include <ATen/native/TensorIterator.h>
39
+ #include <c10/macros/Macros.h>
40
+ #include <c10/core/DynamicCast.h>
41
+ #include <c10/core/ScalarType.h>
42
+ #include <c10/util/TypeCast.h>
43
+ #include <c10/util/C++17.h>
44
+
45
+
46
+ #ifdef __NVCC__
47
+ #define ASSERT_HOST_DEVICE_LAMBDA(type) \
48
+ static_assert(__nv_is_extended_host_device_lambda_closure_type(type), \
49
+ #type " must be a __host__ __device__ lambda")
50
+ #else
51
+ #define ASSERT_HOST_DEVICE_LAMBDA(type)
52
+ #endif
53
+
54
+
55
+ namespace at { namespace native {
56
+
57
+ template<int vec_size, typename func_t, typename array_t>
58
+ C10_LAUNCH_BOUNDS_1(num_threads())
59
+ __global__ void vectorized_elementwise_kernel(int N, func_t f, array_t data) {
60
+ using traits = function_traits<func_t>;
61
+ int remaining = N - block_work_size() * blockIdx.x;
62
+
63
+ if (remaining < block_work_size()) { // if this block handles the remainder, just do a naive unrolled loop
64
+ auto input_calc = TrivialOffsetCalculator<traits::arity>();
65
+ auto output_calc = TrivialOffsetCalculator<1>();
66
+ auto loader = memory::LoadWithoutCast();
67
+ auto storer = memory::StoreWithoutCast();
68
+ auto policy = memory::policies::unroll<array_t, decltype(input_calc), decltype(output_calc),
69
+ memory::LoadWithoutCast, memory::StoreWithoutCast>(
70
+ data, remaining, input_calc, output_calc, loader, storer);
71
+ elementwise_kernel_helper(f, policy);
72
+ } else { // if this block has a full `block_work_size` data to handle, use vectorized memory access
73
+ elementwise_kernel_helper(f, memory::policies::vectorized<vec_size, array_t>(data));
74
+ }
75
+ }
76
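As a worked example of the split above, assuming the usual defaults of 128 threads per block and 4 items per thread (so block_work_size() == 512); the numbers are illustrative only:

// N = 1300  ->  grid = (1300 + 511) / 512 = 3 blocks
//   blockIdx 0: remaining = 1300 - 0 * 512 = 1300  (>= 512, vectorized path)
//   blockIdx 1: remaining = 1300 - 1 * 512 =  788  (>= 512, vectorized path)
//   blockIdx 2: remaining = 1300 - 2 * 512 =  276  (< 512, unrolled remainder path)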
+
77
+ template<typename func_t, typename array_t, typename inp_calc_t, typename out_calc_t, typename loader_t, typename storer_t>
78
+ C10_LAUNCH_BOUNDS_1(num_threads())
79
+ __global__ void unrolled_elementwise_kernel(int N, func_t f, array_t data,
80
+ inp_calc_t ic, out_calc_t oc, loader_t l, storer_t s)
81
+ {
82
+ int remaining = N - block_work_size() * blockIdx.x;
83
+ auto policy = memory::policies::unroll<array_t, inp_calc_t, out_calc_t, loader_t, storer_t>(data, remaining, ic, oc, l, s);
84
+ elementwise_kernel_helper(f, policy);
85
+ }
86
+
87
+ // this function assumes a trivial 1d layout and no dynamic casting
88
+ template<typename func_t, typename array_t>
89
+ static inline void launch_vectorized_kernel(int64_t N, const func_t& f, array_t data) {
90
+ TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
91
+ using traits = function_traits<func_t>;
92
+ int64_t grid = (N + block_work_size() - 1) / block_work_size();
93
+ auto stream = at::cuda::getCurrentCUDAStream();
94
+ int vec_size = memory::can_vectorize_up_to<func_t>(data);
95
+
96
+ switch (vec_size) {
97
+ case 4:
98
+ vectorized_elementwise_kernel<4, func_t, array_t><<<grid, num_threads(), 0, stream>>>(N, f, data);
99
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
100
+ break;
101
+ case 2:
102
+ vectorized_elementwise_kernel<2, func_t, array_t><<<grid, num_threads(), 0, stream>>>(N, f, data);
103
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
104
+ break;
105
+ case 1: {
106
+ auto input_calc = TrivialOffsetCalculator<traits::arity>();
107
+ auto output_calc = TrivialOffsetCalculator<1>();
108
+ auto loader = memory::LoadWithoutCast();
109
+ auto storer = memory::StoreWithoutCast();
110
+ unrolled_elementwise_kernel<func_t, array_t><<<grid, num_threads(), 0, stream>>>(N, f, data, input_calc, output_calc, loader, storer);
111
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
112
+ break;
113
+ }
114
+ default:
115
+ TORCH_INTERNAL_ASSERT(false, "Unexpected vectorization size");
116
+ }
117
+ }
118
+
119
+
120
+ template<typename func_t, typename array_t, typename inp_calc_t, typename out_calc_t, typename loader_t, typename storer_t>
121
+ static inline void launch_unrolled_kernel(int64_t N, const func_t& f, array_t data,
122
+ inp_calc_t ic, out_calc_t oc, loader_t l, storer_t s)
123
+ {
124
+ TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits<int32_t>::max());
125
+ int64_t grid = (N + block_work_size() - 1) / block_work_size();
126
+ auto stream = at::cuda::getCurrentCUDAStream();
127
+ unrolled_elementwise_kernel<func_t, array_t><<<grid, num_threads(), 0, stream>>>(N, f, data, ic, oc, l, s);
128
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
129
+ }
130
+
131
+ template<int nt, int vt, typename func_t>
132
+ C10_LAUNCH_BOUNDS_2(nt, 4)
133
+ __global__ void elementwise_kernel(int N, func_t f) {
134
+ int tid = threadIdx.x;
135
+ int nv = nt * vt;
136
+ int idx = nv * blockIdx.x + tid;
137
+ #pragma unroll
138
+ for (int i = 0; i < vt; i++) {
139
+ if (idx < N) {
140
+ f(idx);
141
+ idx += nt;
142
+ }
143
+ }
144
+ }
145
+
146
+ template<int nt, int vt, typename func_t>
147
+ static void launch_legacy_kernel(int64_t N, const func_t& f) {
148
+ TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
149
+ if (N == 0) {
150
+ return;
151
+ }
152
+ dim3 block(nt);
153
+ dim3 grid((N + block.x * vt - 1) / (block.x * vt));
154
+ auto stream = at::cuda::getCurrentCUDAStream();
155
+ elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f);
156
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
157
+ }
158
+
159
+ template <typename traits, typename func_t, typename index_t, size_t... INDEX>
160
+ C10_HOST_DEVICE typename traits::result_type
161
+ invoke_impl(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], int i,
162
+ std::index_sequence<INDEX...>) {
163
+ (void)strides;
164
+ (void)i;
165
+ return f(c10::load<typename traits::template arg<INDEX>::type>(data[INDEX] + i * strides[INDEX])...);
166
+ }
167
+
168
+ template <typename func_t, typename index_t, typename traits = function_traits<func_t>>
169
+ C10_HOST_DEVICE typename traits::result_type
170
+ invoke(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], int i) {
171
+ using Indices = std::make_index_sequence<traits::arity>;
172
+ return invoke_impl<traits>(f, data, strides, i, Indices{});
173
+ }
174
+
175
+ template <typename traits, typename func_t, typename index_t, size_t... I>
176
+ C10_HOST_DEVICE typename traits::result_type
177
+ invoke_impl(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], const ScalarType dtypes[], int i,
178
+ std::index_sequence<I...>) {
179
+ (void)strides;
180
+ (void)i;
181
+ return f(c10::fetch_and_cast<typename traits::template arg<I>::type>(dtypes[I], data[I] + i * strides[I])...);
182
+ }
183
+
184
+ template <typename func_t, typename index_t, typename traits = function_traits<func_t>>
185
+ C10_HOST_DEVICE typename traits::result_type
186
+ invoke(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], const ScalarType dtypes[], int i) {
187
+ using Indices = std::make_index_sequence<traits::arity>;
188
+ return invoke_impl<traits>(f, data, strides, dtypes, i, Indices{});
189
+ }
190
+
191
+
192
+ template <typename func_t>
193
+ void gpu_kernel_impl_nocast(TensorIteratorBase& iter, const func_t& f) {
194
+ using traits = function_traits<func_t>;
195
+ using arg0_t = typename traits::result_type;
196
+ constexpr int ntensors = traits::arity + 1;
197
+
198
+ TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing());
199
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
200
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
201
+ TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
202
+
203
+ at::detail::Array<char*, ntensors> data;
204
+ for (int i = 0; i < ntensors; i++) {
205
+ data[i] = (char*)iter.data_ptr(i);
206
+ }
207
+
208
+ int64_t numel = iter.numel();
209
+
210
+ bool contiguous = iter.is_contiguous();
211
+
212
+ if (contiguous) {
213
+ return launch_vectorized_kernel(numel, f, data);
214
+ }
215
+ auto offset_calc = ::make_offset_calculator<traits::arity + 1>(iter);
216
+ constexpr int unroll_factor = sizeof(arg0_t) >= 4 ? 2 : 4;
217
+ launch_legacy_kernel<128, unroll_factor>(numel, [=]GPU_LAMBDA(int idx) {
218
+ auto offsets = offset_calc.get(idx);
219
+ arg0_t* out = (arg0_t*)(data[0] + offsets[0]);
220
+ *out = invoke(f, &data.data[1], &offsets.data[1], 1);
221
+ });
222
+ }
223
+
224
+ template <typename func_t>
225
+ void gpu_kernel_impl(TensorIteratorBase& iter, const func_t& f) {
226
+ if (!needs_dynamic_casting<func_t>::check(iter)) {
227
+ return gpu_kernel_impl_nocast(iter, f);
228
+ }
229
+ using traits = function_traits<func_t>;
230
+ using arg0_t = typename traits::result_type;
231
+ constexpr int ntensors = traits::arity + 1;
232
+
233
+ TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing());
234
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
235
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
236
+
237
+ at::detail::Array<char*, ntensors> data;
238
+ for (int i = 0; i < ntensors; i++) {
239
+ data[i] = (char*)iter.data_ptr(i);
240
+ }
241
+
242
+ int64_t numel = iter.numel();
243
+
244
+ bool contiguous = iter.is_contiguous();
245
+
246
+ if (contiguous) {
247
+ auto loader = memory::LoadWithCast<traits::arity>(iter);
248
+ auto storer = memory::StoreWithCast<1>(iter);
249
+ auto input_offset_calculator = TrivialOffsetCalculator<traits::arity>();
250
+ auto output_offset_calculator = TrivialOffsetCalculator<1>();
251
+ launch_unrolled_kernel(numel, f, data, input_offset_calculator, output_offset_calculator, loader, storer);
252
+ } else {
253
+ at::detail::Array<ScalarType, ntensors> dtypes;
254
+ for (int i = 0; i < ntensors; i++) {
255
+ dtypes[i] = iter.dtype(i);
256
+ }
257
+ auto offset_calc = ::make_offset_calculator<traits::arity + 1>(iter);
258
+ launch_legacy_kernel<128, 4>(numel, [=]GPU_LAMBDA(int idx) {
259
+ auto offsets = offset_calc.get(idx);
260
+ void* out = data[0] + offsets[0];
261
+ arg0_t result = invoke(f, &data.data[1], &offsets.data[1], &dtypes.data[1], 1);
262
+ c10::cast_and_store<arg0_t>(dtypes[0], out, result);
263
+ });
264
+ }
265
+ }
266
+
267
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CompositeRandomAccessor.h ADDED
@@ -0,0 +1,35 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/CompositeRandomAccessorCommon.h>
4
+ #include <thrust/tuple.h>
5
+
6
+ namespace at { namespace native {
7
+
8
+ struct TupleInfoCPU {
9
+ template <typename ...Types>
10
+ using tuple = thrust::tuple<Types...>;
11
+
12
+ template <typename ...Types>
13
+ static constexpr auto tie(Types&... args) noexcept {
14
+ return thrust::tie(args...);
15
+ }
16
+ };
17
+
18
+ template <typename KeyAccessor, typename ValueAccessor>
19
+ using CompositeRandomAccessorCPU =
20
+ CompositeRandomAccessor<KeyAccessor, ValueAccessor, TupleInfoCPU>;
21
+
22
+ template <typename Values, typename References>
23
+ void swap(
24
+ references_holder<Values, References> rh1,
25
+ references_holder<Values, References> rh2
26
+ ) {
27
+ return thrust::swap(rh1.data(), rh2.data());
28
+ }
29
+
30
+ template <int N, typename Values, typename References>
31
+ auto get(references_holder<Values, References> rh) -> decltype(thrust::get<N>(rh.data())) {
32
+ return thrust::get<N>(rh.data());
33
+ }
34
+
35
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CuFFTPlanCache.h ADDED
@@ -0,0 +1,532 @@
1
+ #include <ATen/Config.h>
2
+ #include <ATen/core/DimVector.h>
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #include <ATen/native/cuda/CuFFTUtils.h>
5
+ #include <ATen/native/utils/ParamsHash.h>
6
+ #include <c10/util/accumulate.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ #include <cufft.h>
10
+ #include <cufftXt.h>
11
+
12
+ #include <limits>
13
+ #include <list>
14
+ #include <sstream>
15
+ #include <stdexcept>
16
+ #include <string>
17
+ #include <unordered_map>
18
+
19
+ namespace at { namespace native { namespace detail {
20
+
21
+ // Enum representing the FFT type
22
+ enum class CuFFTTransformType : int8_t {
23
+ C2C, // Complex-to-complex
24
+ R2C, // Real-to-complex
25
+ C2R, // Complex-to-real
26
+ };
27
+
28
+ // This struct is used to let us easily compute hashes of the
29
+ // parameters.
30
+ // It will be the **key** to the plan cache.
31
+ struct CuFFTParams
32
+ {
33
+ int64_t signal_ndim_; // between 1 and max_rank, i.e., 1 <= signal_ndim <= 3
34
+ // These include additional batch dimension as well.
35
+ int64_t sizes_[max_rank + 1];
36
+ int64_t input_strides_[max_rank + 1];
37
+ int64_t output_strides_[max_rank + 1];
38
+ CuFFTTransformType fft_type_;
39
+ ScalarType value_type_;
40
+
41
+ CuFFTParams() = default;
42
+
43
+ CuFFTParams(IntArrayRef in_strides, IntArrayRef out_strides,
44
+ IntArrayRef signal_sizes, CuFFTTransformType fft_type, ScalarType value_type) {
45
+ // Padding bits must be zeroed for hashing
46
+ memset(this, 0, sizeof(*this));
47
+ signal_ndim_ = signal_sizes.size() - 1;
48
+ fft_type_ = fft_type;
49
+ value_type_ = value_type;
50
+
51
+ TORCH_INTERNAL_ASSERT(in_strides.size() == signal_sizes.size());
52
+ TORCH_INTERNAL_ASSERT(out_strides.size() == signal_sizes.size());
53
+ TORCH_INTERNAL_ASSERT(1 <= signal_ndim_ && signal_ndim_ <= max_rank);
54
+
55
+ std::copy(signal_sizes.cbegin(), signal_sizes.cend(), sizes_);
56
+ std::copy(in_strides.cbegin(), in_strides.cend(), input_strides_);
57
+ std::copy(out_strides.cbegin(), out_strides.cend(), output_strides_);
58
+ }
59
+ };
60
+
61
+ static_assert(std::is_trivial<CuFFTParams>::value, "");
62
+
63
+ // Returns true if the transform type has complex input
64
+ inline bool cufft_complex_input(CuFFTTransformType type) {
65
+ switch (type) {
66
+ case CuFFTTransformType::C2C:
67
+ case CuFFTTransformType::C2R:
68
+ return true;
69
+
70
+ case CuFFTTransformType::R2C:
71
+ return false;
72
+ }
73
+ TORCH_INTERNAL_ASSERT(false);
74
+ }
75
+
76
+ // Returns true if the transform type has complex output
77
+ inline bool cufft_complex_output(CuFFTTransformType type) {
78
+ switch (type) {
79
+ case CuFFTTransformType::C2C:
80
+ case CuFFTTransformType::R2C:
81
+ return true;
82
+
83
+ case CuFFTTransformType::C2R:
84
+ return false;
85
+ }
86
+ TORCH_INTERNAL_ASSERT(false);
87
+ }
88
+
89
+ // Create transform type enum from bools representing if input and output are complex
90
+ inline CuFFTTransformType GetCuFFTTransformType(bool complex_input, bool complex_output) {
91
+ if (complex_input && complex_output) {
92
+ return CuFFTTransformType::C2C;
93
+ } else if (complex_input && !complex_output) {
94
+ return CuFFTTransformType::C2R;
95
+ } else if (!complex_input && complex_output) {
96
+ return CuFFTTransformType::R2C;
97
+ }
98
+ TORCH_INTERNAL_ASSERT(false, "Real to real FFTs are not supported");
99
+ }
100
+
101
+
102
+ class CuFFTHandle {
103
+ ::cufftHandle handle_;
104
+ public:
105
+
106
+ CuFFTHandle() {
107
+ CUFFT_CHECK(cufftCreate(&handle_));
108
+ }
109
+
110
+ ::cufftHandle & get() { return handle_; }
111
+ const ::cufftHandle & get() const { return handle_; }
112
+
113
+ ~CuFFTHandle() {
114
+ // Not using fftDestroy() for rocFFT to work around double freeing of handles
115
+ #if !defined(USE_ROCM)
116
+ cufftDestroy(handle_);
117
+ #endif
118
+ }
119
+ };
120
+
121
+ __forceinline__
122
+ static bool is_pow_of_two(int64_t x) {
123
+ return (x & (x - 1)) == 0;
124
+ }
125
+
126
+ #if defined(USE_ROCM)
127
+ using cufft_size_type = int;
128
+ #else
129
+ using cufft_size_type = long long int;
130
+ #endif
131
+
132
+ using CuFFTDimVector = c10::SmallVector<cufft_size_type, at::kDimVectorStaticSize>;
133
+
134
+ // Struct representing a tensor in CuFFT's data layout for planning transforms
135
+ // See NOTE [ cuFFT Embedded Strides ].
136
+ struct CuFFTDataLayout {
137
+ CuFFTDimVector embed;
138
+ cufft_size_type stride, dist;
139
+ bool must_clone, simple;
140
+ };
141
+
142
+ // Returns a cufft embedding for a contiguous signal of the given size.
143
+ // e.g. if the input is cloned, this will be the resulting data layout
144
+ // See NOTE [ cuFFT Embedded Strides ].
145
+ inline CuFFTDataLayout cufft_simple_embed(IntArrayRef sizes, bool onesided) {
146
+ CuFFTDataLayout layout;
147
+ layout.simple = true;
148
+ layout.must_clone = false;
149
+ layout.embed.assign(sizes.cbegin() + 1, sizes.cend());
150
+ if (onesided) {
151
+ layout.embed.back() = sizes.back() / 2 + 1;
152
+ }
153
+ layout.stride = 1;
154
+ layout.dist = 1;
155
+ for (const auto& len : layout.embed) {
156
+ layout.dist *= len;
157
+ }
158
+ return layout;
159
+ }
160
+
161
+ // Convert strides to a CuFFT embedded representation.
162
+ // If strides cannot be embedded, returns a simple layout and sets must_clone flag
163
+ // See NOTE [ cuFFT Embedded Strides ].
164
+ inline CuFFTDataLayout as_cufft_embed(IntArrayRef strides, IntArrayRef sizes, bool onesided) {
165
+ const auto signal_ndim = strides.size() - 1;
166
+ CuFFTDataLayout layout;
167
+ auto last_stride = strides[signal_ndim];
168
+ layout.must_clone = (last_stride <= 0);
169
+
170
+ const auto last_dim_size = onesided ?
171
+ sizes[signal_ndim] / 2 + 1 : sizes[signal_ndim];
172
+ const auto signal_numel = c10::multiply_integers(sizes.slice(1, sizes.size() - 2)) * last_dim_size;
173
+
174
+ // Zero strides are not allowed, even if the batch size is one.
175
+ // If that happens, just fall back to a dummy value.
176
+ if (sizes[0] == 1) {
177
+ layout.dist = signal_numel;
178
+ } else if (strides[0] == 0) {
179
+ layout.must_clone = true;
180
+ } else {
181
+ layout.dist = strides[0];
182
+ }
183
+
184
+ // Calculate the embedding shape, or set must_clone if the strides cannot be embedded
185
+ layout.embed.resize(signal_ndim);
186
+ for (auto i = signal_ndim - 1; !layout.must_clone && i > 0; i--) {
187
+ auto stride = strides[i];
188
+ if (sizes[i] == 1) {
189
+ layout.embed[i] = 1;
190
+ } else if (stride > 0 && stride % last_stride == 0) {
191
+ layout.embed[i] = stride / last_stride;
192
+ last_stride = stride;
193
+ } else {
194
+ layout.must_clone = true;
195
+ }
196
+ }
197
+
198
+ if (layout.must_clone) {
199
+ // If the input needs to be cloned, assume it will be contiguous
200
+ layout = cufft_simple_embed(sizes, onesided);
201
+ layout.must_clone = true;
202
+ } else {
203
+ layout.embed[0] = sizes[1];
204
+ layout.stride = strides[signal_ndim];
205
+ // Determine if layout represents a simple embedding (contiguous data)
206
+ layout.simple = [&] {
207
+ for (const auto i : c10::irange(1, signal_ndim - 1)) {
208
+ if (layout.embed[i] != sizes[i + 1]) {
209
+ return false;
210
+ }
211
+ }
212
+
213
+ return (layout.stride == 1 && layout.dist == signal_numel &&
214
+ layout.embed.back() == last_dim_size);
215
+ }();
216
+ }
217
+ return layout;
218
+ }
219
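A worked walk-through of as_cufft_embed with illustrative numbers, for a contiguous two-sided (C2C) batch of 2-D signals:

//   sizes   = {4, 8, 16}            batch of 4, signal shape 8 x 16
//   strides = {128, 16, 1}          contiguous layout
//   last_stride = strides[2] = 1    -> must_clone stays false
//   dist    = strides[0] = 128      batch > 1 and batch stride != 0
//   embed   = {8, 16}               embed[1] = strides[1] / last_stride, embed[0] = sizes[1]
//   stride  = strides[2] = 1,  signal_numel = 8 * 16 = 128
//   simple  = (stride == 1 && dist == signal_numel && embed.back() == 16) -> true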
+
220
+ // This class contains all the information needed to execute a cuFFT plan:
221
+ // 1. the plan
222
+ // 2. whether to clone input before executing the plan
223
+ // 3. the workspace size needed
224
+ //
225
+ // This class will be the **value** in the plan cache.
226
+ // It **owns** the raw plan via a unique_ptr.
227
+ class CuFFTConfig {
228
+ public:
229
+
230
+ // Move semantics alone is enough for this class. Although we already use
232
+ // unique_ptr for the plan, we still remove the copy constructor and assignment op so
233
+ // we don't accidentally copy and take a perf hit.
233
+ CuFFTConfig(const CuFFTConfig&) = delete;
234
+ CuFFTConfig& operator=(CuFFTConfig const&) = delete;
235
+
236
+ explicit CuFFTConfig(const CuFFTParams& params):
237
+ CuFFTConfig(
238
+ IntArrayRef(params.input_strides_, params.signal_ndim_ + 1),
239
+ IntArrayRef(params.output_strides_, params.signal_ndim_ + 1),
240
+ IntArrayRef(params.sizes_, params.signal_ndim_ + 1),
241
+ params.fft_type_,
242
+ params.value_type_) {}
243
+
244
+ // For complex types, strides are in units of 2 * element_size(dtype)
245
+ // sizes are for the full signal, including batch size and always two-sided
246
+ CuFFTConfig(IntArrayRef in_strides, IntArrayRef out_strides,
247
+ IntArrayRef sizes, CuFFTTransformType fft_type, ScalarType dtype):
248
+ fft_type_(fft_type), value_type_(dtype) {
249
+
250
+ // signal sizes (excluding batch dim)
251
+ CuFFTDimVector signal_sizes(sizes.begin() + 1, sizes.end());
252
+
253
+ // input batch size
254
+ const int64_t batch = sizes[0];
255
+ const int64_t signal_ndim = sizes.size() - 1;
256
+
257
+ // Since cuFFT has limited non-unit stride support and various constraints, we
258
+ // use a flag throughout this function to keep track of whether we need to
260
+ // clone the input, i.e. run input = input.clone();
260
+
261
+ #if defined(USE_ROCM)
262
+ // clone input to avoid issues with hipfft clobbering the input and failing tests
263
+ clone_input = true;
264
+ #else
265
+ clone_input = false;
266
+ #endif
267
+
268
+ // For half, base strides on the real part of real-to-complex and
269
+ // complex-to-real transforms are not supported. Since our output is always
270
+ // contiguous, only need to check real-to-complex case.
271
+ if (dtype == ScalarType::Half) {
272
+ // cuFFT on half requires compute capability of at least SM_53
273
+ auto dev_prop = at::cuda::getCurrentDeviceProperties();
274
+ TORCH_CHECK(dev_prop->major >= 5 && !(dev_prop->major == 5 && dev_prop->minor < 3),
275
+ "cuFFT doesn't support signals of half type with compute "
276
+ "capability less than SM_53, but the device containing input half "
277
+ "tensor only has SM_", dev_prop->major, dev_prop->minor);
278
+ for (const auto i : c10::irange(signal_ndim)) {
279
+ TORCH_CHECK(is_pow_of_two(sizes[i + 1]),
280
+ "cuFFT only supports dimensions whose sizes are powers of two when"
281
+ " computing in half precision, but got a signal size of",
282
+ sizes.slice(1));
283
+ }
284
+ clone_input |= in_strides.back() != 1;
285
+ }
286
+
287
+ CuFFTDataLayout in_layout;
288
+ if (clone_input) {
289
+ in_layout = cufft_simple_embed(sizes, fft_type == CuFFTTransformType::C2R);
290
+ } else {
291
+ in_layout = as_cufft_embed(in_strides, sizes, fft_type == CuFFTTransformType::C2R);
292
+ }
293
+ auto out_layout = as_cufft_embed(out_strides, sizes, fft_type == CuFFTTransformType::R2C);
294
+ TORCH_INTERNAL_ASSERT(!out_layout.must_clone, "Out strides cannot be represented as CuFFT embedding");
295
+ clone_input |= in_layout.must_clone;
296
+
297
+ // Check if we can take advantage of simple data layout.
298
+ //
299
+ // See NOTE [ cuFFT Embedded Strides ] in native/cuda/SpectralOps.cu.
300
+
301
+ const bool simple_layout = in_layout.simple && out_layout.simple;
302
+
303
+ #if defined(USE_ROCM)
304
+ hipfftType exec_type = [&]{
305
+ if (dtype == kFloat) {
306
+ switch (fft_type) {
307
+ case CuFFTTransformType::C2C: return HIPFFT_C2C;
308
+ case CuFFTTransformType::R2C: return HIPFFT_R2C;
309
+ case CuFFTTransformType::C2R: return HIPFFT_C2R;
310
+ }
311
+ } else if (dtype == kDouble) {
312
+ switch (fft_type) {
313
+ case CuFFTTransformType::C2C: return HIPFFT_Z2Z;
314
+ case CuFFTTransformType::R2C: return HIPFFT_D2Z;
315
+ case CuFFTTransformType::C2R: return HIPFFT_Z2D;
316
+ }
317
+ }
318
+ TORCH_CHECK(false, "hipFFT doesn't support transforms of type: ", dtype);
319
+ }();
320
+ #else
321
+ cudaDataType itype, otype, exec_type;
322
+ const auto complex_input = cufft_complex_input(fft_type);
323
+ const auto complex_output = cufft_complex_output(fft_type);
324
+ if (dtype == ScalarType::Float) {
325
+ itype = complex_input ? CUDA_C_32F : CUDA_R_32F;
326
+ otype = complex_output ? CUDA_C_32F : CUDA_R_32F;
327
+ exec_type = CUDA_C_32F;
328
+ } else if (dtype == ScalarType::Double) {
329
+ itype = complex_input ? CUDA_C_64F : CUDA_R_64F;
330
+ otype = complex_output ? CUDA_C_64F : CUDA_R_64F;
331
+ exec_type = CUDA_C_64F;
332
+ } else if (dtype == ScalarType::Half) {
333
+ itype = complex_input ? CUDA_C_16F : CUDA_R_16F;
334
+ otype = complex_output ? CUDA_C_16F : CUDA_R_16F;
335
+ exec_type = CUDA_C_16F;
336
+ } else {
337
+ TORCH_CHECK(false, "cuFFT doesn't support tensor of type: ", dtype);
338
+ }
339
+ #endif
340
+
341
+ // disable auto allocation of workspace to use THC allocator
342
+ CUFFT_CHECK(cufftSetAutoAllocation(plan(), /* autoAllocate */ 0));
343
+
344
+ size_t ws_size_t;
345
+
346
+ // make plan
347
+ if (simple_layout) {
348
+ // With unit strides, we tell cuFFT by setting inembed == onembed == NULL.
349
+ // In such case, cuFFT ignores istride, ostride, idist, and odist
350
+ // by assuming istride = ostride = 1.
351
+ //
352
+ // See NOTE [ cuFFT Embedded Strides ] in native/cuda/SpectralOps.cu.
353
+ #if defined(USE_ROCM)
354
+ CUFFT_CHECK(hipfftMakePlanMany(plan(), signal_ndim, signal_sizes.data(),
355
+ /* inembed */ nullptr, /* base_istride */ 1, /* idist */ 1,
356
+ /* onembed */ nullptr, /* base_ostride */ 1, /* odist */ 1,
357
+ exec_type, batch, &ws_size_t));
358
+ #else
359
+ CUFFT_CHECK(cufftXtMakePlanMany(plan(), signal_ndim, signal_sizes.data(),
360
+ /* inembed */ nullptr, /* base_istride */ 1, /* idist */ 1, itype,
361
+ /* onembed */ nullptr, /* base_ostride */ 1, /* odist */ 1, otype,
362
+ batch, &ws_size_t, exec_type));
363
+ #endif
364
+ } else {
365
+ #if defined(USE_ROCM)
366
+ CUFFT_CHECK(hipfftMakePlanMany(plan(), signal_ndim, signal_sizes.data(),
367
+ in_layout.embed.data(), in_layout.stride, in_layout.dist,
368
+ out_layout.embed.data(), out_layout.stride, out_layout.dist,
369
+ exec_type, batch, &ws_size_t));
370
+ #else
371
+ CUFFT_CHECK(cufftXtMakePlanMany(plan(), signal_ndim, signal_sizes.data(),
372
+ in_layout.embed.data(), in_layout.stride, in_layout.dist, itype,
373
+ out_layout.embed.data(), out_layout.stride, out_layout.dist, otype,
374
+ batch, &ws_size_t, exec_type));
375
+ #endif
376
+ }
377
+ ws_size = static_cast<int64_t>(ws_size_t);
378
+ }
379
+
380
+ const cufftHandle &plan() const { return plan_ptr.get(); }
381
+
382
+ CuFFTTransformType transform_type() const { return fft_type_; }
383
+ ScalarType data_type() const { return value_type_; }
384
+ bool should_clone_input() const { return clone_input; }
385
+ int64_t workspace_size() const { return ws_size; }
386
+
387
+ private:
388
+ CuFFTHandle plan_ptr;
389
+ bool clone_input;
390
+ int64_t ws_size;
391
+ CuFFTTransformType fft_type_;
392
+ ScalarType value_type_;
393
+ };
394
+
395
+ #if defined(USE_ROCM)
396
+ // Note that the max plan number for CUDA version < 10 has to be 1023
397
+ // due to a bug that fails on the 1024th plan
398
+ constexpr int64_t CUFFT_MAX_PLAN_NUM = 1023;
399
+ constexpr int64_t CUFFT_DEFAULT_CACHE_SIZE = CUFFT_MAX_PLAN_NUM;
400
+ #else
401
+ constexpr int64_t CUFFT_MAX_PLAN_NUM = std::numeric_limits<int64_t>::max();
402
+ // The default max cache size chosen for CUDA version > 10 is arbitrary.
403
+ // This number puts a limit on how big a plan cache we maintain by
404
+ // default. Users can always configure it via cufft_set_plan_cache_max_size.
405
+ constexpr int64_t CUFFT_DEFAULT_CACHE_SIZE = 4096;
406
+ #endif
407
+ static_assert(0 <= CUFFT_MAX_PLAN_NUM && CUFFT_MAX_PLAN_NUM <= std::numeric_limits<int64_t>::max(),
408
+ "CUFFT_MAX_PLAN_NUM not in size_t range");
409
+ static_assert(CUFFT_DEFAULT_CACHE_SIZE >= 0 && CUFFT_DEFAULT_CACHE_SIZE <= CUFFT_MAX_PLAN_NUM,
410
+ "CUFFT_DEFAULT_CACHE_SIZE not in [0, CUFFT_MAX_PLAN_NUM] range");
411
+
412
+ // This cache assumes that the mapping from key to value never changes.
413
+ // This is **NOT** thread-safe. Please use a mutex when using it **AND** the
414
+ // value returned from try_emplace_value.
415
+ // The contract of using this cache is that try_emplace_value should only be
416
+ // used when the max_size is positive.
417
+ class CuFFTParamsLRUCache {
418
+ public:
419
+ using kv_t = typename std::pair<CuFFTParams, CuFFTConfig>;
420
+ using map_t = typename std::unordered_map<std::reference_wrapper<CuFFTParams>,
421
+ typename std::list<kv_t>::iterator,
422
+ ParamsHash<CuFFTParams>,
423
+ ParamsEqual<CuFFTParams>>;
424
+ using map_kkv_iter_t = typename map_t::iterator;
425
+
426
+
427
+ CuFFTParamsLRUCache() : CuFFTParamsLRUCache(CUFFT_DEFAULT_CACHE_SIZE) {}
428
+
429
+ CuFFTParamsLRUCache(int64_t max_size) {
430
+ _set_max_size(max_size);
431
+ }
432
+
433
+ CuFFTParamsLRUCache(CuFFTParamsLRUCache&& other) noexcept :
434
+ _usage_list(std::move(other._usage_list)),
435
+ _cache_map(std::move(other._cache_map)),
436
+ _max_size(other._max_size) {}
437
+
438
+ CuFFTParamsLRUCache& operator=(CuFFTParamsLRUCache&& other) noexcept {
439
+ _usage_list = std::move(other._usage_list);
440
+ _cache_map = std::move(other._cache_map);
441
+ _max_size = other._max_size;
442
+ return *this;
443
+ }
444
+
445
+ // If key is in this cache, return the cached config. Otherwise, emplace the
446
+ // config in this cache and return it.
447
+ // Return const reference because CuFFTConfig shouldn't be tampered with once
448
+ // created.
449
+ const CuFFTConfig &lookup(CuFFTParams params) {
450
+ AT_ASSERT(_max_size > 0);
451
+
452
+ map_kkv_iter_t map_it = _cache_map.find(params);
453
+ // Hit, put to list front
454
+ if (map_it != _cache_map.end()) {
455
+ _usage_list.splice(_usage_list.begin(), _usage_list, map_it->second);
456
+ return map_it->second->second;
457
+ }
458
+
459
+ // Miss
460
+ // remove if needed
461
+ if (_usage_list.size() >= _max_size) {
462
+ auto last = _usage_list.end();
463
+ last--;
464
+ _cache_map.erase(last->first);
465
+ _usage_list.pop_back();
466
+ }
467
+
468
+ // construct new plan at list front, then insert into _cache_map
469
+ _usage_list.emplace_front(std::piecewise_construct,
470
+ std::forward_as_tuple(params),
471
+ std::forward_as_tuple(params));
472
+ auto kv_it = _usage_list.begin();
473
+ _cache_map.emplace(std::piecewise_construct,
474
+ std::forward_as_tuple(kv_it->first),
475
+ std::forward_as_tuple(kv_it));
476
+ return kv_it->second;
477
+ }
478
+
479
+ void clear() {
480
+ _cache_map.clear();
481
+ _usage_list.clear();
482
+ }
483
+
484
+ void resize(int64_t new_size) {
485
+ _set_max_size(new_size);
486
+ auto cur_size = _usage_list.size();
487
+ if (cur_size > _max_size) {
488
+ auto delete_it = _usage_list.end();
489
+ for (size_t i = 0; i < cur_size - _max_size; i++) {
490
+ delete_it--;
491
+ _cache_map.erase(delete_it->first);
492
+ }
493
+ _usage_list.erase(delete_it, _usage_list.end());
494
+ }
495
+ }
496
+
497
+ size_t size() const { return _cache_map.size(); }
498
+
499
+ size_t max_size() const noexcept { return _max_size; }
500
+
501
+ std::mutex mutex;
502
+
503
+ private:
504
+ // Only sets size and does value check. Does not resize the data structures.
505
+ void _set_max_size(int64_t new_size) {
506
+ // We check that 0 <= new_size <= CUFFT_MAX_PLAN_NUM here. Since
507
+ // CUFFT_MAX_PLAN_NUM is of type size_t, we need to do non-negativity check
508
+ // first.
509
+ TORCH_CHECK(new_size >= 0,
510
+ "cuFFT plan cache size must be non-negative, but got ", new_size);
511
+ TORCH_CHECK(new_size <= CUFFT_MAX_PLAN_NUM,
512
+ "cuFFT plan cache size can not be larger than ", CUFFT_MAX_PLAN_NUM, ", but got ", new_size);
513
+ _max_size = static_cast<size_t>(new_size);
514
+ }
515
+
516
+ std::list<kv_t> _usage_list;
517
+ map_t _cache_map;
518
+ size_t _max_size;
519
+ };
520
+
521
+ // Since ATen is separated into CPU build and CUDA build, we need a way to call
522
+ // these functions only when CUDA is loaded. We use CUDA hooks for this purpose
523
+ // (at cuda/detail/CUDAHooks.cpp), and call the hooked functions from the actual
524
+ // native function counterparts (at native/SpectralOps.cpp), i.e.,
525
+ // _cufft_get_plan_cache_max_size, _cufft_set_plan_cache_max_size
526
+ // _cufft_get_plan_cache_size, and _cufft_clear_plan_cache.
527
+ int64_t cufft_get_plan_cache_max_size_impl(DeviceIndex device_index);
528
+ void cufft_set_plan_cache_max_size_impl(DeviceIndex device_index, int64_t max_size);
529
+ int64_t cufft_get_plan_cache_size_impl(DeviceIndex device_index);
530
+ void cufft_clear_plan_cache_impl(DeviceIndex device_index);
531
+
532
+ }}} // namespace at::native::detail
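The comments above require callers to hold `mutex` both while calling `lookup` and while using the returned `CuFFTConfig`. A minimal caller sketch, not part of the diff (the helper name `run_with_cached_plan` is hypothetical and the actual cuFFT execution is elided):

// Illustrative sketch only. Assumes a per-device CuFFTParamsLRUCache `plan_cache`
// with max_size > 0 and an already-populated CuFFTParams `params`.
void run_with_cached_plan(at::native::detail::CuFFTParamsLRUCache& plan_cache,
                          at::native::detail::CuFFTParams& params) {
  // The cache is not thread-safe: hold its mutex across lookup() and any use
  // of the returned config.
  std::lock_guard<std::mutex> guard(plan_cache.mutex);
  const at::native::detail::CuFFTConfig& config = plan_cache.lookup(params);
  // ... launch the transform with `config` here (elided) ...
}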
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/DeviceSqrt.cuh ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+
3
+ namespace at { namespace native {
4
+ #if defined(USE_ROCM)
5
+ // take these out when ROCm implements std:: math functions
6
+ #include <math.h>
7
+ template <typename scalar_t>
8
+ static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);
9
+
10
+ template <>
11
+ __forceinline__ __device__ float device_sqrt(float val) {
12
+ return ::sqrtf(val);
13
+ }
14
+
15
+ template <>
16
+ __forceinline__ __device__ double device_sqrt(double val) {
17
+ return ::sqrt(val);
18
+ }
19
+ #else
20
+ template<typename scalar_t>
21
+ __forceinline__ __device__ double device_sqrt(scalar_t val) {
22
+ return std::sqrt(val);
23
+ }
24
+ #endif
25
+ }}
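A tiny illustrative kernel (not part of the diff; the kernel name is hypothetical) showing where `device_sqrt` is meant to be called, so the ROCm specializations and the generic `std::sqrt` path above are exercised from device code in the same translation unit:

// Sketch only: element-wise in-place square root.
template <typename scalar_t>
__global__ void sqrt_inplace_kernel(scalar_t* data, int64_t n) {
  int64_t i = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
  if (i < n) {
    data[i] = static_cast<scalar_t>(at::native::device_sqrt(data[i]));
  }
}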
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/DistributionTemplates.h ADDED
@@ -0,0 +1,671 @@
1
+ #pragma once
2
+
3
+ #include <ATen/AccumulateType.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/ExpandBase.h>
6
+ #include <ATen/OpMathType.h>
7
+ #include <ATen/native/TensorIterator.h>
8
+ #include <ATen/native/cuda/Loops.cuh>
9
+ #include <c10/util/Half.h>
10
+ #include <ATen/cuda/CUDAApplyUtils.cuh>
11
+ #include <ATen/cuda/CUDAContext.h>
12
+ #include <ATen/cuda/detail/OffsetCalculator.cuh>
13
+ #include <ATen/cuda/CUDAGraphsUtils.cuh>
14
+ #include <ATen/detail/FunctionTraits.h>
15
+ #include <ATen/core/DistributionsHelper.h>
16
+
17
+ #include <curand.h>
18
+ #include <curand_kernel.h>
19
+ #include <curand_philox4x32_x.h>
20
+ #include <cstdint>
21
+ #include <limits>
22
+ #include <utility>
23
+ #include <mutex>
24
+ #include <tuple>
25
+ #include <type_traits>
26
+
27
+ namespace at {
28
+ namespace native {
29
+ namespace {
30
+
31
+ // launch bounds used for kernels utilizing TensorIterator
32
+ const uint32_t block_size_bound = 256;
33
+ const uint32_t grid_size_bound = 4;
34
+ // number of randoms given by distributions like curand_uniform4, curand_uniform2_double
35
+ // used in calculating philox offset.
36
+ const uint32_t curand4_engine_calls = 4;
37
+
38
+ // utility function that calculates proper philox_offset
39
+ // for distributions utilizing TensorIterator. For distributions using
40
+ // TensorIterator, we are using a grid-stride loop with each
41
+ // thread yielding one element. For the edge of the grid-stride
42
+ // loop, if the tensor size is large, the unroll loop will kick in and the float4
43
+ // from curand4 will start getting utilized (for common tensor sizes, we end up
44
+ // using rand.x from each thread). Hence, the philox_offset is
45
+ // (number of elements per thread * number of engine calls), which makes
46
+ // sure that philox offset increment is not less than the number of randoms used
47
+ // in each thread.
48
+ std::tuple<uint64_t, dim3, dim3> calc_execution_policy(int64_t total_elements) {
49
+ const uint64_t numel = static_cast<uint64_t>(total_elements);
50
+ const uint32_t block_size = block_size_bound;
51
+ const uint32_t unroll = curand4_engine_calls;
52
+ dim3 dim_block(block_size);
53
+ dim3 grid((numel + block_size - 1) / block_size);
54
+ uint32_t blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / block_size;
55
+ grid.x = std::min(
56
+ static_cast<uint32_t>(at::cuda::getCurrentDeviceProperties()->multiProcessorCount) * blocks_per_sm,
57
+ grid.x);
58
+ // number of times random will be generated per thread, to offset philox counter in thc random state
59
+ uint64_t counter_offset = ((numel - 1) / (block_size * grid.x * unroll) + 1)
60
+ * curand4_engine_calls;
61
+ return std::make_tuple(counter_offset, grid, dim_block);
62
+ }
63
+
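// Worked instance of the counter_offset arithmetic in calc_execution_policy above
// (illustrative only, not part of the diff). Take numel = 1'000'000, block_size = 256,
// unroll = curand4_engine_calls = 4, and suppose the occupancy clamp leaves grid.x = 2048
// (the exact value is device-dependent):
//   elements covered per grid-stride round = block_size * grid.x * unroll
//                                           = 256 * 2048 * 4 = 2'097'152
//   counter_offset = ((1'000'000 - 1) / 2'097'152 + 1) * 4 = (0 + 1) * 4 = 4
// so every thread advances its philox counter by at least as many randoms as it consumes.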
64
+ // grid stride loop kernel for distributions
65
+ template<typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t>
66
+ C10_LAUNCH_BOUNDS_2(block_size_bound, grid_size_bound)
67
+ __global__ void distribution_elementwise_grid_stride_kernel(int numel,
68
+ PhiloxCudaState philox_args,
69
+ const dist_t dist_func,
70
+ const transform_t transform_func) {
71
+ auto seeds = at::cuda::philox::unpack(philox_args);
72
+ int idx = blockIdx.x * blockDim.x + threadIdx.x;
73
+ curandStatePhilox4_32_10_t state;
74
+ curand_init(std::get<0>(seeds),
75
+ idx,
76
+ std::get<1>(seeds),
77
+ &state);
78
+
79
+ int rounded_size = ((numel - 1)/(blockDim.x * gridDim.x * unroll_factor)+1) *
80
+ blockDim.x * gridDim.x * unroll_factor;
81
+ for(int linear_index = idx; linear_index < rounded_size; linear_index += blockDim.x * gridDim.x * unroll_factor) {
82
+ auto rand = dist_func(&state);
83
+ #pragma unroll
84
+ for (int ii = 0; ii < unroll_factor; ii++) {
85
+ int li = linear_index + blockDim.x * gridDim.x * ii;
86
+ if (li < numel) {
87
+ transform_func(li, static_cast<accscalar_t>((&rand.x)[ii]));
88
+ }
89
+ }
90
+ __syncthreads();
91
+ }
92
+ }
93
+
94
+ /**
95
+ * distribution_nullary_kernel is analogous to gpu_kernel in
96
+ * ATen/native/cuda/Loops.cuh. Like gpu_kernel, it uses
97
+ * TensorIterator to launch a kernel. However, the differences are
98
+ * - it launches a grid-stride loop based kernel. The kernel is not
99
+ * generic like elementwise_kernel in Loops.cuh and is specialized
100
+ * for the distribution kernels here.
101
+ * - For big size tensors, we can launch multiple kernels recursively
102
+ * (i.e. if (!iter.can_use_32bit_indexing())) and hence, the philox
103
+ * offset calculation is done in this function.
104
+ *
105
+ * FIXME: Can we specialize elementwise_kernel and launch_kernel in Loops.cuh
106
+ * to have grid-stride loop kernel and then use that to launch our distribution
107
+ * kernels? Note that we need a grid-stride loop kernel because, we found by testing
108
+ * that it achieves peak effective bandwidth.
109
+ */
110
+ template<typename scalar_t,
111
+ typename accscalar_t,
112
+ int unroll_factor,
113
+ typename RNG,
114
+ typename dist_t,
115
+ typename transform_t>
116
+ void distribution_nullary_kernel(at::TensorIteratorBase& iter,
117
+ RNG gen,
118
+ const dist_t& dist_func,
119
+ const transform_t transform_func) {
120
+ static_assert(unroll_factor >= 1, "unroll_factor must be >= 1.");
121
+ int64_t numel = iter.numel();
122
+ if (numel == 0) {
123
+ return;
124
+ }
125
+
126
+ auto execution_policy = calc_execution_policy(numel);
127
+ auto counter_offset = std::get<0>(execution_policy);
128
+ auto grid = std::get<1>(execution_policy);
129
+ auto block = std::get<2>(execution_policy);
130
+ PhiloxCudaState rng_engine_inputs;
131
+ {
132
+ // See Note [Acquire lock when using random generators]
133
+ std::lock_guard<std::mutex> lock(gen->mutex_);
134
+ rng_engine_inputs = gen->philox_cuda_state(counter_offset);
135
+ }
136
+
137
+ if (!iter.can_use_32bit_indexing()) {
138
+ for (auto& sub_iter : iter.with_32bit_indexing()) {
139
+ distribution_nullary_kernel<scalar_t, accscalar_t, unroll_factor>(sub_iter,
140
+ gen, dist_func, transform_func);
141
+ }
142
+ return;
143
+ }
144
+
145
+ char* out_data = (char*)iter.data_ptr(0);
146
+
147
+ auto stream = at::cuda::getCurrentCUDAStream();
148
+ if (iter.is_trivial_1d()) {
149
+ auto strides = iter.get_inner_strides();
150
+ int stride0 = strides[0];
151
+ distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>(
152
+ numel,
153
+ rng_engine_inputs,
154
+ dist_func,
155
+ [=]__device__(int idx, accscalar_t rand) {
156
+ scalar_t* out = (scalar_t*)&out_data[stride0 * idx];
157
+ *out = transform_func(rand);
158
+ }
159
+ );
160
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
161
+ } else {
162
+ auto offset_calc = make_offset_calculator<1>(iter);
163
+ distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>(
164
+ numel,
165
+ rng_engine_inputs,
166
+ dist_func,
167
+ [=]__device__(int idx, accscalar_t rand) {
168
+ auto offsets = offset_calc.get(idx);
169
+ scalar_t* out = (scalar_t*)&out_data[offsets[0]];
170
+ *out = transform_func(rand);
171
+ }
172
+ );
173
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
174
+ }
175
+ }
176
+
177
+ // Binary kernel
178
+ template <typename func_t, typename inp_offset_calc_t, typename out_offset_calc_t>
179
+ __global__ void distribution_binary_elementwise_kernel(
180
+ int numel,
181
+ func_t f,
182
+ PhiloxCudaState philox_args,
183
+ typename function_traits<func_t>::result_type *output_data,
184
+ const typename function_traits<func_t>::template arg<1>::type *input_data_1,
185
+ const typename function_traits<func_t>::template arg<2>::type *input_data_2,
186
+ inp_offset_calc_t inp_calc,
187
+ out_offset_calc_t out_calc) {
188
+ auto seeds = at::cuda::philox::unpack(philox_args);
189
+
190
+ using input_t_1 = typename function_traits<func_t>::template arg<1>::type;
191
+ using input_t_2 = typename function_traits<func_t>::template arg<2>::type;
192
+
193
+ input_t_1 inputs_1[thread_work_size()];
194
+ input_t_2 inputs_2[thread_work_size()];
195
+
196
+ int base_index = block_work_size() * blockIdx.x;
197
+ int remaining = std::min<int>(numel - base_index, block_work_size());
198
+
199
+ curandStatePhilox4_32_10_t state;
200
+ curand_init(std::get<0>(seeds),
201
+ blockIdx.x * blockDim.x + threadIdx.x,
202
+ std::get<1>(seeds),
203
+ &state);
204
+
205
+ // load data into registers
206
+ int thread_idx = threadIdx.x;
207
+ #pragma unroll
208
+ for (int i = 0; i < thread_work_size(); i++) {
209
+ if (thread_idx >= remaining) {
210
+ break;
211
+ }
212
+ int input_idx = thread_idx + base_index;
213
+ auto offsets = inp_calc.get(input_idx);
214
+ inputs_1[i] = input_data_1[offsets[0]];
215
+ inputs_2[i] = input_data_2[offsets[1]];
216
+
217
+ thread_idx += num_threads();
218
+ }
219
+
220
+ // compute and store
221
+ thread_idx = threadIdx.x;
222
+ #pragma unroll
223
+ for (int i = 0; i < thread_work_size(); i++) {
224
+ if (thread_idx >= remaining) {
225
+ break;
226
+ }
227
+ int input_idx = thread_idx + base_index;
228
+ auto offsets = out_calc.get(input_idx);
229
+ output_data[offsets[0]] = f(state, inputs_1[i], inputs_2[i]);
230
+ thread_idx += num_threads();
231
+ }
232
+ }
233
+
234
+ template <typename func_t>
235
+ void distribution_binary_kernel(TensorIteratorBase &iter, PhiloxCudaState philox_args, const func_t &f) {
236
+ static_assert(std::is_same<typename function_traits<func_t>::template arg<0>::type, curandStatePhilox4_32_10_t&>::value, "the first argument of functor must be curandStatePhilox4_32_10_t");
237
+ using input_t_1 = typename function_traits<func_t>::template arg<1>::type;
238
+ using input_t_2 = typename function_traits<func_t>::template arg<2>::type;
239
+ using output_t = typename function_traits<func_t>::result_type;
240
+
241
+ if (!iter.can_use_32bit_indexing()) {
242
+ for (auto& sub_iter : iter.with_32bit_indexing()) {
243
+ distribution_binary_kernel(sub_iter, philox_args, f);
244
+ }
245
+ return;
246
+ }
247
+
248
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(iter.can_use_32bit_indexing());
249
+
250
+ int64_t numel = iter.numel();
251
+ if (numel == 0) {
252
+ return;
253
+ }
254
+
255
+ output_t *output_data = static_cast<output_t *>(iter.data_ptr(0));
256
+ const input_t_1 *input_data_1 = static_cast<const input_t_1 *>(iter.data_ptr(1));
257
+ const input_t_2 *input_data_2 = static_cast<const input_t_2 *>(iter.data_ptr(2));
258
+
259
+ int64_t grid = (numel + block_work_size() - 1) / block_work_size();
260
+ auto stream = at::cuda::getCurrentCUDAStream();
261
+
262
+ if (iter.is_contiguous()) {
263
+ distribution_binary_elementwise_kernel<<<grid,num_threads(), 0, stream>>>(
264
+ numel, f, philox_args, output_data, input_data_1, input_data_2,
265
+ TrivialOffsetCalculator<2>(), TrivialOffsetCalculator<1>());
266
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
267
+ } else {
268
+ distribution_binary_elementwise_kernel<<<grid, num_threads(), 0, stream>>>(
269
+ numel, f, philox_args, output_data, input_data_1, input_data_2,
270
+ make_input_offset_calculator<2>(iter), make_output_offset_calculator(iter));
271
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
272
+ }
273
+ }
274
+
275
+ } // namespace
276
+ }} // namespace at::native
277
+
278
+
279
+ namespace at {
280
+ namespace native {
281
+ namespace templates {
282
+ namespace cuda {
283
+
284
+ // ==================================================== Random ========================================================
285
+
286
+ template<typename RNG>
287
+ void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, RNG gen) {
288
+ AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "random_from_to_kernel_cuda", [&] {
289
+ if ((
290
+ std::is_same<scalar_t, int64_t>::value ||
291
+ std::is_same<scalar_t, double>::value ||
292
+ std::is_same<scalar_t, float>::value ||
293
+ std::is_same<scalar_t, at::BFloat16>::value) && range >= 1ULL << 32)
294
+ {
295
+ // define lambda to mod with range and add base
296
+ auto random_func = [range, base] __device__ (uint64_t rand) {
297
+ return transformation::uniform_int_from_to<scalar_t>(rand, range, base);
298
+ };
299
+ distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter,
300
+ gen,
301
+ [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 {
302
+ ulonglong2 ret;
303
+ uint4 rand_val = curand4(state);
304
+ ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
305
+ ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
306
+ return ret;
307
+ },
308
+ random_func);
309
+ } else {
310
+ auto random_func = [range, base] __device__ (uint32_t rand) {
311
+ return transformation::uniform_int_from_to<scalar_t>(rand, range, base);
312
+ };
313
+ distribution_nullary_kernel<scalar_t, uint32_t, curand4_engine_calls>(iter,
314
+ gen,
315
+ [] __device__ (curandStatePhilox4_32_10_t* state) {
316
+ return curand4(state);
317
+ },
318
+ random_func);
319
+ }
320
+ });
321
+ }
322
+
323
+ // This is the special kernel to handle single specific case:
324
+ // from(inclusive) = std::numeric_limits<int64_t>::lowest()
325
+ // to(exclusive) = None (= std::numeric_limits<int64_t>::max() + 1)
326
+ template<typename RNG>
327
+ void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG gen) {
328
+ AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::BFloat16, iter.dtype(), "random_full_64_bits_range_kernel_cuda", [&] {
329
+ if (std::is_same<scalar_t, int64_t>::value ||
330
+ std::is_same<scalar_t, double>::value ||
331
+ std::is_same<scalar_t, float>::value ||
332
+ std::is_same<scalar_t, at::BFloat16>::value) {
333
+ auto random_func = [] __device__ (uint64_t rand) {
334
+ return transformation::uniform_int_full_range<scalar_t>(rand);
335
+ };
336
+ distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter,
337
+ gen,
338
+ [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 {
339
+ ulonglong2 ret;
340
+ uint4 rand_val = curand4(state);
341
+ ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
342
+ ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
343
+ return ret;
344
+ },
345
+ random_func);
346
+ } else {
347
+ TORCH_CHECK(false, "random_full_64_bits_range_kernel_cuda handles only int64, double, float and bfloat16");
348
+ }
349
+ });
350
+ }
351
+
352
+ template<typename RNG>
353
+ struct RandomFromToKernel {
354
+ void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen) {
355
+ random_from_to_kernel(iter, range, base, check_generator<RNG>(gen));
356
+ }
357
+ void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
358
+ random_full_64_bits_range_kernel(iter, check_generator<RNG>(gen));
359
+ }
360
+ };
361
+
362
+ template<typename RNG>
363
+ void random_kernel(TensorIteratorBase& iter, RNG gen) {
364
+ AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "random_kernel_cuda", [&] {
365
+ if (std::is_same<scalar_t, double>::value || std::is_same<scalar_t, int64_t>::value) {
366
+ auto random_func = [] __device__ (uint64_t rand) {
367
+ return transformation::uniform_int<scalar_t>(rand);
368
+ };
369
+ distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter, gen,
370
+ [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 {
371
+ ulonglong2 ret;
372
+ uint4 rand_val = curand4(state);
373
+ ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
374
+ ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
375
+ return ret;
376
+ },
377
+ random_func);
378
+ } else {
379
+ auto random_func = [] __device__ (uint32_t rand) {
380
+ return transformation::uniform_int<scalar_t>(rand);
381
+ };
382
+ distribution_nullary_kernel<scalar_t, uint32_t, curand4_engine_calls>(iter,
383
+ gen,
384
+ [] __device__ (curandStatePhilox4_32_10_t* state) {
385
+ return curand4(state);
386
+ },
387
+ random_func);
388
+ }
389
+ });
390
+ }
391
+
392
+ template<typename RNG>
393
+ struct RandomKernel {
394
+ void operator()(TensorIteratorBase& iter, RNG gen) {
395
+ random_kernel(iter, gen);
396
+ }
397
+ };
398
+
399
+ // ====================================================================================================================
400
+
401
+ template<typename scalar_t, typename accscalar_t, size_t curand4_engine_calls, typename RNG, typename transform_t>
402
+ void uniform_and_transform(TensorIteratorBase& iter, RNG gen, transform_t transform) {
403
+ if (std::is_same<scalar_t, double>::value) {
404
+ distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
405
+ gen,
406
+ [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
407
+ transform);
408
+ } else {
409
+ distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
410
+ gen,
411
+ [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
412
+ transform);
413
+ }
414
+ }
415
+
416
+ template<typename scalar_t, typename accscalar_t, size_t curand4_engine_calls, typename RNG, typename transform_t>
417
+ void normal_and_transform(TensorIteratorBase& iter, RNG gen, transform_t transform) {
418
+ if (std::is_same<scalar_t, double>::value) {
419
+ distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
420
+ gen,
421
+ [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal2_double(state); },
422
+ transform);
423
+ } else {
424
+ distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
425
+ gen,
426
+ [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal4(state); },
427
+ transform);
428
+ }
429
+ }
430
+
431
+ // ==================================================== Normal ========================================================
432
+
433
+ template<typename RNG>
434
+ void normal_kernel(const TensorBase &self, double mean_, double std_, RNG gen) {
435
+ auto iter = TensorIterator::borrowing_nullary_op(self);
436
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "normal_kernel_cuda", [&] {
437
+ using accscalar_t = at::acc_type<scalar_t, true>;
438
+ auto mean = static_cast<accscalar_t>(mean_);
439
+ auto std = static_cast<accscalar_t>(std_);
440
+ // define lambda to multiply std and add mean
441
+ auto normal_func = [mean, std] __device__ (accscalar_t rand) {
442
+ return static_cast<scalar_t>(transformation::normal<accscalar_t>(rand, mean, std));
443
+ };
444
+ normal_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, normal_func);
445
+ });
446
+ }
447
+
448
+ template<typename RNG>
449
+ struct NormalKernel {
450
+ void operator()(const TensorBase &self, double mean, double std, c10::optional<Generator> gen) {
451
+ normal_kernel(self, mean, std, check_generator<RNG>(gen));
452
+ }
453
+ };
454
+
455
+ // ==================================================== Uniform ========================================================
456
+
457
+ template<typename RNG>
458
+ void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG gen) {
459
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "uniform_kernel_cuda", [&] {
460
+ auto from = static_cast<scalar_t>(from_);
461
+ auto to = static_cast<scalar_t>(to_);
462
+ using opmath_t = at::opmath_type<scalar_t>;
463
+ auto range = static_cast<opmath_t>(to-from);
464
+ // define lambda to reverse bounds, multiply 'range' and add 'from_'
465
+ auto uniform_func = [range, from, to] __device__ (opmath_t rand) {
466
+ // Compute output value before reversing the bounds
467
+ // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/96947
468
+ auto value = static_cast<scalar_t>(rand * range + from);
469
+ // reverse the bounds of curand4 from (0, 1] to [0, 1)
470
+ // Note that this method is from legacy THCTensorRandom and is likely to give
471
+ // you more 0-s, since the probability of getting 1-s is higher than 0-s and
472
+ // by reversing the bounds, we are flipping the probabilities of 1-s and 0-s.
473
+ // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/16706
474
+ auto reverse_bound_value = value == to ? from : value;
475
+ return reverse_bound_value;
476
+ };
477
+ uniform_and_transform<scalar_t, opmath_t, curand4_engine_calls>(iter, gen, uniform_func);
478
+ });
479
+ }
480
+
481
+ template<typename RNG>
482
+ struct UniformKernel {
483
+ void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
484
+ uniform_kernel(iter, from, to, check_generator<RNG>(gen));
485
+ }
486
+ };
487
+
488
+ // ================================================== LogNormal =======================================================
489
+
490
+ template<typename RNG>
491
+ void log_normal_kernel(TensorIteratorBase& iter, double mean_, double std_, RNG gen) {
492
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "log_normal_cuda", [&] {
493
+ using accscalar_t = at::acc_type<scalar_t, true>;
494
+ auto mean = static_cast<accscalar_t>(mean_);
495
+ auto std = static_cast<accscalar_t>(std_);
496
+ // define lambda for log_normal transformation
497
+ auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
498
+ return static_cast<scalar_t>(transformation::log_normal<accscalar_t>(transformation::normal<accscalar_t>(rand, mean, std)));
499
+ };
500
+ normal_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, log_normal_func);
501
+ });
502
+ }
503
+
504
+ template<typename RNG>
505
+ struct LogNormalKernel {
506
+ void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
507
+ log_normal_kernel(iter, mean, std, check_generator<RNG>(gen));
508
+ }
509
+ };
510
+
511
+ // =================================================== Geometric ======================================================
512
+
513
+ template<typename RNG>
514
+ void geometric_kernel(TensorIteratorBase& iter, double p, RNG gen) {
515
+ AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "geometric_cuda", [&] {
516
+ using accscalar_t = at::DiscreteDistributionType<scalar_t>::type;
517
+ // define lambda for geometric transformation
518
+ auto geometric_func = [p] __device__ (accscalar_t rand) {
519
+ return static_cast<scalar_t>(transformation::geometric<accscalar_t>(rand, p));
520
+ };
521
+ uniform_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, geometric_func);
522
+ });
523
+ }
524
+
525
+ template<typename RNG>
526
+ struct GeometricKernel {
527
+ void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
528
+ geometric_kernel(iter, p, check_generator<RNG>(gen));
529
+ }
530
+ };
531
+
532
+ // ================================================== Exponential =====================================================
533
+
534
+ template<typename RNG>
535
+ void exponential_kernel(TensorIteratorBase& iter, double lambda_, RNG gen) {
536
+ TORCH_CHECK(isFloatingType(iter.dtype()), "Exponential distribution is a continuous probability distribution. dtype must be a floating point but you specified ", iter.dtype());
537
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exponential_cuda", [&] {
538
+ using accscalar_t = at::acc_type<scalar_t, true>;
539
+ auto lambda = static_cast<accscalar_t>(lambda_);
540
+ // define lambda for exponential transformation
541
+ auto exponential_func = [lambda] __device__ (accscalar_t rand) {
542
+ return static_cast<scalar_t>(transformation::exponential<accscalar_t>(rand, lambda));
543
+ };
544
+ uniform_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, exponential_func);
545
+ });
546
+ }
547
+
548
+ template<typename RNG>
549
+ struct ExponentialKernel {
550
+ void operator()(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
551
+ exponential_kernel(iter, lambda, check_generator<RNG>(gen));
552
+ }
553
+ };
554
+
555
+ // ==================================================== Cauchy ========================================================
556
+
557
+ template<typename RNG>
558
+ void cauchy_kernel(TensorIteratorBase& iter, double median_, double sigma_, RNG gen) {
559
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "cauchy_cuda", [&] {
560
+ using accscalar_t = at::acc_type<scalar_t, true>;
561
+ auto median = static_cast<accscalar_t>(median_);
562
+ auto sigma = static_cast<accscalar_t>(sigma_);
563
+ // define lambda for cauchy transformation
564
+ auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
565
+ return static_cast<scalar_t>(transformation::cauchy<accscalar_t>(rand, median, sigma));
566
+ };
567
+ uniform_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, cauchy_func);
568
+ });
569
+ }
570
+
571
+ template<typename RNG>
572
+ struct CauchyKernel {
573
+ void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
574
+ cauchy_kernel(iter, median, sigma, check_generator<RNG>(gen));
575
+ }
576
+ };
577
+
578
+ // ==================================================== Bernoulli =====================================================
579
+
580
+ template<typename scalar_t, typename prob_t>
581
+ void bernoulli_tensor_cuda_kernel(
582
+ const TensorBase &ret, const at::TensorBase &p,
583
+ PhiloxCudaState philox_args) {
584
+ auto functor = [philox_args] __device__(
585
+ int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
586
+ const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
587
+ auto seeds = at::cuda::philox::unpack(philox_args);
588
+ curandStatePhilox4_32_10_t state;
589
+ curand_init(std::get<0>(seeds),
590
+ blockIdx.x * blockDim.x + threadIdx.x,
591
+ std::get<1>(seeds),
592
+ &state);
593
+
594
+ // See Note [Register spilling in curand call for CUDA < 10]
595
+ float4 rand = curand_uniform4(&state);
596
+ switch (n) {
597
+ case 4: {
598
+ CUDA_KERNEL_ASSERT(0 <= p4 && p4 <= 1);
599
+ v4 = static_cast<scalar_t>(rand.w <= p4);
600
+ // fallthrough
601
+ }
602
+ case 3: {
603
+ CUDA_KERNEL_ASSERT(0 <= p3 && p3 <= 1);
604
+ v3 = static_cast<scalar_t>(rand.z <= p3);
605
+ // fallthrough
606
+ }
607
+ case 2: {
608
+ CUDA_KERNEL_ASSERT(0 <= p2 && p2 <= 1);
609
+ v2 = static_cast<scalar_t>(rand.y <= p2);
610
+ // fallthrough
611
+ }
612
+ case 1: {
613
+ CUDA_KERNEL_ASSERT(0 <= p1 && p1 <= 1);
614
+ v1 = static_cast<scalar_t>(rand.x <= p1);
615
+ }
616
+ }
617
+ };
618
+ // The template argument `4` below indicates that we want to operate on four
619
+ // elements at a time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
620
+ at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4, decltype(functor),
621
+ /*max_threads_per_block=*/512,
622
+ /*min_blocks_per_sm=*/2>(ret, p, functor);
623
+ }
624
+
625
+ template<typename RNG>
626
+ void bernoulli_kernel(const TensorBase &self, const TensorBase &p_, RNG gen) {
627
+ PhiloxCudaState rng_engine_inputs;
628
+ {
629
+ // See Note [Acquire lock when using random generators]
630
+ std::lock_guard<std::mutex> lock(gen->mutex_);
631
+ rng_engine_inputs = gen->philox_cuda_state(10);
632
+ }
633
+ TORCH_CHECK(at::isFloatingType(p_.scalar_type()), "expected probabilities tensor to have floating type, got ", p_.scalar_type());
634
+ // cast probabilities tensor to double for double `self` tensor, and to `float` for everything else
635
+ const auto p_type = self.dtype() == at::kDouble ? at::kDouble : at::kFloat;
636
+ auto p_cuda = p_.to(TensorOptions().device(self.device()).dtype(p_type));
637
+ auto p = expand_inplace(self, p_cuda);
638
+ AT_DISPATCH_ALL_TYPES_AND3(
639
+ at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] {
640
+ if (std::is_same<scalar_t, double>::value) {
641
+ return bernoulli_tensor_cuda_kernel<double, double>(self, *p, rng_engine_inputs);
642
+ } else {
643
+ return bernoulli_tensor_cuda_kernel<scalar_t, float>(self, *p, rng_engine_inputs);
644
+ }
645
+ });
646
+ }
647
+
648
+ template<typename RNG>
649
+ void bernoulli_kernel(TensorIteratorBase& iter, double p, RNG gen) {
650
+ AT_DISPATCH_ALL_TYPES_AND3(
651
+ at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "bernoulli_scalar_cuda_", [&] {
652
+ using accscalar_t = at::DiscreteDistributionType<scalar_t>::type;
653
+ // define lambda for bernoulli transformation
654
+ auto bernoulli_func = [p] __device__ (accscalar_t rand) {
655
+ return static_cast<scalar_t>(transformation::bernoulli<accscalar_t>(rand, p));
656
+ };
657
+ uniform_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, bernoulli_func);
658
+ });
659
+ }
660
+
661
+ template<typename RNG>
662
+ struct BernoulliKernel {
663
+ void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
664
+ bernoulli_kernel(iter, p, check_generator<RNG>(gen));
665
+ }
666
+ void operator()(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen) {
667
+ bernoulli_kernel(self, p_, check_generator<RNG>(gen));
668
+ }
669
+ };
670
+
671
+ }}}}
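The kernel templates above are designed to be instantiated by per-distribution .cu files with a concrete CUDA generator type. A hedged sketch of that wiring, not taken from this diff (the wrapper name `uniform_kernel_cuda` is hypothetical; `get_generator_or_default`, `CUDAGeneratorImpl`, and `getDefaultCUDAGenerator` are assumed to behave as elsewhere in ATen):

// Sketch only: resolve the optional Generator to a CUDAGeneratorImpl* and
// forward to the templated kernel defined above.
void uniform_kernel_cuda(at::TensorIteratorBase& iter, double from, double to,
                         c10::optional<at::Generator> gen) {
  auto* generator = at::get_generator_or_default<at::CUDAGeneratorImpl>(
      gen, at::cuda::detail::getDefaultCUDAGenerator());
  at::native::templates::cuda::uniform_kernel(iter, from, to, generator);
}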
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ForeachFunctors.cuh ADDED
@@ -0,0 +1,681 @@
1
+ #pragma once
2
+ #include <ATen/OpMathType.h>
3
+ #include <ATen/native/ForeachUtils.h>
4
+ #include <ATen/native/cuda/MultiTensorApply.cuh>
5
+ #include <ATen/native/cuda/Pow.cuh>
6
+
7
+ namespace at::native {
8
+
9
+ namespace {
10
+
11
+ // TODO(crcrpar): Handle version bump in codegen.
12
+ // rel:
13
+ // https://github.com/pytorch/pytorch/blob/9cf84347767c8abb8feba18a9a1baba321eeb8b9/tools/autograd/gen_inplace_or_view_type.py#L481-L482
14
+ inline void increment_version(TensorList tensors) {
15
+ for (const auto& t : tensors) {
16
+ t.unsafeGetTensorImpl()->bump_version();
17
+ }
18
+ }
19
+
20
+ // Initializes args and checks if all args are aligned
21
+ template <int depth, typename T>
22
+ __device__ bool init_args(
23
+ T** args,
24
+ TensorListMetadata<depth>& tl,
25
+ const int64_t chunk_idx,
26
+ const int64_t chunk_size,
27
+ const int64_t tensor_loc) {
28
+ bool all_aligned = true;
29
+ for (int i = 0; i < depth; i++) {
30
+ args[i] = (T*)tl.addresses[i][tensor_loc];
31
+ args[i] += chunk_idx * chunk_size;
32
+
33
+ if (!is_aligned(args[i])) {
34
+ all_aligned = false;
35
+ }
36
+ }
37
+ return all_aligned;
38
+ }
39
+
40
+ // Initializes args and checks if all args are aligned
41
+ template <int depth, typename T, typename T2>
42
+ __device__ bool init_args(
43
+ T** args,
44
+ TensorListScalarListMetadata<T2, depth>& tl,
45
+ const int64_t chunk_idx,
46
+ const int64_t chunk_size,
47
+ const int64_t tensor_loc) {
48
+ bool all_aligned = true;
49
+ for (int i = 0; i < depth; i++) {
50
+ args[i] = (T*)tl.addresses[i][tensor_loc];
51
+ args[i] += chunk_idx * chunk_size;
52
+
53
+ if (!is_aligned(args[i])) {
54
+ all_aligned = false;
55
+ }
56
+ }
57
+ return all_aligned;
58
+ }
59
+
60
+ template <int depth, typename T>
61
+ __device__ bool init_args(
62
+ T** args,
63
+ FusedOptimizerTensorListMetadata<depth>& tl,
64
+ const int64_t chunk_idx,
65
+ const int64_t chunk_size,
66
+ const int64_t tensor_loc) {
67
+ bool all_aligned = true;
68
+ for (int i = 0; i < depth; i++) {
69
+ args[i] = (T*)tl.addresses[i][tensor_loc];
70
+ args[i] += chunk_idx * chunk_size;
71
+
72
+ if (!is_aligned(args[i])) {
73
+ all_aligned = false;
74
+ }
75
+ }
76
+ return all_aligned;
77
+ }
78
+
79
+ template <int depth, typename T>
80
+ __device__ void load_args(
81
+ T r_args[][kILP],
82
+ T** args,
83
+ const int64_t i_start,
84
+ const int64_t chunk_size,
85
+ const int64_t n) {
86
+ #pragma unroll
87
+ for (int ii = 0; ii < kILP; ii++) {
88
+ const auto i = i_start + threadIdx.x + ii * blockDim.x;
89
+ for (int r_index = 0; r_index < depth; r_index++) {
90
+ r_args[r_index][ii] = 0;
91
+ if (i < n && i < chunk_size) {
92
+ r_args[r_index][ii] = args[r_index][i];
93
+ }
94
+ }
95
+ }
96
+ }
97
+
98
+ template <typename T>
99
+ __device__ void store_args(
100
+ T* dst,
101
+ T* src,
102
+ const int64_t i_start,
103
+ const int64_t chunk_size,
104
+ const int64_t n) {
105
+ #pragma unroll
106
+ for (int ii = 0; ii < kILP; ii++) {
107
+ const int64_t i = i_start + threadIdx.x + ii * blockDim.x;
108
+ if (i < n && i < chunk_size)
109
+ dst[i] = src[ii];
110
+ }
111
+ }
112
+
113
+ template <int res_arg_index, typename Op, typename T, typename opmath_t>
114
+ __device__ __forceinline__ void binary_op_scalar(
115
+ T r_args[][kILP],
116
+ T** args,
117
+ opmath_t scalar,
118
+ const int64_t n,
119
+ const int64_t chunk_size,
120
+ const bool all_aligned,
121
+ Op op) {
122
+ // to make things simple, we put aligned case in a different code path
123
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
124
+ for (int64_t i_start = threadIdx.x;
125
+ i_start * kILP < n && i_start * kILP < chunk_size;
126
+ i_start += blockDim.x) {
127
+ // load
128
+ load_store(r_args[0], args[0], 0, i_start);
129
+ #pragma unroll
130
+ for (int ii = 0; ii < kILP; ii++) {
131
+ r_args[0][ii] = static_cast<T>(
132
+ op(static_cast<opmath_t>(r_args[0][ii]),
133
+ static_cast<opmath_t>(scalar)));
134
+ }
135
+ // store
136
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
137
+ }
138
+ } else {
139
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
140
+ i_start += blockDim.x * kILP) {
141
+ // Regardless of whether depth is 1 (for inplace) or 2 (for out of place), r_args
142
+ // has depth 1
143
+ load_args<1>(r_args, args, i_start, chunk_size, n);
144
+ #pragma unroll
145
+ for (int ii = 0; ii < kILP; ii++) {
146
+ r_args[0][ii] = static_cast<T>(
147
+ op(static_cast<opmath_t>(r_args[0][ii]),
148
+ static_cast<opmath_t>(scalar)));
149
+ }
150
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
151
+ }
152
+ }
153
+ }
154
+
155
+ template <int res_arg_index, typename Op, typename T, typename opmath_t>
156
+ __device__ __forceinline__ void pointwise_op_scalar(
157
+ T r_args[][kILP],
158
+ T** args,
159
+ opmath_t scalar,
160
+ const int64_t n,
161
+ const int64_t chunk_size,
162
+ const bool all_aligned,
163
+ Op op) {
164
+ // to make things simple, we put aligned case in a different code path
165
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
166
+ for (int64_t i_start = threadIdx.x;
167
+ i_start * kILP < n && i_start * kILP < chunk_size;
168
+ i_start += blockDim.x) {
169
+ // load
170
+ load_store(r_args[0], args[0], 0, i_start);
171
+ load_store(r_args[1], args[1], 0, i_start);
172
+ load_store(r_args[2], args[2], 0, i_start);
173
+ #pragma unroll
174
+ for (int ii = 0; ii < kILP; ii++) {
175
+ r_args[0][ii] = static_cast<T>(
176
+ static_cast<opmath_t>(r_args[0][ii]) +
177
+ scalar *
178
+ op(static_cast<opmath_t>(r_args[1][ii]),
179
+ static_cast<opmath_t>(r_args[2][ii])));
180
+ }
181
+ // store
182
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
183
+ }
184
+ } else {
185
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
186
+ i_start += blockDim.x * kILP) {
187
+ // Regardless of whether depth is 3 (for inplace) or 4 (for out of place), r_args
188
+ // has depth 3
189
+ load_args<3>(r_args, args, i_start, chunk_size, n);
190
+ #pragma unroll
191
+ for (int ii = 0; ii < kILP; ii++) {
192
+ r_args[0][ii] = static_cast<T>(
193
+ static_cast<opmath_t>(r_args[0][ii]) +
194
+ scalar *
195
+ op(static_cast<opmath_t>(r_args[1][ii]),
196
+ static_cast<opmath_t>(r_args[2][ii])));
197
+ }
198
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
199
+ }
200
+ }
201
+ }
202
+
203
+ //
204
+ // Binary Functors
205
+ //
206
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
207
+ struct BinaryOpScalarFunctor {
208
+ using opmath_t = at::opmath_type<T>;
209
+ template <typename Op>
210
+ __device__ __forceinline__ void operator()(
211
+ int chunk_size,
212
+ TensorListMetadata<depth>& tl,
213
+ Op op,
214
+ opmath_t scalar) {
215
+ const int tensor_loc = tl.block_to_tensor[blockIdx.x];
216
+ const int chunk_idx = tl.block_to_chunk[blockIdx.x];
217
+ auto n = tl.numel_for_tensor[tensor_loc];
218
+
219
+ T* args[depth];
220
+ const bool all_aligned =
221
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
222
+ n -= chunk_idx * chunk_size;
223
+ T r_args[r_args_depth][kILP];
224
+
225
+ binary_op_scalar<res_arg_index>(
226
+ r_args, args, scalar, n, chunk_size, all_aligned, op);
227
+ }
228
+ };
229
+
230
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
231
+ struct BinaryOpScalarListFunctor {
232
+ using opmath_t = at::opmath_type<T>;
233
+ template <typename Op>
234
+ __device__ __forceinline__ void operator()(
235
+ int chunk_size,
236
+ TensorListScalarListMetadata<opmath_t, depth>& tl,
237
+ Op op) {
238
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
239
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
240
+ auto n = tl.numel_for_tensor[tensor_loc];
241
+
242
+ T* args[depth];
243
+ const bool all_aligned =
244
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
245
+ opmath_t scalar = tl.scalar_vals[tensor_loc];
246
+ n -= chunk_idx * chunk_size;
247
+ T r_args[r_args_depth][kILP];
248
+
249
+ binary_op_scalar<res_arg_index>(
250
+ r_args, args, scalar, n, chunk_size, all_aligned, op);
251
+ }
252
+ };
253
+
254
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
255
+ struct BinaryOpListAlphaFunctor {
256
+ using opmath_t = at::opmath_type<T>;
257
+ template <typename Op>
258
+ __device__ __forceinline__ void operator()(
259
+ int chunk_size,
260
+ TensorListMetadata<depth>& tl,
261
+ Op op,
262
+ opmath_t alpha) {
263
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
264
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
265
+ auto n = tl.numel_for_tensor[tensor_loc];
266
+
267
+ T* args[depth];
268
+ const bool all_aligned =
269
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
270
+ n -= chunk_idx * chunk_size;
271
+ T r_args[r_args_depth][kILP];
272
+
273
+ // to make things simple, we put aligned case in a different code path
274
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
275
+ for (int64_t i_start = threadIdx.x;
276
+ i_start * kILP < n && i_start * kILP < chunk_size;
277
+ i_start += blockDim.x) {
278
+ // load
279
+ load_store(r_args[0], args[0], 0, i_start);
280
+ load_store(r_args[1], args[1], 0, i_start);
281
+ #pragma unroll
282
+ for (int ii = 0; ii < kILP; ii++) {
283
+ r_args[0][ii] = static_cast<T>(
284
+ op(static_cast<opmath_t>(r_args[0][ii]),
285
+ alpha * static_cast<opmath_t>(r_args[1][ii])));
286
+ }
287
+ // store
288
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
289
+ }
290
+ } else {
291
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
292
+ i_start += blockDim.x * kILP) {
293
+ load_args<r_args_depth>(r_args, args, i_start, chunk_size, n);
294
+ #pragma unroll
295
+ for (int ii = 0; ii < kILP; ii++) {
296
+ r_args[0][ii] = static_cast<T>(
297
+ op(static_cast<opmath_t>(r_args[0][ii]),
298
+ alpha * static_cast<opmath_t>(r_args[1][ii])));
299
+ }
300
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
301
+ }
302
+ }
303
+ }
304
+ };
305
+
306
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
307
+ struct BinaryOpScalarTensorFunctor {
308
+ using opmath_t = at::opmath_type<T>;
309
+ template <typename Op>
310
+ __device__ __forceinline__ void operator()(
311
+ int chunk_size,
312
+ TensorListMetadata<depth>& tl,
313
+ Op op,
314
+ T* scalar,
315
+ opmath_t alpha) {
316
+ const int tensor_loc = tl.block_to_tensor[blockIdx.x];
317
+ const int chunk_idx = tl.block_to_chunk[blockIdx.x];
318
+ auto n = tl.numel_for_tensor[tensor_loc];
319
+
320
+ T* args[depth];
321
+ const bool all_aligned =
322
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
323
+ n -= chunk_idx * chunk_size;
324
+ T r_args[r_args_depth][kILP];
325
+
326
+ // to make things simple, we put aligned case in a different code path
327
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
328
+ for (int64_t i_start = threadIdx.x;
329
+ i_start * kILP < n && i_start * kILP < chunk_size;
330
+ i_start += blockDim.x) {
331
+ // load
332
+ load_store(r_args[0], args[0], 0, i_start);
333
+ #pragma unroll
334
+ for (int ii = 0; ii < kILP; ii++) {
335
+ r_args[0][ii] = static_cast<T>(op(
336
+ static_cast<opmath_t>(r_args[0][ii]),
337
+ static_cast<opmath_t>(alpha) * static_cast<opmath_t>(*scalar)));
338
+ }
339
+ // store
340
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
341
+ }
342
+ } else {
343
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
344
+ i_start += blockDim.x * kILP) {
345
+ // Regardless of whether depth is 1 (for inplace) or 2 (for out of place),
346
+ // r_args has depth 1
347
+ load_args<1>(r_args, args, i_start, chunk_size, n);
348
+ #pragma unroll
349
+ for (int ii = 0; ii < kILP; ii++) {
350
+ r_args[0][ii] = static_cast<T>(op(
351
+ static_cast<opmath_t>(r_args[0][ii]),
352
+ static_cast<opmath_t>(alpha) * static_cast<opmath_t>(*scalar)));
353
+ }
354
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
355
+ }
356
+ }
357
+ }
358
+ };
359
+
360
+ //
361
+ // Unary Functors
362
+ //
363
+
364
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
365
+ struct ZeroFunctor {
366
+ __device__ __forceinline__ void operator()(
367
+ int chunk_size,
368
+ TensorListMetadata<1>& tl) {
369
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
370
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
371
+ auto n = tl.numel_for_tensor[tensor_loc];
372
+
373
+ T* args[depth];
374
+ const auto all_aligned =
375
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
376
+ n -= chunk_idx * chunk_size;
377
+ T r_args[r_args_depth][kILP];
378
+
379
+ // to make things simple, we put aligned case in a different code path
380
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
381
+ for (int64_t i_start = threadIdx.x;
382
+ i_start * kILP < n && i_start * kILP < chunk_size;
383
+ i_start += blockDim.x) {
384
+ #pragma unroll
385
+ for (int ii = 0; ii < kILP; ii++) {
386
+ r_args[0][ii] = 0;
387
+ }
388
+ // store
389
+ load_store(args[0], r_args[0], i_start, 0);
390
+ }
391
+ } else {
392
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
393
+ i_start += blockDim.x * kILP) {
394
+ #pragma unroll
395
+ for (int ii = 0; ii < kILP; ii++) {
396
+ r_args[0][ii] = 0;
397
+ }
398
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
399
+ }
400
+ }
401
+ }
402
+ };
403
+
404
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
405
+ struct UnaryOpFunctor {
406
+ using opmath_t = at::opmath_type<T>;
407
+ template <typename Op>
408
+ __device__ __forceinline__ void operator()(
409
+ int chunk_size,
410
+ TensorListMetadata<depth>& tl,
411
+ Op op) {
412
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
413
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
414
+ auto n = tl.numel_for_tensor[tensor_loc];
415
+
416
+ T* args[depth];
417
+ bool all_aligned =
418
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
419
+ n -= chunk_idx * chunk_size;
420
+ T r_args[r_args_depth][kILP];
421
+
422
+ // to make things simple, we put aligned case in a different code path
423
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
424
+ for (int64_t i_start = threadIdx.x;
425
+ i_start * kILP < n && i_start * kILP < chunk_size;
426
+ i_start += blockDim.x) {
427
+ // load
428
+ load_store(r_args[0], args[0], 0, i_start);
429
+ #pragma unroll
430
+ for (int ii = 0; ii < kILP; ii++) {
431
+ r_args[0][ii] =
432
+ static_cast<T>(op(static_cast<opmath_t>(r_args[0][ii])));
433
+ }
434
+ // store
435
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
436
+ }
437
+ } else {
438
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
439
+ i_start += blockDim.x * kILP) {
440
+ load_args<r_args_depth>(r_args, args, i_start, chunk_size, n);
441
+ #pragma unroll
442
+ for (int ii = 0; ii < kILP; ii++) {
443
+ r_args[0][ii] =
444
+ static_cast<T>(op(static_cast<opmath_t>(r_args[0][ii])));
445
+ }
446
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
447
+ }
448
+ }
449
+ }
450
+ };
451
+
452
+ //
453
+ // Pointwise Functors
454
+ //
455
+
456
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
457
+ struct PointwiseOpScalarFunctor {
458
+ using opmath_t = at::opmath_type<T>;
459
+ template <typename Op>
460
+ __device__ __forceinline__ void operator()(
461
+ int chunk_size,
462
+ TensorListMetadata<depth>& tl,
463
+ Op op,
464
+ opmath_t scalar) {
465
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
466
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
467
+ auto n = tl.numel_for_tensor[tensor_loc];
468
+
469
+ T* args[depth];
470
+ const bool all_aligned =
471
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
472
+ n -= chunk_idx * chunk_size;
473
+ T r_args[r_args_depth][kILP];
474
+
475
+ pointwise_op_scalar<res_arg_index>(
476
+ r_args, args, scalar, n, chunk_size, all_aligned, op);
477
+ }
478
+ };
479
+
480
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
481
+ struct PointwiseOpScalarListFunctor {
482
+ using opmath_t = at::opmath_type<T>;
483
+ template <typename Op>
484
+ __device__ __forceinline__ void operator()(
485
+ int chunk_size,
486
+ TensorListScalarListMetadata<opmath_t, depth>& tl,
487
+ Op op) {
488
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
489
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
490
+ auto n = tl.numel_for_tensor[tensor_loc];
491
+
492
+ T* args[depth];
493
+ const bool all_aligned =
494
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
495
+ opmath_t scalar = tl.scalar_vals[tensor_loc];
496
+ n -= chunk_idx * chunk_size;
497
+ T r_args[r_args_depth][kILP];
498
+
499
+ pointwise_op_scalar<res_arg_index>(
500
+ r_args, args, scalar, n, chunk_size, all_aligned, op);
501
+ }
502
+ };
503
+
504
+ template <typename T, int depth>
505
+ struct PointwiseOpListFunctor {
506
+ using opmath_t = at::opmath_type<T>;
507
+ template <typename Op>
508
+ __device__ __forceinline__ void operator()(
509
+ int chunk_size,
510
+ TensorListMetadata<depth>& tl,
511
+ Op op) {
512
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
513
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
514
+ auto n = tl.numel_for_tensor[tensor_loc];
515
+
516
+ T* args[depth];
517
+ const bool all_aligned =
518
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
519
+ n -= chunk_idx * chunk_size;
520
+ T r_args[depth - 1][kILP];
521
+
522
+ // to make things simple, we put aligned case in a different code path
523
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
524
+ for (int64_t i_start = threadIdx.x;
525
+ i_start * kILP < n && i_start * kILP < chunk_size;
526
+ i_start += blockDim.x) {
527
+ // load
528
+ load_store(r_args[0], args[0], 0, i_start);
529
+ load_store(r_args[1], args[1], 0, i_start);
530
+ #pragma unroll
531
+ for (int ii = 0; ii < kILP; ii++) {
532
+ r_args[0][ii] = static_cast<T>(
533
+ op(static_cast<opmath_t>(r_args[0][ii]),
534
+ static_cast<opmath_t>(r_args[1][ii])));
535
+ }
536
+ // store
537
+ load_store(args[2], r_args[0], i_start, 0);
538
+ }
539
+ } else {
540
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
541
+ i_start += blockDim.x * kILP) {
542
+ load_args<depth - 1>(r_args, args, i_start, chunk_size, n);
543
+ #pragma unroll
544
+ for (int ii = 0; ii < kILP; ii++) {
545
+ r_args[0][ii] = static_cast<T>(
546
+ op(static_cast<opmath_t>(r_args[0][ii]),
547
+ static_cast<opmath_t>(r_args[1][ii])));
548
+ }
549
+ store_args(args[2], r_args[0], i_start, chunk_size, n);
550
+ }
551
+ }
552
+ }
553
+ };
554
+
555
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
556
+ struct TernaryOpListFunctor {
557
+ using opmath_t = at::opmath_type<T>;
558
+ template <typename Op>
559
+ __device__ __forceinline__ void operator()(
+ int chunk_size,
+ TensorListMetadata<depth>& tl,
+ Op op) {
+ static_assert(depth == 3 || depth == 4, "");
+ static_assert(depth >= r_args_depth, "");
+ static_assert(res_arg_index == depth - 1 || res_arg_index == 0, "");
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
+ auto n = tl.numel_for_tensor[tensor_loc];
+
+ T* args[depth];
+ const bool all_aligned =
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
+ n -= chunk_idx * chunk_size;
+ T r_args[r_args_depth][kILP];
+
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
+ for (int64_t i_start = threadIdx.x;
+ i_start * kILP < n && i_start * kILP < chunk_size;
+ i_start += blockDim.x) {
+ load_store(r_args[0], args[0], 0, i_start);
+ load_store(r_args[1], args[1], 0, i_start);
+ load_store(r_args[2], args[2], 0, i_start);
+ #pragma unroll
+ for (int ii = 0; ii < kILP; ii++) {
+ r_args[0][ii] =
+ op(static_cast<opmath_t>(r_args[0][ii]),
+ static_cast<opmath_t>(r_args[1][ii]),
+ static_cast<opmath_t>(r_args[2][ii]));
+ }
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
+ }
+ } else {
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
+ i_start += blockDim.x * kILP) {
+ load_args<r_args_depth>(r_args, args, i_start, chunk_size, n);
+ #pragma unroll
+ for (int ii = 0; ii < kILP; ii++) {
+ r_args[0][ii] =
+ op(static_cast<opmath_t>(r_args[0][ii]),
+ static_cast<opmath_t>(r_args[1][ii]),
+ static_cast<opmath_t>(r_args[2][ii]));
+ }
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
+ }
+ }
+ }
+ };
+
+ template <typename T, int depth, int r_args_depth, int res_arg_index>
+ struct TernaryOpScalarFunctor {
+ using opmath_t = at::opmath_type<T>;
+ template <typename Op>
+ __device__ __forceinline__ void operator()(
+ int chunk_size,
+ TensorListMetadata<depth>& tl,
+ Op op,
+ opmath_t alpha) {
+ static_assert(depth == 2 || depth == 3, "");
+ static_assert(depth >= r_args_depth, "");
+ static_assert(res_arg_index == depth - 1 || res_arg_index == 0, "");
+ const auto tensor_loc = tl.block_to_tensor[blockIdx.x];
+ const auto chunk_idx = tl.block_to_chunk[blockIdx.x];
+ auto n = tl.numel_for_tensor[tensor_loc];
+
+ T* args[depth];
+ const bool all_aligned =
+ init_args<depth>(args, tl, chunk_idx, chunk_size, tensor_loc);
+ n -= chunk_idx * chunk_size;
+ T r_args[r_args_depth][kILP];
+
+ // to make things simple, we put aligned case in a different code path
+ if (n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) {
+ for (int64_t i_start = threadIdx.x;
+ i_start * kILP < n && i_start * kILP < chunk_size;
+ i_start += blockDim.x) {
+ // load
+ load_store(r_args[0], args[0], 0, i_start);
+ load_store(r_args[1], args[1], 0, i_start);
+ #pragma unroll
+ for (int ii = 0; ii < kILP; ii++) {
+ r_args[0][ii] =
+ op(static_cast<opmath_t>(r_args[0][ii]),
+ static_cast<opmath_t>(r_args[1][ii]),
+ alpha);
+ }
+ // store
+ load_store(args[res_arg_index], r_args[0], i_start, 0);
+ }
+ } else {
+ for (int64_t i_start = 0; i_start < n && i_start < chunk_size;
+ i_start += blockDim.x * kILP) {
+ load_args<r_args_depth>(r_args, args, i_start, chunk_size, n);
+ #pragma unroll
+ for (int ii = 0; ii < kILP; ii++) {
+ r_args[0][ii] =
+ op(static_cast<opmath_t>(r_args[0][ii]),
+ static_cast<opmath_t>(r_args[1][ii]),
+ alpha);
+ }
+ store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n);
+ }
+ }
+ }
+ };
+
+ template <typename T>
+ struct power_functor {
+ C10_DEVICE T operator()(const T& a, const T& b) const {
+ return at::native::pow_(a, b);
+ }
+ };
+
+ template <typename T>
+ struct reverse_power_functor {
+ C10_DEVICE T operator()(const T& a, const T& b) const {
+ return at::native::pow_(b, a);
+ }
+ };
+
+ } // namespace
+ } // namespace at::native
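The ternary functors above follow the same chunked, ILP-vectorized pattern used throughout this header: each CUDA block is handed one chunk of one tensor via TensorListMetadata, and when the chunk is kILP-aligned the data is moved through registers kILP elements at a time. The standalone CUDA sketch below only illustrates that vectorized load/compute/store idea outside of ATen; the kernel name, the float4 packing, and the addcmul-style op are illustrative assumptions, not ATen's multi_tensor_apply machinery.

// Illustrative only: a grid-stride kernel mimicking the kILP register tiling
// used by the foreach functors (assumes 16-byte-aligned float buffers).
constexpr int kILP = 4;

__global__ void ternary_addcmul_like(float* a, const float* b, const float* c, int n) {
  float ra[kILP], rb[kILP], rc[kILP];
  for (int i = (blockIdx.x * blockDim.x + threadIdx.x) * kILP;
       i + kILP <= n;
       i += gridDim.x * blockDim.x * kILP) {
    // Vectorized loads: kILP floats per thread per iteration.
    *reinterpret_cast<float4*>(ra) = *reinterpret_cast<const float4*>(a + i);
    *reinterpret_cast<float4*>(rb) = *reinterpret_cast<const float4*>(b + i);
    *reinterpret_cast<float4*>(rc) = *reinterpret_cast<const float4*>(c + i);
#pragma unroll
    for (int ii = 0; ii < kILP; ii++) {
      ra[ii] = ra[ii] + rb[ii] * rc[ii];  // an addcmul-style ternary op
    }
    // Vectorized store back into the result operand.
    *reinterpret_cast<float4*>(a + i) = *reinterpret_cast<float4*>(ra);
  }
  // A tail loop for the n % kILP leftovers is omitted; the real functors
  // fall back to load_args/store_args for the unaligned case.
}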
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/GridSampler.cuh ADDED
@@ -0,0 +1,321 @@
+ #pragma once
+ #include <ATen/native/cuda/KernelUtils.cuh>
+ #include <ATen/native/GridSamplerUtils.h>
+
+ namespace at { namespace native {
+
+ using detail::GridSamplerInterpolation;
+ using detail::GridSamplerPadding;
+
+ // Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value,
+ // where we view each pixel as an area between (idx - 0.5) and (idx + 0.5).
+ // if align_corners: -1 and +1 get sent to the centers of the corner pixels
+ // -1 --> 0
+ // +1 --> (size - 1)
+ // scale_factor = (size - 1) / 2
+ // if not align_corners: -1 and +1 get sent to the image edges
+ // -1 --> -0.5
+ // +1 --> (size - 1) + 0.5 == size - 0.5
+ // scale_factor = size / 2
+ template <typename scalar_t>
+ static __forceinline__ __device__
+ scalar_t grid_sampler_unnormalize(scalar_t coord, int size, bool align_corners) {
+ if (align_corners) {
+ // unnormalize coord from [-1, 1] to [0, size - 1]
+ return ((coord + 1.f) / 2) * (size - 1);
+ } else {
+ // unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
+ return ((coord + 1.f) * size - 1) / 2;
+ }
+ }
+
+ // grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize
+ // except that it also returns the `d output / d input` via pointer argument
+ // `grad_in`.
+ // This is useful in the backward pass of grid_sampler.
+ template <typename scalar_t>
+ static __forceinline__ __device__
+ scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int size,
+ bool align_corners, scalar_t *grad_in) {
+ if (align_corners) {
+ // unnormalize coord from [-1, 1] to [0, size - 1]
+ *grad_in = static_cast<scalar_t>(size - 1) / 2;
+ return ((coord + 1.f) / 2) * (size - 1);
+ } else {
+ // unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
+ *grad_in = static_cast<scalar_t>(size) / 2;
+ return ((coord + 1.f) * size - 1) / 2;
+ }
+ }
+
+ // Clips coordinates to between 0 and clip_limit - 1
+ template <typename scalar_t>
+ static __forceinline__ __device__
+ scalar_t clip_coordinates(scalar_t in, int clip_limit) {
+ return ::min(static_cast<scalar_t>(clip_limit - 1), ::max(in, static_cast<scalar_t>(0)));
+ }
+
+ // clip_coordinates_set_grad works similarly to clip_coordinates except that
+ // it also returns the `d output / d input` via pointer argument `grad_in`.
+ // This is useful in the backward pass of grid_sampler.
+ template <typename scalar_t>
+ static __forceinline__ __device__
+ scalar_t clip_coordinates_set_grad(scalar_t in, int clip_limit, scalar_t *grad_in) {
+ // Note that it is important for the gradient calculation that borders
+ // are considered out of bounds.
+ if (in <= static_cast<scalar_t>(0)) {
+ *grad_in = static_cast<scalar_t>(0);
+ return static_cast<scalar_t>(0);
+ } else {
+ scalar_t max = static_cast<scalar_t>(clip_limit - 1);
+ if (in >= max) {
+ *grad_in = static_cast<scalar_t>(0);
+ return max;
+ } else {
+ *grad_in = static_cast<scalar_t>(1);
+ return in;
+ }
+ }
+ }
+
+ // Reflects coordinates until they fall between low and high (inclusive).
+ // The bounds are passed as twice their value so that half-integer values
+ // can be represented as ints.
+ template <typename scalar_t>
+ static __forceinline__ __device__
+ scalar_t reflect_coordinates(scalar_t in, int twice_low, int twice_high) {
+ if (twice_low == twice_high) {
+ return static_cast<scalar_t>(0);
+ }
+ scalar_t min = static_cast<scalar_t>(twice_low) / 2;
+ scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
+ in = ::fabs(in - min);
+ // `fmod` returns same sign as `in`, which is positive after the `fabs` above.
+ scalar_t extra = ::fmod(in, span);
+ int flips = static_cast<int>(::floor(in / span));
+ if (flips % 2 == 0) {
+ return extra + min;
+ } else {
+ return span - extra + min;
+ }
+ }
+
+ // reflect_coordinates_set_grad works similarly to reflect_coordinates except
+ // that it also returns the `d output / d input` via pointer argument
+ // `grad_in`.
+ // This is useful in the backward pass of grid_sampler.
+ template <typename scalar_t>
+ static __forceinline__ __device__
+ scalar_t reflect_coordinates_set_grad(scalar_t in, int twice_low, int twice_high,
+ scalar_t *grad_in) {
+ if (twice_low == twice_high) {
+ *grad_in = static_cast<scalar_t>(0);
+ return static_cast<scalar_t>(0);
+ }
+ int grad_in_mult_;
+ scalar_t min = static_cast<scalar_t>(twice_low) / 2;
+ scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
+ in = in - min;
+ if (in < static_cast<scalar_t>(0)) {
+ grad_in_mult_ = -1;
+ in = -in;
+ } else {
+ grad_in_mult_ = 1;
+ }
+ // `fmod` returns same sign as `in`, which is positive after the `if` above.
+ scalar_t extra = ::fmod(in, span);
+ int flips = static_cast<int>(::floor(in / span));
+ if (flips % 2 == 0) {
+ *grad_in = static_cast<scalar_t>(grad_in_mult_);
+ return extra + min;
+ } else {
+ *grad_in = static_cast<scalar_t>(-grad_in_mult_);
+ return span - extra + min;
+ }
+ }
+
+ template<typename scalar_t>
+ static __forceinline__ __device__
+ scalar_t safe_downgrade_to_int_range(scalar_t x){
+ // -100.0 does not have special meaning. This is just to make sure
+ // it's not within_bounds_2d or within_bounds_3d, and does not cause
+ // undefined behavior. See #35506.
+ if (x > INT_MAX-1 || x < INT_MIN || !::isfinite(static_cast<double>(x)))
+ return static_cast<scalar_t>(-100.0);
+ return x;
+ }
+
+ template<typename scalar_t>
+ static __forceinline__ __device__
+ scalar_t compute_coordinates(scalar_t coord, int size,
+ GridSamplerPadding padding_mode,
+ bool align_corners) {
+ if (padding_mode == GridSamplerPadding::Border) {
+ // clip coordinates to image borders
+ coord = clip_coordinates(coord, size);
+ } else if (padding_mode == GridSamplerPadding::Reflection) {
+ // reflect coordinates by image borders
+ if (align_corners) {
+ coord = reflect_coordinates(coord, 0, 2*(size - 1));
+ } else {
+ coord = reflect_coordinates(coord, -1, 2*size - 1);
+ }
+ // clip coordinates to image borders
+ coord = clip_coordinates(coord, size);
+ }
+
+ coord = safe_downgrade_to_int_range(coord);
+ return coord;
+ }
+
+ // Computes the pixel source index value for a grid coordinate
+ template <typename scalar_t>
+ static __forceinline__ __device__
+ scalar_t grid_sampler_compute_source_index(
+ scalar_t coord,
+ int size,
+ GridSamplerPadding padding_mode,
+ bool align_corners) {
+ coord = grid_sampler_unnormalize(coord, size, align_corners);
+ coord = compute_coordinates(coord, size, padding_mode, align_corners);
+ return coord;
+ }
+
+ // grid_sampler_compute_source_index_set_grad works similarly to
+ // grid_sampler_compute_source_index except that it also returns the
+ // `d output / d input` via pointer argument `grad_in`.
+ // This is useful in the backward pass of grid_sampler.
+ template <typename scalar_t>
+ static __forceinline__ __device__
+ scalar_t grid_sampler_compute_source_index_set_grad(
+ scalar_t coord,
+ int size,
+ GridSamplerPadding padding_mode,
+ bool align_corners,
+ scalar_t *grad_in) {
+ scalar_t grad_clip, grad_refl;
+ coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in);
+ if (padding_mode == GridSamplerPadding::Border) {
+ // clip coordinates to image borders
+ coord = clip_coordinates_set_grad(coord, size, &grad_clip);
+ *grad_in = (*grad_in) * grad_clip;
+ } else if (padding_mode == GridSamplerPadding::Reflection) {
+ // reflect coordinates by image borders
+ if (align_corners) {
+ coord = reflect_coordinates_set_grad(coord, 0, 2*(size - 1), &grad_refl);
+ } else {
+ coord = reflect_coordinates_set_grad(coord, -1, 2*size - 1, &grad_refl);
+ }
+ // clip coordinates to image borders
+ coord = clip_coordinates_set_grad(coord, size, &grad_clip);
+ *grad_in = (*grad_in) * grad_refl * grad_clip;
+ }
+
+ coord = safe_downgrade_to_int_range(coord);
+ return coord;
+ }
+
+ static __forceinline__ __device__
+ bool within_bounds_2d(int h, int w, int H, int W) {
+ return h >= 0 && h < H && w >= 0 && w < W;
+ }
+
+ static __forceinline__ __device__
+ bool within_bounds_3d(int d, int h, int w, int D, int H, int W) {
+ return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W;
+ }
+
+ template<typename scalar_t>
+ static __forceinline__ __device__
+ scalar_t get_value_bounded(
+ scalar_t *data, scalar_t x, scalar_t y, int W, int H, int sW, int sH,
+ GridSamplerPadding padding_mode,
+ bool align_corners) {
+
+ x = compute_coordinates(x, W, padding_mode, align_corners);
+ y = compute_coordinates(y, H, padding_mode, align_corners);
+
+ int ix = static_cast<int>(x);
+ int iy = static_cast<int>(y);
+
+ if (within_bounds_2d(iy, ix, H, W)) {
+ return data[iy * sH + ix * sW];
+ }
+ return static_cast<scalar_t>(0);
+ }
+
+ template<typename scalar_t, typename index_t>
+ static __forceinline__ __device__
+ void safe_add_2d(scalar_t *data, int h, int w,
+ int sH, int sW, int H, int W,
+ scalar_t delta,
+ const index_t NC_offset,
+ const index_t memory_span) {
+ if (within_bounds_2d(h, w, H, W)) {
+ fastAtomicAdd(data,
+ NC_offset + h * sH + w * sW,
+ memory_span,
+ delta,
+ true);
+ }
+ }
+
+ template<typename scalar_t, typename index_t>
+ static __forceinline__ __device__
+ void safe_add_3d(scalar_t *data, int d, int h, int w,
+ int sD, int sH, int sW, int D, int H, int W,
+ scalar_t delta,
+ const index_t NC_offset,
+ const index_t memory_span) {
+ if (within_bounds_3d(d, h, w, D, H, W)) {
+ fastAtomicAdd(data,
+ NC_offset + d * sD + h * sH + w * sW,
+ memory_span,
+ delta,
+ true);
+ }
+ }
+
+ template<typename scalar_t, typename index_t>
+ static __forceinline__ __device__
+ void add_value_bounded(
+ scalar_t* data, scalar_t x, scalar_t y, int W, int H, int sW, int sH,
+ scalar_t delta,
+ GridSamplerPadding padding_mode,
+ bool align_corners,
+ const index_t NC_offset,
+ const index_t memory_span) {
+
+ x = compute_coordinates(x, W, padding_mode, align_corners);
+ y = compute_coordinates(y, H, padding_mode, align_corners);
+
+ int ix = static_cast<int>(x);
+ int iy = static_cast<int>(y);
+
+ safe_add_2d(data, iy, ix, sH, sW, H, W, delta, NC_offset, memory_span);
+ }
+
+ // Calculate the differential of the cubic convolution, i.e. `d coeff / d x`
+ template<typename scalar_t>
+ static __forceinline__ __device__
+ void get_cubic_coefficients_grad(
+ scalar_t coeffs[4],
+ scalar_t t) {
+
+ // Must be the same as forward calculation in
+ // aten/src/ATen/native/cuda/UpSample.cuh:get_cubic_upsample_coefficients
+ scalar_t A = -0.75;
+
+ scalar_t x;
+ x = -1 - t; // 1 < x = |-1 - tx| < 2
+ coeffs[0] = (-3 * A * x - 10 * A ) * x - 8 * A;
+ x = -t; // x = |0 - tx| <= 1
+ coeffs[1] = (-3 * (A + 2) * x - 2 * (A + 3)) * x;
+ x = 1 - t; // x = |1 - tx| <= 1
+ coeffs[2] = (3 * (A + 2) * x - 2 * (A + 3)) * x;
+ x = 2 - t; // 1 < x = |2 - tx| < 2
+ coeffs[3] = (3 * A * x - 10 * A) * x + 8 * A;
+ }
+
+
+ }} // namespace at::native
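As a concrete check of the unnormalization math above: for a dimension of size 4, align_corners=true maps the grid range [-1, 1] onto the pixel centers [0, 3], while align_corners=false maps it onto the pixel edges [-0.5, 3.5]. The small host-side snippet below is a re-implementation for illustration only; it is not part of this header.

#include <cstdio>
#include <initializer_list>

// Host-side copy of the grid_sampler_unnormalize formula, for illustration.
static float unnormalize(float coord, int size, bool align_corners) {
  return align_corners ? ((coord + 1.f) / 2) * (size - 1)
                       : ((coord + 1.f) * size - 1) / 2;
}

int main() {
  const int size = 4;
  for (float c : {-1.f, 0.f, 1.f}) {
    std::printf("coord %+.1f -> align_corners %.2f, no align_corners %.2f\n",
                c, unnormalize(c, size, true), unnormalize(c, size, false));
  }
  // Prints: -1.0 -> 0.00 / -0.50;  0.0 -> 1.50 / 1.50;  +1.0 -> 3.00 / 3.50
  return 0;
}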
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/GridSampler.h ADDED
@@ -0,0 +1,32 @@
+ #pragma once
+ #include <array>
+ #include <cstdint>
+
+ namespace at {
+ class TensorBase;
+ }
+
+ namespace at {
+ namespace native {
+
+ void launch_grid_sampler_2d_forward_kernel(
+ const TensorBase &output, const TensorBase &input, const TensorBase &grid,
+ int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+
+ void launch_grid_sampler_3d_forward_kernel(
+ const TensorBase &output, const TensorBase &input, const TensorBase &grid,
+ int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+
+ void launch_grid_sampler_2d_backward_kernel(
+ const TensorBase &grad_input, const TensorBase &grad_grid,
+ const TensorBase &grad_output, const TensorBase &input,
+ const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode,
+ bool align_corners, std::array<bool, 2> output_mask);
+
+ void launch_grid_sampler_3d_backward_kernel(
+ const TensorBase &grad_input, const TensorBase &grad_grid,
+ const TensorBase &grad_output, const TensorBase &input,
+ const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode,
+ bool align_corners, std::array<bool, 2> output_mask);
+
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/JitLoops.cuh ADDED
@@ -0,0 +1,187 @@
+ #pragma once
+
+ #include <ATen/jit_macros.h>
+
+ #if AT_USE_JITERATOR()
+
+ #include <ATen/cuda/CUDAConfig.h>
+
+ #include <ATen/OpMathType.h>
+ #include <ATen/TensorIterator.h>
+ #include <ATen/native/TensorIteratorDynamicCasting.h>
+
+ #include <ATen/native/cuda/MemoryAccess.cuh>
+
+ #include <ATen/native/cuda/CUDAJitLoops.cuh>
+
+ namespace at {
+ namespace native {
+
+ /* Note [Jiterator]
+ The "jiterator" simply just-in-time compiles the same kernels that
+ Loops.cuh (and CUDALoops.cuh) usually build. This reduces build time,
+ build size, and initial CUDA context size.
+
+ By default on non-Windows systems, it also caches compiled kernels in ~/.cache/torch/kernels.
+ This behavior is controlled with two environment variables:
+ - USE_PYTORCH_KERNEL_CACHE, if set to zero then this will disable all cache use
+ - PYTORCH_KERNEL_CACHE_PATH, if set specifies the folder to use for cached kernels
+
+ The jiterator currently has some limitations, however. It cannot:
+ - handle math on complex datatypes
+ - handle kernels with scalar parameters
+
+ These improvements will likely come soon.
+
+ For examples of how to use the jiterator see the i1 and gcd kernel
+ implementations, which pass jittable strings implementing their
+ operations instead of the typical CUDA functors.
+
+ To pass a runtime argument (similar to lambda captures in non-JIT kernels),
+ we need to pass the additional arguments to `jitted_gpu_kernel` by value.
+ Currently only primitive C++ types used for computation are valid.
+ The order of these extra arguments should be the same as the order in which
+ they appear in the kernel's function signature (see polygamma for an example).
+
+ NOTE: One big restriction is that these arguments must come after the
+ arguments provided by TensorIterator. E.g., when capturing `n`, where
+ `scalar_t x` and `scalar_t y` are provided by TensorIterator,
+ * foo(scalar_t x, scalar_t y, int n) works!
+ * foo(int n, scalar_t x, scalar_t y) doesn't work
+ * foo(scalar_t x, int n, scalar_t y) doesn't work
+
+ */
+
+ // Entrypoint for jitted GPU kernels.
+ // Only handles elementwise unary and binary kernels with a
+ // common dtype and a single output.
+ // NOTE: this assumes the op's iterator has a common_dtype.
+ // NOTE: We use std::tuple instead of parameter pack
+ // for `extra_args` due to the following
+ // bug on older versions of clang
+ // https://bugs.llvm.org/show_bug.cgi?id=23029
+ template <
+ char const* name,
+ typename return_type,
+ typename f_inputs_type,
+ int arity,
+ typename... Args>
+ void jitted_gpu_kernel(
+ TensorIteratorBase& iter,
+ const std::string& f,
+ at::cuda::jit::BinaryFuncVariant scalar_pos =
+ at::cuda::jit::BinaryFuncVariant::NoScalar,
+ at::opmath_type<f_inputs_type> scalar_val = 0,
+ std::tuple<Args...> extra_args = std::make_tuple()) {
+ // TODO: much of preamble is common to both jitted_gpu_kernel and gpu_kernel
+ // Maybe it could be refactored?
+ for (int arg = 0; arg < iter.ntensors(); arg++) {
+ TORCH_INTERNAL_ASSERT(
+ iter.device(arg).is_cuda(),
+ "argument ", arg, ": expected a CUDA device but found ", iter.device(arg));
+ }
+
+ if (iter.numel() == 0) {
+ return;
+ }
+
+ if (!iter.can_use_32bit_indexing()) {
+ for (auto& sub_iter : iter.with_32bit_indexing()) {
+ jitted_gpu_kernel<name, return_type, f_inputs_type, arity>(
+ sub_iter, f, scalar_pos, scalar_val, extra_args);
+ }
+
+ return;
+ }
+
+ // Computes if dynamic casting is needed
+ // Dynamic casting is needed if an input's dtype differs from the common dtype
+ // or if the result dtype differs from the output's dtype
+ // Note: this is intentionally divergent from calling needs_dynamic_casting,
+ // which is more general and inspects a lambda to determine if dynamic
+ // casting is needed.
+ bool needs_dynamic_casting = false;
+
+ // Checks output
+ const ScalarType return_scalar_type = c10::CppTypeToScalarType<return_type>::value;
+ const auto dtype0 = iter.dtype(0);
+ if (dtype0 != return_scalar_type) {
+ needs_dynamic_casting = true;
+ }
+
+ // Checks input(s)
+ const ScalarType inputs_scalar_type = c10::CppTypeToScalarType<f_inputs_type>::value;
+ for (auto i = decltype(arity){1}; i < (arity + 1); ++i) {
+ const auto dtypei = iter.dtype(i);
+ if (dtypei != inputs_scalar_type) {
+ needs_dynamic_casting = true;
+ break;
+ }
+ }
+ if (scalar_pos == at::cuda::jit::BinaryFuncVariant::NoScalar) {
+ // NOTE: With `scalar_pos=NoScalar`,`scalar_val` is not used
+ // for computation in the generated code and hence we pass a dummy
+ // value of `0`.
+ jitted_gpu_kernel_impl<
+ /*name*/ name,
+ /*return_type=*/return_type,
+ /*f_inputs_type=*/f_inputs_type,
+ arity,
+ at::cuda::jit::BinaryFuncVariant::NoScalar>(
+ iter, f, needs_dynamic_casting, /*scalar_val=*/scalar_val, extra_args);
+ } else if (scalar_pos == at::cuda::jit::BinaryFuncVariant::RhsScalar) {
+ jitted_gpu_kernel_impl<
+ /*name*/ name,
+ /*return_type=*/return_type,
+ /*f_inputs_type=*/f_inputs_type,
+ arity,
+ at::cuda::jit::BinaryFuncVariant::RhsScalar>(
+ iter,
+ f,
+ needs_dynamic_casting,
+ scalar_val,
+ extra_args);
+
+ } else {
+ jitted_gpu_kernel_impl<
+ /*name*/ name,
+ /*return_type=*/return_type,
+ /*f_inputs_type=*/f_inputs_type,
+ arity,
+ at::cuda::jit::BinaryFuncVariant::LhsScalar>(
+ iter,
+ f,
+ needs_dynamic_casting,
+ scalar_val,
+ extra_args);
+ }
+ }
+
+ // TODO: support runtime state capture similar to `jitted_gpu_kernel`.
+ template <char const *name, typename return_type, typename f_inputs_type>
+ void opmath_jitted_gpu_kernel_with_scalars(TensorIteratorBase& iter, const std::string& f) {
+ TORCH_INTERNAL_ASSERT(iter.ntensors() == 3);
+ // currently jiterator only handles binary functions where both inputs are of the same type (f_inputs_type)
+ using opmath_t = at::opmath_type<f_inputs_type>;
+ if (iter.is_cpu_scalar(1)) {
+ auto scalar_val = iter.scalar_value<opmath_t>(1);
+ iter.remove_operand(1);
+ // TODO: When all kernels that use gpu_kernel_with_scalars are
+ // ported to structured, this device guard can be deleted. This
+ // works around incorrect device guard generation for pre-structured
+ // kernels' device guards, but structured kernels do it right and
+ // we can assume the device is already set correctly
+ const OptionalDeviceGuard device_guard(iter.device(1));
+ jitted_gpu_kernel<name, return_type, f_inputs_type, 1>(iter, f, at::cuda::jit::BinaryFuncVariant::LhsScalar, scalar_val);
+ } else if (iter.is_cpu_scalar(2)) {
+ auto scalar_val = iter.scalar_value<opmath_t>(2);
+ iter.remove_operand(2);
+ jitted_gpu_kernel<name, return_type, f_inputs_type, 1>(iter, f, at::cuda::jit::BinaryFuncVariant::RhsScalar, scalar_val);
+ } else {
+ jitted_gpu_kernel<name, return_type, f_inputs_type, 2>(iter, f);
+ }
+ }
+
+ }} // at::native
+
+ #endif // AT_USE_JITERATOR()
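Note [Jiterator] above describes passing a jittable string (plus optional extra arguments) in place of a CUDA functor. A rough sketch of a caller, loosely modeled on the gcd/i1 kernels the note mentions, might look like the code below; the op name, kernel string, and dispatch-macro usage are assumptions for illustration, and only the jitted_gpu_kernel entry point comes from this header.

// Sketch only: dispatching a hypothetical binary integer op through the jiterator.
constexpr char mygcd_name[] = "mygcd_kernel";
const std::string mygcd_string = R"(
  template <typename T>
  T mygcd_kernel(T a, T b) {
    while (b != T{0}) { T t = b; b = a % b; a = t; }
    return a;
  }
)";

void mygcd_kernel_cuda(at::TensorIteratorBase& iter) {
  AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "mygcd_cuda", [&]() {
    // Template arguments: kernel name, return type, input type, arity.
    at::native::jitted_gpu_kernel<mygcd_name, scalar_t, scalar_t, /*arity=*/2>(
        iter, mygcd_string);
  });
}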
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/KernelUtils.cuh ADDED
@@ -0,0 +1,149 @@
+ #pragma once
+ #include <ATen/cuda/Atomic.cuh>
+
+ #if !(defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800))))
+ #include <cuda_bf16.h>
+ #endif
+
+ namespace at {
+ namespace native {
+
+ __device__ __forceinline__ size_t
+ idx(const size_t nc,
+ const size_t height,
+ const size_t width,
+ const size_t h,
+ const size_t w) {
+ return (nc * height + h) * width + w;
+ }
+
+ // for channels-last
+ __device__ __forceinline__ size_t
+ idx_cl(
+ const size_t n, const size_t h, const size_t w, const size_t c,
+ const size_t height, const size_t width, const size_t channel
+ ) {
+ return ((n * height + h) * width + w) * channel + c;
+ }
+
+ // fastSpecializedAtomicAdd (and fastAtomicAdd) are optimizations
+ // that speed up half-precision atomics. The situation with half
+ // precision atomics is that we have a slow __half atomic, and
+ // a fast vectored __half2 atomic (this can be worth up to a 6x
+ // speedup, see https://github.com/pytorch/pytorch/pull/21879).
+ // We can convert a __half atomic into a __half2 atomic by simply
+ // pairing the __half with a zero entry on the left/right depending
+ // on alignment... but only if this wouldn't cause an out of bounds
+ // access! Thus, you must specify tensor and numel so we can check
+ // if you would be out-of-bounds and use a plain __half atomic if
+ // you would be.
+ template <
+ typename scalar_t,
+ typename index_t,
+ typename std::enable_if<std::is_same<c10::Half, scalar_t>::value>::type* =
+ nullptr>
+ __device__ __forceinline__ void fastSpecializedAtomicAdd(
+ scalar_t* tensor,
+ index_t index,
+ const index_t numel,
+ scalar_t value) {
+ #if ( \
+ (defined(USE_ROCM)) || \
+ (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)))
+ gpuAtomicAddNoReturn(
+ reinterpret_cast<at::Half*>(tensor) + index,
+ static_cast<at::Half>(value));
+ #else
+ // Accounts for the chance the tensor falls on an odd 16-bit alignment (i.e., not 32-bit aligned)
+ __half* target_addr = reinterpret_cast<__half*>(tensor + index);
+ bool low_byte = (reinterpret_cast<std::uintptr_t>(target_addr) % sizeof(__half2) == 0);
+
+ if (low_byte && index < (numel - 1)) {
+ __half2 value2;
+ value2.x = static_cast<__half>(value);
+ value2.y = __int2half_rz(0);
+ atomicAdd(reinterpret_cast<__half2*>(target_addr), value2);
+
+ } else if (!low_byte && index > 0) {
+ __half2 value2;
+ value2.x = __int2half_rz(0);
+ value2.y = static_cast<__half>(value);
+ atomicAdd(reinterpret_cast<__half2*>(target_addr - 1), value2);
+
+ } else {
+ atomicAdd(
+ reinterpret_cast<__half*>(tensor) + index, static_cast<__half>(value));
+ }
+ #endif
+ }
+
+ template <
+ typename scalar_t,
+ typename index_t,
+ typename std::enable_if<std::is_same<c10::BFloat16, scalar_t>::value>::type* =
+ nullptr>
+ __device__ __forceinline__ void fastSpecializedAtomicAdd(
+ scalar_t* tensor,
+ index_t index,
+ const index_t numel,
+ scalar_t value) {
+ #if ( \
+ (defined(USE_ROCM)) || \
+ (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)))
+ gpuAtomicAddNoReturn(
+ reinterpret_cast<at::BFloat16*>(tensor) + index,
+ static_cast<at::BFloat16>(value));
+ #else
+ // Accounts for the chance the tensor falls on an odd 16-bit alignment (i.e., not 32-bit aligned)
+ __nv_bfloat16* target_addr = reinterpret_cast<__nv_bfloat16*>(tensor + index);
+ bool low_byte = (reinterpret_cast<std::uintptr_t>(target_addr) % sizeof(__nv_bfloat162) == 0);
+
+ if (low_byte && index < (numel - 1)) {
+ __nv_bfloat162 value2;
+ value2.x = *reinterpret_cast<__nv_bfloat16*>(&value);
+ value2.y = __int2bfloat16_rz(0);
+ atomicAdd(reinterpret_cast<__nv_bfloat162*>(target_addr), value2);
+
+ } else if (!low_byte && index > 0) {
+ __nv_bfloat162 value2;
+ value2.x = __int2bfloat16_rz(0);
+ value2.y = *reinterpret_cast<__nv_bfloat16*>(&value);
+ atomicAdd(reinterpret_cast<__nv_bfloat162*>(target_addr - 1), value2);
+
+ } else {
+ atomicAdd(
+ reinterpret_cast<__nv_bfloat16*>(tensor) + index, *reinterpret_cast<__nv_bfloat16*>(&value));
+ }
+ #endif
+ }
+
+
+ template <
+ typename scalar_t,
+ typename index_t,
+ typename std::enable_if<!std::is_same<c10::Half, scalar_t>::value && !std::is_same<c10::BFloat16, scalar_t>::value >::type* =
+ nullptr>
+ __device__ __forceinline__ void fastSpecializedAtomicAdd(
+ scalar_t* tensor,
+ index_t index,
+ const index_t numel,
+ scalar_t value) {
+ gpuAtomicAddNoReturn(tensor + index, value);
+ }
+
+ template <class scalar_t, class index_t>
+ __device__ __forceinline__ void fastAtomicAdd(
+ scalar_t* tensor,
+ index_t index,
+ const index_t numel,
+ scalar_t value,
+ bool fast_atomics) {
+ if (fast_atomics) {
+ fastSpecializedAtomicAdd(tensor, index, numel, value);
+ } else {
+ gpuAtomicAddNoReturn(tensor + index, value);
+ }
+ }
+
+ } // namespace native
+ } // namespace at
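The comment above explains the __half/__half2 pairing trick; the usual calling pattern is that a backward kernel scatters contributions with fastAtomicAdd, passing the output's total element count so the vectorized path can bail out near the ends of the buffer. Below is a hypothetical caller for illustration; the kernel and its arguments are assumptions, and only fastAtomicAdd comes from this header.

// Sketch: scatter-add gradients into a weight buffer with fastAtomicAdd.
template <typename scalar_t, typename index_t>
__global__ void scatter_add_grad(
    scalar_t* grad_weight,        // output buffer being accumulated into
    const scalar_t* grad_output,  // incoming gradient values
    const index_t* indices,       // destination index of each gradient
    index_t n,                    // number of gradient values
    index_t weight_numel) {       // total elements in grad_weight
  index_t i = blockIdx.x * static_cast<index_t>(blockDim.x) + threadIdx.x;
  if (i < n) {
    // memory_span = weight_numel lets the __half2/__nv_bfloat162 fast path
    // verify the paired element stays in bounds; fast_atomics=true opts in.
    at::native::fastAtomicAdd(
        grad_weight, indices[i], weight_numel, grad_output[i], /*fast_atomics=*/true);
  }
}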