applied-ai-018 committed
Commit e313d5c · verified · Parent: e30745c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h +139 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/IndexKernel.h +41 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/NonEmptyUtils.h +27 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Pow.h +69 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOpsUtils.h +449 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Sorting.h +28 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/AtomicAddFloat.h +37 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h +12 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/CopyKernel.h +12 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/DepthwiseConvKernel.h +21 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/GridSamplerKernel.h +34 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/Intrinsics.h +33 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/IsContiguous.h +62 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/LogAddExp.h +61 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/Loops.h +394 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/MaxUnpoolKernel.h +14 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/PixelShuffleKernel.h +14 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/SerialStackImpl.h +144 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/SpmmReduceKernel.h +22 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/StackKernel.h +12 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/UpSampleKernelAVXAntialias.h +1376 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/WeightNormKernel.h +20 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/avx_mathfun.h +522 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/utils.h +198 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/zmath.h +250 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Copy.h +10 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/DistributionTemplates.h +672 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/EmbeddingBackwardKernel.cuh +22 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/LaunchUtils.h +18 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Math.cuh +0 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/MiscUtils.h +32 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/MultiTensorApply.cuh +379 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/PersistentSoftmax.cuh +401 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Sorting.h +18 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/SortingCommon.cuh +193 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/fused_adamw_amsgrad_impl.cuh +40 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/fused_adamw_impl.cuh +38 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/reduction_template.cuh +680 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/thread_constants.h +22 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/vol2col.cuh +263 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/mps/Copy.h +15 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/mps/MPSGraphSonomaOps.h +49 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/mps/MPSGraphVenturaOps.h +197 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/mps/OperationUtils.h +394 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/mps/TensorFactory.h +12 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/mps/UnaryConstants.h +43 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/nested/NestedTensorBinaryOps.h +16 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/nested/NestedTensorFactories.h +7 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/nested/NestedTensorMath.h +81 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/nested/NestedTensorTransformerFunctions.h +103 -0
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h ADDED
@@ -0,0 +1,139 @@
+ #include <ATen/core/Tensor.h>
+ #include <ATen/Config.h>
+ #include <cstdint>
+
+ #ifdef USE_FBGEMM
+ #include <fbgemm/FbgemmEmbedding.h>
+ #endif
+
+ namespace at::native {
+
+ void check_arguments(
+ const Tensor& weight,
+ const Tensor& indices,
+ const Tensor& offsets,
+ const int64_t mode,
+ const c10::optional<Tensor>& per_sample_weights,
+ bool include_last_offset);
+
+ void make_bag_size_out(
+ Tensor& bag_size_out,
+ const Tensor& offsets,
+ const Tensor& indices,
+ const int64_t mode,
+ const bool include_last_offset,
+ const bool requires_grad);
+
+ void make_max_indices_out(
+ Tensor& max_indices_out,
+ const Tensor& weight,
+ const Tensor& indices,
+ const Tensor& offsets,
+ const Tensor& bag_size,
+ const int64_t mode,
+ bool include_last_offset);
+
+ void make_offset2bag_out(
+ Tensor& offset2bag,
+ Tensor& output,
+ const Tensor& weight,
+ const Tensor& indices,
+ const Tensor& offsets,
+ const int64_t mode,
+ const c10::optional<Tensor>& per_sample_weights,
+ const int64_t padding_idx = -1);
+
+ #ifdef USE_FBGEMM
+
+ template<bool has_weight, typename TIndex, typename TData>
+ struct _CallbackAndBlockSize {
+ using TCallback = typename fbgemm::EmbeddingSpMDMKernelSignature<TData, TIndex, TIndex, TData>::Type;
+
+ int64_t blockSize = -1;
+ TCallback callback = nullptr;
+
+ static TCallback generateCallback(int64_t block_size) {
+ return fbgemm::GenerateEmbeddingSpMDM<TData, TIndex, TIndex, TData>(
+ block_size,
+ has_weight,
+ /* normalize_by_lengths */false,
+ /* prefetch */16,
+ /* is_weight_positional */false,
+ /* use_offsets */true);
+ }
+
+ _CallbackAndBlockSize() = default;
+
+ explicit _CallbackAndBlockSize(c10::optional<int64_t> maybe_block_size)
+ : blockSize(maybe_block_size.value_or(-1))
+ , callback(maybe_block_size.has_value() ? generateCallback(maybe_block_size.value()) : nullptr)
+ {}
+ };
+
+ template<typename... StorageMixins>
+ struct _EmbeddingBagKernelCacheImpl : private StorageMixins... {
+
+ _EmbeddingBagKernelCacheImpl() = default;
+ // use each of the mixins to store corresponding kernel and block size
+ explicit _EmbeddingBagKernelCacheImpl(c10::optional<int64_t> maybe_block_size)
+ : StorageMixins(maybe_block_size)...
+ {}
+
+ // this method is thread safe (call sites may call from different threads)
+ template<bool has_weight, typename TIndex, typename TData>
+ typename _CallbackAndBlockSize<has_weight, TIndex, TData>::TCallback
+ getCallback(int64_t block_size) const {
+ // if the cache doesn't store the kernel for the incoming block size
+ // (so it is different from the one stored in corresponding mixin)
+ // regenerate the kernel (not writing it into the cache so we avoid locks)
+ if (block_size != _CallbackAndBlockSize<has_weight, TIndex, TData>::blockSize) {
+ return _CallbackAndBlockSize<has_weight, TIndex, TData>::generateCallback(block_size);
+ }
+ // else retrieve the cached kernel from the corresponding mixin
+ return _CallbackAndBlockSize<has_weight, TIndex, TData>::callback;
+ }
+ };
+
+ // instantiate the cache with the list of storage mixins
+ // for each of the 8 _EmbeddingBagKernelCache* usages in the EmbeddingBag.cpp impl file
+ using _EmbeddingBagKernelCache = _EmbeddingBagKernelCacheImpl<
+ _CallbackAndBlockSize<true, int32_t, float>,
+ _CallbackAndBlockSize<false, int32_t, float>,
+ _CallbackAndBlockSize<true, int64_t, float>,
+ _CallbackAndBlockSize<false, int64_t, float>,
+ _CallbackAndBlockSize<true, int32_t, unsigned short>,
+ _CallbackAndBlockSize<false, int32_t, unsigned short>,
+ _CallbackAndBlockSize<true, int64_t, unsigned short>,
+ _CallbackAndBlockSize<false, int64_t, unsigned short>>;
+ #else
+ struct _EmbeddingBagKernelCache {
+ explicit _EmbeddingBagKernelCache(c10::optional<int64_t> /* maybe_block_size */) {}
+ };
+ #endif
+
+ void _embedding_bag_cpu_impl_out(Tensor& output, Tensor& offset2bag,
+ Tensor& bag_size, Tensor* max_indices,
+ const Tensor &weight, const Tensor &indices,
+ const Tensor &offsets, const int64_t mode = 0,
+ const c10::optional<Tensor>& per_sample_weights = c10::nullopt,
+ bool include_last_offset = false,
+ int64_t padding_idx = -1,
+ _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr);
+
+ void _embedding_bag_cpu_out(
+ at::Tensor& output,
+ at::Tensor& offset2bag,
+ at::Tensor& bag_size,
+ at::Tensor* p_max_indices,
+ const at::Tensor& weight,
+ const at::Tensor& indices,
+ const at::Tensor& offsets,
+ const bool scale_grad_by_freq,
+ const int64_t mode,
+ const bool sparse,
+ const c10::optional<at::Tensor>& per_sample_weights,
+ const bool include_last_offset,
+ const c10::optional<int64_t>& padding_idx,
+ _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr);
+
+ } // namespace at::native
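
Editor's note: the getCallback comments above describe a lock-free caching pattern — a kernel generated for one block size is stored at construction, and a request for any other block size regenerates the kernel on the fly rather than writing to the cache. A minimal standalone sketch of that pattern, using std::function in place of the FBGEMM kernel type (all names here are illustrative, not part of the diff):

#include <cstdint>
#include <functional>
#include <iostream>
#include <optional>

// Hypothetical stand-in for a generated kernel: here it just scales by the block size.
using Kernel = std::function<int64_t(int64_t)>;

struct KernelCache {
  int64_t blockSize = -1;   // block size the cached kernel was generated for
  Kernel cached;            // kernel generated once at construction (may be empty)

  static Kernel generate(int64_t block_size) {
    return [block_size](int64_t x) { return x * block_size; };
  }

  explicit KernelCache(std::optional<int64_t> maybe_block_size)
      : blockSize(maybe_block_size.value_or(-1)),
        cached(maybe_block_size ? generate(*maybe_block_size) : Kernel{}) {}

  // Thread safe: never mutates the cache, so no locking is needed.
  Kernel get(int64_t block_size) const {
    if (block_size != blockSize) {
      return generate(block_size);  // cache miss: regenerate, do not store
    }
    return cached;                  // cache hit: reuse the pre-generated kernel
  }
};

int main() {
  KernelCache cache(64);
  std::cout << cache.get(64)(2) << "\n";   // hit: 128
  std::cout << cache.get(128)(2) << "\n";  // miss: regenerated kernel, 256
}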
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/IndexKernel.h ADDED
@@ -0,0 +1,41 @@
+ #pragma once
+ #include <ATen/native/DispatchStub.h>
+ #include <c10/util/ArrayRef.h>
+
+ namespace at {
+ class Tensor;
+ class TensorBase;
+ struct TensorIterator;
+ struct TensorIteratorBase;
+ }
+
+ namespace c10 {
+ class Scalar;
+ }
+
+ namespace at::native {
+
+ using index_fn = void(*)(TensorIteratorBase &, IntArrayRef indexed_sizes, IntArrayRef indexed_strides);
+ using index_fill_fn = void(*)(TensorIterator & iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, const Scalar& source);
+ using index_copy_fn = void(*)(TensorIterator & iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride);
+ using index_put_fn = void(*)(TensorIterator &, IntArrayRef indexed_sizes, IntArrayRef indexed_strides, bool accumulate);
+ using put_fn = void(*)(TensorIterator & iter, const TensorBase& self, const bool accumulate);
+ using take_fn = void(*)(TensorIterator & iter, const TensorBase& input);
+ using flip_fn = void(*)(TensorIterator &, const bool);
+ using masked_fill_fn = void(*)(TensorIterator &, const Scalar& scalar);
+ using masked_select_fn = void(*)(TensorIterator &, int64_t orig_stride);
+ using masked_scatter_fn = void(*)(TensorIterator &, const TensorBase &);
+
+ DECLARE_DISPATCH(index_fn, index_stub);
+ DECLARE_DISPATCH(index_fill_fn, index_fill_stub);
+ DECLARE_DISPATCH(index_copy_fn, index_copy_stub);
+ DECLARE_DISPATCH(index_put_fn, index_put_stub);
+ DECLARE_DISPATCH(put_fn, put_stub);
+ DECLARE_DISPATCH(take_fn, take_stub);
+ DECLARE_DISPATCH(flip_fn, flip_stub);
+ DECLARE_DISPATCH(masked_fill_fn, masked_fill_stub);
+ DECLARE_DISPATCH(masked_select_fn, masked_select_serial_stub);
+ DECLARE_DISPATCH(masked_select_fn, masked_select_stub);
+ DECLARE_DISPATCH(masked_scatter_fn, masked_scatter_stub);
+
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/NonEmptyUtils.h ADDED
@@ -0,0 +1,27 @@
+ #include <ATen/core/TensorBase.h>
+ #include <algorithm>
+ #include <vector>
+
+ namespace at::native {
+
+ inline int64_t ensure_nonempty_dim(int64_t dim) {
+ return std::max<int64_t>(dim, 1);
+ }
+
+ inline int64_t ensure_nonempty_size(const TensorBase &t, int64_t dim) {
+ return t.dim() == 0 ? 1 : t.size(dim);
+ }
+
+ inline int64_t ensure_nonempty_stride(const TensorBase &t, int64_t dim) {
+ return t.dim() == 0 ? 1 : t.stride(dim);
+ }
+
+ using IdxVec = std::vector<int64_t>;
+ inline IdxVec ensure_nonempty_vec(IdxVec vec) {
+ if (vec.empty()) {
+ vec.push_back(1);
+ }
+ return vec;
+ }
+
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Pow.h ADDED
@@ -0,0 +1,69 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+
+ namespace c10 {
+ class Scalar;
+ }
+
+ namespace at {
+
+ struct TensorIterator;
+ struct TensorIteratorBase;
+
+ namespace native {
+
+ #if defined(__CUDACC__) || defined(__HIPCC__)
+ #define HOST_DEVICE __host__ __device__
+ #else
+ #define HOST_DEVICE
+ #endif
+
+ // integral power in pytorch allows for negative exponents, giving truncated integral results.
+ // e.g. since 2**-1==0.5, the truncated integral result is zero. Bases of 1 and -1 are the
+ // only ones that give non-zero results for negative exponents.
+ template <class T,
+ typename std::enable_if<std::is_integral<T>::value, T>::type* = nullptr>
+ static inline HOST_DEVICE __ubsan_ignore_signed_int_overflow__ T powi_impl(T a, T b) {
+ T result = 1;
+ while (b) {
+ if (b & 1) {
+ result *= a;
+ }
+ b /= 2;
+ a *= a;
+ }
+ return result;
+ }
+
+ template <class T,
+ typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, T>::type* = nullptr>
+ static inline HOST_DEVICE T powi(T a, T b) {
+ return powi_impl(a, b);
+ }
+
+ template <class T,
+ typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, T>::type* = nullptr>
+ static inline HOST_DEVICE T powi(T a, T b) {
+ if ( b < 0 ) {
+ if ( a == 1 ) {
+ return 1;
+ } else if ( a == -1 ) {
+ auto negative = (-b) % static_cast<T>(2);
+ return negative ? -1 : 1;
+ } else {
+ return 0;
+ }
+ }
+ return powi_impl(a, b);
+ }
+
+ using pow_tensor_tensor_fn = void (*)(TensorIteratorBase&);
+ using pow_tensor_scalar_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+
+ DECLARE_DISPATCH(pow_tensor_tensor_fn, pow_tensor_tensor_stub);
+ DECLARE_DISPATCH(pow_tensor_scalar_fn, pow_tensor_scalar_stub);
+
+ } // namespace native
+
+ } // namespace at
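
Editor's note: the comment above describes integer power with exponentiation by squaring and truncation for negative exponents. A minimal standalone sketch of those semantics (a local re-implementation for illustration, not the ATen symbol):

#include <cstdint>
#include <iostream>

// Exponentiation by squaring for non-negative exponents.
static int64_t powi_impl(int64_t a, int64_t b) {
  int64_t result = 1;
  while (b) {
    if (b & 1) result *= a;
    b /= 2;
    a *= a;
  }
  return result;
}

// Negative exponents give the truncated integral result: 0 unless |a| == 1.
static int64_t powi(int64_t a, int64_t b) {
  if (b < 0) {
    if (a == 1) return 1;
    if (a == -1) return ((-b) % 2) ? -1 : 1;
    return 0;
  }
  return powi_impl(a, b);
}

int main() {
  std::cout << powi(2, 10) << "\n";   // 1024
  std::cout << powi(2, -1) << "\n";   // 0   (2**-1 == 0.5 truncates to 0)
  std::cout << powi(-1, -3) << "\n";  // -1
}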
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOpsUtils.h ADDED
@@ -0,0 +1,449 @@
+ #pragma once
+
+ #include <limits>
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/Resize.h>
+ #include <ATen/native/TensorIterator.h>
+ #include <ATen/native/NonEmptyUtils.h>
+ #include <ATen/WrapDimUtilsMulti.h>
+ #include <c10/core/ScalarType.h>
+ #include <c10/util/irange.h>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/Functions.h>
+ #else
+ #include <ATen/ops/empty.h>
+ #include <ATen/ops/scalar_tensor.h>
+ #endif
+
+ namespace at::native {
+
+ // Maximum and minimum possible scalar values, including infinities
+ template <typename scalar_t>
+ constexpr scalar_t upper_bound() {
+ using lim = std::numeric_limits<scalar_t>;
+ return lim::has_infinity ? lim::infinity() : lim::max();
+ }
+
+ template <typename scalar_t>
+ constexpr scalar_t lower_bound() {
+ using lim = std::numeric_limits<scalar_t>;
+ return lim::has_infinity ? -lim::infinity() : lim::lowest();
+ }
+
+ static inline Tensor restride_dim(
+ const Tensor& src, int64_t dim,
+ IntArrayRef replacement_shape
+ ) {
+ auto strides = ensure_nonempty_vec(src.strides().vec());
+ strides[dim] = 0;
+ return src.as_strided(replacement_shape, strides);
+ }
+
+ inline void _dimreduce_setup(const Tensor &result, const Tensor &self,
+ int64_t dim) {
+ IntArrayRef self_sizes = self.sizes();
+ std::vector<int64_t> result_sizes;
+ result_sizes.insert(result_sizes.end(), self_sizes.begin(), self_sizes.end());
+ result_sizes[dim] = 1;
+ result.resize_(result_sizes);
+ }
+
+ inline bool _dimreduce_return_trivial(const Tensor &result, const Tensor &self,
+ const Scalar& ident, int64_t dim, bool keepdim) {
+ if (self.numel() == 1 && self.ndimension() == 0) {
+ result.resize_({});
+ result.fill_(self);
+ return true;
+ }
+ // Return identity
+ if (self.numel() == 0) {
+ _dimreduce_setup(result, self, dim);
+ result.fill_(ident);
+ if (!keepdim) result.squeeze_(dim);
+ return true;
+ }
+ return false;
+ }
+
+ inline bool _dimreduce_return_trivial_no_ident(Tensor &result, const Tensor &self,
+ int64_t /*dim*/, bool /*keepdim*/, const char* /*fn_name*/) {
+ if (self.numel() == 1 && self.ndimension() == 0) {
+ result.resize_({});
+ result.fill_(self);
+ return true;
+ }
+
+ return false;
+ }
+
+ inline c10::optional<Tensor> _allreduce_return_trivial(
+ const Tensor& self,
+ const Scalar& ident) {
+ // Return identity
+ if (self.numel() == 0) {
+ return at::scalar_tensor(ident, self.options());
+ }
+ return c10::nullopt;
+ }
+
+ #define OPTION_TYPE_EQUALITY_CHECK(option, out, self) \
+ { \
+ TORCH_CHECK(\
+ out.option() == self.option(),\
+ "expected ", #option, " ",\
+ self.option(),\
+ " but found ", out.option())\
+ }
+
+ static inline void check_scalar_type_device_layout_equal(const Tensor& out, const Tensor& self) {
+ OPTION_TYPE_EQUALITY_CHECK(scalar_type, out, self);
+ OPTION_TYPE_EQUALITY_CHECK(device, out.options(), self.options());
+ OPTION_TYPE_EQUALITY_CHECK(layout, out.options(), self.options());
+ }
+
+ static inline Tensor integer_upcast(const Tensor& self, c10::optional<ScalarType> dtype) {
+ ScalarType scalarType = self.scalar_type();
+ TORCH_CHECK(!isBarebonesUnsignedType(scalarType), "integer upcasting for uint16, uint32 and uint64 is not currently implemented");
+ ScalarType upcast_scalarType = dtype.value_or(at::isIntegralType(scalarType, /*includeBool=*/true) ? ScalarType::Long : scalarType);
+ return self.toType(upcast_scalarType);
+ }
+
+ using DimMask = TensorIterator::DimMask;
+
+ static DimVector make_dim_vector(OptionalIntArrayRef opt_dims, int64_t ndim) {
+ if (opt_dims.has_value()) {
+ return DimVector(opt_dims.value());
+ } else {
+ std::vector<int64_t> all_dims(ndim);
+ std::iota(all_dims.begin(), all_dims.end(), 0);
+ return DimVector(all_dims);
+ }
+ }
+
+ static DimMask make_dim_mask(OptionalIntArrayRef opt_dims, int64_t ndim, bool allow_empty_dims=false) {
+ DimMask mask;
+ if (opt_dims.has_value()) {
+ auto dims = opt_dims.value();
+ if (dims.empty() && !allow_empty_dims) {
+ mask = DimMask().flip();
+ } else {
+ mask = at::dim_list_to_bitset(dims, ndim);
+ }
+ } else {
+ mask = DimMask().flip();
+ }
+ return mask;
+ }
+
+ inline DimVector shape_from_dim_mask(const Tensor& self, DimMask mask, bool keepdim) {
+ auto shape = DimVector(self.sizes());
+ for (int dim = shape.size() - 1; dim >= 0; dim--) {
+ if (mask[dim]) {
+ if (keepdim) {
+ shape[dim] = 1;
+ } else {
+ shape.erase(shape.begin() + dim);
+ }
+ }
+ }
+ return shape;
+ }
+
+ static void resize_reduction_result(
+ Tensor& result, const Tensor& self, DimMask mask, bool keepdim,
+ ScalarType /*dtype*/)
+ {
+ auto shape = shape_from_dim_mask(self, mask, keepdim);
+ TORCH_CHECK(result.defined(), "Cannot create a new tensor inside a reduction op. You likely tried to call an operator with an out argument but the out argument was an undefined tensor.");
+ at::native::resize_output(result, shape);
+ }
+
+ inline Tensor create_reduction_result(
+ const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, ScalarType dtype
+ ) {
+ DimMask mask = make_dim_mask(dim, self.dim());
+ auto shape = shape_from_dim_mask(self, mask, keepdim);
+ return at::empty(shape, self.options().dtype(dtype));
+ }
+
+ static Tensor review_reduce_result(const Tensor& result, int ndim, DimMask mask, bool keepdim) {
+ if (keepdim) {
+ return result;
+ }
+ auto shape = DimVector(result.sizes());
+ auto stride = DimVector(result.strides());
+ for (const auto dim : c10::irange(ndim)) {
+ if (mask[dim]) {
+ shape.insert(shape.begin() + dim, 1);
+ stride.insert(stride.begin() + dim, 0);
+ }
+ }
+ return result.as_strided(shape, stride);
+ }
+
+ static TensorIterator make_reduction(
+ const char* name, Tensor& result, const Tensor& self,
+ at::OptionalIntArrayRef dim_opt,
+ bool keepdim, ScalarType in_dtype, ScalarType out_dtype) {
+ // check that result type and dtype match if provided
+ TORCH_CHECK(
+ !result.defined() || result.scalar_type() == out_dtype,
+ name, ": provided dtype must match dtype of result. Got ",
+ toString(result.scalar_type()),
+ " and ",
+ toString(out_dtype),
+ ".");
+ // dim={} performs an all-reduce, same as dim=None
+ IntArrayRef dim = dim_opt.value_or(IntArrayRef{});
+ int64_t ndim = self.dim();
+ auto mask = make_dim_mask(dim, ndim);
+ resize_reduction_result(result, self, mask, keepdim, out_dtype);
+ auto viewed_result = review_reduce_result(result, ndim, mask, keepdim);
+ namedinference::propagate_names_for_reduction(result, self, dim, keepdim);
+ if (self.scalar_type() == in_dtype) {
+ return TensorIterator::reduce_op(viewed_result, self);
+ }
+ return TensorIterator::reduce_op(viewed_result, self.to(in_dtype));
+ }
+
+ static C10_UNUSED TensorIterator make_reduction(
+ const char* name, Tensor& result, const Tensor& self,
+ at::OptionalIntArrayRef dim, bool keepdim, ScalarType out_dtype) {
+ // special case for type promotion in mixed precision, improves computational
+ // efficiency.
+ // We don't generalize this to common mismatched input/output types to avoid cross
+ // product of templated kernel launches.
+ const bool gpu_lowp_to_f32 = (
+ self.is_cuda() && (self.scalar_type() == kHalf || self.scalar_type() == kBFloat16) && out_dtype == kFloat);
+ auto in_dtype = gpu_lowp_to_f32 ? self.scalar_type()
+ : self.is_complex() ? c10::toComplexType(out_dtype)
+ : out_dtype;
+ return make_reduction(name, result, self, dim, keepdim, in_dtype, out_dtype);
+ }
+
+ static TensorIterator make_reduction(
+ const char* name, Tensor& result1, Tensor& result2, const Tensor& self,
+ at::OptionalIntArrayRef dim_opt, bool keepdim, ScalarType dtype1,
+ ScalarType dtype2) {
+ // check that result type and dtype match if provided
+ TORCH_CHECK(
+ (!result1.defined() || result1.scalar_type() == dtype1) && (!result2.defined() || result2.scalar_type() == dtype2),
+ name, ": provided dtype must match dtype of result. Got ",
+ toString(result1.scalar_type()), toString(result2.scalar_type()),
+ " and ",
+ toString(dtype1), toString(dtype2),
+ ".");
+
+ // dim={} performs an all-reduce, same as dim=None
+ auto dim = dim_opt.value_or(IntArrayRef{});
+ int64_t ndim = self.dim();
+ DimMask mask = make_dim_mask(dim, ndim);
+ resize_reduction_result(result1, self, mask, keepdim, dtype1);
+ auto viewed_result1 = review_reduce_result(result1, ndim, mask, keepdim);
+
+ resize_reduction_result(result2, self, mask, keepdim, dtype2);
+ auto viewed_result2 = review_reduce_result(result2, ndim, mask, keepdim);
+
+ namedinference::propagate_names_for_reduction(result1, self, dim, keepdim);
+ namedinference::propagate_names_for_reduction(result2, self, dim, keepdim);
+
+ // special case for type promotion in mixed precision, improves computational
+ // efficiency.
+ // We don't generalize this to common mismatched input/output types to avoid cross
+ // product of templated kernel launches.
+ if (self.scalar_type() == dtype1 ||
+ (self.is_cuda() && self.scalar_type() == kHalf && dtype1 == kFloat)) {
+ return TensorIterator::reduce_op(viewed_result1, viewed_result2, self);
+ }
+ return TensorIterator::reduce_op(viewed_result1, viewed_result2, self.to(dtype1));
+ }
+
+ static C10_UNUSED TensorIterator make_reduction(
+ const char* name, Tensor& result1, Tensor& result2, const Tensor& self,
+ at::OptionalIntArrayRef dim, bool keepdim, ScalarType dtype) {
+ return make_reduction(name, result1, result2, self, dim, keepdim, dtype, dtype);
+ }
+
+ static void zero_numel_check_dims(const Tensor& self, const int64_t dim, const char *fn_name) {
+ if (self.ndimension() == 0) {
+ TORCH_CHECK_INDEX(dim == 0 || dim == -1, fn_name,
+ ": Expected reduction dim -1 or 0 for scalar but got ", dim);
+ }
+ else {
+ TORCH_CHECK_INDEX(self.size(dim) != 0, fn_name,
+ ": Expected reduction dim ", dim, " to have non-zero size.");
+ }
+ }
+
+ static void zero_numel_check_dims(const Tensor& self, const IntArrayRef dim, const char *fn_name) {
+ TORCH_CHECK(
+ !dim.empty(),
+ fn_name, ": Expected reduction dim to be specified for input.numel() == 0. ",
+ "Specify the reduction dim with the 'dim' argument.");
+ for (const int64_t d : dim) {
+ zero_numel_check_dims(self, d, fn_name);
+ }
+ }
+
+ static std::vector<int64_t> get_zero_numel_tensor_size(
+ const Tensor& self,
+ const int64_t dim,
+ const bool keepdim,
+ const char* fn_name) {
+ TORCH_INTERNAL_ASSERT(self.numel() == 0, fn_name, ": Expected self.numel() == 0.");
+ zero_numel_check_dims(self, dim, fn_name);
+ std::vector<int64_t> sizes;
+ if (keepdim) {
+ sizes = self.sizes().vec();
+ sizes[dim] = 1;
+ }
+ else {
+ for (const auto d : c10::irange(self.dim())) {
+ if (d != dim) {
+ sizes.push_back(self.sizes()[d]);
+ }
+ }
+ }
+ return sizes;
+ }
+
+ // Resize the result tensor and indices when result.numel() == 0 depending on values of
+ // dim and keepdim for returning tensors containing reduction results.
+ // This function should be called when you are reducing a zero-numel tensor and want to
+ // resize the output and return it. This function exists for resizing zero-numel
+ // tensors when the size of the reduction dimension is non-zero.
+ static C10_UNUSED void zero_numel_tensor_resize(Tensor& result, Tensor& result_indices,
+ const Tensor& self, const int64_t dim,
+ const bool keepdim, const char *fn_name) {
+ auto sizes = get_zero_numel_tensor_size(self, dim, keepdim, fn_name);
+ at::native::resize_output(result, sizes);
+ at::native::resize_output(result_indices, sizes);
+ }
+
+ inline ScalarType get_dtype_from_self(
+ const Tensor& self,
+ const c10::optional<ScalarType>& dtype,
+ bool promote_integers) {
+ if (dtype.has_value()) {
+ return dtype.value();
+ }
+ ScalarType src_type = self.scalar_type();
+ if (promote_integers && at::isIntegralType(src_type, /*includeBool=*/true)) {
+ return kLong;
+ }
+ return src_type;
+ }
+
+ inline ScalarType get_dtype_from_result(Tensor& result, c10::optional<ScalarType> dtype) {
+ TORCH_CHECK(result.defined(), "Cannot create a new tensor inside a reduction op. You likely tried to call an operator with an out argument but the out argument was an undefined tensor.");
+ if (dtype.has_value()) {
+ return dtype.value();
+ } else {
+ return result.scalar_type();
+ }
+ }
+
+
+ } // namespace at::native
+
+ namespace at::meta {
+
+ static C10_UNUSED DimVector get_reduction_shape(
+ const Tensor& self,
+ IntArrayRef dims,
+ bool keepdim,
+ bool allow_empty_dims=false) {
+ auto mask = native::make_dim_mask(dims, self.dim(), allow_empty_dims);
+ return native::shape_from_dim_mask(self, mask, keepdim);
+ }
+
+ static void resize_reduction(
+ impl::MetaBase& meta,
+ const Tensor& self,
+ OptionalIntArrayRef opt_dims,
+ bool keepdim,
+ ScalarType out_dtype,
+ bool allow_empty_dims=false) {
+ DimVector dims_ = at::native::make_dim_vector(opt_dims, self.dim());
+ maybe_wrap_dims(dims_, self.dim());
+ auto shape = get_reduction_shape(self, dims_, keepdim, allow_empty_dims);
+ meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype));
+ namedinference::propagate_names_for_reduction(
+ meta.maybe_get_output(), self, dims_, keepdim);
+ }
+
+ static void resize_reduction_with_indices(
+ impl::MetaBase& meta,
+ const Tensor& self,
+ IntArrayRef dims,
+ bool keepdim,
+ ScalarType out_dtype) {
+ DimVector dims_(dims);
+ maybe_wrap_dims(dims_, self.dim());
+ auto shape = get_reduction_shape(self, dims_, keepdim);
+ meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype));
+ meta.set_output_raw_strided(1, shape, {}, self.options().dtype(kLong));
+ namedinference::propagate_names_for_reduction(
+ meta.maybe_get_output(0), self, dims_, keepdim);
+ namedinference::propagate_names_for_reduction(
+ meta.maybe_get_output(1), self, dims_, keepdim);
+ }
+
+ static TensorIterator make_reduction(
+ const Tensor& self,
+ const Tensor& result,
+ OptionalIntArrayRef opt_dims,
+ bool keepdim,
+ ScalarType in_dtype) {
+ int64_t ndim = self.dim();
+ auto mask = at::native::make_dim_mask(opt_dims, ndim);
+ auto viewed_result =
+ at::native::review_reduce_result(result, ndim, mask, keepdim);
+ if (self.scalar_type() == in_dtype) {
+ return TensorIterator::reduce_op(viewed_result, self);
+ }
+ return TensorIterator::reduce_op(viewed_result, self.to(in_dtype));
+ }
+
+ static TensorIterator make_reduction(
+ const Tensor& self,
+ const Tensor& result1,
+ const Tensor& result2,
+ IntArrayRef dims,
+ bool keepdim,
+ ScalarType dtype1,
+ ScalarType /*dtype2*/) {
+ int64_t ndim = self.dim();
+ auto mask = at::native::make_dim_mask(dims, ndim);
+ auto viewed_result1 = at::native::review_reduce_result(result1, ndim, mask, keepdim);
+ auto viewed_result2 = at::native::review_reduce_result(result2, ndim, mask, keepdim);
+ // special case for type promotion in mixed precision, improves computational efficiency.
+ // We don't generalize this to common mismatched input/output types to avoid cross product
+ // of templated kernel launches.
+ if (self.scalar_type() == dtype1 ||
+ (self.is_cuda() && self.scalar_type() == kHalf && dtype1 == kFloat)) {
+ return TensorIterator::reduce_op(viewed_result1, viewed_result2, self);
+ }
+ return TensorIterator::reduce_op(viewed_result1, viewed_result2, self.to(dtype1));
+ }
+
+ static C10_UNUSED TensorIterator make_reduction_from_out_ty(
+ const Tensor& self,
+ const Tensor& result,
+ OptionalIntArrayRef opt_dims,
+ bool keepdim,
+ ScalarType out_dtype) {
+ // special case for type promotion in mixed precision, improves computational
+ // efficiency.
+ // We don't generalize this to common mismatched input/output types to avoid cross
+ // product of templated kernel launches.
+ const bool gpu_lowp_to_f32 =
+ (self.is_cuda() &&
+ (self.scalar_type() == kHalf || self.scalar_type() == kBFloat16) &&
+ out_dtype == kFloat);
+ auto in_dtype = gpu_lowp_to_f32 ? self.scalar_type() : out_dtype;
+ return make_reduction(self, result, opt_dims, keepdim, in_dtype);
+ }
+
+ } // namespace at::meta
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/Sorting.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+ #include <cstdint>
+
+ namespace at {
+ class TensorBase;
+ }
+
+ namespace at::native {
+
+ enum class QUANTILE_INTERPOLATION_MODE : uint8_t {
+ LINEAR,
+ LOWER,
+ HIGHER,
+ MIDPOINT,
+ NEAREST
+ };
+
+ using sort_fn = void(*)(const TensorBase&, const TensorBase&, const TensorBase&, int64_t, bool, bool);
+ using topk_fn = void(*)(const TensorBase&, const TensorBase&, const TensorBase&, int64_t, int64_t, bool, bool);
+
+ DECLARE_DISPATCH(sort_fn, sort_stub);
+ DECLARE_DISPATCH(topk_fn, topk_stub);
+
+ void _fill_indices(const TensorBase &indices, int64_t dim);
+
+ } // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/AtomicAddFloat.h ADDED
@@ -0,0 +1,37 @@
+ #ifndef ATOMIC_ADD_FLOAT
+ #define ATOMIC_ADD_FLOAT
+
+ #if (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))
+ #include <ATen/native/cpu/Intrinsics.h>
+ #else
+ #define _mm_pause()
+ #endif
+
+ #include <atomic>
+
+ static inline void cpu_atomic_add_float(float* dst, float fvalue)
+ {
+ typedef union {
+ unsigned intV;
+ float floatV;
+ } uf32_t;
+
+ uf32_t new_value, old_value;
+ std::atomic<unsigned>* dst_intV = (std::atomic<unsigned>*)(dst);
+
+ old_value.floatV = *dst;
+ new_value.floatV = old_value.floatV + fvalue;
+
+ unsigned* old_intV = (unsigned*)(&old_value.intV);
+ while (!std::atomic_compare_exchange_strong(dst_intV, old_intV, new_value.intV)) {
+ #ifdef __aarch64__
+ __asm__ __volatile__("yield;" : : : "memory");
+ #else
+ _mm_pause();
+ #endif
+ old_value.floatV = *dst;
+ new_value.floatV = old_value.floatV + fvalue;
+ }
+ }
+
+ #endif
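
Editor's note: the header above accumulates into a float without a lock by compare-and-swapping on the raw 32-bit pattern of the value. A standalone sketch of the same idea using only the standard library (it mirrors the header's cast of a float pointer to an atomic 32-bit integer, and assumes float occupies 32 bits):

#include <atomic>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <thread>
#include <vector>

static_assert(sizeof(float) == sizeof(std::uint32_t), "sketch assumes 32-bit float");

// Add `value` to *dst atomically by CAS-ing on the raw 32-bit representation.
static void atomic_add_float(float* dst, float value) {
  auto* as_bits = reinterpret_cast<std::atomic<std::uint32_t>*>(dst);
  std::uint32_t old_bits = as_bits->load(std::memory_order_relaxed);
  for (;;) {
    float old_val;
    std::memcpy(&old_val, &old_bits, sizeof(old_val));
    float new_val = old_val + value;
    std::uint32_t new_bits;
    std::memcpy(&new_bits, &new_val, sizeof(new_bits));
    // On failure, old_bits is refreshed with the current contents and we retry.
    if (as_bits->compare_exchange_weak(old_bits, new_bits)) break;
  }
}

int main() {
  alignas(4) float sum = 0.0f;
  std::vector<std::thread> workers;
  for (int t = 0; t < 4; ++t)
    workers.emplace_back([&] { for (int i = 0; i < 100000; ++i) atomic_add_float(&sum, 1.0f); });
  for (auto& w : workers) w.join();
  std::cout << sum << "\n";  // 400000 (exactly representable, so no rounding loss)
}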
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/DispatchStub.h>
+ #include <ATen/core/IListRef.h>
+
+ namespace at { namespace native {
+
+ using cat_serial_fn = void(*)(const Tensor &, const MaterializedITensorListRef&, int64_t);
+ DECLARE_DISPATCH(cat_serial_fn, cat_serial_stub);
+
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/CopyKernel.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ namespace at {
+ struct TensorIteratorBase;
+
+ namespace native {
+ inline namespace CPU_CAPABILITY {
+
+ void direct_copy_kernel(TensorIteratorBase &iter);
+ void copy_kernel(TensorIterator& iter, bool /*non_blocking*/);
+
+ }}} // namespace at::native::CPU_CAPABILITY
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/DepthwiseConvKernel.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+ #include <c10/util/ArrayRef.h>
+
+ /*
+ Depthwise 3x3 Winograd convolution operator
+ */
+
+ namespace at {
+ class Tensor;
+
+ namespace native {
+
+ using convolution_depthwise3x3_winograd_fn =
+ Tensor (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, int64_t);
+
+ DECLARE_DISPATCH(convolution_depthwise3x3_winograd_fn, convolution_depthwise3x3_winograd_stub);
+
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/GridSamplerKernel.h ADDED
@@ -0,0 +1,34 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+
+ #include <array>
+ #include <cstdint>
+
+ namespace at {
+ class TensorBase;
+ }
+
+ namespace at { namespace native {
+
+ using forward_2d_fn = void (*) (
+ const TensorBase &output,
+ const TensorBase &input,
+ const TensorBase &grid,
+ int64_t interpolation_mode,
+ int64_t padding_mode,
+ bool align_corners);
+ using backward_2d_fn = void (*) (
+ const TensorBase &grad_input,
+ const TensorBase &grad_grid,
+ const TensorBase &grad_output,
+ const TensorBase &input,
+ const TensorBase &grid,
+ int64_t interpolation_mode,
+ int64_t padding_mode,
+ bool align_corners,
+ std::array<bool, 2> output_mask);
+ DECLARE_DISPATCH(forward_2d_fn, grid_sampler_2d_cpu_kernel);
+ DECLARE_DISPATCH(backward_2d_fn, grid_sampler_2d_backward_cpu_kernel);
+
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/Intrinsics.h ADDED
@@ -0,0 +1,33 @@
+ #pragma once
+
+ #if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
+ /* Clang-compatible compiler, targeting x86/x86-64 */
+ #include <x86intrin.h>
+ #elif defined(_MSC_VER)
+ /* Microsoft C/C++-compatible compiler */
+ #include <intrin.h>
+ #if _MSC_VER <= 1900
+ #define _mm256_extract_epi64(X, Y) (((uint64_t*)&X)[Y])
+ #endif
+ #elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
+ /* GCC-compatible compiler, targeting x86/x86-64 */
+ #include <x86intrin.h>
+ #elif defined(__GNUC__) && defined(__ARM_NEON__)
+ /* GCC-compatible compiler, targeting ARM with NEON */
+ #include <arm_neon.h>
+ #elif defined(__GNUC__) && defined(__IWMMXT__)
+ /* GCC-compatible compiler, targeting ARM with WMMX */
+ #include <mmintrin.h>
+ #elif (defined(__GNUC__) || defined(__xlC__)) && \
+ (defined(__VEC__) || defined(__ALTIVEC__))
+ /* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
+ #include <altivec.h>
+ /* We need to undef those tokens defined by <altivec.h> to avoid conflicts
+ with the C++ types. => Can still use __bool/__vector */
+ #undef bool
+ #undef vector
+ #undef pixel
+ #elif defined(__GNUC__) && defined(__SPE__)
+ /* GCC-compatible compiler, targeting PowerPC with SPE */
+ #include <spe.h>
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/IsContiguous.h ADDED
@@ -0,0 +1,62 @@
+ #pragma once
+
+ namespace at { namespace native { inline namespace CPU_CAPABILITY {
+
+ // n: number of function arguments (arity)
+ // traits: function_traits (see FunctionTraits.h)
+ // s: index of scalar argument or -1
+ template <int n, int stride_index, typename traits, int s=-1>
+ struct IsContiguous {
+ static bool eval(const int64_t* strides) {
+ using type = typename traits::template arg<n - 1>::type;
+ return strides[stride_index] == (s == n ? 0 : sizeof(type)) &&
+ IsContiguous<n - 1, stride_index - 1, traits, s>::eval(strides);
+ }
+ };
+
+ // will be called when an output exists
+ template <typename traits, int s>
+ struct IsContiguous<0, 0, traits, s> {
+ static bool eval(const int64_t* strides) {
+ return strides[0] == sizeof(typename traits::result_type);
+ }
+ };
+
+ // will be called when there is no output
+ template <typename traits, int s>
+ struct IsContiguous<0, -1, traits, s> {
+ static bool eval(const int64_t* /*strides*/) {
+ return true;
+ }
+ };
+
+ // output and all inputs are contiguous
+ template <typename traits,
+ typename std::enable_if<std::is_void<typename traits::result_type>::value>::type* = nullptr>
+ static inline bool is_contiguous(const int64_t* strides) {
+ return IsContiguous<traits::arity, traits::arity - 1, traits>::eval(strides);
+ }
+
+ template <typename traits,
+ typename std::enable_if<!std::is_void<typename traits::result_type>::value>::type* = nullptr>
+ static inline bool is_contiguous(const int64_t* strides) {
+ return IsContiguous<traits::arity, traits::arity, traits>::eval(strides);
+ }
+
+ // input at `s` is scalar (stride 0); output and other inputs are contiguous
+ // NB: output is typically at strides[0] so first input corresponds to s=1
+ template <typename traits, int s,
+ typename std::enable_if<std::is_void<typename traits::result_type>::value>::type* = nullptr>
+ static inline bool is_contiguous_scalar(const int64_t* strides) {
+ static_assert(s > 0 && s <= traits::arity, "scalar argument index out of bounds");
+ return IsContiguous<traits::arity, traits::arity - 1, traits, s>::eval(strides);
+ }
+
+ template <typename traits, int s,
+ typename std::enable_if<!std::is_void<typename traits::result_type>::value>::type* = nullptr>
+ static inline bool is_contiguous_scalar(const int64_t* strides) {
+ static_assert(s > 0 && s <= traits::arity, "scalar argument index out of bounds");
+ return IsContiguous<traits::arity, traits::arity, traits, s>::eval(strides);
+ }
+
+ }}}
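
Editor's note: the template recursion above encodes a simple runtime check — a 1-D loop is treated as contiguous when each operand's stride equals sizeof(element) (output at strides[0]), and a broadcasted scalar input is allowed a stride of 0. A non-template sketch of that check for a hypothetical float binary op (names here are illustrative):

#include <cstdint>
#include <iostream>

// strides[0] is the output, strides[1..2] are the two inputs; `scalar_arg`
// names the one input (if any) that is a broadcasted scalar with stride 0.
static bool is_contiguous_float_binary(const int64_t* strides, int scalar_arg = -1) {
  for (int i = 0; i < 3; ++i) {
    const int64_t expected = (i == scalar_arg) ? 0 : static_cast<int64_t>(sizeof(float));
    if (strides[i] != expected) return false;
  }
  return true;
}

int main() {
  int64_t contiguous[3] = {4, 4, 4};
  int64_t with_scalar[3] = {4, 0, 4};
  std::cout << is_contiguous_float_binary(contiguous) << "\n";      // 1
  std::cout << is_contiguous_float_binary(with_scalar, 1) << "\n";  // 1
  std::cout << is_contiguous_float_binary(with_scalar) << "\n";     // 0
}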
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/LogAddExp.h ADDED
@@ -0,0 +1,61 @@
+ #pragma once
+
+ #include <c10/util/complex.h>
+ #include <ATen/NumericUtils.h>
+
+ namespace at { namespace native {
+ inline namespace CPU_CAPABILITY {
+
+ // custom min and max to be used in logcumsumexp for complex arguments
+ template <typename scalar_t>
+ std::pair<c10::complex<scalar_t>, c10::complex<scalar_t>> _logcumsumexp_minmax(c10::complex<scalar_t> x, c10::complex<scalar_t> y) {
+ if (at::_isnan(y)) { // either real is nan or imag is nan
+ return std::make_pair(y, y);
+ } else if (at::_isnan(x)) { // either real is nan or imag is nan
+ return std::make_pair(x, x);
+ } else {
+ return (x.real() < y.real()) ? std::make_pair(x, y) : std::make_pair(y, x);
+ }
+ }
+
+ template <typename scalar_t>
+ scalar_t _log_add_exp_helper(scalar_t x, scalar_t y) {
+ // Reference : https://www.tensorflow.org/api_docs/python/tf/math/cumulative_logsumexp
+ scalar_t min = at::_isnan(y) ? y : std::min(x, y); // std::min returns first arg if one of the args is nan
+ scalar_t max = at::_isnan(y) ? y : std::max(x, y); // std::max returns first arg if one of the args is nan
+ if (min != max || std::isfinite(min)) {
+ // nan will be propagated here
+ return std::log1p(std::exp(min - max)) + max;
+ } else {
+ // special case to correctly handle infinite cases
+ return x;
+ }
+ }
+
+ template <typename scalar_t>
+ c10::complex<scalar_t> _log_add_exp_helper(const c10::complex<scalar_t>& x, const c10::complex<scalar_t>& y) {
+ auto [min, max] = _logcumsumexp_minmax<scalar_t>(x, y);
+ auto min_real = std::real(min);
+ auto max_real = std::real(max);
+
+ if (at::_isnan(min)) { // either real is nan or imag is nan
+ // handling the "infectious" NaNs
+ return {std::numeric_limits<scalar_t>::quiet_NaN(), std::numeric_limits<scalar_t>::quiet_NaN()};
+ } else if (!std::isfinite(min_real) && (min_real == max_real)) {
+ if (min_real < 0) {
+ // handle the -inf case, the imaginary part here does not really matter as the exp(value)
+ // will be around 0.0 and the angle (i.e. the imaginary part) cannot be determined.
+ // It does not matter if we're taking the exp of this value
+ return min;
+ } else {
+ // handle the +inf case, we don't need the special precision for log1p for small values
+ // and to avoid producing nan in case of real(max) == real(min) == +inf
+ return std::log(std::exp(min) + std::exp(max));
+ }
+ } else {
+ return std::log1p(std::exp(min - max)) + max;
+ }
+ }
+
+ } // end namespace
+ }} //end at::native
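
Editor's note: for the real-valued case, the helper above uses the standard stable formulation log(exp(x) + exp(y)) = max + log1p(exp(min - max)), which avoids overflow where the naive form would. A small standalone comparison (hypothetical example values, not from the diff):

#include <algorithm>
#include <cmath>
#include <iostream>

// Naive form: exp() overflows for large inputs.
static double logaddexp_naive(double x, double y) {
  return std::log(std::exp(x) + std::exp(y));
}

// Stable form used in LogAddExp.h: factor out the max before exponentiating.
static double logaddexp_stable(double x, double y) {
  double mn = std::min(x, y), mx = std::max(x, y);
  if (mn != mx || std::isfinite(mn)) {
    return std::log1p(std::exp(mn - mx)) + mx;  // exp argument is <= 0, so it never overflows
  }
  return x;  // both +inf or both -inf
}

int main() {
  std::cout << logaddexp_naive(1000.0, 1000.0) << "\n";   // inf (exp(1000) overflows)
  std::cout << logaddexp_stable(1000.0, 1000.0) << "\n";  // ~1000.69 (= 1000 + log 2)
}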
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/Loops.h ADDED
@@ -0,0 +1,394 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // This file provides two functions to help write elementwise kernels:
4
+ //
5
+ // cpu_kernel(TensorIterator iter, <lambda>)
6
+ // cpu_kernel_vec(TensorIterator iter, <lambda>, <vec_lambda>)
7
+ //
8
+ // Both functions may generate vectorized code. The cpu_kernel implementation
9
+ // relies on the compiler's auto-vectorization. The cpu_kernel_vec
10
+ // implementation uses x86 SIMD intrinsics when available. These functions
11
+ // are only intended to be used in the ATen/native/cpu subdirectory, since files
12
+ // in other directories are not compiled with AVX/AVX2 enabled. See README.md
13
+ // for more details.
14
+ //
15
+ // For example, to write a multiplication kernel for float:
16
+ //
17
+ // cpu_kernel(iter, [](float a, float b) { return a * b; });
18
+ //
19
+ // Or you may write:
20
+ //
21
+ // cpu_kernel_vec(iter,
22
+ // [](float a, float b) { return a * b; },
23
+ // [](Vectorized<float> a, Vectorized<float> b) { return a * b; });
24
+ //
25
+ // See BinaryOpsKernel.cpp for the complete implementation
26
+ //
27
+ //
28
+
29
+ #include <stdint.h>
30
+ #include <c10/util/C++17.h>
31
+ #include <c10/util/Load.h>
32
+ #include <c10/util/irange.h>
33
+ #include <ATen/detail/FunctionTraits.h>
34
+ #include <ATen/native/cpu/IsContiguous.h>
35
+ #include <ATen/native/TensorIterator.h>
36
+ #include <ATen/native/TensorIteratorDynamicCasting.h>
37
+ #include <ATen/cpu/vec/vec.h>
38
+
39
+ #include <utility>
40
+
41
+ namespace at { namespace native { inline namespace CPU_CAPABILITY {
42
+
43
+ using namespace vec;
44
+
45
+ template <typename traits, std::size_t... INDEX>
46
+ typename traits::ArgsTuple
47
+ dereference_impl(char* C10_RESTRICT data[], const int64_t* strides, int64_t i,
48
+ std::index_sequence<INDEX...>) {
49
+ return std::make_tuple(
50
+ c10::load<typename traits::template arg<INDEX>::type>(
51
+ data[INDEX] + i * strides[INDEX])...);
52
+ }
53
+
54
+ template <typename traits>
55
+ typename traits::ArgsTuple
56
+ dereference(char* C10_RESTRICT data[], const int64_t* strides, int64_t i) {
57
+ using Indices = std::make_index_sequence<traits::arity>;
58
+ return dereference_impl<traits>(data, strides, i, Indices{});
59
+ }
60
+
61
+ template <typename traits, std::size_t... INDEX>
62
+ typename traits::ArgsTuple
63
+ dereference_vec_impl(char* C10_RESTRICT data[],
64
+ const typename traits::result_type& opt_scalar,
65
+ size_t S,
66
+ int64_t i,
67
+ std::index_sequence<INDEX...>) {
68
+ using Vec = typename traits::result_type;
69
+ using scalar_t = typename Vec::value_type;
70
+ return std::make_tuple(
71
+ S == INDEX + 1 ?
72
+ opt_scalar :
73
+ Vec::loadu(data[INDEX] + i * sizeof(scalar_t))...);
74
+ }
75
+
76
+ template <typename traits>
77
+ typename traits::ArgsTuple
78
+ dereference_vec(char* C10_RESTRICT data[], const typename traits::result_type& opt_scalar, size_t S, int64_t i) {
79
+ using Indices = std::make_index_sequence<traits::arity>;
80
+ return dereference_vec_impl<traits>(data, opt_scalar, S, i, Indices{});
81
+ }
82
+
83
+ template <typename func_t,
84
+ typename std::enable_if<!std::is_void<typename function_traits<func_t>::result_type>::value>::type* = nullptr>
85
+ static inline void
86
+ execute_op(char* C10_RESTRICT data[], const int64_t* strides, int64_t i, int64_t n, func_t&& op) {
87
+ using traits = function_traits<func_t>;
88
+ using result_type = typename traits::result_type;
89
+ for (; i < n; i++) {
90
+ result_type* out_ptr = (result_type*)(data[0] + i * strides[0]);
91
+ *out_ptr = c10::guts::apply(std::forward<func_t>(op), dereference<traits>(
92
+ &data[1],
93
+ &strides[1],
94
+ i));
95
+ }
96
+ }
97
+
98
+ template <typename func_t,
99
+ typename std::enable_if<std::is_void<typename function_traits<func_t>::result_type>::value>::type* = nullptr>
100
+ static inline void
101
+ execute_op(char* C10_RESTRICT data[], const int64_t* strides, int64_t i, int64_t n, func_t&& op) {
102
+ using traits = function_traits<func_t>;
103
+ for (; i < n; i++) {
104
+ c10::guts::apply(std::forward<func_t>(op), dereference<traits>(
105
+ &data[0],
106
+ &strides[0],
107
+ i));
108
+ }
109
+ }
110
+
111
+ // Basic loop operation (one output, N inputs). May be auto-vectorized
112
+ // by the compiler. Supports inputs and outputs of different types.
113
+ template <typename func_t>
114
+ static inline void
115
+ basic_loop(char* C10_RESTRICT data[], const int64_t* strides_, int64_t i, int64_t n, func_t&& op) {
116
+ using traits = function_traits<func_t>;
117
+ constexpr int ntensors = traits::arity + 1;
118
+
119
+ // Copying strides to temporary array helps auto vectorization in older GCC
120
+ // versions.
121
+ int64_t strides[ntensors];
122
+ for (const auto arg : c10::irange(ntensors)) {
123
+ strides[arg] = strides_[arg];
124
+ }
125
+
126
+ execute_op(data, strides, i, n, std::forward<func_t>(op));
127
+ }
128
+
129
+ // the recursive variadic template for iterating over the returned tuple
130
+ template<class T, size_t N>
131
+ struct TupleOutput {
132
+ static void handle(char *C10_RESTRICT data[], const int64_t *strides, int64_t i,
133
+ const T &tuple) {
134
+ TupleOutput<T, N - 1>::handle(data, strides, i, tuple);
135
+
136
+ auto output = std::get<N - 1>(tuple);
137
+ using output_type = decltype(output);
138
+ output_type * out_ptr = (output_type *)(data[N - 1] + i * strides[N - 1]);
139
+ *out_ptr = output;
140
+ }
141
+ };
142
+
143
+ // Base case for the above recursive template
144
+ template<class T>
145
+ struct TupleOutput<T, 1> {
146
+ static void handle(char *C10_RESTRICT data[], const int64_t *strides, int64_t i,
147
+ const T &tuple) {
148
+ auto output = std::get<0>(tuple);
149
+ using output_type = decltype(output);
150
+ output_type* out_ptr = (output_type *)(data[0] + i * strides[0]);
151
+ *out_ptr = output;
152
+ }
153
+ };
154
+
155
+ template<class... Args>
156
+ void handle_tuple_outputs(char* C10_RESTRICT data[],
157
+ const int64_t* strides,
158
+ int64_t i,
159
+ const std::tuple<Args...> &tuple) {
160
+ TupleOutput<decltype(tuple), sizeof...(Args)>::handle(data, strides, i, tuple);
161
+ }
162
+
163
+ // Loop operation for `cpu_kernel_multiple_outputs`.
164
+ // 1. Use `c10::guts::apply` to make dynamic method invocation
165
+ // for the lambda passed in `cpu_kernel_multiple_outputs`.
166
+ // 2. Iterate over the members of the returned tuple, set the corresponding
167
+ // output tensor by the tuple member in `handle_tuple_outputs` function.
168
+ template <typename func_t>
169
+ static inline void
170
+ multiple_outputs_loop(char* C10_RESTRICT data[], const int64_t* strides_, int64_t i, int64_t n, func_t&& op) {
171
+ using traits = function_traits<func_t>;
172
+
173
+ using result_type = typename traits::result_type;
174
+ constexpr int num_outputs = std::tuple_size<result_type>::value;
175
+ constexpr int ntensors = traits::arity + num_outputs;
176
+
177
+ // Copying strides to temporary array helps auto vectorization in older GCC
178
+ // versions.
179
+ int64_t strides[ntensors];
180
+ for (const auto arg : c10::irange(ntensors)) {
181
+ strides[arg] = strides_[arg];
182
+ }
183
+
184
+ for (; i < n; i++) {
185
+ auto output = c10::guts::apply(op, dereference<traits>(
186
+ &data[num_outputs],
187
+ &strides[num_outputs],
188
+ i));
189
+ handle_tuple_outputs(data, strides, i, output);
190
+ }
191
+ }
192
+
193
+ // Explicitly vectorized loop implementation. All inputs and outputs must be
194
+ // the same type and contiguous with one exception: a single input may be
195
+ // a scalar (stride 0). It's position is indicated by the argument `S`. If `S`
196
+ // is 0, then there are no scalar inputs.
197
+ template <typename func_t, typename vec_func_t>
198
+ static inline void
199
+ vectorized_loop(char** C10_RESTRICT data_, int64_t n, int64_t S, func_t&& op, vec_func_t&& vop) {
200
+ using traits = function_traits<vec_func_t>;
201
+ using scalar_t = typename function_traits<func_t>::result_type;
202
+ using Vec = Vectorized<scalar_t>;
203
+ constexpr int ntensors = traits::arity + 1;
204
+
205
+ char* C10_RESTRICT data[ntensors];
206
+ for (const auto arg : c10::irange(ntensors)) {
207
+ data[arg] = data_[arg];
208
+ }
209
+
210
+ Vec opt_scalar = Vec(S > 0 ? *(scalar_t*)data[S] : scalar_t(0));
211
+ int64_t i = 0;
212
+ for (; i <= n - 2 * Vec::size(); i += 2 * Vec::size()) {
213
+ auto args1 = dereference_vec<traits>(&data[1], opt_scalar, S, i);
214
+ auto args2 = dereference_vec<traits>(&data[1], opt_scalar, S, i + Vec::size());
215
+ auto out1 = c10::guts::apply(std::forward<vec_func_t>(vop), std::move(args1));
216
+ auto out2 = c10::guts::apply(std::forward<vec_func_t>(vop), std::move(args2));
217
+ out1.store(data[0] + i * sizeof(scalar_t));
218
+ out2.store(data[0] + (i + Vec::size()) * sizeof(scalar_t));
219
+ }
220
+ if (i < n) {
221
+ int64_t strides[ntensors];
222
+ for (const auto arg : c10::irange(ntensors)) {
223
+ strides[arg] = (S > 0 && arg == S) ? 0 : sizeof(scalar_t);
224
+ }
225
+ basic_loop(data, strides, i, n, std::forward<func_t>(op));
226
+ }
227
+ }
228
+
229
+
230
+ template <typename traits, typename cb_t>
231
+ static inline void unroll_contiguous_scalar_checks(
232
+ const int64_t* /*strides*/,
233
+ std::index_sequence<>,
234
+ cb_t&& cb) {
235
+ cb(0);
236
+ }
237
+
238
+ template <typename traits, typename cb_t, size_t INDEX0, size_t ...INDEX>
239
+ static inline void unroll_contiguous_scalar_checks(
240
+ const int64_t* strides,
241
+ std::index_sequence<INDEX0, INDEX...>,
242
+ cb_t&& cb) {
243
+ if (is_contiguous_scalar<traits, INDEX0 + 1>(strides)) {
244
+ cb(INDEX0 + 1);
245
+ } else {
246
+ unroll_contiguous_scalar_checks<traits>(strides, std::index_sequence<INDEX...>{}, std::forward<cb_t>(cb));
247
+ }
248
+ }
249
+
250
+ template <typename op_t, typename vop_t>
251
+ struct VectorizedLoop2d {
252
+ op_t op;
253
+ vop_t vop;
254
+
255
+ using traits = function_traits<op_t>;
256
+ static constexpr int ntensors = traits::arity + 1;
257
+ using data_t = std::array<char*, ntensors>;
258
+
259
+ VectorizedLoop2d(const op_t &op, vop_t vop):
260
+ op(op), vop(std::move(vop)) {}
261
+
262
+ static void advance(data_t &data, const int64_t *outer_strides) {
263
+ for (const auto arg : c10::irange(data.size())) {
264
+ data[arg] += outer_strides[arg];
265
+ }
266
+ }
267
+
268
+ void operator()(char** base, const int64_t *strides, int64_t size0, int64_t size1) {
269
+ data_t data;
270
+ std::copy_n(base, ntensors, data.data());
271
+ const int64_t *outer_strides = &strides[ntensors];
272
+
273
+ if (is_contiguous<traits>(strides)) {
274
+ for (const auto i C10_UNUSED : c10::irange(size1)) {
275
+ vectorized_loop(data.data(), size0, 0, op, vop);
276
+ advance(data, outer_strides);
277
+ }
278
+ } else {
279
+ using Indices = std::make_index_sequence<traits::arity>;
280
+ unroll_contiguous_scalar_checks<traits>(strides, Indices{}, [&](size_t idx) {
281
+ if (idx) {
282
+ for (const auto i C10_UNUSED : c10::irange(size1)) {
283
+ vectorized_loop(data.data(), size0, idx, op, vop);
284
+ advance(data, outer_strides);
285
+ }
286
+ } else {
287
+ for (const auto i C10_UNUSED : c10::irange(size1)) {
288
+ basic_loop(data.data(), strides, 0, size0, op);
289
+ advance(data, outer_strides);
290
+ }
291
+ }
292
+ });
293
+ }
294
+ }
295
+ };
296
+
297
+ template <typename op_t, typename vop_t>
298
+ VectorizedLoop2d<op_t, vop_t> make_vectorized_loop2d(
299
+ const op_t &op, const vop_t &vop) {
300
+ return VectorizedLoop2d<op_t, vop_t>(op, vop);
301
+ }
302
+
303
+ template <typename func_t>
304
+ void cpu_kernel(TensorIteratorBase& iter, func_t&& op, int64_t grain_size = at::internal::GRAIN_SIZE) {
305
+ using traits = function_traits<func_t>;
306
+ // this could be extended to work with void return types
307
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
308
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
309
+ // dynamic casting not currently supported on CPU
310
+ TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
311
+
312
+ iter.for_each([&](char** data, const int64_t* strides, int64_t n) {
313
+ // basic loop can handle 1d slices with arbitrary strides, and 1d slices are all that
314
+ // iter.for_each is ever sending to the loop lambda
315
+ basic_loop(data, strides, 0, n, std::forward<func_t>(op));
316
+ }, grain_size);
317
+ iter.cast_outputs();
318
+ }
319
+
320
+ // This function helps write elementwise kernels that require multiple outputs.
321
+ // It follows a structure similar to `cpu_kernel`.
322
+ // Instead of the `basic_loop` function, a new `multiple_outputs_loop` function is
323
+ // used to handle multiple return values (a usage sketch follows at the end of this file).
324
+ // For now, the `needs_dynamic_casting` check is not added, as the passed lambda (`func_t`)
325
+ // of `multiple_outputs_loop` returns `std::tuple` instead of `scalar_t`.
326
+ // `gpu_kernel_multiple_outputs` is also implemented without this check.
327
+ // We could extend `needs_dynamic_casting` to support both `std::tuple` and
328
+ // `thrust::tuple` in the future.
329
+ template <typename func_t>
330
+ void cpu_kernel_multiple_outputs(TensorIteratorBase& iter, func_t&& op, int64_t grain_size = at::internal::GRAIN_SIZE) {
331
+ using traits = function_traits<func_t>;
332
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
333
+
334
+ iter.for_each([&](char** data, const int64_t* strides, int64_t n) {
335
+ multiple_outputs_loop(data, strides, 0, n, std::forward<func_t>(op));
336
+ }, grain_size);
337
+ iter.cast_outputs();
338
+ }
339
+
340
+ template <bool check_dynamic_cast=true, typename func_t, typename vec_func_t>
341
+ void cpu_kernel_vec(TensorIteratorBase& iter, func_t&& op, vec_func_t&& vop, int64_t grain_size = at::internal::GRAIN_SIZE) {
342
+ using traits = function_traits<func_t>;
343
+ // this could be extended to work with void return types
344
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
345
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
346
+ // dynamic casting not currently supported on CPU, but some kernels (like Fill)
347
+ // explicitly dynamic_cast, so we give the opt-out of checking.
348
+ if constexpr (check_dynamic_cast) {
349
+ TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
350
+ }
351
+
352
+ iter.for_each(make_vectorized_loop2d(op, vop), grain_size);
353
+ iter.cast_outputs();
354
+ }
355
+
356
+ template <typename func_t>
357
+ void cpu_serial_kernel(TensorIteratorBase& iter, func_t&& op, const Range& range) {
358
+ using traits = function_traits<func_t>;
359
+ constexpr bool result_void = std::is_void<typename traits::result_type>::value;
360
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity &&
361
+ ((result_void && iter.noutputs() == 0) || (!result_void && iter.noutputs() == 1)));
362
+ // dynamic casting not currently supported on CPU
363
+ TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
364
+
365
+ iter.serial_for_each([&](char** data, const int64_t* strides, int64_t n) {
366
+ basic_loop(data, strides, 0, n, std::forward<func_t>(op));
367
+ }, range);
368
+ iter.cast_outputs();
369
+ }
370
+
371
+ template <typename func_t>
372
+ void cpu_serial_kernel(TensorIteratorBase& iter, func_t&& op) {
373
+ cpu_serial_kernel(iter, op, {0, iter.numel()});
374
+ }
375
+
376
+ template <typename func_t, typename vec_func_t>
377
+ void cpu_serial_kernel_vec(TensorIteratorBase& iter, func_t&& op, vec_func_t&& vop, const Range& range) {
378
+ using traits = function_traits<func_t>;
379
+ // this could be extended to work with void return types
380
+ TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
381
+ TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
382
+ // dynamic casting not currently supported on CPU
383
+ TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
384
+
385
+ iter.serial_for_each(make_vectorized_loop2d(op, vop), range);
386
+ iter.cast_outputs();
387
+ }
388
+
389
+ template <typename func_t, typename vec_func_t>
390
+ void cpu_serial_kernel_vec(TensorIteratorBase& iter, func_t&& op, vec_func_t&& vop) {
391
+ cpu_serial_kernel_vec(iter, op, vop, {0, iter.numel()});
392
+ }
393
+
394
+ }}} // namespace at::native::<anonymous>
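A minimal usage sketch of the helpers above (assumed caller code, not part of Loops.h): a kernel translation unit includes this header inside at::native, dispatches on dtype, and hands cpu_kernel_vec a scalar lambda plus a Vectorized lambda; for kernels with several outputs per element, the lambda passed to cpu_kernel_multiple_outputs returns a std::tuple. The function names and the assumption that `iter` was already configured with matching inputs and outputs are illustrative.

#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/cpu/Loops.h>

namespace at { namespace native {

// Binary elementwise kernel: the scalar lambda covers arbitrary strides, the
// Vectorized lambda covers the contiguous fast path chosen by VectorizedLoop2d.
void mul_kernel_sketch(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "mul_cpu_sketch", [&]() {
    cpu_kernel_vec(
        iter,
        [](scalar_t a, scalar_t b) -> scalar_t { return a * b; },
        [](vec::Vectorized<scalar_t> a, vec::Vectorized<scalar_t> b) {
          return a * b;
        });
  });
}

// Two outputs per element: `iter` is assumed to have been built with two
// int64 inputs and two outputs; the returned tuple fills them in order.
void divmod_kernel_sketch(TensorIteratorBase& iter) {
  cpu_kernel_multiple_outputs(
      iter,
      [](int64_t a, int64_t b) -> std::tuple<int64_t, int64_t> {
        return std::make_tuple(a / b, a % b);
      });
}

}} // namespace at::native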
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/MaxUnpoolKernel.h ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+ #include <ATen/native/DispatchStub.h>
3
+
4
+ namespace at {
5
+ class Tensor;
6
+
7
+ namespace native {
8
+
9
+ using max_unpooling_fn = void(*)(Tensor&, const Tensor&, const Tensor&);
10
+
11
+ DECLARE_DISPATCH(max_unpooling_fn, max_unpool2d_kernel);
12
+ DECLARE_DISPATCH(max_unpooling_fn, max_unpool3d_kernel);
13
+
14
+ }} // at::native
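This header only declares dispatch stubs; the sketch below shows how such stubs are typically wired up. The implementation name max_unpool2d_kernel_impl is an assumption for illustration, and in practice DEFINE_DISPATCH lives in the operator's .cpp while REGISTER_DISPATCH lives in the per-CPU-capability kernel file.

#include <ATen/core/Tensor.h>
#include <ATen/native/cpu/MaxUnpoolKernel.h>

namespace at { namespace native {

// Operator translation unit: define the stub object declared in the header.
DEFINE_DISPATCH(max_unpool2d_kernel);

// Kernel translation unit: provide an implementation matching
// max_unpooling_fn and register it for the current CPU capability.
static void max_unpool2d_kernel_impl(
    Tensor& output, const Tensor& input, const Tensor& indices) {
  // ... scatter values from `input` into `output` at the positions in `indices` ...
  (void)output; (void)input; (void)indices;
}

REGISTER_DISPATCH(max_unpool2d_kernel, &max_unpool2d_kernel_impl);

}} // namespace at::native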
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/PixelShuffleKernel.h ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+ #include <ATen/native/DispatchStub.h>
3
+
4
+ namespace at {
5
+ class TensorBase;
6
+ }
7
+
8
+ namespace at { namespace native {
9
+
10
+ using pixel_shuffle_fn = void(*)(TensorBase&, const TensorBase&, int64_t);
11
+ DECLARE_DISPATCH(pixel_shuffle_fn, pixel_shuffle_kernel);
12
+ DECLARE_DISPATCH(pixel_shuffle_fn, pixel_unshuffle_kernel);
13
+
14
+ }} // at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/SerialStackImpl.h ADDED
@@ -0,0 +1,144 @@
1
+ // Copyright 2004-present Facebook. All Rights Reserved.
2
+ #pragma once
3
+
4
+ #include <ATen/core/Tensor.h>
5
+
6
+ #include <ATen/MemoryOverlap.h>
7
+ #include <ATen/Parallel.h>
8
+ #include <ATen/TensorIterator.h>
9
+ #include <ATen/cpu/vec/functional.h>
10
+ #include <ATen/cpu/vec/vec.h>
11
+ #include <c10/util/irange.h>
12
+
13
+ namespace at { namespace native { namespace detail {
14
+
15
+ struct InputMeta {
16
+ void* data_ptr;
17
+ int64_t inner_size;
18
+
19
+ InputMeta(const Tensor& t, int64_t dim, int64_t inner)
20
+ : data_ptr(t.data_ptr()), inner_size(t.sizes()[dim] * inner) {}
21
+ };
22
+
23
+ // This kernel is used by two TensorList types:
24
+ // 1. stack_serial_kernel uses at::ArrayRef<Tensor>
25
+ // 2. Static runtime calls this kernel directly (csrc/jit/runtime/static/ops.cpp) with
26
+ // ProcessedNodeInputWrapper.
27
+ // When making changes, make sure that they are compatible with both types!
28
+ template <typename scalar_t, typename TensorListType>
29
+ void stack_serial_kernel_impl(Tensor& result, TensorListType tensors, int64_t dim) {
30
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
31
+ dim >= 0 && dim <= result.dim(),
32
+ "dim out of range in stack_serial_kernel_impl");
33
+ int64_t outer =
34
+ result.numel() / (result.sizes()[dim] * result.strides()[dim]);
35
+ scalar_t* result_data = result.data_ptr<scalar_t>();
36
+ int64_t ninputs = tensors.size();
37
+ std::vector<InputMeta> inputs;
38
+ inputs.reserve(ninputs);
39
+ for (const auto& tensor : tensors) {
40
+ inputs.emplace_back(tensor, dim, tensor.strides()[dim]);
41
+ }
42
+
43
+ using Vec = vec::Vectorized<scalar_t>;
44
+ scalar_t* result_ptr = result_data;
45
+ for (const auto i : c10::irange(outer)) {
46
+ for (const auto j : c10::irange(ninputs)) {
47
+ int64_t local_inner = inputs[j].inner_size;
48
+ scalar_t* input_ptr = (scalar_t*)(inputs[j].data_ptr) + i * local_inner;
49
+
50
+ if (local_inner < Vec::size()) {
51
+ for (const auto k : c10::irange(local_inner)) {
52
+ result_ptr[k] = input_ptr[k];
53
+ }
54
+ } else {
55
+ vec::map(
56
+ [](Vec x) { return x; }, result_ptr, input_ptr, local_inner);
57
+ }
58
+ result_ptr += local_inner;
59
+ }
60
+ }
61
+ }
62
+
63
+ // Checks to see whether native stack can be invoked under these conditions:
64
+ // - result and input tensors are contiguous
65
+ // - only one thread is used
66
+ // - no type promotion has to occur
67
+ // - tensors dtype is Double or Float
68
+ template <typename TensorListType>
69
+ bool can_use_native_serial_stack_impl(Tensor& result, TensorListType tensors, int64_t dim) {
70
+ TORCH_CHECK(tensors.size() > 0, "expected a non-empty list of Tensors");
71
+ const Tensor& first_tensor = tensors[0];
72
+ // stack dimension should be in range [0,firstTensor.dim())
73
+ // dim == firstTensor.dim() is a valid input, but it is handled by the default code path
74
+ // that uses unsqueeze
75
+ if (dim >= first_tensor.dim()) return false;
76
+ // Native stack doesn't apply when the first tensor is an empty 1-d tensor (it would be skipped).
77
+ if (first_tensor.numel() == 0 && first_tensor.dim() == 1) return false;
78
+ // there should be no type promotion
79
+ if (result.dtype() != first_tensor.dtype()) return false;
80
+
81
+ auto first_tensor_mem_format = first_tensor.suggest_memory_format();
82
+ ScalarType dtype = first_tensor.scalar_type();
83
+
84
+ if (!result.is_contiguous(first_tensor_mem_format)) {
85
+ return false;
86
+ }
87
+
88
+ // fast path only works for Double and Float
89
+ if (dtype != ScalarType::Double && dtype != ScalarType::Float) {
90
+ return false;
91
+ }
92
+
93
+ // check remainder of inputs
94
+ auto const &first_tensor_shape = first_tensor.sizes();
95
+ for (const auto i : c10::irange(1, tensors.size())) {
96
+ auto const &tensor = tensors[i];
97
+ TORCH_CHECK(tensors[i].sizes() == first_tensor.sizes(),
98
+ "stack expects each tensor to be equal size, but got ", first_tensor_shape,
99
+ " at entry 0 and ", tensor.sizes(), " at entry ", i);
100
+
101
+ // every tensor must be contiguous
102
+ // tensor sizes and strides must be the same
103
+ // there should be no type promotion
104
+ if (!tensor.is_contiguous(first_tensor_mem_format) ||
105
+ tensor.strides() != first_tensor.strides() ||
106
+ tensor.dtype() != dtype) {
107
+ return false;
108
+ }
109
+ }
110
+
111
+ // fast native stack should only be used when it is not worth using multiple threads
112
+ // or there is only one thread. Note that we aren't checking result.numel() here because
113
+ // it may not have been resized and we want to defer that cost till later.
114
+ int64_t numel_in_stack = first_tensor.numel() * tensors.size();
115
+ return numel_in_stack < at::internal::GRAIN_SIZE || at::get_num_threads() == 1;
116
+ }
117
+
118
+ template <typename TensorListType, bool should_skip_overlap_check>
119
+ struct CanUseNativeSerialStack;
120
+
121
+ template <typename TensorListType>
122
+ struct CanUseNativeSerialStack<TensorListType, false> {
123
+ static bool call(Tensor& result, TensorListType tensors, int64_t dim) {
124
+ // Inputs cannot alias the output tensor
125
+ for (const auto i : c10::irange(tensors.size())) {
126
+ auto lap = at::get_overlap_status(result, tensors[i]);
127
+ TORCH_CHECK(lap != at::MemOverlapStatus::Partial &&
128
+ lap != at::MemOverlapStatus::Full, 0,
129
+ "unsupported operation: the input tensors cannot refer to any of the "
130
+ "output memory locations. Found overlap in input tensor ", i);
131
+ }
132
+
133
+ return can_use_native_serial_stack_impl(result, tensors, dim);
134
+ }
135
+ };
136
+
137
+ template <typename TensorListType>
138
+ struct CanUseNativeSerialStack<TensorListType, true> {
139
+ static bool call(Tensor& result, TensorListType tensors, int64_t dim) {
140
+ return can_use_native_serial_stack_impl(result, tensors, dim);
141
+ }
142
+ };
143
+
144
+ }}} // namespace at::native::detail
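A sketch of a typical caller (assumed, not part of this header): the serial stack implementation above is dispatched over the Float/Double dtypes that can_use_native_serial_stack_impl accepts; `dim` is assumed to be already wrapped into [0, result.dim()].

#include <ATen/Dispatch.h>
#include <ATen/native/cpu/SerialStackImpl.h>

namespace at { namespace native {

void stack_serial_kernel_sketch(Tensor& result, TensorList tensors, int64_t dim) {
  AT_DISPATCH_FLOATING_TYPES(
      result.scalar_type(), "stack_serial_kernel_sketch", [&]() {
        detail::stack_serial_kernel_impl<scalar_t, TensorList>(result, tensors, dim);
      });
}

}} // namespace at::native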
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/SpmmReduceKernel.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+ #include <ATen/native/ReductionType.h>
6
+
7
+ namespace at::native {
8
+
9
+ using spmm_reduce_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, ReductionType op);
10
+ using spmm_reduce_arg_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, ReductionType op);
11
+ using spmm_reduce_backward_input_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, ReductionType op);
12
+ using spmm_reduce_backward_input_arg_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, ReductionType op);
13
+ using spmm_reduce_backward_other_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, ReductionType op);
14
+
15
+ DECLARE_DISPATCH(spmm_reduce_fn, spmm_reduce_stub);
16
+ DECLARE_DISPATCH(spmm_reduce_arg_fn, spmm_reduce_arg_stub);
17
+ DECLARE_DISPATCH(spmm_reduce_backward_input_fn, spmm_reduce_backward_input_stub);
18
+ DECLARE_DISPATCH(spmm_reduce_backward_input_arg_fn, spmm_reduce_backward_input_arg_stub);
19
+ DECLARE_DISPATCH(spmm_reduce_backward_other_fn, spmm_reduce_backward_other_stub);
20
+ DECLARE_DISPATCH(spmm_reduce_backward_input_arg_fn, spmm_reduce_backward_other_arg_stub);
21
+
22
+ } // at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/StackKernel.h ADDED
@@ -0,0 +1,12 @@
1
+ // Copyright 2004-present Facebook. All Rights Reserved.
2
+ #pragma once
3
+
4
+ #include <ATen/core/Tensor.h>
5
+ #include <ATen/native/DispatchStub.h>
6
+
7
+ namespace at { namespace native {
8
+
9
+ using stack_serial_fn = void(*)(Tensor &, TensorList, int64_t);
10
+ DECLARE_DISPATCH(stack_serial_fn, stack_serial_stub);
11
+
12
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/UpSampleKernelAVXAntialias.h ADDED
@@ -0,0 +1,1376 @@
1
+ /*
2
+ The Python Imaging Library (PIL) is
3
+
4
+ Copyright © 1997-2011 by Secret Labs AB
5
+ Copyright © 1995-2011 by Fredrik Lundh
6
+
7
+ Pillow is the friendly PIL fork. It is
8
+
9
+ Copyright © 2010-2022 by Alex Clark and contributors
10
+
11
+ Like PIL, Pillow is licensed under the open source HPND License
12
+ */
13
+
14
+ // This code is heavily inspired by PILLOW-SIMD's implementation:
15
+ // https://github.com/uploadcare/pillow-simd/blob/simd/master/src/libImaging/Resample.c
16
+
17
+ #pragma once
18
+ #ifdef CPU_CAPABILITY_AVX2
19
+ // TODO: This file only supports AVX2. We could split the AVX kernels into
20
+ // smaller logical blocks in order to port them into the Vec.h logic. This would
21
+ // allow us to support other vectorization architectures and perhaps also support
22
+ // the non-vectorized fallback (we'd need to make sure it's not slower than the
23
+ // current fallback).
24
+
25
+ #include <ATen/core/Tensor.h>
26
+ #include <ATen/cpu/vec/intrinsics.h>
27
+ #include <c10/util/irange.h>
28
+
29
+ #ifndef AT_PER_OPERATOR_HEADERS
30
+ #include <ATen/Functions.h>
31
+ #else
32
+ #include <ATen/ops/empty.h>
33
+ #endif
34
+
35
+
36
+ namespace {
37
+
38
+ static inline __m128i mm_cvtsi32_si128(const uint8_t* C10_RESTRICT ptr, bool i32_aligned) {
39
+ int32_t v;
40
+ if (i32_aligned) {
41
+ v = *(const int32_t*)ptr;
42
+ } else {
43
+ std::memcpy(&v, ptr, 4);
44
+ }
45
+ return _mm_cvtsi32_si128(v);
46
+ }
47
+
48
+ static inline __m128i mm_cvtepu8_epi32(const uint8_t* C10_RESTRICT ptr, bool i32_aligned) {
49
+ return _mm_cvtepu8_epi32(mm_cvtsi32_si128(ptr, i32_aligned));
50
+ }
51
+
52
+ static inline void _write_endline_rgb_as_uint32(
53
+ uint8_t* C10_RESTRICT output,
54
+ uint32_t data
55
+ ) {
56
+ // data is (R G B X), output is (X1 X2 X3 | R1 G1 B1 R2 ...)
57
+ // Here we explicitly set X as R1
58
+ uint8_t* data_ptr = reinterpret_cast<uint8_t*>(&data);
59
+ data_ptr[3] = output[3];
60
+ std::memcpy(output, data_ptr, 4);
61
+ }
62
+
63
+ at::Tensor unpack_rgb(const at::Tensor& packed_tensor) {
64
+ // Convert a "packed" tensor (typically RGBRGBRGB if channels_last) into
65
+ // RGBARGBARGBA format where A is hard-coded to 0. Each pixel is encoded
66
+ // as 32 bits. This generalizes to num_channels <= 4 and also works for
67
+ // non-channels_last tensors.
68
+
69
+ const uint8_t* packed = (const uint8_t*)packed_tensor.data_ptr<uint8_t>();
70
+ auto num_pixels = packed_tensor.size(1) * packed_tensor.size(2);
71
+ auto num_channels = packed_tensor.size(0);
72
+
73
+ constexpr int rgba_size = 4;
74
+ auto unpacked_tensor = at::empty({rgba_size, packed_tensor.size(1), packed_tensor.size(2)}, at::CPU(at::kByte));
75
+ uint8_t* unpacked = (uint8_t*) unpacked_tensor.data_ptr<uint8_t>();
76
+
77
+ auto stride_i = packed_tensor.stride(2);
78
+ auto stride_j = packed_tensor.stride(0);
79
+
80
+ for (const auto i : c10::irange(num_pixels)) {
81
+ for (const auto j : c10::irange(rgba_size)) {
82
+ unpacked[rgba_size * i + j] = (j < num_channels) ? packed[stride_i * i + stride_j * j] : 0;
83
+ }
84
+ }
85
+ return unpacked_tensor;
86
+ }
87
+
88
+ void pack_rgb(
89
+ const at::Tensor& unpacked_tensor, // IN
90
+ const at::Tensor& packed_tensor // OUT
91
+ ) {
92
+ // Convert an unpacked channels-last 3-channel or 4-channel tensor back into the original data layout.
93
+
94
+ uint8_t* unpacked = (uint8_t*)unpacked_tensor.data_ptr<uint8_t>();
95
+ uint8_t* packed = (uint8_t*)packed_tensor.data_ptr<uint8_t>();
96
+ auto num_pixels = packed_tensor.size(1) * packed_tensor.size(2);
97
+ auto num_channels = packed_tensor.size(0);
98
+
99
+ auto unpacked_increment = unpacked_tensor.size(0);
100
+ auto packed_increment = packed_tensor.stride(2);
101
+ auto packed_stride = packed_tensor.stride(0);
102
+
103
+ TORCH_INTERNAL_ASSERT(unpacked_increment == 3 || unpacked_increment == 4);
104
+
105
+ for (const auto i C10_UNUSED : c10::irange(num_pixels)) {
106
+ for (const auto j : c10::irange(num_channels)) {
107
+ packed[j * packed_stride] = unpacked[j];
108
+ }
109
+ unpacked += unpacked_increment;
110
+ packed += packed_increment;
111
+ }
112
+ }
113
+
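// Layout sketch for unpack_rgb() / pack_rgb() above (values are illustrative):
// a channels_last 1x2 RGB image
//   packed   = [R0 G0 B0 | R1 G1 B1]
// is unpacked into one 32-bit slot per pixel, with the missing alpha channel
// hard-coded to 0:
//   unpacked = [R0 G0 B0 0 | R1 G1 B1 0]
// pack_rgb() performs the inverse and only writes num_channels bytes per
// pixel, so the zero padding never reaches the output tensor.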
114
+ void ImagingResampleHorizontalConvolution8u4x(
115
+ uint8_t* C10_RESTRICT lineOut0,
116
+ uint8_t* C10_RESTRICT lineOut1,
117
+ uint8_t* C10_RESTRICT lineOut2,
118
+ uint8_t* C10_RESTRICT lineOut3,
119
+ int64_t out_xsize,
120
+ const uint8_t* C10_RESTRICT lineIn0,
121
+ const uint8_t* C10_RESTRICT lineIn1,
122
+ const uint8_t* C10_RESTRICT lineIn2,
123
+ const uint8_t* C10_RESTRICT lineIn3,
124
+ int64_t in_xsize,
125
+ const int64_t* idx_ptr_xmin,
126
+ const int64_t* idx_ptr_size,
127
+ const int16_t* kk,
128
+ int kmax,
129
+ unsigned int coefs_precision,
130
+ int64_t num_channels,
131
+ bool is_last_line);
132
+
133
+ void ImagingResampleHorizontalConvolution8u(
134
+ uint8_t* C10_RESTRICT lineOut,
135
+ int64_t out_xsize,
136
+ const uint8_t* C10_RESTRICT lineIn,
137
+ int64_t in_xsize,
138
+ const int64_t* idx_ptr_xmin,
139
+ const int64_t* idx_ptr_size,
140
+ const int16_t* kk,
141
+ int kmax,
142
+ unsigned int coefs_precision,
143
+ int64_t num_channels,
144
+ bool is_last_line);
145
+
146
+ void ImagingResampleVerticalConvolution8u(
147
+ uint8_t* C10_RESTRICT lineOut,
148
+ const uint8_t* C10_RESTRICT lineIn,
149
+ int64_t xsize,
150
+ int64_t ids_min,
151
+ int64_t ids_size,
152
+ const int16_t* k,
153
+ unsigned int coefs_precision,
154
+ int64_t num_channels);
155
+
156
+ template<int num_channels>
157
+ void ImagingResampleHorizontal(
158
+ const at::Tensor & unpacked_output,
159
+ const at::Tensor & unpacked_input,
160
+ int ksize,
161
+ const std::vector<at::Tensor>& horiz_indices_weights,
162
+ unsigned int horiz_weights_precision) {
163
+
164
+ // Interpolation horizontal pass: we compute x-axis (image width) interpolation outputs.
165
+
166
+ // Input data is stored as
167
+ // input = [r[0], g[0], b[0], a[0], r[1], g[1], b[1], a[1], r[2], g[2], b[2], a[2], ...]
168
+ // Weights are float values computed for each output pixel and rescaled to uint16:
169
+ // weights[i] = [w[i, 0], w[i, 1], ..., w[i, K-1]]
170
+ // We want to compute the output as follows:
171
+ // output = [oR[0], oG[0], oB[0], oA[0], oR[1], oG[1], oB[1], oA[1], ...]
172
+ // where
173
+ // oR[yoffset + i] = r[yoffset + xmin[i]] * w[i, 0] + ... + r[yoffset + xmin[i] + K-1] * w[i, K-1]
174
+ // oG[yoffset + i] = g[yoffset + xmin[i]] * w[i, 0] + ... + g[yoffset + xmin[i] + K-1] * w[i, K-1]
175
+ // oB[yoffset + i] = b[yoffset + xmin[i]] * w[i, 0] + ... + b[yoffset + xmin[i] + K-1] * w[i, K-1]
176
+ //
177
+
178
+ // TODO: we may want to merge that into the fallback code (currently called
179
+ // basic_loop_aa_horizontal<uint8_t>)
180
+ // Although this may not be needed if / when we port all this code to use
181
+ // Vec.h, since this would potentially give us another fallback implementation.
182
+
183
+ const int16_t* kk = (int16_t*)(horiz_indices_weights[3].data_ptr<double>());
184
+
185
+ auto xout = unpacked_output.size(2);
186
+ auto yout = unpacked_output.size(1);
187
+ auto xin = unpacked_input.size(2);
188
+ TORCH_INTERNAL_ASSERT(num_channels == unpacked_input.size(0));
189
+
190
+ const int64_t* idx_ptr_xmin = horiz_indices_weights[0].data_ptr<int64_t>();
191
+ const int64_t* idx_ptr_size = horiz_indices_weights[1].data_ptr<int64_t>();
192
+
193
+ uint8_t* unpacked_output_p = unpacked_output.data_ptr<uint8_t>();
194
+ const uint8_t* unpacked_input_p = unpacked_input.data_ptr<uint8_t>();
195
+
196
+ int64_t yy = 0;
197
+ auto xout_stride = xout * num_channels;
198
+ auto xin_stride = xin * num_channels;
199
+ for (; yy < yout - 3; yy += 4) {
200
+ ImagingResampleHorizontalConvolution8u4x(
201
+ unpacked_output_p + yy * xout_stride,
202
+ unpacked_output_p + (yy + 1) * xout_stride,
203
+ unpacked_output_p + (yy + 2) * xout_stride,
204
+ unpacked_output_p + (yy + 3) * xout_stride,
205
+ xout,
206
+ unpacked_input_p + yy * xin_stride,
207
+ unpacked_input_p + (yy + 1) * xin_stride,
208
+ unpacked_input_p + (yy + 2) * xin_stride,
209
+ unpacked_input_p + (yy + 3) * xin_stride,
210
+ xin,
211
+ idx_ptr_xmin,
212
+ idx_ptr_size,
213
+ kk,
214
+ ksize,
215
+ horiz_weights_precision,
216
+ num_channels,
217
+ yy + 3 == yout - 1);
218
+ }
219
+ for (; yy < yout; yy++) {
220
+ ImagingResampleHorizontalConvolution8u(
221
+ unpacked_output_p + yy * xout_stride,
222
+ xout,
223
+ unpacked_input_p + yy * xin_stride,
224
+ xin,
225
+ idx_ptr_xmin,
226
+ idx_ptr_size,
227
+ kk,
228
+ ksize,
229
+ horiz_weights_precision,
230
+ num_channels,
231
+ yy == yout - 1);
232
+ }
233
+ }
234
+
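// Scalar reference for the horizontal pass above: a sketch (not used by the
// AVX path) that spells out the weighted-sum formula from the comments, using
// the same fixed-point rounding policy as the kernels below (accumulate in
// int32, seed with 1 << (precision - 1), arithmetic shift right, clamp to
// uint8). Here `xmin[out_x]` is the first contributing input pixel in pixel
// units, whereas the AVX kernels take byte offsets pre-scaled by the channel
// stride; names are illustrative. The vertical pass is analogous, stepping
// along y instead of x.
template <int num_channels>
void imaging_resample_horizontal_scalar_sketch(
    uint8_t* out, const uint8_t* in,
    int64_t yout, int64_t xout, int64_t xin,
    const int64_t* xmin, const int64_t* xsize,
    const int16_t* kk, int ksize, unsigned int precision) {
  for (int64_t yy = 0; yy < yout; yy++) {
    for (int64_t out_x = 0; out_x < xout; out_x++) {
      const int16_t* k = &kk[out_x * ksize];
      for (int c = 0; c < num_channels; c++) {
        int32_t acc = 1 << (precision - 1);  // rounding bias
        for (int64_t m = 0; m < xsize[out_x]; m++) {
          auto pixel = in[(yy * xin + xmin[out_x] + m) * num_channels + c];
          acc += static_cast<int32_t>(k[m]) * pixel;
        }
        int32_t v = acc >> precision;
        v = v < 0 ? 0 : (v > 255 ? 255 : v);
        out[(yy * xout + out_x) * num_channels + c] = static_cast<uint8_t>(v);
      }
    }
  }
}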
235
+ void ImagingResampleVertical(
236
+ const at::Tensor & unpacked_output,
237
+ const at::Tensor & unpacked_input,
238
+ int ksize,
239
+ const std::vector<at::Tensor>& vert_indices_weights,
240
+ unsigned int vert_weights_precision) {
241
+
242
+ // Interpolation vertical pass: we compute y-axis interpolation outputs.
243
+ // Input data is stored as
244
+ // input = [r[0], g[0], b[0], a[0], r[1], g[1], b[1], a[1], r[2], g[2], b[2], a[2], ...]
245
+ // Weights are float values computed for each output pixel and rescaled to uint16:
246
+ // weights[i] = [w[i, 0], w[i, 1], ..., w[i, K-1]]
247
+ // We want to compute the output as follows:
248
+ // output = [oR[0], oG[0], oB[0], oA[0], oR[1], oG[1], oB[1], oA[1], ...]
249
+ // where
250
+ // oR[xoffset + i] = r[xoffset + ymin[i]] * w[i, 0] + ... + r[xoffset + ymin[i] + (K-1) * xsize] * w[i, K-1]
251
+ // oG[xoffset + i] = g[xoffset + ymin[i]] * w[i, 0] + ... + g[xoffset + ymin[i] + (K-1) * xsize] * w[i, K-1]
252
+ // oB[xoffset + i] = b[xoffset + ymin[i]] * w[i, 0] + ... + b[xoffset + ymin[i] + (K-1) * xsize] * w[i, K-1]
253
+
254
+ // TODO: we may want to merge that into the fallback code (currently called
255
+ // basic_loop_aa_vertical<uint8_t>)
256
+ // Although this may not be needed if / when we port all this code to use
257
+ // Vec.h, since this would potentially give us another fallback implementation.
258
+ const int16_t* kk = (int16_t*)(vert_indices_weights[3].data_ptr<double>());
259
+
260
+ const int64_t* idx_ptr_xmin = vert_indices_weights[0].data_ptr<int64_t>();
261
+ const int64_t* idx_ptr_size = vert_indices_weights[1].data_ptr<int64_t>();
262
+
263
+ uint8_t* unpacked_output_p = unpacked_output.data_ptr<uint8_t>();
264
+ const uint8_t* unpacked_input_p = unpacked_input.data_ptr<uint8_t>();
265
+
266
+ auto xout = unpacked_output.size(2);
267
+ auto yout = unpacked_output.size(1);
268
+ const auto num_channels = unpacked_input.size(0);
269
+ TORCH_INTERNAL_ASSERT(num_channels == unpacked_output.size(0));
270
+
271
+ auto xout_stride = xout * num_channels;
272
+ for (const auto yy : c10::irange(yout)) {
273
+ const auto* k = &kk[yy * ksize];
274
+ auto ids_min = idx_ptr_xmin[yy];
275
+ auto ids_size = idx_ptr_size[yy];
276
+ ImagingResampleVerticalConvolution8u(
277
+ unpacked_output_p + yy * xout_stride,
278
+ unpacked_input_p,
279
+ xout,
280
+ ids_min,
281
+ ids_size,
282
+ k,
283
+ vert_weights_precision,
284
+ num_channels);
285
+ }
286
+ }
287
+
288
+ // This is the only public entry point in this file. It supports bilinear or bicubic
289
+ // mode for uint8 dtype when C <= 4, with or without antialias. The
290
+ // implementation is based on PIL-SIMD.
291
+ // Its equivalent implementation (fallback) for when AVX isn't supported or when
292
+ // C > 4 is separable_upsample_generic_Nd_kernel_impl(). There are a bunch of
293
+ // future improvements that can be made: look for the TODOs in this file.
294
+ // For details on how the weights are computed and how the multiplications are
295
+ // run on int (instead of float weights), see
296
+ // [ Weights computation for uint8_t and multiplication trick ]
297
+ // For details on how the AVX kernels are implemented, see
298
+ // https://gist.github.com/NicolasHug/47c97d731f05eaad5694c173849b86f5
299
+ // See also [ Support for antialias=False as a subcase of antialias=True ] to
300
+ // learn more about how the antialias=False case is computed. The same holds
301
+ // here: all these kernels are general enough to handle an arbitrary number of
302
+ // weights, but when aa=False they could be optimized further.
303
+ template <typename scale_type, class F>
304
+ void upsample_avx_bilinear_bicubic_uint8(
305
+ const at::Tensor& input_,
306
+ const at::Tensor& output,
307
+ bool align_corners,
308
+ const scale_type& scales,
309
+ bool antialias) {
310
+ auto batch_size = input_.size(0);
311
+ auto num_channels = input_.size(1);
312
+ auto xin = input_.size(3);
313
+ auto yin = input_.size(2);
314
+ auto xout = output.size(3);
315
+ auto yout = output.size(2);
316
+
317
+ if (xin == xout && yin == yout) {
318
+ output.copy_(input_);
319
+ return;
320
+ }
321
+
322
+ at::Tensor input = input_;
323
+ if (!(input.is_contiguous() || input.is_contiguous(at::MemoryFormat::ChannelsLast))) {
324
+ // If input is not contiguous with memory format channels first or channels last,
325
+ // we explicitly convert the input to contiguous channels last memory format.
326
+ // This simplifies the rest of the code and lets us assume that the format is either contiguous channels-first or channels-last.
327
+ // Most tensors going through this `if` block won't need to go through unpacking, but those having C < 3 may
328
+ // have to (this means 2 copies are made). We could avoid the extra copy by handling non-contiguous input
329
+ // directly within unpack_rgb() and pack_rgb(), but initial attempts showed that this is fairly complex.
330
+ input = input.contiguous(at::MemoryFormat::ChannelsLast);
331
+ }
332
+
333
+ auto need_horizontal = xout != xin;
334
+ auto need_vertical = yout != yin;
335
+
336
+ int ksize_horiz, ksize_vert;
337
+ std::vector<at::Tensor> horiz_indices_weights, vert_indices_weights;
338
+ unsigned int horiz_weights_precision, vert_weights_precision;
339
+
340
+ bool skip_unpacking = (num_channels == 3 || num_channels == 4) && input.is_contiguous(at::MemoryFormat::ChannelsLast);
341
+ bool skip_packing = (num_channels == 3 || num_channels == 4) && output.is_contiguous(at::MemoryFormat::ChannelsLast);
342
+
343
+ if (need_horizontal) {
344
+ int interp_dim = 3;
345
+ auto stride = (skip_unpacking) ? num_channels : 4;
346
+ std::tie(horiz_indices_weights, ksize_horiz, horiz_weights_precision) =
347
+ F::compute_index_ranges_int16_weights(
348
+ /*input_size=*/xin,
349
+ /*output_size=*/xout,
350
+ /*stride=*/stride,
351
+ /*ndims=*/4,
352
+ /*reshape_dim=*/interp_dim,
353
+ /*align_corners=*/align_corners,
354
+ /*opt_scale=*/scales[interp_dim - 2],
355
+ /*antialias=*/antialias,
356
+ /*align_i32=*/true);
357
+ }
358
+
359
+ if (need_vertical) {
360
+ int interp_dim = 2;
361
+ auto stride = (skip_unpacking) ? num_channels * xout : 4 * xout;
362
+ std::tie(vert_indices_weights, ksize_vert, vert_weights_precision) =
363
+ F::compute_index_ranges_int16_weights(
364
+ /*input_size=*/yin,
365
+ /*output_size=*/yout,
366
+ /*stride=*/stride,
367
+ /*ndims=*/4,
368
+ /*reshape_dim=*/interp_dim,
369
+ /*align_corners=*/align_corners,
370
+ /*opt_scale=*/scales[interp_dim - 2],
371
+ /*antialias=*/antialias,
372
+ /*align_i32=*/true);
373
+ }
374
+
375
+ at::Tensor buffer_horiz, buffer_vert;
376
+ // Minor optimization: we can avoid allocating an extra buffer if we're performing
377
+ // horizontal-only or vertical-only interpolation, and if the tensor doesn't
378
+ // need repacking
379
+ if (need_horizontal && (need_vertical || !skip_packing)) {
380
+ auto c = (skip_unpacking) ? num_channels : 4;
381
+ buffer_horiz = at::empty({c, yin, xout}, input.options());
382
+ }
383
+ if (need_vertical && !skip_packing) {
384
+ auto c = (skip_unpacking) ? num_channels : 4;
385
+ buffer_vert = at::empty({c, yout, xout}, input.options());
386
+ }
387
+
388
+ for (const auto i : c10::irange(batch_size)) {
389
+
390
+ at::Tensor unpacked_input = (skip_unpacking) ? input[i] : unpack_rgb(input[i]);
391
+ at::Tensor unpacked_output;
392
+
393
+ if (need_horizontal) {
394
+ at::Tensor unpacked_output_temp = (need_vertical || !skip_packing) ? buffer_horiz : output[i];
395
+
396
+ if (skip_unpacking && num_channels == 3) {
397
+ ImagingResampleHorizontal<3>(
398
+ unpacked_output_temp,
399
+ unpacked_input,
400
+ ksize_horiz,
401
+ horiz_indices_weights,
402
+ horiz_weights_precision);
403
+ } else {
404
+ ImagingResampleHorizontal<4>(
405
+ unpacked_output_temp,
406
+ unpacked_input,
407
+ ksize_horiz,
408
+ horiz_indices_weights,
409
+ horiz_weights_precision);
410
+ }
411
+ unpacked_output = unpacked_input = unpacked_output_temp;
412
+ }
413
+ if (need_vertical) {
414
+ unpacked_output = (skip_packing) ? output[i] : buffer_vert;
415
+
416
+ ImagingResampleVertical(
417
+ unpacked_output,
418
+ unpacked_input,
419
+ ksize_vert,
420
+ vert_indices_weights,
421
+ vert_weights_precision
422
+ );
423
+ }
424
+
425
+ TORCH_INTERNAL_ASSERT(unpacked_output.defined());
426
+
427
+ if (!skip_packing) {
428
+ pack_rgb(unpacked_output, output[i]);
429
+ }
430
+ }
431
+ }
432
+
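// Sketch of the fixed-point "multiplication trick" referenced above (the
// precision value below is an illustrative assumption, not the one chosen at
// runtime). Float weights are quantized to int16 scaled by 2^coefs_precision;
// the convolution kernels below accumulate weight * pixel products in int32,
// seed the accumulator with a rounding bias of 1 << (coefs_precision - 1)
// (the `initial` register), and shift right by coefs_precision at the end:
//
//   unsigned int precision = 8;                                  // assumed
//   int16_t w = static_cast<int16_t>(0.3f * (1 << precision));   // ~0.3 -> 76
//   int32_t acc = 1 << (precision - 1);                          // rounding bias
//   acc += static_cast<int32_t>(w) * 200;                        // weight * pixel
//   uint8_t result = static_cast<uint8_t>(acc >> precision);     // 59 ~= 0.3 * 200
//
// which is what _mm256_madd_epi16 followed by _mm256_srai_epi32 and the
// saturating packs implement, many lanes at a time.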
433
+ void ImagingResampleHorizontalConvolution8u4x(
434
+ uint8_t* C10_RESTRICT lineOut0,
435
+ uint8_t* C10_RESTRICT lineOut1,
436
+ uint8_t* C10_RESTRICT lineOut2,
437
+ uint8_t* C10_RESTRICT lineOut3,
438
+ int64_t out_xsize,
439
+ const uint8_t* C10_RESTRICT lineIn0,
440
+ const uint8_t* C10_RESTRICT lineIn1,
441
+ const uint8_t* C10_RESTRICT lineIn2,
442
+ const uint8_t* C10_RESTRICT lineIn3,
443
+ int64_t in_xsize,
444
+ const int64_t* idx_ptr_xmin,
445
+ const int64_t* idx_ptr_size,
446
+ const int16_t* kk,
447
+ int kmax,
448
+ unsigned int coefs_precision,
449
+ int64_t num_channels,
450
+ bool is_last_line) {
451
+
452
+ // Interpolation horizontal pass processing together 4 vertical lines.
453
+ // - Input data format is RGBA or RGB with R,G,B,A being uint8. In case of RGBA
454
+ // we can encode 4 values as a single uint32 value.
455
+ // - We split the size of weight vector for a given output index as a sum:
456
+ // ids_size = num_blocks_4 * 4 + num_blocks_2 * 2 + num_blocks_1.
457
+ // - We load and process 4 weight values in a loop ("block 4"), then we process 2 weight values
458
+ // in another loop ("block 2"), and finally we process 1 weight value in the final loop ("block 1").
459
+
460
+ // Define shuffling masks (low/high) for num_channels 4 and 3
461
+ // Mask low casts the lower half of each lane to epi16 and reorders RGBARGBA -> RRGGBBAA:
462
+ // [r1 g1 b1 a1 r2 g2 b2 a2 ... | R1 G1 B1 A1 R2 G2 B2 A2 ... ] ->
463
+ // [r1 0 r2 0 g1 0 g2 0 b1 0 b2 0 a1 0 a2 0 | R1 0 R2 0 G1 0 G2 0 B1 0 B2 0 A1 0 A2 0]
464
+ // Mask high casts the upper half of each lane to epi16 and reorders RGBARGBA -> RRGGBBAA:
465
+ // [ ... r3 g3 b3 a3 r4 g4 b4 a4 | ... R3 G3 B3 A3 R4 G4 B4 A4 ] ->
466
+ // [r3 0 r4 0 g3 0 g4 0 b3 0 b4 0 a3 0 a4 0 | R3 0 R4 0 G3 0 G4 0 B3 0 B4 0 A3 0 A4 0]
467
+
468
+ const auto mask_low_c4 = _mm256_set_epi8(
469
+ -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0,
470
+ -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0);
471
+ const auto mask_high_c4 = _mm256_set_epi8(
472
+ -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8,
473
+ -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8);
474
+ const auto mask_low_c3 = _mm256_set_epi8(
475
+ -1, -1, -1, -1, -1, 5, -1, 2, -1, 4, -1, 1, -1, 3, -1, 0,
476
+ -1, -1, -1, -1, -1, 5, -1, 2, -1, 4, -1, 1, -1, 3, -1, 0);
477
+ const auto mask_high_c3 = _mm256_set_epi8(
478
+ -1, -1, -1, -1, -1, 11, -1, 8, -1, 10, -1, 7, -1, 9, -1, 6,
479
+ -1, -1, -1, -1, -1, 11, -1, 8, -1, 10, -1, 7, -1, 9, -1, 6);
480
+
481
+ const auto mask_low = (num_channels == 3) ? mask_low_c3 : mask_low_c4;
482
+ const auto mask_high = (num_channels == 3) ? mask_high_c3 : mask_high_c4;
483
+
484
+ const auto stride = num_channels * sizeof(uint8_t);
485
+
486
+ TORCH_INTERNAL_ASSERT(stride == 3 || stride == 4);
487
+
488
+ // out_xsize = output width, out_x = output x index
489
+ // ids_min is the input offset index corresponding to out_x
490
+ // ids_size is the interpolation size for out_x
491
+
492
+ // Let's precompute ids_size limits for block 4 and block 2.
493
+ //
494
+ // In block 4 (4 means we process 4 weight values together), we read input data
495
+ // with _mm_loadu_si128, i.e. 16 bytes, per one line:
496
+ // lineIn0 + stride * (i + ids_min) + 16 <= lineIn0 + stride * (ids_size + ids_min)
497
+ // --> i <= ids_size - 16.0 / stride
498
+ // Strict boundary:
499
+ // --> i < ids_size + 1 - int(ceil(16.0 / stride)) = ids_size - b4_delta
500
+ // Soft boundary for reading inside the buffer except its boundaries:
501
+ // --> i < ids_size + 1 - int(16.0 / stride) = ids_size - b4_delta_soft
502
+ // RGBA: b4_delta = b4_delta_soft = 3
503
+ // RGB : b4_delta = 5
504
+ // RGB : b4_delta_soft = 4
505
+ const auto b4_delta = (stride == 4) ? 3 : ((is_last_line) ? 5 : 4);
506
+
507
+ // In block 2 (2 means we process 2 weights values together), we read input data
508
+ // with _mm_loadl_epi64, i.e. 8 bytes, per one line:
509
+ // lineIn0 + stride * (i + ids_min) + 8 <= lineIn0 + stride * (ids_size + ids_min)
510
+ // --> i <= ids_size - 8.0 / stride
511
+ // Strict boundary:
512
+ // --> i < ids_size + 1 - int(ceil(8.0 / stride)) = ids_size - b2_delta
513
+ // Soft boundary for reading inside the buffer except its boundaries:
514
+ // --> i < ids_size + 1 - int(8.0 / stride) = ids_size - b2_delta_soft
515
+ // RGBA: b2_delta = b2_delta_soft = 1
516
+ // RGB : b2_delta = 2
517
+ // RGB : b2_delta_soft = 1
518
+ const auto b2_delta = (stride == 4) ? 1 : ((is_last_line) ? 2 : 1);
519
+
520
+ const auto max_out_x_strided = out_xsize * stride;
521
+ const auto max_in_x_strided = in_xsize * stride;
522
+
523
+ const auto zero = _mm256_setzero_si256();
524
+ const auto initial = _mm256_set1_epi32(1 << (coefs_precision - 1));
525
+
526
+ for (const auto out_x : c10::irange(out_xsize)) {
527
+ const auto ids_min = idx_ptr_xmin[out_x];
528
+ const auto ids_size = idx_ptr_size[out_x];
529
+ const auto * k = &kk[out_x * kmax];
530
+ int64_t i = 0;
531
+
532
+ auto sss0 = initial;
533
+ auto sss1 = initial;
534
+
535
+ const auto * lineIn0_min = lineIn0 + ids_min;
536
+ const auto * lineIn1_min = lineIn1 + ids_min;
537
+ const auto * lineIn2_min = lineIn2 + ids_min;
538
+ const auto * lineIn3_min = lineIn3 + ids_min;
539
+
540
+ // block 4
541
+ for (; i < ids_size - b4_delta; i += 4) {
542
+ // Load 4 values from weight vector
543
+ // mmk0 = [wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ...]
544
+ // mmk1 = [wl_2 wh_2 wl_3 wh_3 wl_2 wh_2 wl_3 wh_3 ...]
545
+ const auto mmk0 = _mm256_set1_epi32(*(int32_t*)&k[i]);
546
+ const auto mmk1 = _mm256_set1_epi32(*(int32_t*)&k[i + 2]);
547
+
548
+ // RGBA: Load 8 pixels (4 per line) from input lines 0 and 1:
549
+ // source = [
550
+ // r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3
551
+ // R0 G0 B0 A0 R1 G1 B1 A1 R2 G2 B2 A2 R3 G3 B3 A3
552
+ // ]
553
+ // RGB: Load 10 pixels (5 per line)
554
+ // source = [
555
+ // r0 g0 b0 r1 g1 b1 r2 g2 b2 r3 g3 b3 r4 g4 b4 r5
556
+ // R0 G0 B0 R1 G1 B1 R2 G2 B2 R3 G3 B3 R4 G4 B4 R5
557
+ // ]
558
+ auto source = _mm256_inserti128_si256(_mm256_castsi128_si256(
559
+ _mm_loadu_si128((__m128i *) (lineIn0_min + stride * i))),
560
+ _mm_loadu_si128((__m128i *) (lineIn1_min + stride * i)), 1);
561
+
562
+ // Apply mask_low:
563
+ // RGBA:
564
+ // [r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 a0 0 a1 0 | R0 0 R1 0 G0 0 G1 0 B0 0 B1 0 A0 0 A1 0]
565
+ // RGB:
566
+ // [r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 0 0 0 0 | R0 0 R1 0 G0 0 G1 0 B0 0 B1 0 0 0 0 0]
567
+ auto pix1 = _mm256_shuffle_epi8(source, mask_low);
568
+ // Compute output value as C += w0 * C0 + w1 * C1 for each channel in 32-bit precision
569
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix1, mmk0));
570
+
571
+ // Apply mask_high:
572
+ // RGBA:
573
+ // [r2 0 r3 0 g2 0 g3 0 b2 0 b3 0 a2 0 a3 0 | R2 0 R3 0 G2 0 G3 0 B2 0 B3 0 A2 0 A3 0]
574
+ // RGB:
575
+ // [r2 0 r3 0 g2 0 g3 0 b2 0 b3 0 0 0 0 0 | R2 0 R3 0 G2 0 G3 0 B2 0 B3 0 0 0 0 0]
576
+ auto pix2 = _mm256_shuffle_epi8(source, mask_high);
577
+ // Compute output value as C += w2 * C2 + w3 * C3 for each channel in 32-bit precision
578
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix2, mmk1));
579
+
580
+ // Same as above for lines 2 and 3:
581
+ auto source2 = _mm256_inserti128_si256(_mm256_castsi128_si256(
582
+ _mm_loadu_si128((__m128i *) (lineIn2_min + stride * i))),
583
+ _mm_loadu_si128((__m128i *) (lineIn3_min + stride * i)), 1);
584
+ auto pix3 = _mm256_shuffle_epi8(source2, mask_low);
585
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix3, mmk0));
586
+ auto pix4 = _mm256_shuffle_epi8(source2, mask_high);
587
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix4, mmk1));
588
+ }
589
+
590
+ // block 2
591
+ for (; i < ids_size - b2_delta; i += 2) {
592
+ // Load 2 values from weight vector
593
+ // mmk = [wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ...]
594
+ const auto mmk = _mm256_set1_epi32(*(int32_t*)&k[i]);
595
+
596
+ // Load 4 pixels (2 per line) from input lines 0 and 1:
597
+ // RGBA: source1 = [
598
+ // r0 g0 b0 a0 r1 g1 b1 a1 0 0 0 0 0 0 0 0
599
+ // R0 G0 B0 A0 R1 G1 B1 A1 0 0 0 0 0 0 0 0
600
+ // ]
601
+ // RGB: source1 = [
602
+ // r0 g0 b0 r1 g1 b1 r2 0 0 0 0 0 0 0 0
603
+ // R0 G0 B0 R1 G1 B1 R2 0 0 0 0 0 0 0 0
604
+ // ]
605
+ auto source1 = _mm256_inserti128_si256(_mm256_castsi128_si256(
606
+ _mm_loadl_epi64((__m128i *) (lineIn0_min + stride * i))),
607
+ _mm_loadl_epi64((__m128i *) (lineIn1_min + stride * i)), 1);
608
+ // Apply mask_low:
609
+ // RGBA:
610
+ // [r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 a0 0 a1 0 | R0 0 R1 0 G0 0 G1 0 B0 0 B1 0 A0 0 A1 0]
611
+ // RGB:
612
+ // [r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 0 0 0 0 | R0 0 R1 0 G0 0 G1 0 B0 0 B1 0 0 0 0 0]
613
+ auto pix1 = _mm256_shuffle_epi8(source1, mask_low);
614
+ // Compute output value as C += w0 * C0 + w1 * C1 for each channel in 32-bit precision
615
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix1, mmk));
616
+
617
+ // Same as above for lines 2 and 3:
618
+ auto source2 = _mm256_inserti128_si256(_mm256_castsi128_si256(
619
+ _mm_loadl_epi64((__m128i *) (lineIn2_min + stride * i))),
620
+ _mm_loadl_epi64((__m128i *) (lineIn3_min + stride * i)), 1);
621
+ auto pix2 = _mm256_shuffle_epi8(source2, mask_low);
622
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix2, mmk));
623
+ }
624
+
625
+ // block 1
626
+ const auto i32_aligned = num_channels == 4;
627
+ for (; i < ids_size - 1; i++) {
628
+ // Load 1 value from weight vector
629
+ // mmk = [wl_0 wh_0 0 0 wl_0 wh_0 0 0 ...]
630
+ const auto mmk = _mm256_set1_epi32(k[i]);
631
+
632
+ // Load 2 pixels (one per line) from input lines 0 and 1:
633
+ // RGBA: pix1 = [
634
+ // r0 0 0 0 g0 0 0 0 b0 0 0 0 a0 0 0 0
635
+ // R0 0 0 0 G0 0 0 0 B0 0 0 0 A0 0 0 0
636
+ // ]
637
+ // RGB: pix1 = [
638
+ // r0 0 0 0 g0 0 0 0 b0 0 0 0 r1 0 0 0
639
+ // R0 0 0 0 G0 0 0 0 B0 0 0 0 R1 0 0 0
640
+ // ]
641
+ auto pix1 = _mm256_inserti128_si256(_mm256_castsi128_si256(
642
+ mm_cvtepu8_epi32(lineIn0_min + stride * i, i32_aligned)),
643
+ mm_cvtepu8_epi32(lineIn1_min + stride * i, i32_aligned), 1);
644
+ // Compute output value as C += w0 * C0 for each channel in 32-bit precision
645
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix1, mmk));
646
+
647
+ // Same as above for lines 2 and 3
648
+ auto pix2 = _mm256_inserti128_si256(_mm256_castsi128_si256(
649
+ mm_cvtepu8_epi32(lineIn2_min + stride * i, i32_aligned)),
650
+ mm_cvtepu8_epi32(lineIn3_min + stride * i, i32_aligned), 1);
651
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix2, mmk));
652
+ }
653
+
654
+ if (i == ids_size - 1) {
655
+ // last element
656
+ auto mmk = _mm256_set1_epi32(k[i]);
657
+ // For num_channels == 3 (3 bytes = one pixel) we tolerate reading 4 bytes;
658
+ // lines 0, 1 and 2 won't go out of allocated memory bounds.
659
+ auto pix = _mm256_inserti128_si256(_mm256_castsi128_si256(
660
+ mm_cvtepu8_epi32(lineIn0_min + stride * i, i32_aligned)),
661
+ mm_cvtepu8_epi32(lineIn1_min + stride * i, i32_aligned), 1);
662
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk));
663
+
664
+ auto p0 = mm_cvtepu8_epi32(lineIn2_min + stride * i, i32_aligned);
665
+ __m128i p1;
666
+ if (num_channels == 3 && C10_UNLIKELY(is_last_line && ids_min + stride * i + 4 >= max_in_x_strided)) {
667
+ uint8_t input[4];
668
+ std::memcpy(input, lineIn3_min + stride * i, 3);
669
+ p1 = mm_cvtepu8_epi32(input, true);
670
+ } else {
671
+ p1 = mm_cvtepu8_epi32(lineIn3_min + stride * i, i32_aligned);
672
+ }
673
+ auto pix2 = _mm256_inserti128_si256(_mm256_castsi128_si256(p0), p1, 1);
674
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix2, mmk));
675
+ }
676
+
677
+ // Convert fixed point values back to integers (truncating)
678
+ sss0 = _mm256_srai_epi32(sss0, coefs_precision);
679
+ sss1 = _mm256_srai_epi32(sss1, coefs_precision);
680
+ // Convert packed signed 32-bit integers to packed 16-bit integers using signed saturation
681
+ // (a a a a b b b b c c c c d d d d) -> (a a b b c c d d 0 0 0 0 0 0 0 0)
682
+ sss0 = _mm256_packs_epi32(sss0, zero);
683
+ sss1 = _mm256_packs_epi32(sss1, zero);
684
+ // Convert packed signed 16-bit integers to packed 8-bit integers using unsigned saturation
685
+ // (a a b b c c d d) -> (a b c d 0 0 0 0)
686
+ sss0 = _mm256_packus_epi16(sss0, zero);
687
+ sss1 = _mm256_packus_epi16(sss1, zero);
688
+
689
+ // Write the output into a single uint32
690
+ // (a b c d) -> x_uint32
691
+ auto o0 = _mm_cvtsi128_si32(_mm256_castsi256_si128(sss0));
692
+ auto o1 = _mm_cvtsi128_si32(_mm256_extracti128_si256(sss0, 1));
693
+ auto o2 = _mm_cvtsi128_si32(_mm256_castsi256_si128(sss1));
694
+ auto o3 = _mm_cvtsi128_si32(_mm256_extracti128_si256(sss1, 1));
695
+
696
+ const auto out_x_strided = stride * out_x;
697
+
698
+ if (num_channels == 3 && C10_UNLIKELY(out_x_strided + 4 >= max_out_x_strided)) {
699
+ // Memcpy 4-bytes is faster than 3-bytes and this is a boundary case when we want to write
700
+ // 4 bytes (R G B | X) to the output buffer (X1 X2 X3 | R1).
701
+ // The 4th byte in the register (X) has a garbage value and 4th byte in the output buffer (R1) has a correct
702
+ // value which was previously computed by another line. In other words, it means that we cannot overwrite
703
+ // it by simply writing 4 bytes from the register to the output. We'll do the following:
704
+ // v----------|
705
+ // Output = [... X1 X2 X3 | R1 G1 B1 R2 ...]
706
+ // First, we write R1 value to the 4th byte of (R G B | X) -> (R G B | R1)
707
+ // Second, we write 4 bytes from the register to the output: (X1 X2 X3 | R1) -> (R G B | R1)
708
+ // Output = [... R G B | R1 G1 B1 R2 ...]
709
+
710
+ _write_endline_rgb_as_uint32(lineOut0 + out_x_strided, o0);
711
+ _write_endline_rgb_as_uint32(lineOut1 + out_x_strided, o1);
712
+ _write_endline_rgb_as_uint32(lineOut2 + out_x_strided, o2);
713
+
714
+ if (C10_UNLIKELY(is_last_line)) {
715
+ // When we handle the last line, we can not access the next 4 bytes
716
+ // as they are out of memory bounds.
717
+ std::memcpy(lineOut3 + out_x_strided, (uint8_t *) &o3, num_channels);
718
+ } else {
719
+ _write_endline_rgb_as_uint32(lineOut3 + out_x_strided, o3);
720
+ }
721
+ } else if (num_channels == 3) {
722
+ // Memcpy 4-bytes is faster than 3-bytes and here
723
+ // we simply write 4 bytes (... R G B X 0 0 0 0 0 ...) where X is a garbage value
724
+ // that we will overwrite on the next iteration: (... R G B R G B X 0 0 ...)
725
+ std::memcpy(lineOut0 + out_x_strided, (uint8_t *) &o0, 4);
726
+ std::memcpy(lineOut1 + out_x_strided, (uint8_t *) &o1, 4);
727
+ std::memcpy(lineOut2 + out_x_strided, (uint8_t *) &o2, 4);
728
+ std::memcpy(lineOut3 + out_x_strided, (uint8_t *) &o3, 4);
729
+ } else {
730
+ // num_channels = 4 -> lineOutX + out_x_strided should be uint32 aligned
731
+ *(uint32_t *)(lineOut0 + out_x_strided) = o0;
732
+ *(uint32_t *)(lineOut1 + out_x_strided) = o1;
733
+ *(uint32_t *)(lineOut2 + out_x_strided) = o2;
734
+ *(uint32_t *)(lineOut3 + out_x_strided) = o3;
735
+ }
736
+ }
737
+ }
738
+
739
+ void ImagingResampleHorizontalConvolution8u(
740
+ uint8_t* C10_RESTRICT lineOut,
741
+ int64_t out_xsize,
742
+ const uint8_t* C10_RESTRICT lineIn,
743
+ int64_t in_xsize,
744
+ const int64_t* idx_ptr_xmin,
745
+ const int64_t* idx_ptr_size,
746
+ const int16_t* kk,
747
+ int kmax,
748
+ unsigned int coefs_precision,
749
+ int64_t num_channels,
750
+ bool is_last_line) {
751
+
752
+ // Interpolation horizontal pass processing only one vertical line.
753
+ // - Input data format is RGBA or RGB with R,G,B,A being uint8. In case of RGBA
754
+ // we can encode 4 values as a single uint32 value.
755
+ // - We split the size of weight vector for a given output index as a sum:
756
+ // ids_size = num_blocks_8 * 8 + num_blocks_4 * 4 + num_blocks_2 * 2 + num_blocks_1
757
+ // - We load and process 8 weights values in a loop ("block 8") then 4 weights and 2 weights values in
758
+ // in another loops ("block 4" and "block 2") and finally we process 1 weight value in the final loop ("block 1").
759
+
760
+ // Define various shuffling masks
761
+ const auto kmask_low = _mm256_set_epi8(
762
+ 11, 10, 9, 8, 11, 10, 9, 8, 11, 10, 9, 8, 11, 10, 9, 8,
763
+ 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0);
764
+ const auto kmask_high = _mm256_set_epi8(
765
+ 15, 14, 13, 12, 15, 14, 13, 12, 15, 14, 13, 12, 15, 14, 13, 12,
766
+ 7, 6, 5, 4, 7, 6, 5, 4, 7, 6, 5, 4, 7, 6, 5, 4);
767
+ const auto kmask_hl = _mm256_set_epi8(
768
+ 7, 6, 5, 4, 7, 6, 5, 4, 7, 6, 5, 4, 7, 6, 5, 4,
769
+ 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0);
770
+
771
+ const auto mask_low_c4 = _mm256_set_epi8(
772
+ -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0,
773
+ -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0);
774
+ const auto mask_high_c4 = _mm256_set_epi8(
775
+ -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8,
776
+ -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8);
777
+ const auto mask_low_c3 = _mm256_set_epi8(
778
+ -1, -1, -1, -1, -1, 5, -1, 2, -1, 4, -1, 1, -1, 3, -1, 0,
779
+ -1, -1, -1, -1, -1, 5, -1, 2, -1, 4, -1, 1, -1, 3, -1, 0);
780
+ const auto mask_high_c3 = _mm256_set_epi8(
781
+ -1, -1, -1, -1, -1, 11, -1, 8, -1, 10, -1, 7, -1, 9, -1, 6,
782
+ -1, -1, -1, -1, -1, 11, -1, 8, -1, 10, -1, 7, -1, 9, -1, 6);
783
+ const auto mask_hl_c3 = _mm256_set_epi8(
784
+ -1, -1, -1, -1, -1, 11, -1, 8, -1, 10, -1, 7, -1, 9, -1, 6,
785
+ -1, -1, -1, -1, -1, 5, -1, 2, -1, 4, -1, 1, -1, 3, -1, 0);
786
+ const auto mask_hl_c4 = _mm256_set_epi8(
787
+ -1, 15, -1, 11, -1, 14, -1, 10, -1, 13, -1, 9, -1, 12, -1, 8,
788
+ -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0);
789
+
790
+ const auto mask_low128_c3 = _mm_set_epi8(
791
+ -1, -1, -1, -1, -1, 5, -1, 2, -1, 4, -1, 1, -1, 3, -1, 0);
792
+ const auto mask_low128_c4 = _mm_set_epi8(
793
+ -1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0);
794
+
795
+ const auto mask_low = (num_channels == 3) ? mask_low_c3 : mask_low_c4;
796
+ const auto mask_high = (num_channels == 3) ? mask_high_c3 : mask_high_c4;
797
+ const auto mask_hl = (num_channels == 3) ? mask_hl_c3 : mask_hl_c4;
798
+ const auto mask_low128 = (num_channels == 3) ? mask_low128_c3 : mask_low128_c4;
799
+
800
+ // out_xsize = output width, out_x = output x index
801
+ // ids_min is the input offset index corresponding to out_x
802
+ // ids_size is the interpolation size for out_x
803
+
804
+ const auto stride = num_channels * sizeof(uint8_t);
805
+ const auto zero = _mm_setzero_si128();
806
+
807
+ TORCH_INTERNAL_ASSERT(stride == 3 || stride == 4);
808
+
809
+ // Let's precompute ids_size limits for block 8, block 4 and block 2
810
+ //
811
+ // In block 8 (8 means we process 8 weight values together), we read at
812
+ // most 32 bytes input data (16 + 16 bytes for RGBA and 12 + 16 bytes for RGB)
813
+ // lineIn + stride * (i + ids_min) + 32 <= lineIn + stride * (ids_size + ids_min)
814
+ // --> i <= ids_size - 32.0 / stride
815
+ // Strict boundary:
816
+ // --> i < ids_size + 1 - int(ceil(32.0 / stride)) = ids_size - b8_delta
817
+ // Soft boundary for reading inside the buffer except its boundaries:
818
+ // --> i < ids_size + 1 - int(32.0 / stride) = ids_size - b8_delta_soft
819
+ // RGBA: b8_delta = b8_delta_soft = 7
820
+ // RGB : b8_delta = 10
821
+ // RGB : b8_delta_soft = 9
822
+ const auto b8_delta = (stride == 4) ? 7 : ((is_last_line) ? 10 : 9);
823
+
824
+ // In block 4 (4 means we process 4 weight values together), we read
825
+ // 16 bytes of input data.
826
+ // lineIn + stride * (i + ids_min) + 16 <= lineIn0 + stride * (ids_size + ids_min)
827
+ // --> i <= ids_size - 16.0 / stride
828
+ // Strict boundary:
829
+ // --> i < ids_size + 1 - int(ceil(16.0 / stride)) = ids_size - b4_delta
830
+ // Soft boundary for reading inside the buffer except its boundaries:
831
+ // --> i < ids_size + 1 - int(16.0 / stride) = ids_size - b4_delta_soft
832
+ // RGBA: b4_delta = b4_delta_soft = 3
833
+ // RGB : b4_delta = 5
834
+ // RGB : b4_delta_soft = 4
835
+ const auto b4_delta = (stride == 4) ? 3 : ((is_last_line) ? 5 : 4);
836
+
837
+ // In block 2 (2 means we process 2 weight values together), we read
838
+ // 8 bytes of input data.
839
+ // lineIn0 + stride * (i + ids_min) + 8 <= lineIn0 + stride * (ids_size + ids_min)
840
+ // --> i <= ids_size - 8.0 / stride
841
+ // Strict boundary:
842
+ // --> i < ids_size + 1 - int(ceil(8.0 / stride)) = ids_size - b2_delta
843
+ // Soft boundary for reading inside the buffer except its boundaries:
844
+ // --> i < ids_size + 1 - int(8.0 / stride) = ids_size - b2_delta_soft
845
+ // RGBA: b2_delta = b2_delta_soft = 1
846
+ // RGB : b2_delta = 2
847
+ // RGB : b2_delta_soft = 1
848
+ const auto b2_delta = (stride == 4) ? 1 : ((is_last_line) ? 2 : 1);
849
+
850
+ const auto max_out_x_strided = out_xsize * stride;
851
+ const auto max_in_x_strided = in_xsize * stride;
852
+
853
+ for (const auto out_x : c10::irange(out_xsize)) {
854
+ __m128i sss;
855
+ const auto ids_min = idx_ptr_xmin[out_x];
856
+ const auto ids_size = idx_ptr_size[out_x];
857
+ const auto * k = &kk[out_x * kmax];
858
+ int64_t i = 0;
859
+
860
+ const auto * lineIn_min = lineIn + ids_min;
861
+
862
+ if (ids_size < 8) {
863
+ sss = _mm_set1_epi32(1 << (coefs_precision - 1));
864
+ } else {
865
+ // The lower 128-bit lane will later be added to the higher one, so use only half of the rounding term
866
+ auto sss256 = _mm256_set1_epi32(1 << (coefs_precision - 2));
867
+
868
+ // block 8
869
+ for (; i < ids_size - b8_delta; i += 8) {
870
+ // Load 8 values from weight vector
871
+ auto tmp = _mm_loadu_si128((__m128i*)&k[i]);
872
+ // ksource = [
873
+ // wl_0 wh_0 wl_1 wh_1 wl_2 wh_2 wl_3 wh_3 wl_4 wh_4 wl_5 wh_5 wl_6 wh_6 wl_7 wh_7
874
+ // wl_0 wh_0 wl_1 wh_1 wl_2 wh_2 wl_3 wh_3 wl_4 wh_4 wl_5 wh_5 wl_6 wh_6 wl_7 wh_7
875
+ // ]
876
+ auto ksource = _mm256_insertf128_si256(_mm256_castsi128_si256(tmp), tmp, 1);
877
+
878
+ // RGBA: Load 8 pixels from input:
879
+ // source = [
880
+ // r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3
881
+ // r4 g4 b4 a4 r5 g5 b5 a5 r6 g6 b6 a6 r7 g7 b7 a7
882
+ // ]
883
+ // RGB: Load 10 pixels from input (however we can process only 8 pixels):
884
+ // source = [
885
+ // r0 g0 b0 r1 g1 b1 r2 g2 b2 r3 g3 b3 r4 g4 b4 r5
886
+ // r4 g4 b4 r5 g5 b5 r6 g6 b6 r7 g7 b7 r8 g8 b8 r9
887
+ // ]
888
+ auto source = _mm256_inserti128_si256(_mm256_castsi128_si256(
889
+ _mm_loadu_si128((__m128i *) (lineIn_min + stride * i))),
890
+ _mm_loadu_si128((__m128i *) (lineIn_min + stride * (i + 4))), 1);
891
+
892
+ // Extract lower part of each lane, cast to epi16 and reorder RGBARGBA -> RRGGBBAA
893
+ // RGBA: pix1 = [
894
+ // r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 a0 0 a1 0
895
+ // r4 0 r5 0 g4 0 g5 0 b4 0 b5 0 a4 0 a5 0
896
+ // ]
897
+ // RGB: pix1 = [
898
+ // r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 0 0 0 0
899
+ // r4 0 r5 0 g4 0 g5 0 b4 0 b5 0 0 0 0 0
900
+ // ]
901
+ auto pix1 = _mm256_shuffle_epi8(source, mask_low);
902
+ // mmk1 = [
903
+ // wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ... ...
904
+ // wl_4 wh_4 wl_5 wh_5 wl_4 wh_4 wl_5 wh_5 ... ...
905
+ // ]
906
+ auto mmk1 = _mm256_shuffle_epi8(ksource, kmask_low);
907
+ // Compute output value as
908
+ // C += w0 * C0 + w1 * C1
909
+ // C += w4 * C4 + w5 * C5 for each channel in 32-bit precision
910
+ sss256 = _mm256_add_epi32(sss256, _mm256_madd_epi16(pix1, mmk1));
911
+
912
+ // Same as above for higher part of each lane
913
+ auto pix2 = _mm256_shuffle_epi8(source, mask_high);
914
+ auto mmk2 = _mm256_shuffle_epi8(ksource, kmask_high);
915
+ // Compute output value as
916
+ // C += w2 * C2 + w3 * C3
917
+ // C += w6 * C6 + w7 * C7 for each channel in 32-bit precision
918
+ sss256 = _mm256_add_epi32(sss256, _mm256_madd_epi16(pix2, mmk2));
919
+ }
920
+
921
+ // block 4
922
+ for (; i < ids_size - b4_delta; i += 4) {
923
+ // Load 4 values from weight vector
924
+ auto tmp = _mm_loadl_epi64((__m128i *) &k[i]);
925
+ // ksource = [
926
+ // wl_0 wh_0 wl_1 wh_1 wl_2 wh_2 wl_3 wh_3 0 0 0 0 0 0 0 0
927
+ // wl_0 wh_0 wl_1 wh_1 wl_2 wh_2 wl_3 wh_3 0 0 0 0 0 0 0 0
928
+ // ]
929
+ auto ksource = _mm256_insertf128_si256(_mm256_castsi128_si256(tmp), tmp, 1);
930
+
931
+ // Load pixels from input line
932
+ tmp = _mm_loadu_si128((__m128i *) (lineIn_min + stride * i));
933
+ // RGBA: source = [
934
+ // r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3
935
+ // r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3
936
+ // ]
937
+ // RGB: source = [
938
+ // r0 g0 b0 r1 g1 b1 r2 g2 b2 r3 g3 b3 r4 g4 b4 r5
939
+ // r0 g0 b0 r1 g1 b1 r2 g2 b2 r3 g3 b3 r4 g4 b4 r5
940
+ // ]
941
+ auto source = _mm256_insertf128_si256(_mm256_castsi128_si256(tmp), tmp, 1);
942
+
943
+ // Cast source to epi16 and reorder RGBARGBA -> RRGGBBAA
944
+ // RGBA: pix = [
945
+ // r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 a0 0 a1 0
946
+ // r2 0 r3 0 g2 0 g3 0 b2 0 b3 0 a2 0 a3 0
947
+ // ]
948
+ // RGB: pix = [
949
+ // r0 0 r1 0 g0 0 g1 0 b0 0 b1 0 0 0 0 0
950
+ // r2 0 r3 0 g2 0 g3 0 b2 0 b3 0 0 0 0 0
951
+ // ]
952
+ auto pix = _mm256_shuffle_epi8(source, mask_hl);
953
+ // mmk = [
954
+ // wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ... ...
955
+ // wl_2 wh_2 wl_3 wh_3 wl_2 wh_2 wl_3 wh_3 ... ...
956
+ // ]
957
+ auto mmk = _mm256_shuffle_epi8(ksource, kmask_hl);
958
+ // Compute output value as
959
+ // C += w0 * C0 + w1 * C1
960
+ // C += w2 * C2 + w3 * C3 for each channel in 32-bit precision
961
+ sss256 = _mm256_add_epi32(sss256, _mm256_madd_epi16(pix, mmk));
962
+ }
963
+
964
+ // Sum results between the lanes
965
+ sss = _mm_add_epi32(
966
+ _mm256_extracti128_si256(sss256, 0),
967
+ _mm256_extracti128_si256(sss256, 1));
968
+ }
969
+
970
+ // block 2
971
+ for (; i < ids_size - b2_delta; i += 2) {
972
+ // Load 2 values from weight vector
973
+ // mmk = [wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ...]
974
+ auto mmk = _mm_set1_epi32(*(int32_t*)&k[i]);
975
+ // Load pixels from input line
976
+ // RGBA: source = [
977
+ // r0 g0 b0 a0 r1 g1 b1 a1 0 0 0 0 0 0 0 0
978
+ // ]
979
+ // RGB: source = [
980
+ // r0 g0 b0 r1 g1 b1 r2 g2 0 0 0 0 0 0 0 0
981
+ // ]
982
+ auto source = _mm_loadl_epi64((__m128i *) (lineIn_min + stride * i));
983
+ // Cast source to epi16 and reorder RGBARGBA -> RRGGBBAA
984
+ auto pix = _mm_shuffle_epi8(source, mask_low128);
985
+ // Compute output value as C += w0 * C0 + w1 * C1 for each channel in 32-bit precision
986
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
987
+ }
988
+
989
+ // block 1
990
+ const auto i32_aligned = num_channels == 4;
991
+ for (; i < ids_size - 1; i++) {
992
+ // Load 1 value from weight vector
993
+ // mmk = [wl_0 wh_0 0 0 wl_0 wh_0 0 0 ...]
994
+ auto mmk = _mm_set1_epi32(k[i]);
995
+ // Load one pixel from input line
996
+ // RGBA: pix = [
997
+ // r0 0 0 0 g0 0 0 0 b0 0 0 0 a0 0 0 0
998
+ // ]
999
+ // RGB: pix = [
1000
+ // r0 0 0 0 g0 0 0 0 b0 0 0 0 r1 0 0 0
1001
+ // ]
1002
+ auto pix = mm_cvtepu8_epi32(lineIn_min + stride * i, i32_aligned);
1003
+ // Compute output value as C += w0 * C0 for each channel in 32-bit precision
1004
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
1005
+ }
1006
+
1007
+ if (i == ids_size - 1) {
1008
+ // last element
1009
+ auto mmk = _mm_set1_epi32(k[i]);
1010
+ __m128i pix;
1011
+ auto p = lineIn_min + stride * i;
1012
+ if (num_channels == 3 && C10_UNLIKELY(is_last_line && ids_min + stride * i + 4 >= max_in_x_strided)) {
1013
+ uint8_t input[4];
1014
+ std::memcpy(input, p, 3);
1015
+ pix = mm_cvtepu8_epi32(input, true);
1016
+ } else {
1017
+ pix = mm_cvtepu8_epi32(p, i32_aligned);
1018
+ }
1019
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
1020
+ }
1021
+
1022
+ // Convert fixed point values back to integers (truncating)
1023
+ sss = _mm_srai_epi32(sss, coefs_precision);
1024
+ // Convert packed signed 32-bit integers to packed 16-bit integers using signed saturation
1025
+ // (a a a a b b b b c c c c d d d d) -> (a a b b c c d d 0 0 0 0 0 0 0 0)
1026
+ sss = _mm_packs_epi32(sss, zero);
1027
+ // Convert packed signed 16-bit integers to packed 8-bit integers using unsigned saturation
1028
+ // (a a b b c c d d) -> (a b c d 0 0 0 0)
1029
+ sss = _mm_packus_epi16(sss, zero);
1030
+ // Write the output into single uint32
1031
+ // (a b c d) -> x_uint32
1032
+ auto o = _mm_cvtsi128_si32(sss);
1033
+ const auto out_x_strided = stride * out_x;
1034
+ if (num_channels == 3 && C10_UNLIKELY(out_x_strided + 4 >= max_out_x_strided)) {
1035
+ if (C10_UNLIKELY(is_last_line)) {
1036
+ // When we handle the last line, we can not access the next 4 bytes
1037
+ // as they are out of memory bounds.
1038
+ std::memcpy(lineOut + out_x_strided, (uint8_t *) &o, 3);
1039
+ } else {
1040
+ // A 4-byte memcpy is faster than a 3-byte one, and this is a boundary case where we want to write
1041
+ // 4 bytes (R G B | X) to the output buffer (X1 X2 X3 | R1).
1042
+ // The 4th byte in the register (X) holds a garbage value, while the 4th byte in the output buffer (R1) holds a correct
1043
+ // value that was previously computed for another line. This means we cannot overwrite it
1044
+ // by simply writing 4 bytes from the register to the output. We'll do the following:
1045
+ // v----------|
1046
+ // Output = [... X1 X2 X3 | R1 G1 B1 R2 ...]
1047
+ // First, we write R1 value to the 4th byte of (R G B | X) -> (R G B | R1)
1048
+ // Second, we write 4 bytes from the register to the output: (X1 X2 X3 | R1) -> (R G B | R1)
1049
+ // Output = [... R G B | R1 G1 B1 R2 ...]
1050
+ _write_endline_rgb_as_uint32(lineOut + out_x_strided, o);
1051
+ }
1052
+ } else if (num_channels == 3) {
1053
+ // A 4-byte memcpy is faster than a 3-byte one, so here
1054
+ // we simply write 4 bytes (... R G B X 0 0 0 0 0 ...) where X is a garbage value
1055
+ // that we will overwrite on the next iteration: (... R G B R G B X 0 0 ...)
1056
+ std::memcpy(lineOut + out_x_strided, (uint8_t *) &o, 4);
1057
+ } else {
1058
+ // num_channels = 4 -> lineOut + out_x_strided should be uint32 aligned
1059
+ *(uint32_t *)(lineOut + out_x_strided) = o;
1060
+ }
1061
+ }
1062
+ }
1063
+
1064
+ void ImagingResampleVerticalConvolution8u(
1065
+ uint8_t* C10_RESTRICT lineOut,
1066
+ const uint8_t* C10_RESTRICT lineIn,
1067
+ int64_t xsize,
1068
+ int64_t ids_min,
1069
+ int64_t ids_size,
1070
+ const int16_t* k,
1071
+ unsigned int coefs_precision,
1072
+ int64_t num_channels) {
1073
+
1074
+ // Interpolation vertical pass processing one line.
1075
+ // - We process x-axis data with blocks of 8, 2 and 1
1076
+ // - We split the size of weight vector for a given output index as a sum: K = n * 2 + m.
1077
+
1078
+ // xsize = output width, also equals to input width
1079
+ // ids_size = interpolation size
1080
+ // ids_min = input y start index
1081
+ const auto stride = num_channels * sizeof(uint8_t);
1082
+
1083
+ TORCH_INTERNAL_ASSERT(stride == 3 || stride == 4);
1084
+
1085
+ const int64_t data_size = xsize * stride;
1086
+ const int64_t data_stride = stride;
1087
+ constexpr auto vec_size = 256 / 8;
1088
+
1089
+ const auto initial = _mm_set1_epi32(1 << (coefs_precision - 1));
1090
+ const auto initial_256 = _mm256_set1_epi32(1 << (coefs_precision - 1));
1091
+ const auto zero = _mm_setzero_si128();
1092
+ const auto zero_256 = _mm256_setzero_si256();
1093
+
1094
+ int64_t j = 0;
1095
+ // block 8
1096
+ const auto b8_usable_vec_stride = (vec_size / data_stride) * data_stride;
1097
+ for (; j < data_size - vec_size; j += b8_usable_vec_stride) {
1098
+ auto sss0 = initial_256;
1099
+ auto sss1 = initial_256;
1100
+ auto sss2 = initial_256;
1101
+ auto sss3 = initial_256;
1102
+ int64_t i = 0;
1103
+ const auto * lineIn_min = lineIn + j + ids_min;
1104
+
1105
+ for (; i < ids_size - 1; i += 2) {
1106
+ // Load 2 values from weight vector
1107
+ auto mmk = _mm256_set1_epi32(*(int32_t*)&k[i]);
1108
+
1109
+ // RGBA: Load 8 pixels per line
1110
+ // source1 = [
1111
+ // r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3
1112
+ // r4 g4 b4 a4 r5 g5 b5 a5 r6 g6 b6 a6 r7 g7 b7 a7
1113
+ // ]
1114
+ // RGB: Load 10 pixels per line (however we can process only 8 pixels):
1115
+ // source1 = [
1116
+ // r0 g0 b0 r1 g1 b1 r2 g2 b2 r3 g3 b3 r4 g4 b4 r5
1117
+ // r4 g4 b4 r5 g5 b5 r6 g6 b6 r7 g7 b7 r8 g8 b8 r9
1118
+ // ]
1119
+ auto source1 =
1120
+ _mm256_loadu_si256((__m256i*)(lineIn_min + data_size * i));
1121
+ auto source2 =
1122
+ _mm256_loadu_si256((__m256i*)(lineIn_min + data_size * (i + 1)));
1123
+
1124
+ // Interleave source1 and source2 from the low half of each 128-bit lane
1125
+ // and cast the result to epi16
1126
+ // RGBA: pix1 = [
1127
+ // r0 0 R0 0 g0 0 G0 0 b0 0 B0 0 a0 0 A0 0
1128
+ // r1 0 R1 0 g1 0 G1 0 b1 0 B1 0 a1 0 A1 0
1129
+ // ]
1130
+ // RGB: pix1 = [
1131
+ // r0 0 R0 0 g0 0 G0 0 b0 0 B0 0 0 0 0 0
1132
+ // r1 0 R1 0 g1 0 G1 0 b1 0 B1 0 0 0 0 0
1133
+ // ]
1134
+ auto source_lo = _mm256_unpacklo_epi8(source1, source2);
1135
+ auto pix1 = _mm256_unpacklo_epi8(source_lo, zero_256);
1136
+ // Compute output value as
1137
+ // C += w0 * c0 + w1 * C0
1138
+ // C += w0 * c1 + w1 * C1 for each channel in 32-bit precision
1139
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix1, mmk));
1140
+
1141
+ // RGBA: pix2 = [
1142
+ // r2 0 R2 0 g2 0 G2 0 b2 0 B2 0 a2 0 A2 0
1143
+ // r3 0 R3 0 g3 0 G3 0 b3 0 B3 0 a3 0 A3 0
1144
+ // ]
1145
+ // RGB: pix2 = [
1146
+ // r2 0 R2 0 g2 0 G2 0 b2 0 B2 0 0 0 0 0
1147
+ // r3 0 R3 0 g3 0 G3 0 b3 0 B3 0 0 0 0 0
1148
+ // ]
1149
+ auto pix2 = _mm256_unpackhi_epi8(source_lo, zero_256);
1150
+ // Compute output value as
1151
+ // C += w0 * c2 + w1 * C2
1152
+ // C += w0 * c3 + w1 * C3 for each channel in 32-bit precision
1153
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix2, mmk));
1154
+
1155
+ // Same as above for the high half of each 128-bit lane
1156
+ auto source_hi = _mm256_unpackhi_epi8(source1, source2);
1157
+ auto pix3 = _mm256_unpacklo_epi8(source_hi, zero_256);
1158
+ sss2 = _mm256_add_epi32(sss2, _mm256_madd_epi16(pix3, mmk));
1159
+ auto pix4 = _mm256_unpackhi_epi8(source_hi, zero_256);
1160
+ sss3 = _mm256_add_epi32(sss3, _mm256_madd_epi16(pix4, mmk));
1161
+ }
1162
+ // Same processing as above but with a single weight value
1163
+ for (; i < ids_size; i += 1) {
1164
+ auto mmk = _mm256_set1_epi32(k[i]);
1165
+
1166
+ auto source1 = _mm256_loadu_si256((__m256i*)(lineIn_min + i * data_size));
1167
+
1168
+ auto source_lo = _mm256_unpacklo_epi8(source1, zero_256);
1169
+ auto pix1 = _mm256_unpacklo_epi8(source_lo, zero_256);
1170
+ sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix1, mmk));
1171
+ auto pix2 = _mm256_unpackhi_epi8(source_lo, zero_256);
1172
+ sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix2, mmk));
1173
+
1174
+ auto source_hi = _mm256_unpackhi_epi8(source1, zero_256);
1175
+ auto pix3 = _mm256_unpacklo_epi8(source_hi, _mm256_setzero_si256());
1176
+ sss2 = _mm256_add_epi32(sss2, _mm256_madd_epi16(pix3, mmk));
1177
+ auto pix4 = _mm256_unpackhi_epi8(source_hi, _mm256_setzero_si256());
1178
+ sss3 = _mm256_add_epi32(sss3, _mm256_madd_epi16(pix4, mmk));
1179
+ }
1180
+ // Convert fixed point values back to integers (truncating)
1181
+ sss0 = _mm256_srai_epi32(sss0, coefs_precision);
1182
+ sss1 = _mm256_srai_epi32(sss1, coefs_precision);
1183
+ sss2 = _mm256_srai_epi32(sss2, coefs_precision);
1184
+ sss3 = _mm256_srai_epi32(sss3, coefs_precision);
1185
+ // Convert packed signed 32-bit integers to packed 16-bit integers using signed saturation
1186
+ // (a a a a b b b b c c c c d d d d) -> (a a b b c c d d)
1187
+ sss0 = _mm256_packs_epi32(sss0, sss1);
1188
+ sss2 = _mm256_packs_epi32(sss2, sss3);
1189
+ // Convert packed signed 16-bit integers to packed 8-bit integers using unsigned saturation
1190
+ // (a a b b c c d d) -> (a b c d)
1191
+ sss0 = _mm256_packus_epi16(sss0, sss2);
1192
+
1193
+ // Stores 32 bytes
1194
+ _mm256_storeu_si256((__m256i*)(lineOut + j), sss0);
1195
+ }
1196
+
1197
+ // TODO: Do we also need block 4 ???
1198
+ // block 2
1199
+ const auto b2_usable_vec_stride = (8 / data_stride) * data_stride;
1200
+ for (; j < data_size - vec_size / 4; j += b2_usable_vec_stride) {
1201
+ auto sss0 = initial;
1202
+ auto sss1 = initial;
1203
+ int64_t i = 0;
1204
+ const auto * lineIn_min = lineIn + j + ids_min;
1205
+
1206
+ for (; i < ids_size - 1; i += 2) {
1207
+ // Load 2 values from weight vector
1208
+ // mmk = [wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ... ]
1209
+ auto mmk = _mm_set1_epi32(*(int32_t*)&k[i]);
1210
+
1211
+ // Load 2 pixels per line
1212
+ // RGBA: source1 = [
1213
+ // r0 g0 b0 a0 r1 g1 b1 a1 0 0 0 0 0 0 0 0
1214
+ // ]
1215
+ // RGB: source1 = [
1216
+ // r0 g0 b0 r1 g1 b1 r2 g2 0 0 0 0 0 0 0 0
1217
+ // ]
1218
+ auto source1 = _mm_loadl_epi64((__m128i *) (lineIn_min + i * data_size));
1219
+ auto source2 = _mm_loadl_epi64((__m128i *) (lineIn_min + (i + 1) * data_size));
1220
+ // Interleave source1 and source2 and cast the result to epi16
1221
+ // RGBA: pix = [
1222
+ // r0 0 R0 0 g0 0 G0 0 b0 0 B0 0 a0 0 A0 0
1223
+ // ]
1224
+ // RGB: pix = [
1225
+ // r0 0 R0 0 g0 0 G0 0 b0 0 B0 0 0 0 0 0
1226
+ // ]
1227
+ auto source = _mm_unpacklo_epi8(source1, source2);
1228
+ auto pix = _mm_unpacklo_epi8(source, zero);
1229
+ // Compute output value as C += w0 * c0 + w1 * C0 for each channel in 32-bit precision
1230
+ sss0 = _mm_add_epi32(sss0, _mm_madd_epi16(pix, mmk));
1231
+ // RGBA: pix = [
1232
+ // r1 0 R1 0 g1 0 G1 0 b1 0 B1 0 a1 0 A1 0
1233
+ // ]
1234
+ // RGB: pix = [
1235
+ // r1 0 R1 0 g1 0 G1 0 b1 0 B1 0 0 0 0 0
1236
+ // ]
1237
+ pix = _mm_unpackhi_epi8(source, zero);
1238
+ // Compute output value as C += w0 * c1 + w1 * C1 for each channel in 32-bit precision
1239
+ sss1 = _mm_add_epi32(sss1, _mm_madd_epi16(pix, mmk));
1240
+ }
1241
+ // Same processing as above but with a single weight value
1242
+ for (; i < ids_size; i += 1) {
1243
+ auto mmk = _mm_set1_epi32(k[i]);
1244
+
1245
+ auto source1 = _mm_loadl_epi64((__m128i*) (lineIn_min + i * data_size));
1246
+
1247
+ auto source = _mm_unpacklo_epi8(source1, zero);
1248
+ auto pix1 = _mm_unpacklo_epi8(source, zero);
1249
+ sss0 = _mm_add_epi32(sss0, _mm_madd_epi16(pix1, mmk));
1250
+ auto pix2 = _mm_unpackhi_epi8(source, zero);
1251
+ sss1 = _mm_add_epi32(sss1, _mm_madd_epi16(pix2, mmk));
1252
+ }
1253
+ // Convert fixed point values back to integers (truncating)
1254
+ sss0 = _mm_srai_epi32(sss0, coefs_precision);
1255
+ sss1 = _mm_srai_epi32(sss1, coefs_precision);
1256
+ // Convert packed signed 32-bit integers to packed 16-bit integers using signed saturation
1257
+ // (a a a a b b b b c c c c d d d d) -> (a a b b c c d d)
1258
+ sss0 = _mm_packs_epi32(sss0, sss1);
1259
+ // Convert packed signed 16-bit integers to packed 8-bit integers using unsigned saturation
1260
+ // (a a b b c c d d) -> (a b c d)
1261
+ sss0 = _mm_packus_epi16(sss0, sss0);
1262
+ // Store 2 pixels to the output
1263
+ _mm_storel_epi64((__m128i*)(lineOut + j), sss0);
1264
+ }
1265
+
1266
+ // block 1
1267
+ const auto b1_usable_vec_stride = (4 / data_stride) * data_stride;
1268
+ const auto i32_aligned = num_channels == 4;
1269
+ for (; j < data_size - 4; j += b1_usable_vec_stride) {
1270
+ auto sss = initial;
1271
+ int64_t i = 0;
1272
+ const auto * lineIn_min = lineIn + j + ids_min;
1273
+
1274
+ for (; i < ids_size - 1; i += 2) {
1275
+ // Load 2 values from weight vector
1276
+ // mmk = [wl_0 wh_0 wl_1 wh_1 wl_0 wh_0 wl_1 wh_1 ... ]
1277
+ auto mmk = _mm_set1_epi32(*(int32_t*)&k[i]);
1278
+
1279
+ // Load one pixel per line
1280
+ // RGBA: source1 = [
1281
+ // r0 g0 b0 a0 0 0 0 0 0 0 0 0 0 0 0 0
1282
+ // ]
1283
+ // RGB: source1 = [
1284
+ // r0 g0 b0 r1 0 0 0 0 0 0 0 0 0 0 0 0
1285
+ // ]
1286
+ auto source1 = mm_cvtsi32_si128(lineIn_min + i * data_size, i32_aligned);
1287
+ auto source2 = mm_cvtsi32_si128(lineIn_min + (i + 1) * data_size, i32_aligned);
1288
+
1289
+ // Interleave source1 and source2 and cast the result to epi16
1290
+ // RGBA: pix = [
1291
+ // r0 0 R0 0 g0 0 G0 0 b0 0 B0 0 a0 0 A0 0
1292
+ // ]
1293
+ // RGB: pix = [
1294
+ // r0 0 R0 0 g0 0 G0 0 b0 0 B0 0 0 0 0 0
1295
+ // ]
1296
+ auto source = _mm_unpacklo_epi8(source1, source2);
1297
+ auto pix = _mm_unpacklo_epi8(source, zero);
1298
+ // Compute output value as C += w0 * c0 + w1 * C0 for each channel in 32-bit precision
1299
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
1300
+ }
1301
+
1302
+ for (; i < ids_size; i++) {
1303
+ auto mmk = _mm_set1_epi32(k[i]);
1304
+ auto pix = mm_cvtepu8_epi32(lineIn_min + i * data_size, i32_aligned);
1305
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
1306
+ }
1307
+ sss = _mm_srai_epi32(sss, coefs_precision);
1308
+ sss = _mm_packs_epi32(sss, zero);
1309
+ sss = _mm_packus_epi16(sss, zero);
1310
+
1311
+ auto o = _mm_cvtsi128_si32(sss);
1312
+
1313
+ // Here we write 4 bytes to the output even if num_channels < 4, e.g. o = {r,g,b,X} for num_channels=3.
1314
+ // It is OK to write the 4th byte (e.g. X) as we will overwrite it with new data on the next step.
1315
+ // We also won't go out of bounds of the lineOut memory allocation.
1316
+ std::memcpy(lineOut + j, (uint8_t *) &o, 4);
1317
+ }
1318
+
1319
+ for (; j < data_size; j += data_stride) {
1320
+ auto sss = initial;
1321
+ int64_t i = 0;
1322
+ const auto * lineIn_min = lineIn + j + ids_min;
1323
+ // For RGBA we could use (ids_size - 1) as a tighter limit, but for RGB that could read outside the memory boundary
1324
+ // for the last remaining line
1325
+ for (; i < ids_size - 2; i += 2) {
1326
+ // Load two coefficients at once
1327
+ auto mmk = _mm_set1_epi32(*(int32_t*)&k[i]);
1328
+
1329
+ // Load 2 lines
1330
+ auto source1 = mm_cvtsi32_si128(lineIn_min + i * data_size, i32_aligned);
1331
+ auto source2 = mm_cvtsi32_si128(lineIn_min + (i + 1) * data_size, i32_aligned);
1332
+
1333
+ auto source = _mm_unpacklo_epi8(source1, source2);
1334
+ auto pix = _mm_unpacklo_epi8(source, zero);
1335
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
1336
+ }
1337
+
1338
+ // Same processing as above but with a single weight value
1339
+ for (; i < ids_size; i++) {
1340
+ auto mmk = _mm_set1_epi32(k[i]);
1341
+
1342
+ const uint8_t * p = lineIn_min + i * data_size;
1343
+ __m128i pix;
1344
+ // There is not much perf gain from using a more detailed condition like
1345
+ // num_channels == 3 && ids_min + j + data_size * i + 4 >= in_max_size
1346
+ // const int64_t in_max_size = data_size * in_ysize;
1347
+ if (num_channels == 3) {
1348
+ uint8_t input[4];
1349
+ std::memcpy(input, p, 3);
1350
+ pix = mm_cvtepu8_epi32(input, true);
1351
+ } else {
1352
+ pix = mm_cvtepu8_epi32(p, true);
1353
+ }
1354
+ sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
1355
+ }
1356
+
1357
+ // Convert fixed point values back to integers (truncating)
1358
+ sss = _mm_srai_epi32(sss, coefs_precision);
1359
+ // Convert packed signed 32-bit integers to packed 16-bit integers using signed saturation
1360
+ // (a a a a b b b b c c c c d d d d) -> (a a b b c c d d)
1361
+ sss = _mm_packs_epi32(sss, zero);
1362
+ // Convert packed signed 16-bit integers to packed 8-bit integers using unsigned saturation
1363
+ // (a a b b c c d d) -> (a b c d)
1364
+ sss = _mm_packus_epi16(sss, zero);
1365
+ // Store one pixel to the output
1366
+ auto o = _mm_cvtsi128_si32(sss);
1367
+ if (num_channels == 3 && C10_UNLIKELY(j + 4 >= data_size)) {
1368
+ std::memcpy(lineOut + j, (uint8_t *) &o, 3);
1369
+ } else {
1370
+ std::memcpy(lineOut + j, (uint8_t *) &o, 4);
1371
+ }
1372
+ }
1373
+ }
1374
+
1375
+ } // anonymous namespace
1376
+ #endif // CPU_CAPABILITY_AVX2
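For orientation, both the horizontal pass and the vertical pass above reduce, per output pixel and channel, to the same fixed-point weighted sum: accumulate int16 weights times uint8 samples in 32-bit precision, add a rounding term of 1 << (coefs_precision - 1), shift right by coefs_precision, and saturate to uint8. A minimal scalar sketch of that per-pixel computation (the function name and the flat-pointer arguments are illustrative assumptions, not part of this header):

#include <algorithm>
#include <cstdint>

// Scalar reference for one output pixel of the horizontal pass (sketch only).
// `in` points at the first contributing input pixel, `k` holds ids_size int16
// weights scaled by 2^coefs_precision, and `out` receives num_channels bytes.
inline void resample_pixel_scalar(
    uint8_t* out, const uint8_t* in, int64_t num_channels,
    const int16_t* k, int64_t ids_size, unsigned int coefs_precision) {
  for (int64_t c = 0; c < num_channels; c++) {
    int32_t acc = 1 << (coefs_precision - 1);  // rounding term, as in the AVX2 kernels
    for (int64_t i = 0; i < ids_size; i++) {
      acc += static_cast<int32_t>(k[i]) * static_cast<int32_t>(in[i * num_channels + c]);
    }
    acc >>= coefs_precision;  // drop the fixed-point scale (matches the srai step)
    out[c] = static_cast<uint8_t>(std::min(std::max(acc, 0), 255));  // packus-style saturation
  }
}

The SIMD paths compute the same quantity per channel, batching several weights and pixels per instruction and handling the 3-byte RGB stride at buffer boundaries separately.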
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/WeightNormKernel.h ADDED
@@ -0,0 +1,20 @@
1
+ #pragma once
2
+ #include <ATen/native/DispatchStub.h>
3
+ #include <cstdint>
4
+
5
+ namespace at {
6
+ class TensorBase;
7
+ }
8
+
9
+ namespace at { namespace native {
10
+
11
+ using weight_norm_fn = void(*)(
12
+ TensorBase&, TensorBase&, const TensorBase&, const TensorBase&, int64_t);
13
+ using weight_norm_backward_fn = void(*)(
14
+ TensorBase&, TensorBase&, const TensorBase&, const TensorBase&,
15
+ const TensorBase&, const TensorBase&, int64_t);
16
+
17
+ DECLARE_DISPATCH(weight_norm_fn, weight_norm_stub);
18
+ DECLARE_DISPATCH(weight_norm_backward_fn, weight_norm_backward_stub);
19
+
20
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/avx_mathfun.h ADDED
@@ -0,0 +1,522 @@
1
+ #pragma once
2
+ /*
3
+ AVX implementation of sin, cos, sincos, exp and log
4
+
5
+ Based on "sse_mathfun.h", by Julien Pommier
6
+ http://gruntthepeon.free.fr/ssemath/
7
+
8
+ Copyright (C) 2012 Giovanni Garberoglio
9
+ Interdisciplinary Laboratory for Computational Science (LISC)
10
+ Fondazione Bruno Kessler and University of Trento
11
+ via Sommarive, 18
12
+ I-38123 Trento (Italy)
13
+
14
+ This software is provided 'as-is', without any express or implied
15
+ warranty. In no event will the authors be held liable for any damages
16
+ arising from the use of this software.
17
+
18
+ Permission is granted to anyone to use this software for any purpose,
19
+ including commercial applications, and to alter it and redistribute it
20
+ freely, subject to the following restrictions:
21
+
22
+ 1. The origin of this software must not be misrepresented; you must not
23
+ claim that you wrote the original software. If you use this software
24
+ in a product, an acknowledgment in the product documentation would be
25
+ appreciated but is not required.
26
+ 2. Altered source versions must be plainly marked as such, and must not be
27
+ misrepresented as being the original software.
28
+ 3. This notice may not be removed or altered from any source distribution.
29
+
30
+ (this is the zlib license)
31
+ */
32
+
33
+ #include <ATen/native/cpu/Intrinsics.h>
34
+
35
+ /* The original source of this file has been modified. */
36
+ #if defined(CPU_CAPABILITY_AVX2)
37
+
38
+ #if defined(__GNUC__)
39
+ # define ALIGN32_BEG __attribute__((aligned(32)))
40
+ #elif defined(_WIN32)
41
+ # define ALIGN32_BEG __declspec(align(32))
42
+ #endif
43
+
44
+ typedef __m256 v8sf; // vector of 8 float (avx2)
45
+ typedef __m256i v8si; // vector of 8 int (avx2)
46
+
47
+ /* declare some AVX constants -- why can't I figure a better way to do that? */
48
+ #define _PS256_CONST(Name, Val) \
49
+ static const ALIGN32_BEG float _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
50
+ #define _PI32_CONST256(Name, Val) \
51
+ static const ALIGN32_BEG int _pi32_256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
52
+ #define _PS256_CONST_TYPE(Name, Type, Val) \
53
+ static const ALIGN32_BEG Type _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
54
+
55
+ _PS256_CONST(1 , 1.0f);
56
+ _PS256_CONST(0p5, 0.5f);
57
+ /* the smallest non denormalized float number */
58
+ _PS256_CONST_TYPE(min_norm_pos, int, 0x00800000);
59
+ _PS256_CONST_TYPE(mant_mask, int, 0x7f800000);
60
+ _PS256_CONST_TYPE(inv_mant_mask, int, ~0x7f800000);
61
+
62
+ _PS256_CONST_TYPE(sign_mask, int, (int)0x80000000);
63
+ _PS256_CONST_TYPE(inv_sign_mask, int, ~0x80000000);
64
+
65
+ _PI32_CONST256(0, 0);
66
+ _PI32_CONST256(1, 1);
67
+ _PI32_CONST256(inv1, ~1);
68
+ _PI32_CONST256(2, 2);
69
+ _PI32_CONST256(4, 4);
70
+ _PI32_CONST256(0x7f, 0x7f);
71
+
72
+ _PS256_CONST(cephes_SQRTHF, 0.707106781186547524);
73
+ _PS256_CONST(cephes_log_p0, 7.0376836292E-2);
74
+ _PS256_CONST(cephes_log_p1, - 1.1514610310E-1);
75
+ _PS256_CONST(cephes_log_p2, 1.1676998740E-1);
76
+ _PS256_CONST(cephes_log_p3, - 1.2420140846E-1);
77
+ _PS256_CONST(cephes_log_p4, + 1.4249322787E-1);
78
+ _PS256_CONST(cephes_log_p5, - 1.6668057665E-1);
79
+ _PS256_CONST(cephes_log_p6, + 2.0000714765E-1);
80
+ _PS256_CONST(cephes_log_p7, - 2.4999993993E-1);
81
+ _PS256_CONST(cephes_log_p8, + 3.3333331174E-1);
82
+ _PS256_CONST(cephes_log_q1, -2.12194440e-4);
83
+ _PS256_CONST(cephes_log_q2, 0.693359375);
84
+
85
+
86
+ /* natural logarithm computed for 8 simultaneous float
87
+ return NaN for x <= 0
88
+ */
89
+ inline v8sf log256_ps(v8sf x) {
90
+ v8si imm0;
91
+ v8sf one = *(v8sf*)_ps256_1;
92
+
93
+ //v8sf invalid_mask = _mm256_cmple_ps(x, _mm256_setzero_ps());
94
+ v8sf invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_LE_OS);
95
+
96
+ x = _mm256_max_ps(x, *(v8sf*)_ps256_min_norm_pos); /* cut off denormalized stuff */
97
+
98
+ // can be done with AVX2
99
+ imm0 = _mm256_srli_epi32(_mm256_castps_si256(x), 23);
100
+
101
+ /* keep only the fractional part */
102
+ x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_mant_mask);
103
+ x = _mm256_or_ps(x, *(v8sf*)_ps256_0p5);
104
+
105
+ // this is again another AVX2 instruction
106
+ imm0 = _mm256_sub_epi32(imm0, *(v8si*)_pi32_256_0x7f);
107
+ v8sf e = _mm256_cvtepi32_ps(imm0);
108
+
109
+ e = _mm256_add_ps(e, one);
110
+
111
+ /* part2:
112
+ if( x < SQRTHF ) {
113
+ e -= 1;
114
+ x = x + x - 1.0;
115
+ } else { x = x - 1.0; }
116
+ */
117
+ //v8sf mask = _mm256_cmplt_ps(x, *(v8sf*)_ps256_cephes_SQRTHF);
118
+ v8sf mask = _mm256_cmp_ps(x, *(v8sf*)_ps256_cephes_SQRTHF, _CMP_LT_OS);
119
+ v8sf tmp = _mm256_and_ps(x, mask);
120
+ x = _mm256_sub_ps(x, one);
121
+ e = _mm256_sub_ps(e, _mm256_and_ps(one, mask));
122
+ x = _mm256_add_ps(x, tmp);
123
+
124
+ v8sf z = _mm256_mul_ps(x,x);
125
+
126
+ v8sf y = *(v8sf*)_ps256_cephes_log_p0;
127
+ y = _mm256_mul_ps(y, x);
128
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p1);
129
+ y = _mm256_mul_ps(y, x);
130
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p2);
131
+ y = _mm256_mul_ps(y, x);
132
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p3);
133
+ y = _mm256_mul_ps(y, x);
134
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p4);
135
+ y = _mm256_mul_ps(y, x);
136
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p5);
137
+ y = _mm256_mul_ps(y, x);
138
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p6);
139
+ y = _mm256_mul_ps(y, x);
140
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p7);
141
+ y = _mm256_mul_ps(y, x);
142
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p8);
143
+ y = _mm256_mul_ps(y, x);
144
+
145
+ y = _mm256_mul_ps(y, z);
146
+
147
+ tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q1);
148
+ y = _mm256_add_ps(y, tmp);
149
+
150
+
151
+ tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
152
+ y = _mm256_sub_ps(y, tmp);
153
+
154
+ tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q2);
155
+ x = _mm256_add_ps(x, y);
156
+ x = _mm256_add_ps(x, tmp);
157
+ x = _mm256_or_ps(x, invalid_mask); // negative arg will be NAN
158
+ return x;
159
+ }
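+ /* Summary of the reduction above (illustrative note, not part of the original source):
+    with x = m * 2^e and the mantissa conditioned to m in [1/sqrt(2), sqrt(2)), the code
+    evaluates log(x) ~= u - u^2/2 + u^3 * P(u) + e * log(2), where u = m - 1, P is the
+    degree-8 polynomial cephes_log_p0..p8, and log(2) is applied as the split constant
+    cephes_log_q2 + cephes_log_q1 for extra precision. */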
160
+
161
+ _PS256_CONST(exp_hi, 88.3762626647949f);
162
+ _PS256_CONST(exp_lo, -88.3762626647949f);
163
+
164
+ _PS256_CONST(cephes_LOG2EF, 1.44269504088896341);
165
+ _PS256_CONST(cephes_exp_C1, 0.693359375);
166
+ _PS256_CONST(cephes_exp_C2, -2.12194440e-4);
167
+
168
+ _PS256_CONST(cephes_exp_p0, 1.9875691500E-4);
169
+ _PS256_CONST(cephes_exp_p1, 1.3981999507E-3);
170
+ _PS256_CONST(cephes_exp_p2, 8.3334519073E-3);
171
+ _PS256_CONST(cephes_exp_p3, 4.1665795894E-2);
172
+ _PS256_CONST(cephes_exp_p4, 1.6666665459E-1);
173
+ _PS256_CONST(cephes_exp_p5, 5.0000001201E-1);
174
+
175
+ inline v8sf exp256_ps(v8sf x) {
176
+ v8sf tmp = _mm256_setzero_ps(), fx;
177
+ v8si imm0;
178
+ v8sf one = *(v8sf*)_ps256_1;
179
+
180
+ x = _mm256_min_ps(x, *(v8sf*)_ps256_exp_hi);
181
+ x = _mm256_max_ps(x, *(v8sf*)_ps256_exp_lo);
182
+
183
+ /* express exp(x) as exp(g + n*log(2)) */
184
+ fx = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_LOG2EF);
185
+ fx = _mm256_add_ps(fx, *(v8sf*)_ps256_0p5);
186
+
187
+ /* how to perform a floorf with SSE: just below */
188
+ //imm0 = _mm256_cvttps_epi32(fx);
189
+ //tmp = _mm256_cvtepi32_ps(imm0);
190
+
191
+ tmp = _mm256_floor_ps(fx);
192
+
193
+ /* if greater, subtract 1 */
194
+ //v8sf mask = _mm256_cmpgt_ps(tmp, fx);
195
+ v8sf mask = _mm256_cmp_ps(tmp, fx, _CMP_GT_OS);
196
+ mask = _mm256_and_ps(mask, one);
197
+ fx = _mm256_sub_ps(tmp, mask);
198
+
199
+ tmp = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C1);
200
+ v8sf z = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C2);
201
+ x = _mm256_sub_ps(x, tmp);
202
+ x = _mm256_sub_ps(x, z);
203
+
204
+ z = _mm256_mul_ps(x,x);
205
+
206
+ v8sf y = *(v8sf*)_ps256_cephes_exp_p0;
207
+ y = _mm256_mul_ps(y, x);
208
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p1);
209
+ y = _mm256_mul_ps(y, x);
210
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p2);
211
+ y = _mm256_mul_ps(y, x);
212
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p3);
213
+ y = _mm256_mul_ps(y, x);
214
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p4);
215
+ y = _mm256_mul_ps(y, x);
216
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p5);
217
+ y = _mm256_mul_ps(y, z);
218
+ y = _mm256_add_ps(y, x);
219
+ y = _mm256_add_ps(y, one);
220
+
221
+ /* build 2^n */
222
+ imm0 = _mm256_cvttps_epi32(fx);
223
+ // another two AVX2 instructions
224
+ imm0 = _mm256_add_epi32(imm0, *(v8si*)_pi32_256_0x7f);
225
+ imm0 = _mm256_slli_epi32(imm0, 23);
226
+ v8sf pow2n = _mm256_castsi256_ps(imm0);
227
+ y = _mm256_mul_ps(y, pow2n);
228
+ return y;
229
+ }
230
+
231
+ _PS256_CONST(minus_cephes_DP1, -0.78515625);
232
+ _PS256_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
233
+ _PS256_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
234
+ _PS256_CONST(sincof_p0, -1.9515295891E-4);
235
+ _PS256_CONST(sincof_p1, 8.3321608736E-3);
236
+ _PS256_CONST(sincof_p2, -1.6666654611E-1);
237
+ _PS256_CONST(coscof_p0, 2.443315711809948E-005);
238
+ _PS256_CONST(coscof_p1, -1.388731625493765E-003);
239
+ _PS256_CONST(coscof_p2, 4.166664568298827E-002);
240
+ _PS256_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI
241
+
242
+
243
+ /* evaluation of 8 sines at once using AVX intrinsics
244
+
245
+ The code is the exact rewriting of the cephes sinf function.
246
+ Precision is excellent as long as x < 8192 (I did not bother to
247
+ take into account the special handling they have for greater values
248
+ -- it does not return garbage for arguments over 8192, though, but
249
+ the extra precision is missing).
250
+
251
+ Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
252
+ surprising but correct result.
253
+
254
+ */
255
+ inline v8sf sin256_ps(v8sf x) { // any x
256
+ v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, sign_bit, y;
257
+ v8si imm0, imm2;
258
+
259
+ sign_bit = x;
260
+ /* take the absolute value */
261
+ x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
262
+ /* extract the sign bit (upper one) */
263
+ sign_bit = _mm256_and_ps(sign_bit, *(v8sf*)_ps256_sign_mask);
264
+
265
+ /* scale by 4/Pi */
266
+ y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
267
+
268
+ /*
269
+ Here we start a series of integer operations, which are in the
270
+ realm of AVX2.
271
+ If we don't have AVX, let's perform them using SSE2 directives
272
+ */
273
+
274
+ /* store the integer part of y in mm0 */
275
+ imm2 = _mm256_cvttps_epi32(y);
276
+ /* j=(j+1) & (~1) (see the cephes sources) */
277
+ // another two AVX2 instruction
278
+ imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
279
+ imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
280
+ y = _mm256_cvtepi32_ps(imm2);
281
+
282
+ /* get the swap sign flag */
283
+ imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
284
+ imm0 = _mm256_slli_epi32(imm0, 29);
285
+ /* get the polynom selection mask
286
+ there is one polynom for 0 <= x <= Pi/4
287
+ and another one for Pi/4<x<=Pi/2
288
+
289
+ Both branches will be computed.
290
+ */
291
+ imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
292
+ imm2 = _mm256_cmpeq_epi32(imm2,*(v8si*)_pi32_256_0);
293
+
294
+ v8sf swap_sign_bit = _mm256_castsi256_ps(imm0);
295
+ v8sf poly_mask = _mm256_castsi256_ps(imm2);
296
+ sign_bit = _mm256_xor_ps(sign_bit, swap_sign_bit);
297
+
298
+ /* The magic pass: "Extended precision modular arithmetic"
299
+ x = ((x - y * DP1) - y * DP2) - y * DP3; */
300
+ xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
301
+ xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
302
+ xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
303
+ xmm1 = _mm256_mul_ps(y, xmm1);
304
+ xmm2 = _mm256_mul_ps(y, xmm2);
305
+ xmm3 = _mm256_mul_ps(y, xmm3);
306
+ x = _mm256_add_ps(x, xmm1);
307
+ x = _mm256_add_ps(x, xmm2);
308
+ x = _mm256_add_ps(x, xmm3);
309
+
310
+ /* Evaluate the first polynom (0 <= x <= Pi/4) */
311
+ y = *(v8sf*)_ps256_coscof_p0;
312
+ v8sf z = _mm256_mul_ps(x,x);
313
+
314
+ y = _mm256_mul_ps(y, z);
315
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
316
+ y = _mm256_mul_ps(y, z);
317
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
318
+ y = _mm256_mul_ps(y, z);
319
+ y = _mm256_mul_ps(y, z);
320
+ v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
321
+ y = _mm256_sub_ps(y, tmp);
322
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
323
+
324
+ /* Evaluate the second polynom (Pi/4 <= x <= 0) */
325
+
326
+ v8sf y2 = *(v8sf*)_ps256_sincof_p0;
327
+ y2 = _mm256_mul_ps(y2, z);
328
+ y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
329
+ y2 = _mm256_mul_ps(y2, z);
330
+ y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
331
+ y2 = _mm256_mul_ps(y2, z);
332
+ y2 = _mm256_mul_ps(y2, x);
333
+ y2 = _mm256_add_ps(y2, x);
334
+
335
+ /* select the correct result from the two polynoms */
336
+ xmm3 = poly_mask;
337
+ y2 = _mm256_and_ps(xmm3, y2); //, xmm3);
338
+ y = _mm256_andnot_ps(xmm3, y);
339
+ y = _mm256_add_ps(y,y2);
340
+ /* update the sign */
341
+ y = _mm256_xor_ps(y, sign_bit);
342
+
343
+ return y;
344
+ }
345
+
346
+ /* almost the same as sin_ps */
347
+ inline v8sf cos256_ps(v8sf x) { // any x
348
+ v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, y;
349
+ v8si imm0, imm2;
350
+
351
+ /* take the absolute value */
352
+ x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
353
+
354
+ /* scale by 4/Pi */
355
+ y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
356
+
357
+ /* store the integer part of y in mm0 */
358
+ imm2 = _mm256_cvttps_epi32(y);
359
+ /* j=(j+1) & (~1) (see the cephes sources) */
360
+ imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
361
+ imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
362
+ y = _mm256_cvtepi32_ps(imm2);
363
+ imm2 = _mm256_sub_epi32(imm2, *(v8si*)_pi32_256_2);
364
+
365
+ /* get the swap sign flag */
366
+ imm0 = _mm256_andnot_si256(imm2, *(v8si*)_pi32_256_4);
367
+ imm0 = _mm256_slli_epi32(imm0, 29);
368
+ /* get the polynom selection mask */
369
+ imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
370
+ imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
371
+
372
+ v8sf sign_bit = _mm256_castsi256_ps(imm0);
373
+ v8sf poly_mask = _mm256_castsi256_ps(imm2);
374
+
375
+ /* The magic pass: "Extended precision modular arithmetic"
376
+ x = ((x - y * DP1) - y * DP2) - y * DP3; */
377
+ xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
378
+ xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
379
+ xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
380
+ xmm1 = _mm256_mul_ps(y, xmm1);
381
+ xmm2 = _mm256_mul_ps(y, xmm2);
382
+ xmm3 = _mm256_mul_ps(y, xmm3);
383
+ x = _mm256_add_ps(x, xmm1);
384
+ x = _mm256_add_ps(x, xmm2);
385
+ x = _mm256_add_ps(x, xmm3);
386
+
387
+ /* Evaluate the first polynom (0 <= x <= Pi/4) */
388
+ y = *(v8sf*)_ps256_coscof_p0;
389
+ v8sf z = _mm256_mul_ps(x,x);
390
+
391
+ y = _mm256_mul_ps(y, z);
392
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
393
+ y = _mm256_mul_ps(y, z);
394
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
395
+ y = _mm256_mul_ps(y, z);
396
+ y = _mm256_mul_ps(y, z);
397
+ v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
398
+ y = _mm256_sub_ps(y, tmp);
399
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
400
+
401
+ /* Evaluate the second polynom (Pi/4 <= x <= 0) */
402
+
403
+ v8sf y2 = *(v8sf*)_ps256_sincof_p0;
404
+ y2 = _mm256_mul_ps(y2, z);
405
+ y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
406
+ y2 = _mm256_mul_ps(y2, z);
407
+ y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
408
+ y2 = _mm256_mul_ps(y2, z);
409
+ y2 = _mm256_mul_ps(y2, x);
410
+ y2 = _mm256_add_ps(y2, x);
411
+
412
+ /* select the correct result from the two polynoms */
413
+ xmm3 = poly_mask;
414
+ y2 = _mm256_and_ps(xmm3, y2); //, xmm3);
415
+ y = _mm256_andnot_ps(xmm3, y);
416
+ y = _mm256_add_ps(y,y2);
417
+ /* update the sign */
418
+ y = _mm256_xor_ps(y, sign_bit);
419
+
420
+ return y;
421
+ }
422
+
423
+ /* since sin256_ps and cos256_ps are almost identical, sincos256_ps could replace both of them..
424
+ it is almost as fast, and gives you a free cosine with your sine */
425
+ inline void sincos256_ps(v8sf x, v8sf *s, v8sf *c) {
426
+
427
+ v8sf xmm1, xmm2, xmm3 = _mm256_setzero_ps(), sign_bit_sin, y;
428
+ v8si imm0, imm2, imm4;
429
+
430
+ sign_bit_sin = x;
431
+ /* take the absolute value */
432
+ x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
433
+ /* extract the sign bit (upper one) */
434
+ sign_bit_sin = _mm256_and_ps(sign_bit_sin, *(v8sf*)_ps256_sign_mask);
435
+
436
+ /* scale by 4/Pi */
437
+ y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
438
+
439
+ /* store the integer part of y in imm2 */
440
+ imm2 = _mm256_cvttps_epi32(y);
441
+
442
+ /* j=(j+1) & (~1) (see the cephes sources) */
443
+ imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
444
+ imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
445
+
446
+ y = _mm256_cvtepi32_ps(imm2);
447
+ imm4 = imm2;
448
+
449
+ /* get the swap sign flag for the sine */
450
+ imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
451
+ imm0 = _mm256_slli_epi32(imm0, 29);
452
+ //v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);
453
+
454
+ /* get the polynom selection mask for the sine*/
455
+ imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
456
+ imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
457
+ //v8sf poly_mask = _mm256_castsi256_ps(imm2);
458
+
459
+ v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);
460
+ v8sf poly_mask = _mm256_castsi256_ps(imm2);
461
+
462
+ /* The magic pass: "Extended precision modular arithmetic"
463
+ x = ((x - y * DP1) - y * DP2) - y * DP3; */
464
+ xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
465
+ xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
466
+ xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
467
+ xmm1 = _mm256_mul_ps(y, xmm1);
468
+ xmm2 = _mm256_mul_ps(y, xmm2);
469
+ xmm3 = _mm256_mul_ps(y, xmm3);
470
+ x = _mm256_add_ps(x, xmm1);
471
+ x = _mm256_add_ps(x, xmm2);
472
+ x = _mm256_add_ps(x, xmm3);
473
+
474
+ imm4 = _mm256_sub_epi32(imm4, *(v8si*)_pi32_256_2);
475
+ imm4 = _mm256_andnot_si256(imm4, *(v8si*)_pi32_256_4);
476
+ imm4 = _mm256_slli_epi32(imm4, 29);
477
+
478
+ v8sf sign_bit_cos = _mm256_castsi256_ps(imm4);
479
+
480
+ sign_bit_sin = _mm256_xor_ps(sign_bit_sin, swap_sign_bit_sin);
481
+
482
+ /* Evaluate the first polynom (0 <= x <= Pi/4) */
483
+ v8sf z = _mm256_mul_ps(x,x);
484
+ y = *(v8sf*)_ps256_coscof_p0;
485
+
486
+ y = _mm256_mul_ps(y, z);
487
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
488
+ y = _mm256_mul_ps(y, z);
489
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
490
+ y = _mm256_mul_ps(y, z);
491
+ y = _mm256_mul_ps(y, z);
492
+ v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
493
+ y = _mm256_sub_ps(y, tmp);
494
+ y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
495
+
496
+ /* Evaluate the second polynom (Pi/4 <= x <= 0) */
497
+
498
+ v8sf y2 = *(v8sf*)_ps256_sincof_p0;
499
+ y2 = _mm256_mul_ps(y2, z);
500
+ y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
501
+ y2 = _mm256_mul_ps(y2, z);
502
+ y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
503
+ y2 = _mm256_mul_ps(y2, z);
504
+ y2 = _mm256_mul_ps(y2, x);
505
+ y2 = _mm256_add_ps(y2, x);
506
+
507
+ /* select the correct result from the two polynoms */
508
+ xmm3 = poly_mask;
509
+ v8sf ysin2 = _mm256_and_ps(xmm3, y2);
510
+ v8sf ysin1 = _mm256_andnot_ps(xmm3, y);
511
+ y2 = _mm256_sub_ps(y2,ysin2);
512
+ y = _mm256_sub_ps(y, ysin1);
513
+
514
+ xmm1 = _mm256_add_ps(ysin1,ysin2);
515
+ xmm2 = _mm256_add_ps(y,y2);
516
+
517
+ /* update the sign */
518
+ *s = _mm256_xor_ps(xmm1, sign_bit_sin);
519
+ *c = _mm256_xor_ps(xmm2, sign_bit_cos);
520
+ }
521
+
522
+ #endif // CPU_CAPABILITY_AVX2
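As a usage illustration for this header (the wrapper below is hypothetical and not part of the file), the helpers consume and produce whole 8-lane AVX vectors:

#include <cstdint>
#include <immintrin.h>

// Apply exp256_ps to a float buffer whose length is a multiple of 8 (sketch only;
// assumes this header has been included and AVX2 is enabled).
inline void vexp_inplace(float* data, int64_t n) {
  for (int64_t i = 0; i < n; i += 8) {
    __m256 x = _mm256_loadu_ps(data + i);      // load 8 floats
    _mm256_storeu_ps(data + i, exp256_ps(x));  // store exp of each lane
  }
}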
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/utils.h ADDED
@@ -0,0 +1,198 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Parallel.h>
4
+ #include <ATen/cpu/vec/vec.h>
5
+ #include <c10/util/llvmMathExtras.h>
6
+
7
+ #ifdef USE_FBGEMM
8
+ #include <fbgemm/Fbgemm.h>
9
+ #endif
10
+
11
+ namespace at {
12
+ namespace native {
13
+
14
+ template <typename T>
15
+ inline void _store(T* dst, at::vec::Vectorized<T> src) {
16
+ src.store(dst);
17
+ }
18
+
19
+ inline void _store(at::BFloat16* dst, at::vec::Vectorized<float> src) {
20
+ auto res = at::vec::convert_float_bfloat16(src, src);
21
+ res.store(dst, at::vec::Vectorized<float>::size());
22
+ }
23
+
24
+ inline void _store(at::Half* dst, at::vec::Vectorized<float> src) {
25
+ auto res = at::vec::convert_float_half(src, src);
26
+ res.store(dst, at::vec::Vectorized<float>::size());
27
+ }
28
+
29
+ inline namespace CPU_CAPABILITY {
30
+
31
+ template <typename T>
32
+ inline T data_index_init(T offset) {
33
+ return offset;
34
+ }
35
+
36
+ template <typename T, typename... Args>
37
+ inline T data_index_init(T offset, T& x, const T& X, Args&&... args) {
38
+ offset = data_index_init(offset, std::forward<Args>(args)...);
39
+ x = offset % X;
40
+ return offset / X;
41
+ }
42
+
43
+ inline bool data_index_step() {
44
+ return true;
45
+ }
46
+
47
+ template <typename T, typename... Args>
48
+ inline bool data_index_step(T& x, const T& X, Args&&... args) {
49
+ if (data_index_step(std::forward<Args>(args)...)) {
50
+ x = ((x + 1) == X) ? 0 : (x + 1);
51
+ return x == 0;
52
+ }
53
+ return false;
54
+ }
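+ // Usage sketch for data_index_init / data_index_step (illustrative, not part of the
+ // upstream header): walk a flattened [N, H, W] range while keeping per-dimension indices.
+ //   int64_t n = 0, h = 0, w = 0;
+ //   data_index_init(begin, n, N, h, H, w, W);
+ //   for (int64_t idx = begin; idx < end; idx++) {
+ //     // ... use (n, h, w), with w varying fastest ...
+ //     data_index_step(n, N, h, H, w, W);
+ //   }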
55
+
56
+ // Helper struct for bfloat16 vectorization
57
+ // Useful when you need float as the intermediate dtype or accumulate dtype
58
+ using namespace vec;
59
+ struct Vec2 {
60
+ Vectorized<float> val0, val1;
61
+ Vec2(Vectorized<float> v0, Vectorized<float> v1) : val0(v0), val1(v1) {}
62
+ Vec2(float v) : val0(v), val1(v) {}
63
+ static Vec2 loadu(const BFloat16* ptr) {
64
+ auto [v0, v1] = convert_bfloat16_float(Vectorized<BFloat16>::loadu(ptr));
65
+ return {v0, v1};
66
+ }
67
+ static Vec2 loadu(const float* ptr) {
68
+ return {Vectorized<float>::loadu(ptr), Vectorized<float>::loadu(ptr + Vectorized<float>::size())};
69
+ }
70
+ void store(BFloat16* ptr) const {
71
+ Vectorized<BFloat16> val = convert_float_bfloat16(val0, val1);
72
+ val.store(ptr);
73
+ }
74
+ void store(float* ptr) const {
75
+ val0.store(ptr);
76
+ val1.store(ptr + Vectorized<float>::size());
77
+ }
78
+ };
79
+ inline Vec2 operator+(const Vec2& a, const Vec2& b) { return {a.val0 + b.val0, a.val1 + b.val1}; }
80
+ inline Vec2 operator*(const Vec2& a, const Vec2& b) { return {a.val0 * b.val0, a.val1 * b.val1}; }
81
+ inline Vec2 operator-(const Vec2& a, const Vec2& b) { return {a.val0 - b.val0, a.val1 - b.val1}; }
82
+ inline Vec2 operator/(const Vec2& a, const Vec2& b) { return {a.val0 / b.val0, a.val1 / b.val1}; }
83
+ inline Vec2 maximum(const Vec2& a, const Vec2& b) { return {vec::maximum(a.val0, b.val0), vec::maximum(a.val1, b.val1)}; }
84
+ inline Vec2 minimum(const Vec2& a, const Vec2& b) { return {vec::minimum(a.val0, b.val0), vec::minimum(a.val1, b.val1)}; }
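+ // Usage sketch for Vec2 (illustrative, not part of the upstream header): scale a BFloat16
+ // buffer in float precision, 2 * Vectorized<float>::size() elements per step, assuming the
+ // length n is a multiple of that width:
+ //   const int64_t W = 2 * Vectorized<float>::size();
+ //   for (int64_t i = 0; i < n; i += W) {
+ //     Vec2 v = Vec2::loadu(data + i);      // BFloat16* -> two float vectors
+ //     (v * Vec2(alpha)).store(data + i);   // compute in float, store back as BFloat16
+ //   }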
85
+
86
+ template <typename scalar_t> struct VectorizedType { using type = Vectorized<scalar_t>; };
87
+ template <> struct VectorizedType<BFloat16> { using type = Vec2; };
88
+ template <typename scalar_t> using VecType = typename VectorizedType<scalar_t>::type;
89
+
90
+ // Helper for mixed data type parameter Vec::load
91
+ inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const BFloat16* ptr) {
92
+ return convert_bfloat16_float(Vectorized<BFloat16>::loadu(ptr));
93
+ }
94
+
95
+ inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const Half* ptr) {
96
+ return convert_half_float(Vectorized<Half>::loadu(ptr));
97
+ }
98
+
99
+ inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const float* ptr) {
100
+ using Vec = Vectorized<float>;
101
+ return std::make_tuple(Vec::loadu(ptr), Vec::loadu(ptr + Vec::size()));
102
+ }
103
+
104
+ inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const BFloat16* ptr, int64_t count) {
105
+ return convert_bfloat16_float(Vectorized<BFloat16>::loadu(ptr, count));
106
+ }
107
+
108
+ inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const Half* ptr, int64_t count) {
109
+ return convert_half_float(Vectorized<Half>::loadu(ptr, count));
110
+ }
111
+
112
+ inline std::tuple<Vectorized<float>, Vectorized<float>> load2f(const float* ptr, int64_t count) {
113
+ using Vec = Vectorized<float>;
114
+ if (count > Vec::size()) {
115
+ return std::make_tuple(Vec::loadu(ptr), Vec::loadu(ptr + Vec::size(), count - Vec::size()));
116
+ } else {
117
+ return std::make_tuple(Vec::loadu(ptr, count), Vec(0));
118
+ }
119
+ }
120
+
121
+ } // namespace
122
+
123
+ namespace utils {
124
+
125
+ template <typename T>
126
+ T CeilLog2(const T& x) {
127
+ if (x <= 2) {
128
+ return 1;
129
+ }
130
+ // Last set bit is floor(log2(x)), floor + 1 is ceil
131
+ // except when x is an exact power of 2, so subtract 1 first
132
+ return static_cast<T>(llvm::findLastSet(static_cast<uint64_t>(x) - 1)) + 1;
133
+ }
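+ // Examples (illustrative, not part of the upstream header):
+ //   CeilLog2(2) == 1, CeilLog2(5) == 3, CeilLog2(8) == 3, CeilLog2(9) == 4.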
134
+
135
+ // matrix transpose:
136
+ // src has shape of M by N, with leading dimension of ld_src
137
+ // dst has shape of N by M, with leading dimension of ld_dst
138
+ template <typename T>
139
+ inline void transpose(int64_t M, int64_t N, const T* src, int64_t ld_src, T* dst, int64_t ld_dst) {
140
+ for (int64_t j = 0; j < N; j++) {
141
+ for (int64_t i = 0; i < M; i++) {
142
+ dst[j * ld_dst + i] = src[i * ld_src + j];
143
+ }
144
+ }
145
+ }
146
+
147
+ #ifdef USE_FBGEMM
148
+ template <>
149
+ inline void transpose<float>(int64_t M, int64_t N, const float* src, int64_t ld_src, float* dst, int64_t ld_dst) {
150
+ TORCH_CHECK(fbgemm::fbgemmSupportedCPU(), "Your CPU does not support FBGEMM.");
151
+ fbgemm::transpose_simd<float>(M, N, src, ld_src, dst, ld_dst);
152
+ }
153
+ #endif
154
+
155
+ template <typename index_t, typename F>
156
+ inline void parallel_sparse_csr(
157
+ const TensorAccessor<index_t, 1>& crow_acc,
158
+ const int64_t M,
159
+ const int64_t nnz,
160
+ const F& f) {
161
+ TORCH_CHECK(crow_acc.size(0) == M + 1);
162
+
163
+ // Parallelizing directly over `M` may lead to load imbalance, so statically
164
+ // determine the thread partition here to balance the payload assigned to
165
+ // each thread.
166
+ int num_threads = at::get_num_threads();
167
+ std::vector<int64_t> thread_splits(num_threads + 1, M);
168
+
169
+ int64_t thread_averge_payload = std::max((int64_t)1, divup(nnz, num_threads));
170
+
171
+ thread_splits[0] = 0;
172
+ int64_t sum = 0;
173
+ int64_t t = 1;
174
+ for (const auto m : c10::irange(M)) {
175
+ int64_t row_start = crow_acc[m];
176
+ int64_t row_end = crow_acc[m + 1];
177
+ sum += row_end - row_start;
178
+ if (sum > t * thread_averge_payload) {
179
+ thread_splits[t] = m;
180
+ t++;
181
+ }
182
+ }
183
+ // need to restore the last index,
184
+ // due to rounding error when calculating `thread_averge_payload`.
185
+ thread_splits[num_threads] = M;
186
+
187
+ at::parallel_for(0, num_threads, 1, [&](int64_t cbegin, int64_t cend) {
188
+ int tid = at::get_thread_num();
189
+ int64_t begin = thread_splits[tid];
190
+ int64_t end = thread_splits[tid + 1];
191
+ f(begin, end);
192
+ });
193
+ }
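+ // Usage sketch (illustrative, not part of the upstream header; col_acc / val_acc are
+ // hypothetical accessors for the column indices and values):
+ //   parallel_sparse_csr(crow_acc, M, nnz, [&](int64_t row_begin, int64_t row_end) {
+ //     for (int64_t m = row_begin; m < row_end; m++) {
+ //       for (auto e = crow_acc[m]; e < crow_acc[m + 1]; e++) {
+ //         // ... process col_acc[e] and val_acc[e] of row m ...
+ //       }
+ //     }
+ //   });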
194
+
195
+ } // namespace utils
196
+
197
+ } // namespace native
198
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cpu/zmath.h ADDED
@@ -0,0 +1,250 @@
1
+ #pragma once
2
+
3
+ // Complex number math operations that act as no-ops for other dtypes.
4
+ #include <c10/util/complex.h>
5
+ #include <c10/util/MathConstants.h>
6
+ #include<ATen/NumericUtils.h>
7
+
8
+ namespace at { namespace native {
9
+ inline namespace CPU_CAPABILITY {
10
+
11
+ template <typename SCALAR_TYPE, typename VALUE_TYPE=SCALAR_TYPE>
12
+ inline VALUE_TYPE zabs (SCALAR_TYPE z) {
13
+ return z;
14
+ }
15
+
16
+ template<>
17
+ inline c10::complex<float> zabs <c10::complex<float>> (c10::complex<float> z) {
18
+ return c10::complex<float>(std::abs(z));
19
+ }
20
+
21
+ template<>
22
+ inline float zabs <c10::complex<float>, float> (c10::complex<float> z) {
23
+ return std::abs(z);
24
+ }
25
+
26
+ template<>
27
+ inline c10::complex<double> zabs <c10::complex<double>> (c10::complex<double> z) {
28
+ return c10::complex<double>(std::abs(z));
29
+ }
30
+
31
+ template<>
32
+ inline double zabs <c10::complex<double>, double> (c10::complex<double> z) {
33
+ return std::abs(z);
34
+ }
35
+
36
+ // This overload corresponds to non-complex dtypes.
37
+ // The function is consistent with its NumPy equivalent
38
+ // for non-complex dtypes where `pi` is returned for
39
+ // negative real numbers and `0` is returned for 0 or positive
40
+ // real numbers.
41
+ // Note: `nan` is propagated.
42
+ template <typename SCALAR_TYPE, typename VALUE_TYPE=SCALAR_TYPE>
43
+ inline VALUE_TYPE angle_impl (SCALAR_TYPE z) {
44
+ if (at::_isnan(z)) {
45
+ return z;
46
+ }
47
+ return z < 0 ? c10::pi<double> : 0;
48
+ }
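+ // Expected values for the scalar overload above (illustrative):
+ //   angle_impl(-3.0) == c10::pi<double>   (negative reals map to pi)
+ //   angle_impl(0.0) == 0                  (zero and positive reals map to 0)
+ //   angle_impl(NAN) is NaN                (NaN is propagated)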
49
+
50
+ template<>
51
+ inline c10::complex<float> angle_impl <c10::complex<float>> (c10::complex<float> z) {
52
+ return c10::complex<float>(std::arg(z), 0.0);
53
+ }
54
+
55
+ template<>
56
+ inline float angle_impl <c10::complex<float>, float> (c10::complex<float> z) {
57
+ return std::arg(z);
58
+ }
59
+
60
+ template<>
61
+ inline c10::complex<double> angle_impl <c10::complex<double>> (c10::complex<double> z) {
62
+ return c10::complex<double>(std::arg(z), 0.0);
63
+ }
64
+
65
+ template<>
66
+ inline double angle_impl <c10::complex<double>, double> (c10::complex<double> z) {
67
+ return std::arg(z);
68
+ }
69
+
70
+ template <typename SCALAR_TYPE, typename VALUE_TYPE=SCALAR_TYPE>
71
+ constexpr VALUE_TYPE real_impl (SCALAR_TYPE z) {
72
+ return z; //No-Op
73
+ }
74
+
75
+ template<>
76
+ constexpr c10::complex<float> real_impl <c10::complex<float>> (c10::complex<float> z) {
77
+ return c10::complex<float>(z.real(), 0.0);
78
+ }
79
+
80
+ template<>
81
+ constexpr float real_impl <c10::complex<float>, float> (c10::complex<float> z) {
82
+ return z.real();
83
+ }
84
+
85
+ template<>
86
+ constexpr c10::complex<double> real_impl <c10::complex<double>> (c10::complex<double> z) {
87
+ return c10::complex<double>(z.real(), 0.0);
88
+ }
89
+
90
+ template<>
91
+ constexpr double real_impl <c10::complex<double>, double> (c10::complex<double> z) {
92
+ return z.real();
93
+ }
94
+
95
+ template <typename SCALAR_TYPE, typename VALUE_TYPE=SCALAR_TYPE>
96
+ constexpr VALUE_TYPE imag_impl (SCALAR_TYPE /*z*/) {
97
+ return 0;
98
+ }
99
+
100
+ template<>
101
+ constexpr c10::complex<float> imag_impl <c10::complex<float>> (c10::complex<float> z) {
102
+ return c10::complex<float>(z.imag(), 0.0);
103
+ }
104
+
105
+ template<>
106
+ constexpr float imag_impl <c10::complex<float>, float> (c10::complex<float> z) {
107
+ return z.imag();
108
+ }
109
+
110
+ template<>
111
+ constexpr c10::complex<double> imag_impl <c10::complex<double>> (c10::complex<double> z) {
112
+ return c10::complex<double>(z.imag(), 0.0);
113
+ }
114
+
115
+ template<>
116
+ constexpr double imag_impl <c10::complex<double>, double> (c10::complex<double> z) {
117
+ return z.imag();
118
+ }
119
+
120
+ template <typename TYPE>
121
+ inline TYPE conj_impl (TYPE z) {
122
+ return z; //No-Op
123
+ }
124
+
125
+ template<>
126
+ inline c10::complex<at::Half> conj_impl <c10::complex<at::Half>> (c10::complex<at::Half> z) {
127
+ return c10::complex<at::Half>{z.real(), -z.imag()};
128
+ }
129
+
130
+ template<>
131
+ inline c10::complex<float> conj_impl <c10::complex<float>> (c10::complex<float> z) {
132
+ return c10::complex<float>(z.real(), -z.imag());
133
+ }
134
+
135
+ template<>
136
+ inline c10::complex<double> conj_impl <c10::complex<double>> (c10::complex<double> z) {
137
+ return c10::complex<double>(z.real(), -z.imag());
138
+ }
139
+
140
+ template <typename TYPE>
141
+ inline TYPE ceil_impl (TYPE z) {
142
+ return std::ceil(z);
143
+ }
144
+
145
+ template <>
146
+ inline c10::complex<float> ceil_impl (c10::complex<float> z) {
147
+ return c10::complex<float>(std::ceil(z.real()), std::ceil(z.imag()));
148
+ }
149
+
150
+ template <>
151
+ inline c10::complex<double> ceil_impl (c10::complex<double> z) {
152
+ return c10::complex<double>(std::ceil(z.real()), std::ceil(z.imag()));
153
+ }
154
+
155
+ template<typename T>
156
+ inline c10::complex<T> sgn_impl (c10::complex<T> z) {
157
+ if (z == c10::complex<T>(0, 0)) {
158
+ return c10::complex<T>(0, 0);
159
+ } else {
160
+ return z / zabs(z);
161
+ }
162
+ }
163
+
164
+ template <typename TYPE>
165
+ inline TYPE floor_impl (TYPE z) {
166
+ return std::floor(z);
167
+ }
168
+
169
+ template <>
170
+ inline c10::complex<float> floor_impl (c10::complex<float> z) {
171
+ return c10::complex<float>(std::floor(z.real()), std::floor(z.imag()));
172
+ }
173
+
174
+ template <>
175
+ inline c10::complex<double> floor_impl (c10::complex<double> z) {
176
+ return c10::complex<double>(std::floor(z.real()), std::floor(z.imag()));
177
+ }
178
+
179
+ template <typename TYPE>
180
+ inline TYPE round_impl (TYPE z) {
181
+ return std::nearbyint(z);
182
+ }
183
+
184
+ template <>
185
+ inline c10::complex<float> round_impl (c10::complex<float> z) {
186
+ return c10::complex<float>(std::nearbyint(z.real()), std::nearbyint(z.imag()));
187
+ }
188
+
189
+ template <>
190
+ inline c10::complex<double> round_impl (c10::complex<double> z) {
191
+ return c10::complex<double>(std::nearbyint(z.real()), std::nearbyint(z.imag()));
192
+ }
193
+
194
+ template <typename TYPE>
195
+ inline TYPE trunc_impl (TYPE z) {
196
+ return std::trunc(z);
197
+ }
198
+
199
+ template <>
200
+ inline c10::complex<float> trunc_impl (c10::complex<float> z) {
201
+ return c10::complex<float>(std::trunc(z.real()), std::trunc(z.imag()));
202
+ }
203
+
204
+ template <>
205
+ inline c10::complex<double> trunc_impl (c10::complex<double> z) {
206
+ return c10::complex<double>(std::trunc(z.real()), std::trunc(z.imag()));
207
+ }
208
+
209
+ template <typename TYPE, std::enable_if_t<!c10::is_complex<TYPE>::value, int> = 0>
210
+ inline TYPE max_impl (TYPE a, TYPE b) {
211
+ if (_isnan<TYPE>(a) || _isnan<TYPE>(b)) {
212
+ return std::numeric_limits<TYPE>::quiet_NaN();
213
+ } else {
214
+ return std::max(a, b);
215
+ }
216
+ }
217
+
218
+ template <typename TYPE, std::enable_if_t<c10::is_complex<TYPE>::value, int> = 0>
219
+ inline TYPE max_impl (TYPE a, TYPE b) {
220
+ if (_isnan<TYPE>(a)) {
221
+ return a;
222
+ } else if (_isnan<TYPE>(b)) {
223
+ return b;
224
+ } else {
225
+ return std::abs(a) > std::abs(b) ? a : b;
226
+ }
227
+ }
228
+
229
+ template <typename TYPE, std::enable_if_t<!c10::is_complex<TYPE>::value, int> = 0>
230
+ inline TYPE min_impl (TYPE a, TYPE b) {
231
+ if (_isnan<TYPE>(a) || _isnan<TYPE>(b)) {
232
+ return std::numeric_limits<TYPE>::quiet_NaN();
233
+ } else {
234
+ return std::min(a, b);
235
+ }
236
+ }
237
+
238
+ template <typename TYPE, std::enable_if_t<c10::is_complex<TYPE>::value, int> = 0>
239
+ inline TYPE min_impl (TYPE a, TYPE b) {
240
+ if (_isnan<TYPE>(a)) {
241
+ return a;
242
+ } else if (_isnan<TYPE>(b)) {
243
+ return b;
244
+ } else {
245
+ return std::abs(a) < std::abs(b) ? a : b;
246
+ }
247
+ }
248
+
249
+ } // end namespace
250
+ }} //end at::native
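A quick host-side sketch of the semantics defined by the helpers above, using std::complex in place of c10::complex so it compiles standalone; this snippet is purely illustrative and is not part of the header.

#include <cassert>
#include <cmath>
#include <complex>

int main() {
  const double pi = std::acos(-1.0);

  // angle: pi for negative reals, 0 for non-negative reals; std::arg for complex inputs.
  assert(std::abs(std::arg(std::complex<double>(-2.0, 0.0)) - pi) < 1e-12);

  // sgn(z) = z / |z| for z != 0 (and 0 for z == 0), so |sgn(z)| == 1.
  std::complex<double> z(3.0, 4.0);
  std::complex<double> s = z / std::abs(z);   // (0.6, 0.8)
  assert(std::abs(std::abs(s) - 1.0) < 1e-12);

  // max_impl/min_impl on complex types compare by magnitude (NaNs are propagated).
  std::complex<double> a(1.0, 1.0), b(0.5, 0.5);
  assert(std::abs(a) > std::abs(b));          // so max_impl(a, b) would return a
  return 0;
}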
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Copy.h ADDED
@@ -0,0 +1,10 @@
1
+ #pragma once
2
+
3
+ namespace at {
4
+ struct TensorIteratorBase;
5
+
6
+ namespace native {
7
+
8
+ void direct_copy_kernel_cuda(TensorIteratorBase &iter);
9
+
10
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/DistributionTemplates.h ADDED
@@ -0,0 +1,672 @@
1
+ #pragma once
2
+
3
+ #include <ATen/AccumulateType.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/Dispatch_v2.h>
6
+ #include <ATen/ExpandBase.h>
7
+ #include <ATen/OpMathType.h>
8
+ #include <ATen/native/TensorIterator.h>
9
+ #include <ATen/native/cuda/Loops.cuh>
10
+ #include <c10/util/Half.h>
11
+ #include <ATen/cuda/CUDAApplyUtils.cuh>
12
+ #include <ATen/cuda/CUDAContext.h>
13
+ #include <ATen/cuda/detail/OffsetCalculator.cuh>
14
+ #include <ATen/cuda/CUDAGraphsUtils.cuh>
15
+ #include <ATen/detail/FunctionTraits.h>
16
+ #include <ATen/core/DistributionsHelper.h>
17
+
18
+ #include <curand.h>
19
+ #include <curand_kernel.h>
20
+ #include <curand_philox4x32_x.h>
21
+ #include <cstdint>
22
+ #include <limits>
23
+ #include <utility>
24
+ #include <mutex>
25
+ #include <tuple>
26
+ #include <type_traits>
27
+
28
+ namespace at {
29
+ namespace native {
30
+ namespace {
31
+
32
+ // launch bounds used for kernels utilizing TensorIterator
33
+ const uint32_t block_size_bound = 256;
34
+ const uint32_t grid_size_bound = 4;
35
+ // number of randoms given by distributions like curand_uniform4, curand_uniform2_double
36
+ // used in calculating philox offset.
37
+ const uint32_t curand4_engine_calls = 4;
38
+
39
+ // utility function that calculates proper philox_offset
40
+ // for distributions utilizing TensorIterator. For distributions using
41
+ // TensorIterator, we are using a grid-stride loop with each
42
+ // thread yielding one element. For the edge of the grid-stride
43
+ // loop, if the tensor size is large, the unroll loop will kick in and the float4
44
+ // from curand4 will start getting utilized (for common tensor sizes, we end up
45
+ // using rand.x from each thread). Hence, the philox_offset is
46
+ // (number of elements per thread * number of engine calls), which makes
47
+ // sure that philox offset increment is not less than the number of randoms used
48
+ // in each thread.
49
+ std::tuple<uint64_t, dim3, dim3> calc_execution_policy(int64_t total_elements) {
50
+ const uint64_t numel = static_cast<uint64_t>(total_elements);
51
+ const uint32_t block_size = block_size_bound;
52
+ const uint32_t unroll = curand4_engine_calls;
53
+ dim3 dim_block(block_size);
54
+ dim3 grid((numel + block_size - 1) / block_size);
55
+ uint32_t blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / block_size;
56
+ grid.x = std::min(
57
+ static_cast<uint32_t>(at::cuda::getCurrentDeviceProperties()->multiProcessorCount) * blocks_per_sm,
58
+ grid.x);
59
+ // number of times random will be generated per thread, to offset philox counter in thc random state
60
+ uint64_t counter_offset = ((numel - 1) / (block_size * grid.x * unroll) + 1)
61
+ * curand4_engine_calls;
62
+ return std::make_tuple(counter_offset, grid, dim_block);
63
+ }
64
+
65
+ // grid stride loop kernel for distributions
66
+ template<typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t>
67
+ C10_LAUNCH_BOUNDS_2(block_size_bound, grid_size_bound)
68
+ __global__ void distribution_elementwise_grid_stride_kernel(int numel,
69
+ PhiloxCudaState philox_args,
70
+ const dist_t dist_func,
71
+ const transform_t transform_func) {
72
+ auto seeds = at::cuda::philox::unpack(philox_args);
73
+ int idx = blockIdx.x * blockDim.x + threadIdx.x;
74
+ curandStatePhilox4_32_10_t state;
75
+ curand_init(std::get<0>(seeds),
76
+ idx,
77
+ std::get<1>(seeds),
78
+ &state);
79
+
80
+ int rounded_size = ((numel - 1)/(blockDim.x * gridDim.x * unroll_factor)+1) *
81
+ blockDim.x * gridDim.x * unroll_factor;
82
+ for(int linear_index = idx; linear_index < rounded_size; linear_index += blockDim.x * gridDim.x * unroll_factor) {
83
+ auto rand = dist_func(&state);
84
+ #pragma unroll
85
+ for (int ii = 0; ii < unroll_factor; ii++) {
86
+ int li = linear_index + blockDim.x * gridDim.x * ii;
87
+ if (li < numel) {
88
+ transform_func(li, static_cast<accscalar_t>((&rand.x)[ii]));
89
+ }
90
+ }
91
+ __syncthreads();
92
+ }
93
+ }
94
+
95
+ /**
96
+ * distribution_nullary_kernel is analogous to gpu_kernel in
97
+ * ATen/native/cuda/Loops.cuh. Like gpu_kernel, it uses
98
+ * TensorIterator to launch a kernel. However, the differences are
99
+ * - it launches a grid-stride loop based kernel. The kernel is not
100
+ * generic like elementwise_kernel in Loops.cuh and is specialized
101
+ * for the distribution kernels here.
102
+ * - For big size tensors, we can launch multiple kernels recursively
103
+ * (i.e. if (!iter.can_use_32bit_indexing())) and hence, the philox
104
+ * offset calculation is done in this function.
105
+ *
106
+ * FIXME: Can we specialize elementwise_kernel and launch_kernel in Loops.cuh
107
+ * to have grid-stride loop kernel and then use that to launch our distribution
108
+ * kernels? Note that we need a grid-stride loop kernel because, as we found by testing,
109
+ * it achieves peak effective bandwidth.
110
+ */
111
+ template<typename scalar_t,
112
+ typename accscalar_t,
113
+ int unroll_factor,
114
+ typename RNG,
115
+ typename dist_t,
116
+ typename transform_t>
117
+ void distribution_nullary_kernel(at::TensorIteratorBase& iter,
118
+ RNG gen,
119
+ const dist_t& dist_func,
120
+ const transform_t transform_func) {
121
+ static_assert(unroll_factor >= 1, "unroll_factor must be >= 1.");
122
+ int64_t numel = iter.numel();
123
+ if (numel == 0) {
124
+ return;
125
+ }
126
+
127
+ auto execution_policy = calc_execution_policy(numel);
128
+ auto counter_offset = std::get<0>(execution_policy);
129
+ auto grid = std::get<1>(execution_policy);
130
+ auto block = std::get<2>(execution_policy);
131
+ PhiloxCudaState rng_engine_inputs;
132
+ {
133
+ // See Note [Acquire lock when using random generators]
134
+ std::lock_guard<std::mutex> lock(gen->mutex_);
135
+ rng_engine_inputs = gen->philox_cuda_state(counter_offset);
136
+ }
137
+
138
+ if (!iter.can_use_32bit_indexing()) {
139
+ for (auto& sub_iter : iter.with_32bit_indexing()) {
140
+ distribution_nullary_kernel<scalar_t, accscalar_t, unroll_factor>(sub_iter,
141
+ gen, dist_func, transform_func);
142
+ }
143
+ return;
144
+ }
145
+
146
+ char* out_data = (char*)iter.data_ptr(0);
147
+
148
+ auto stream = at::cuda::getCurrentCUDAStream();
149
+ if (iter.is_trivial_1d()) {
150
+ auto strides = iter.get_inner_strides();
151
+ int stride0 = strides[0];
152
+ distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>(
153
+ numel,
154
+ rng_engine_inputs,
155
+ dist_func,
156
+ [=]__device__(int idx, accscalar_t rand) {
157
+ scalar_t* out = (scalar_t*)&out_data[stride0 * idx];
158
+ *out = transform_func(rand);
159
+ }
160
+ );
161
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
162
+ } else {
163
+ auto offset_calc = make_offset_calculator<1>(iter);
164
+ distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>(
165
+ numel,
166
+ rng_engine_inputs,
167
+ dist_func,
168
+ [=]__device__(int idx, accscalar_t rand) {
169
+ auto offsets = offset_calc.get(idx);
170
+ scalar_t* out = (scalar_t*)&out_data[offsets[0]];
171
+ *out = transform_func(rand);
172
+ }
173
+ );
174
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
175
+ }
176
+ }
177
+
178
+ // Binary kernel
179
+ template <typename func_t, typename inp_offset_calc_t, typename out_offset_calc_t>
180
+ __global__ void distribution_binary_elementwise_kernel(
181
+ int numel,
182
+ func_t f,
183
+ PhiloxCudaState philox_args,
184
+ typename function_traits<func_t>::result_type *output_data,
185
+ const typename function_traits<func_t>::template arg<1>::type *input_data_1,
186
+ const typename function_traits<func_t>::template arg<2>::type *input_data_2,
187
+ inp_offset_calc_t inp_calc,
188
+ out_offset_calc_t out_calc) {
189
+ auto seeds = at::cuda::philox::unpack(philox_args);
190
+
191
+ using input_t_1 = typename function_traits<func_t>::template arg<1>::type;
192
+ using input_t_2 = typename function_traits<func_t>::template arg<2>::type;
193
+
194
+ input_t_1 inputs_1[thread_work_size()];
195
+ input_t_2 inputs_2[thread_work_size()];
196
+
197
+ int base_index = block_work_size() * blockIdx.x;
198
+ int remaining = std::min<int>(numel - base_index, block_work_size());
199
+
200
+ curandStatePhilox4_32_10_t state;
201
+ curand_init(std::get<0>(seeds),
202
+ blockIdx.x * blockDim.x + threadIdx.x,
203
+ std::get<1>(seeds),
204
+ &state);
205
+
206
+ // load data into registers
207
+ int thread_idx = threadIdx.x;
208
+ #pragma unroll
209
+ for (int i = 0; i < thread_work_size(); i++) {
210
+ if (thread_idx >= remaining) {
211
+ break;
212
+ }
213
+ int input_idx = thread_idx + base_index;
214
+ auto offsets = inp_calc.get(input_idx);
215
+ inputs_1[i] = input_data_1[offsets[0]];
216
+ inputs_2[i] = input_data_2[offsets[1]];
217
+
218
+ thread_idx += num_threads();
219
+ }
220
+
221
+ // compute and store
222
+ thread_idx = threadIdx.x;
223
+ #pragma unroll
224
+ for (int i = 0; i < thread_work_size(); i++) {
225
+ if (thread_idx >= remaining) {
226
+ break;
227
+ }
228
+ int input_idx = thread_idx + base_index;
229
+ auto offsets = out_calc.get(input_idx);
230
+ output_data[offsets[0]] = f(state, inputs_1[i], inputs_2[i]);
231
+ thread_idx += num_threads();
232
+ }
233
+ }
234
+
235
+ template <typename func_t>
236
+ void distribution_binary_kernel(TensorIteratorBase &iter, PhiloxCudaState philox_args, const func_t &f) {
237
+ static_assert(std::is_same<typename function_traits<func_t>::template arg<0>::type, curandStatePhilox4_32_10_t&>::value, "the first argument of functor must be curandStatePhilox4_32_10_t");
238
+ using input_t_1 = typename function_traits<func_t>::template arg<1>::type;
239
+ using input_t_2 = typename function_traits<func_t>::template arg<2>::type;
240
+ using output_t = typename function_traits<func_t>::result_type;
241
+
242
+ if (!iter.can_use_32bit_indexing()) {
243
+ for (auto& sub_iter : iter.with_32bit_indexing()) {
244
+ distribution_binary_kernel(sub_iter, philox_args, f);
245
+ }
246
+ return;
247
+ }
248
+
249
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(iter.can_use_32bit_indexing());
250
+
251
+ int64_t numel = iter.numel();
252
+ if (numel == 0) {
253
+ return;
254
+ }
255
+
256
+ output_t *output_data = static_cast<output_t *>(iter.data_ptr(0));
257
+ const input_t_1 *input_data_1 = static_cast<const input_t_1 *>(iter.data_ptr(1));
258
+ const input_t_2 *input_data_2 = static_cast<const input_t_2 *>(iter.data_ptr(2));
259
+
260
+ int64_t grid = (numel + block_work_size() - 1) / block_work_size();
261
+ auto stream = at::cuda::getCurrentCUDAStream();
262
+
263
+ if (iter.is_contiguous()) {
264
+ distribution_binary_elementwise_kernel<<<grid,num_threads(), 0, stream>>>(
265
+ numel, f, philox_args, output_data, input_data_1, input_data_2,
266
+ TrivialOffsetCalculator<2>(), TrivialOffsetCalculator<1>());
267
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
268
+ } else {
269
+ distribution_binary_elementwise_kernel<<<grid, num_threads(), 0, stream>>>(
270
+ numel, f, philox_args, output_data, input_data_1, input_data_2,
271
+ make_input_offset_calculator<2>(iter), make_output_offset_calculator(iter));
272
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
273
+ }
274
+ }
275
+
276
+ } // namespace
277
+ }} // namespace at::native
278
+
279
+
280
+ namespace at {
281
+ namespace native {
282
+ namespace templates {
283
+ namespace cuda {
284
+
285
+ // ==================================================== Random ========================================================
286
+
287
+ template<typename RNG>
288
+ void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, RNG gen) {
289
+ AT_DISPATCH_V2(iter.dtype(), "random_from_to_kernel_cuda", AT_WRAP([&] {
290
+ if ((
291
+ std::is_same<scalar_t, int64_t>::value ||
292
+ std::is_same<scalar_t, double>::value ||
293
+ std::is_same<scalar_t, float>::value ||
294
+ std::is_same<scalar_t, at::BFloat16>::value) && range >= 1ULL << 32)
295
+ {
296
+ // define lambda to mod with range and add base
297
+ auto random_func = [range, base] __device__ (uint64_t rand) {
298
+ return transformation::uniform_int_from_to<scalar_t>(rand, range, base);
299
+ };
300
+ distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter,
301
+ gen,
302
+ [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 {
303
+ ulonglong2 ret;
304
+ uint4 rand_val = curand4(state);
305
+ ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
306
+ ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
307
+ return ret;
308
+ },
309
+ random_func);
310
+ } else {
311
+ auto random_func = [range, base] __device__ (uint32_t rand) {
312
+ return transformation::uniform_int_from_to<scalar_t>(rand, range, base);
313
+ };
314
+ distribution_nullary_kernel<scalar_t, uint32_t, curand4_engine_calls>(iter,
315
+ gen,
316
+ [] __device__ (curandStatePhilox4_32_10_t* state) {
317
+ return curand4(state);
318
+ },
319
+ random_func);
320
+ }
321
+ }), AT_EXPAND(AT_ALL_TYPES), kBool, kHalf, kBFloat16, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
322
+ }
323
+
324
+ // This is the special kernel to handle single specific case:
325
+ // from(inclusive) = std::numeric_limits<int64_t>::lowest()
326
+ // to(exclusive) = None (= std::numeric_limits<int64_t>::max() + 1)
327
+ template<typename RNG>
328
+ void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG gen) {
329
+ AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::BFloat16, iter.dtype(), "random_full_64_bits_range_kernel_cuda", [&] {
330
+ if (std::is_same<scalar_t, int64_t>::value ||
331
+ std::is_same<scalar_t, double>::value ||
332
+ std::is_same<scalar_t, float>::value ||
333
+ std::is_same<scalar_t, at::BFloat16>::value) {
334
+ auto random_func = [] __device__ (uint64_t rand) {
335
+ return transformation::uniform_int_full_range<scalar_t>(rand);
336
+ };
337
+ distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter,
338
+ gen,
339
+ [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 {
340
+ ulonglong2 ret;
341
+ uint4 rand_val = curand4(state);
342
+ ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
343
+ ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
344
+ return ret;
345
+ },
346
+ random_func);
347
+ } else {
348
+ TORCH_CHECK(false, "random_full_64_bits_range_kernel_cuda handles only int64, double, float and bfloat16");
349
+ }
350
+ });
351
+ }
352
+
353
+ template<typename RNG>
354
+ struct RandomFromToKernel {
355
+ void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen) {
356
+ random_from_to_kernel(iter, range, base, check_generator<RNG>(gen));
357
+ }
358
+ void operator()(TensorIteratorBase& iter, c10::optional<Generator> gen) {
359
+ random_full_64_bits_range_kernel(iter, check_generator<RNG>(gen));
360
+ }
361
+ };
362
+
363
+ template<typename RNG>
364
+ void random_kernel(TensorIteratorBase& iter, RNG gen) {
365
+ AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "random_kernel_cuda", [&] {
366
+ if (std::is_same<scalar_t, double>::value || std::is_same<scalar_t, int64_t>::value) {
367
+ auto random_func = [] __device__ (uint64_t rand) {
368
+ return transformation::uniform_int<scalar_t>(rand);
369
+ };
370
+ distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter, gen,
371
+ [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 {
372
+ ulonglong2 ret;
373
+ uint4 rand_val = curand4(state);
374
+ ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
375
+ ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
376
+ return ret;
377
+ },
378
+ random_func);
379
+ } else {
380
+ auto random_func = [] __device__ (uint32_t rand) {
381
+ return transformation::uniform_int<scalar_t>(rand);
382
+ };
383
+ distribution_nullary_kernel<scalar_t, uint32_t, curand4_engine_calls>(iter,
384
+ gen,
385
+ [] __device__ (curandStatePhilox4_32_10_t* state) {
386
+ return curand4(state);
387
+ },
388
+ random_func);
389
+ }
390
+ });
391
+ }
392
+
393
+ template<typename RNG>
394
+ struct RandomKernel {
395
+ void operator()(TensorIteratorBase& iter, RNG gen) {
396
+ random_kernel(iter, gen);
397
+ }
398
+ };
399
+
400
+ // ====================================================================================================================
401
+
402
+ template<typename scalar_t, typename accscalar_t, size_t curand4_engine_calls, typename RNG, typename transform_t>
403
+ void uniform_and_transform(TensorIteratorBase& iter, RNG gen, transform_t transform) {
404
+ if (std::is_same<scalar_t, double>::value) {
405
+ distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
406
+ gen,
407
+ [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
408
+ transform);
409
+ } else {
410
+ distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
411
+ gen,
412
+ [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
413
+ transform);
414
+ }
415
+ }
416
+
417
+ template<typename scalar_t, typename accscalar_t, size_t curand4_engine_calls, typename RNG, typename transform_t>
418
+ void normal_and_transform(TensorIteratorBase& iter, RNG gen, transform_t transform) {
419
+ if (std::is_same<scalar_t, double>::value) {
420
+ distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
421
+ gen,
422
+ [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal2_double(state); },
423
+ transform);
424
+ } else {
425
+ distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
426
+ gen,
427
+ [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal4(state); },
428
+ transform);
429
+ }
430
+ }
431
+
432
+ // ==================================================== Normal ========================================================
433
+
434
+ template<typename RNG>
435
+ void normal_kernel(const TensorBase &self, double mean_, double std_, RNG gen) {
436
+ auto iter = TensorIterator::borrowing_nullary_op(self);
437
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "normal_kernel_cuda", [&] {
438
+ using accscalar_t = at::acc_type<scalar_t, true>;
439
+ auto mean = static_cast<accscalar_t>(mean_);
440
+ auto std = static_cast<accscalar_t>(std_);
441
+ // define lambda to multiply std and add mean
442
+ auto normal_func = [mean, std] __device__ (accscalar_t rand) {
443
+ return static_cast<scalar_t>(transformation::normal<accscalar_t>(rand, mean, std));
444
+ };
445
+ normal_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, normal_func);
446
+ });
447
+ }
448
+
449
+ template<typename RNG>
450
+ struct NormalKernel {
451
+ void operator()(const TensorBase &self, double mean, double std, c10::optional<Generator> gen) {
452
+ normal_kernel(self, mean, std, check_generator<RNG>(gen));
453
+ }
454
+ };
455
+
456
+ // ==================================================== Uniform ========================================================
457
+
458
+ template<typename RNG>
459
+ void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG gen) {
460
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "uniform_kernel_cuda", [&] {
461
+ auto from = static_cast<scalar_t>(from_);
462
+ auto to = static_cast<scalar_t>(to_);
463
+ using opmath_t = at::opmath_type<scalar_t>;
464
+ auto range = static_cast<opmath_t>(to-from);
465
+ // define lambda to reverse bounds, multiply 'range' and add 'from_'
466
+ auto uniform_func = [range, from, to] __device__ (opmath_t rand) {
467
+ // Compute output value before reversing the bounds
468
+ // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/96947
469
+ auto value = static_cast<scalar_t>(rand * range + from);
470
+ // reverse the bounds of curand4 from (0, 1] to [0, 1)
471
+ // Note that this method is from legacy THCTensorRandom and is likely to give
472
+ // you more 0-s, since the probability of getting 1-s is higher than 0-s and
473
+ // by reversing the bounds, we are flipping the probabilities of 1-s and 0-s.
474
+ // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/16706
475
+ auto reverse_bound_value = value == to ? from : value;
476
+ return reverse_bound_value;
477
+ };
478
+ uniform_and_transform<scalar_t, opmath_t, curand4_engine_calls>(iter, gen, uniform_func);
479
+ });
480
+ }
481
+
482
+ template<typename RNG>
483
+ struct UniformKernel {
484
+ void operator()(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
485
+ uniform_kernel(iter, from, to, check_generator<RNG>(gen));
486
+ }
487
+ };
488
+
489
+ // ================================================== LogNormal =======================================================
490
+
491
+ template<typename RNG>
492
+ void log_normal_kernel(TensorIteratorBase& iter, double mean_, double std_, RNG gen) {
493
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "log_normal_cuda", [&] {
494
+ using accscalar_t = at::acc_type<scalar_t, true>;
495
+ auto mean = static_cast<accscalar_t>(mean_);
496
+ auto std = static_cast<accscalar_t>(std_);
497
+ // define lambda for log_normal transformation
498
+ auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
499
+ return static_cast<scalar_t>(transformation::log_normal<accscalar_t>(transformation::normal<accscalar_t>(rand, mean, std)));
500
+ };
501
+ normal_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, log_normal_func);
502
+ });
503
+ }
504
+
505
+ template<typename RNG>
506
+ struct LogNormalKernel {
507
+ void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
508
+ log_normal_kernel(iter, mean, std, check_generator<RNG>(gen));
509
+ }
510
+ };
511
+
512
+ // =================================================== Geometric ======================================================
513
+
514
+ template<typename RNG>
515
+ void geometric_kernel(TensorIteratorBase& iter, double p, RNG gen) {
516
+ AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "geometric_cuda", [&] {
517
+ using accscalar_t = at::DiscreteDistributionType<scalar_t>::type;
518
+ // define lambda for geometric transformation
519
+ auto geometric_func = [p] __device__ (accscalar_t rand) {
520
+ return static_cast<scalar_t>(transformation::geometric<accscalar_t>(rand, p));
521
+ };
522
+ uniform_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, geometric_func);
523
+ });
524
+ }
525
+
526
+ template<typename RNG>
527
+ struct GeometricKernel {
528
+ void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
529
+ geometric_kernel(iter, p, check_generator<RNG>(gen));
530
+ }
531
+ };
532
+
533
+ // ================================================== Exponential =====================================================
534
+
535
+ template<typename RNG>
536
+ void exponential_kernel(TensorIteratorBase& iter, double lambda_, RNG gen) {
537
+ TORCH_CHECK(isFloatingType(iter.dtype()), "Exponential distribution is a continuous probability distribution. dtype must be a floating point but you specified ", iter.dtype());
538
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exponential_cuda", [&] {
539
+ using accscalar_t = at::acc_type<scalar_t, true>;
540
+ auto lambda = static_cast<accscalar_t>(lambda_);
541
+ // define lambda for exponential transformation
542
+ auto exponential_func = [lambda] __device__ (accscalar_t rand) {
543
+ return static_cast<scalar_t>(transformation::exponential<accscalar_t>(rand, lambda));
544
+ };
545
+ uniform_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, exponential_func);
546
+ });
547
+ }
548
+
549
+ template<typename RNG>
550
+ struct ExponentialKernel {
551
+ void operator()(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
552
+ exponential_kernel(iter, lambda, check_generator<RNG>(gen));
553
+ }
554
+ };
555
+
556
+ // ==================================================== Cauchy ========================================================
557
+
558
+ template<typename RNG>
559
+ void cauchy_kernel(TensorIteratorBase& iter, double median_, double sigma_, RNG gen) {
560
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "cauchy_cuda", [&] {
561
+ using accscalar_t = at::acc_type<scalar_t, true>;
562
+ auto median = static_cast<accscalar_t>(median_);
563
+ auto sigma = static_cast<accscalar_t>(sigma_);
564
+ // define lambda for cauchy transformation
565
+ auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
566
+ return static_cast<scalar_t>(transformation::cauchy<accscalar_t>(rand, median, sigma));
567
+ };
568
+ uniform_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, cauchy_func);
569
+ });
570
+ }
571
+
572
+ template<typename RNG>
573
+ struct CauchyKernel {
574
+ void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
575
+ cauchy_kernel(iter, median, sigma, check_generator<RNG>(gen));
576
+ }
577
+ };
578
+
579
+ // ==================================================== Bernoulli =====================================================
580
+
581
+ template<typename scalar_t, typename prob_t>
582
+ void bernoulli_tensor_cuda_kernel(
583
+ const TensorBase &ret, const at::TensorBase &p,
584
+ PhiloxCudaState philox_args) {
585
+ auto functor = [philox_args] __device__(
586
+ int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
587
+ const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
588
+ auto seeds = at::cuda::philox::unpack(philox_args);
589
+ curandStatePhilox4_32_10_t state;
590
+ curand_init(std::get<0>(seeds),
591
+ blockIdx.x * blockDim.x + threadIdx.x,
592
+ std::get<1>(seeds),
593
+ &state);
594
+
595
+ // See Note [Register spilling in curand call for CUDA < 10]
596
+ float4 rand = curand_uniform4(&state);
597
+ switch (n) {
598
+ case 4: {
599
+ CUDA_KERNEL_ASSERT(0 <= p4 && p4 <= 1);
600
+ v4 = static_cast<scalar_t>(rand.w <= p4);
601
+ // fallthrough
602
+ }
603
+ case 3: {
604
+ CUDA_KERNEL_ASSERT(0 <= p3 && p3 <= 1);
605
+ v3 = static_cast<scalar_t>(rand.z <= p3);
606
+ // fallthrough
607
+ }
608
+ case 2: {
609
+ CUDA_KERNEL_ASSERT(0 <= p2 && p2 <= 1);
610
+ v2 = static_cast<scalar_t>(rand.y <= p2);
611
+ // fallthrough
612
+ }
613
+ case 1: {
614
+ CUDA_KERNEL_ASSERT(0 <= p1 && p1 <= 1);
615
+ v1 = static_cast<scalar_t>(rand.x <= p1);
616
+ }
617
+ }
618
+ };
619
+ // The template argument `4` below indicates that we want to operate on four
620
+ // elements at a time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
621
+ at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4, decltype(functor),
622
+ /*max_threads_per_block=*/512,
623
+ /*min_blocks_per_sm=*/2>(ret, p, functor);
624
+ }
625
+
626
+ template<typename RNG>
627
+ void bernoulli_kernel(const TensorBase &self, const TensorBase &p_, RNG gen) {
628
+ PhiloxCudaState rng_engine_inputs;
629
+ {
630
+ // See Note [Acquire lock when using random generators]
631
+ std::lock_guard<std::mutex> lock(gen->mutex_);
632
+ rng_engine_inputs = gen->philox_cuda_state(10);
633
+ }
634
+ TORCH_CHECK(at::isFloatingType(p_.scalar_type()), "expected probabilities tensor to have floating type, got ", p_.scalar_type());
635
+ // cast probabilities tensor to double for double `self` tensor, and to `float` for everything else
636
+ const auto p_type = self.dtype() == at::kDouble ? at::kDouble : at::kFloat;
637
+ auto p_cuda = p_.to(TensorOptions().device(self.device()).dtype(p_type));
638
+ auto p = expand_inplace(self, p_cuda);
639
+ AT_DISPATCH_ALL_TYPES_AND3(
640
+ at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] {
641
+ if (std::is_same<scalar_t, double>::value) {
642
+ return bernoulli_tensor_cuda_kernel<double, double>(self, *p, rng_engine_inputs);
643
+ } else {
644
+ return bernoulli_tensor_cuda_kernel<scalar_t, float>(self, *p, rng_engine_inputs);
645
+ }
646
+ });
647
+ }
648
+
649
+ template<typename RNG>
650
+ void bernoulli_kernel(TensorIteratorBase& iter, double p, RNG gen) {
651
+ AT_DISPATCH_ALL_TYPES_AND3(
652
+ at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "bernoulli_scalar_cuda_", [&] {
653
+ using accscalar_t = at::DiscreteDistributionType<scalar_t>::type;
654
+ // define lambda for bernoulli transformation
655
+ auto bernoulli_func = [p] __device__ (accscalar_t rand) {
656
+ return static_cast<scalar_t>(transformation::bernoulli<accscalar_t>(rand, p));
657
+ };
658
+ uniform_and_transform<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, bernoulli_func);
659
+ });
660
+ }
661
+
662
+ template<typename RNG>
663
+ struct BernoulliKernel {
664
+ void operator()(TensorIteratorBase& iter, double p, c10::optional<Generator> gen) {
665
+ bernoulli_kernel(iter, p, check_generator<RNG>(gen));
666
+ }
667
+ void operator()(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen) {
668
+ bernoulli_kernel(self, p_, check_generator<RNG>(gen));
669
+ }
670
+ };
671
+
672
+ }}}}
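As a side note, the counter_offset produced by calc_execution_policy above is just a ceiling-divided grid-stride iteration count multiplied by curand4_engine_calls. A minimal host-only sketch of that arithmetic follows; the grid size of 512 blocks is an assumed example value, whereas the real function caps the grid by SM occupancy.

#include <cassert>
#include <cstdint>

int main() {
  // Each thread consumes up to `unroll` randoms per grid-stride iteration.
  const uint64_t numel = 1 << 20;      // example tensor size
  const uint64_t block_size = 256;     // block_size_bound
  const uint64_t unroll = 4;           // curand4_engine_calls
  const uint64_t grid_x = 512;         // assumed occupancy-capped grid

  // Grid-stride iterations, rounded up, exactly as in calc_execution_policy.
  const uint64_t iters = (numel - 1) / (block_size * grid_x * unroll) + 1;
  const uint64_t counter_offset = iters * unroll;

  // 2^20 elements / (256 * 512 * 4) rounds up to 2 iterations -> offset of 8.
  assert(iters == 2 && counter_offset == 8);
  return 0;
}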
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/EmbeddingBackwardKernel.cuh ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/cuda/Atomic.cuh>
4
+ #include <ATen/cuda/CUDAContext.h>
5
+ #include <ATen/TensorUtils.h>
6
+
7
+ namespace at {
8
+ namespace native {
9
+
10
+ Tensor embedding_backward_cuda_kernel(
11
+ const Tensor &grad,
12
+ const Tensor &orig_indices,
13
+ const Tensor &sorted_indices,
14
+ const Tensor &count,
15
+ int64_t num_weights,
16
+ int padding_idx = -1,
17
+ bool mode_mean = false,
18
+ const Tensor &offset2bag = Tensor(),
19
+ const Tensor &bag_size = Tensor(),
20
+ const Tensor &per_sample_weights = Tensor());
21
+
22
+ }}
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/LaunchUtils.h ADDED
@@ -0,0 +1,18 @@
1
+ #pragma once
2
+ #include<algorithm>
3
+
4
+ namespace at {
5
+ namespace native {
6
+
7
+ // returns 2**floor(log2(n))
8
+ static int lastPow2(unsigned int n) {
9
+ n |= (n >> 1);
10
+ n |= (n >> 2);
11
+ n |= (n >> 4);
12
+ n |= (n >> 8);
13
+ n |= (n >> 16);
14
+ return std::max<int>(1, n - (n >> 1));
15
+ }
16
+
17
+ } // namespace native
18
+ } // namespace at
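A standalone check of the bit-smearing trick in lastPow2 above: the shifted ORs set every bit below the highest set bit, and n - (n >> 1) then leaves exactly that bit, i.e. 2**floor(log2(n)). The helper below just reproduces that logic outside the header so the assertions compile on the host.

#include <algorithm>
#include <cassert>

// Same logic as lastPow2 above, duplicated here for a self-contained check.
static int last_pow2(unsigned int n) {
  n |= (n >> 1);
  n |= (n >> 2);
  n |= (n >> 4);
  n |= (n >> 8);
  n |= (n >> 16);
  return std::max<int>(1, n - (n >> 1));
}

int main() {
  assert(last_pow2(1) == 1);                 // 2**0
  assert(last_pow2(7) == 4);                 // 2**floor(log2(7))
  assert(last_pow2(1000) == 512);
  assert(last_pow2(1u << 20) == (1 << 20));  // exact powers of two map to themselves
  return 0;
}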
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Math.cuh ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/MiscUtils.h ADDED
@@ -0,0 +1,32 @@
1
+ #pragma once
2
+ #include <ATen/cuda/Exceptions.h>
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #include <ATen/cuda/CUDAConfig.h>
5
+ #include <ATen/cuda/PinnedMemoryAllocator.h>
6
+
7
+ namespace at {
8
+ namespace native {
9
+
10
+ static inline int cuda_int_cast(int64_t value, const char* varname) {
11
+ auto result = static_cast<int>(value);
12
+ TORCH_CHECK(static_cast<int64_t>(result) == value,
13
+ "cuda_int_cast: The value of ", varname, "(", (long long)value,
14
+ ") is too large to fit into an int (", sizeof(int), " bytes)");
15
+ return result;
16
+ }
17
+
18
+ // Creates an array of size elements of type T, backed by pinned memory
19
+ // wrapped in a Storage
20
+ template<class T>
21
+ static inline Storage pin_memory(int64_t size) {
22
+ auto* allocator = cuda::getPinnedMemoryAllocator();
23
+ int64_t adjusted_size = size * sizeof(T);
24
+ return Storage(
25
+ Storage::use_byte_size_t(),
26
+ adjusted_size,
27
+ allocator,
28
+ /*resizable=*/false);
29
+ }
30
+
31
+ } // namespace native
32
+ } // namespace at
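For illustration, the round-trip test inside cuda_int_cast above can be exercised on the host as follows; fits_in_int is a hypothetical stand-in for the TORCH_CHECK condition, not an ATen API.

#include <cassert>
#include <cstdint>
#include <limits>

// Mirrors the check inside cuda_int_cast: the cast must round-trip losslessly.
static bool fits_in_int(int64_t value) {
  const int result = static_cast<int>(value);
  return static_cast<int64_t>(result) == value;
}

int main() {
  assert(fits_in_int(123));
  assert(fits_in_int(std::numeric_limits<int>::max()));
  assert(!fits_in_int(int64_t{1} << 40));  // this is the case the TORCH_CHECK rejects
  return 0;
}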
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/MultiTensorApply.cuh ADDED
@@ -0,0 +1,379 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #include <c10/cuda/CUDAGuard.h>
5
+ #include <ATen/native/cuda/Loops.cuh>
6
+ #include <ATen/native/cuda/MemoryAccess.cuh>
7
+ #include <vector>
8
+
9
+ namespace at::native {
10
+
11
+ namespace {
12
+
13
+ static constexpr int64_t kILP = 4;
14
+ static constexpr int64_t kChunkSize = 65536;
15
+ static constexpr int64_t kBlockSize = 512;
16
+
17
+ // TODO(crcrpar): Add `n>5` for `low prec params & their higher prec copy`
18
+ // TensorListMetadata has to be < 4KB - the limit for kernel launch argument
19
+ static constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30};
20
+ static constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320};
21
+ static constexpr int depth_to_max_tensors_scalarlist[5] = {96, 64, 48, 36, 30};
22
+ static constexpr int depth_to_max_tensors_scalarlist_of_complex_double[2] = {
23
+ 72,
24
+ 60};
25
+
26
+ template <typename T>
27
+ __device__ __forceinline__ bool is_aligned(T* p) {
28
+ return ((uint64_t)p) % (kILP * sizeof(T)) == 0;
29
+ }
30
+
31
+ template <typename T>
32
+ __device__ __forceinline__ void load_store(
33
+ T* dst,
34
+ T* src,
35
+ int64_t dst_offset,
36
+ int64_t src_offset) {
37
+ using LT = at::native::memory::aligned_vector<T, kILP>;
38
+ ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
39
+ }
40
+
41
+ template <int n>
42
+ struct TensorListMetadata {
43
+ const void* addresses[n][depth_to_max_tensors[n - 1]];
44
+ int64_t numel_for_tensor[depth_to_max_tensors[n - 1]];
45
+ unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];
46
+ int block_to_chunk[depth_to_max_blocks[n - 1]];
47
+ int start_tensor_this_launch;
48
+ };
49
+
50
+ template <typename scalar_vals_t, int n>
51
+ struct TensorListScalarListMetadata {
52
+ const void* addresses[n][depth_to_max_tensors_scalarlist[n - 1]];
53
+ int64_t numel_for_tensor[depth_to_max_tensors_scalarlist[n - 1]];
54
+ scalar_vals_t scalar_vals[depth_to_max_tensors_scalarlist[n - 1]];
55
+ unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];
56
+ int block_to_chunk[depth_to_max_blocks[n - 1]];
57
+ };
58
+
59
+ // note(mkozuki): `n` of 1&2 violate the limit of cuda kernel argument size of
60
+ // 4kb with `c10::complex<double>`
61
+ template <>
62
+ struct TensorListScalarListMetadata<c10::complex<double>, 1> {
63
+ const void* addresses[1]
64
+ [depth_to_max_tensors_scalarlist_of_complex_double[0]];
65
+ int64_t
66
+ numel_for_tensor[depth_to_max_tensors_scalarlist_of_complex_double[0]];
67
+ c10::complex<double>
68
+ scalar_vals[depth_to_max_tensors_scalarlist_of_complex_double[0]];
69
+ unsigned char block_to_tensor[depth_to_max_blocks[1 - 1]];
70
+ int block_to_chunk[depth_to_max_blocks[1 - 1]];
71
+ };
72
+
73
+ template <>
74
+ struct TensorListScalarListMetadata<c10::complex<double>, 2> {
75
+ const void* addresses[2]
76
+ [depth_to_max_tensors_scalarlist_of_complex_double[1]];
77
+ int64_t
78
+ numel_for_tensor[depth_to_max_tensors_scalarlist_of_complex_double[1]];
79
+ c10::complex<double>
80
+ scalar_vals[depth_to_max_tensors_scalarlist_of_complex_double[1]];
81
+ unsigned char block_to_tensor[depth_to_max_blocks[2 - 1]];
82
+ int block_to_chunk[depth_to_max_blocks[2 - 1]];
83
+ };
84
+
85
+ // NOTE(crcrpar): This is a conservative resolution to handle `state_steps`
86
+ // each element of which is an `at::Tensor` of 1 element representing the number of
87
+ // `step`s called so far.
88
+ template <int n>
89
+ struct FusedOptimizerTensorListMetadata {
90
+ const void* addresses[n][depth_to_max_tensors[n - 1]];
91
+ int64_t numel_for_tensor[depth_to_max_tensors[n - 1]];
92
+ const void* state_steps_addresses[depth_to_max_tensors_scalarlist[n - 1]];
93
+ unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];
94
+ int block_to_chunk[depth_to_max_blocks[n - 1]];
95
+ int start_tensor_this_launch;
96
+ };
97
+
98
+ template <typename T, typename U, typename... ArgTypes>
99
+ C10_LAUNCH_BOUNDS_1(kBlockSize)
100
+ __global__ void multi_tensor_apply_kernel(
101
+ T tensorListMeta,
102
+ U callable,
103
+ ArgTypes... args) {
104
+ // Hand the chunk information to the user-supplied functor to process however
105
+ // it likes.
106
+ callable(kChunkSize, tensorListMeta, args...);
107
+ }
108
+
109
+ } // namespace
110
+
111
+ // multi_tensor_apply enables horizontal fusion across lists of tensors.
112
+ // For example, whereas you once had a for-loop of a + b = c, where a, b,
113
+ // and c are individual tensors in lists as, bs, and cs, you can now with
114
+ // fewer kernel launches compute as + bs = cs.
115
+ //
116
+ // You can also imagine bs to be a scalar list vs a tensor list.
117
+ //
118
+ // The function below takes in tensor lists, scalars, and a callable and
119
+ // chunks up the computation to launch as few kernels as possible by iterating
120
+ // through every "chunk" in every tensor (thus the nested for loops). In the
121
+ // simplest case, everything gets bundled into just one kernel launch, but
122
+ // due to blocksize constraints, we may need to launch multiple kernels.
123
+ // Each kernel launch is defined by one tensorListMeta construct, which we
124
+ // use to track and reset the necessary metadata for each launch.
125
+ template <int depth, typename scalar_T, typename T, typename... ArgTypes>
126
+ void multi_tensor_apply(
127
+ std::vector<std::vector<at::Tensor>>& tensor_lists,
128
+ at::ArrayRef<Scalar> scalars,
129
+ T callable,
130
+ ArgTypes... args) {
131
+ TORCH_CHECK(
132
+ tensor_lists.size() == depth,
133
+ "Number of tensor lists has to match the depth.");
134
+ const size_t n_tensors = tensor_lists[0].size();
135
+ using scalar_vals_t = typename T::opmath_t;
136
+ TensorListScalarListMetadata<scalar_vals_t, depth> tensorListMeta;
137
+
138
+ int loc_block_info = 0;
139
+ int loc_tensor_info = 0;
140
+ for (size_t t = 0; t < n_tensors; t++) {
141
+ // short-circuit to avoid adding empty tensors to tensorListMeta
142
+ if (tensor_lists[0][t].numel() == 0) {
143
+ continue;
144
+ }
145
+ tensorListMeta.scalar_vals[loc_tensor_info] = scalars[t].to<scalar_T>();
146
+ tensorListMeta.numel_for_tensor[loc_tensor_info] =
147
+ tensor_lists[0][t].numel();
148
+ for (int d = 0; d < depth; d++) {
149
+ tensorListMeta.addresses[d][loc_tensor_info] =
150
+ tensor_lists[d][t].const_data_ptr();
151
+ }
152
+ loc_tensor_info++;
153
+
154
+ // now we enter [chunking territory].
155
+ // we will launch a kernel when EITHER the blocks get filled up OR
156
+ // the tensors get filled up. There will always be at least one block
157
+ // per tensor since the zero-sized ones will not enter the loop, so
158
+ // the nested forloop within represents iterating through the chunks
159
+ // of a single tensor.
160
+ const auto numel = tensor_lists[0][t].numel();
161
+ const auto chunks = numel / kChunkSize + (numel % kChunkSize != 0);
162
+ for (auto chunk = 0; chunk < chunks; chunk++) {
163
+ tensorListMeta.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
164
+ tensorListMeta.block_to_chunk[loc_block_info] = chunk;
165
+ loc_block_info++;
166
+
167
+ // a tensor is not considered full unless all its chunks have been
168
+ // processed
169
+ const bool tensors_full =
170
+ (loc_tensor_info == depth_to_max_tensors_scalarlist[depth - 1] &&
171
+ chunk == chunks - 1);
172
+ const bool blocks_full =
173
+ (loc_block_info == depth_to_max_blocks[depth - 1]);
174
+
175
+ if (tensors_full || blocks_full) {
176
+ multi_tensor_apply_kernel<<<
177
+ loc_block_info,
178
+ kBlockSize,
179
+ 0,
180
+ at::cuda::getCurrentCUDAStream()>>>(
181
+ tensorListMeta, callable, args...);
182
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
183
+
184
+ // Reset.
185
+ loc_block_info = 0;
186
+ // all chunks have already been handled in the kernel
187
+ if (chunk == chunks - 1) {
188
+ loc_tensor_info = 0;
189
+ } else { // blocks were full and tensor chunks remain
190
+ tensorListMeta.numel_for_tensor[0] =
191
+ tensorListMeta.numel_for_tensor[loc_tensor_info - 1];
192
+ tensorListMeta.scalar_vals[0] =
193
+ tensorListMeta.scalar_vals[loc_tensor_info - 1];
194
+ for (int d = 0; d < depth; d++) {
195
+ tensorListMeta.addresses[d][0] =
196
+ tensorListMeta.addresses[d][loc_tensor_info - 1];
197
+ }
198
+ loc_tensor_info = 1;
199
+ }
200
+ }
201
+ }
202
+ }
203
+
204
+ // note: [finishing what we started]
205
+ // if there's remaining work to be done but the tensors/blocks aren't full
206
+ // yet and we are at the end, submit the kernel to do the work!
207
+ if (loc_block_info != 0) {
208
+ multi_tensor_apply_kernel<<<
209
+ loc_block_info,
210
+ kBlockSize,
211
+ 0,
212
+ at::cuda::getCurrentCUDAStream()>>>(tensorListMeta, callable, args...);
213
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
214
+ }
215
+ }
216
+
217
+ template <int depth, typename T, typename... ArgTypes>
218
+ void multi_tensor_apply(
219
+ std::vector<std::vector<at::Tensor>>& tensor_lists,
220
+ T callable,
221
+ ArgTypes... args) {
222
+ TORCH_CHECK(
223
+ tensor_lists.size() == depth,
224
+ "Number of tensor lists has to match the depth.");
225
+ const size_t n_tensors = tensor_lists[0].size();
226
+ TensorListMetadata<depth> tensorListMeta;
227
+ tensorListMeta.start_tensor_this_launch = 0;
228
+
229
+ int loc_block_info = 0;
230
+ int loc_tensor_info = 0;
231
+ for (size_t t = 0; t < n_tensors; t++) {
232
+ // short-circuit to avoid adding empty tensors to tensorListMeta
233
+ if (tensor_lists[0][t].numel() == 0) {
234
+ continue;
235
+ }
236
+ tensorListMeta.numel_for_tensor[loc_tensor_info] =
237
+ tensor_lists[0][t].numel();
238
+ for (int d = 0; d < depth; d++) {
239
+ tensorListMeta.addresses[d][loc_tensor_info] =
240
+ tensor_lists[d][t].const_data_ptr();
241
+ }
242
+ loc_tensor_info++;
243
+
244
+ // see note: [chunking territory].
245
+ const auto numel = tensor_lists[0][t].numel();
246
+ const auto chunks = numel / kChunkSize + (numel % kChunkSize != 0);
247
+ for (auto chunk = 0; chunk < chunks; chunk++) {
248
+ tensorListMeta.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
249
+ tensorListMeta.block_to_chunk[loc_block_info] = chunk;
250
+ loc_block_info++;
251
+
252
+ const bool tensors_full =
253
+ (loc_tensor_info == depth_to_max_tensors[depth - 1] &&
254
+ chunk == chunks - 1);
255
+ const bool blocks_full =
256
+ (loc_block_info == depth_to_max_blocks[depth - 1]);
257
+
258
+ if (tensors_full || blocks_full) {
259
+ multi_tensor_apply_kernel<<<
260
+ loc_block_info,
261
+ kBlockSize,
262
+ 0,
263
+ at::cuda::getCurrentCUDAStream()>>>(
264
+ tensorListMeta, callable, args...);
265
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
266
+
267
+ // Reset.
268
+ loc_block_info = 0;
269
+ if (chunk == chunks - 1) {
270
+ loc_tensor_info = 0;
271
+ tensorListMeta.start_tensor_this_launch = t + 1;
272
+ } else {
273
+ tensorListMeta.numel_for_tensor[0] =
274
+ tensorListMeta.numel_for_tensor[loc_tensor_info - 1];
275
+ for (int d = 0; d < depth; d++) {
276
+ tensorListMeta.addresses[d][0] =
277
+ tensorListMeta.addresses[d][loc_tensor_info - 1];
278
+ }
279
+ loc_tensor_info = 1;
280
+ tensorListMeta.start_tensor_this_launch = t;
281
+ }
282
+ }
283
+ }
284
+ }
285
+
286
+ // see note: [finishing what we started]
287
+ if (loc_block_info != 0) {
288
+ multi_tensor_apply_kernel<<<
289
+ loc_block_info,
290
+ kBlockSize,
291
+ 0,
292
+ at::cuda::getCurrentCUDAStream()>>>(tensorListMeta, callable, args...);
293
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
294
+ }
295
+ }
296
+
297
+ template <int depth, typename T, typename... ArgTypes>
298
+ void multi_tensor_apply_for_fused_optimizer(
299
+ std::vector<std::vector<at::Tensor>>& tensor_lists,
300
+ at::TensorList state_steps,
301
+ T callable,
302
+ ArgTypes... args) {
303
+ TORCH_CHECK(
304
+ tensor_lists.size() == depth,
305
+ "Number of tensor lists has to match the depth");
306
+ const auto num_tensors = tensor_lists[0].size();
307
+ FusedOptimizerTensorListMetadata<depth> tensorListMeta;
308
+
309
+ int loc_block_info = 0;
310
+ int loc_tensor_info = 0;
311
+ for (const auto& tensor_index : c10::irange(num_tensors)) {
312
+ // short-circuit to avoid adding empty tensors to tensorListMeta
313
+ if (tensor_lists[0][tensor_index].numel() == 0) {
314
+ continue;
315
+ }
316
+ tensorListMeta.state_steps_addresses[loc_tensor_info] =
317
+ state_steps[tensor_index].const_data_ptr();
318
+ tensorListMeta.numel_for_tensor[loc_tensor_info] =
319
+ tensor_lists[0][tensor_index].numel();
320
+ for (const auto& d : c10::irange(depth)) {
321
+ tensorListMeta.addresses[d][loc_tensor_info] =
322
+ tensor_lists[d][tensor_index].const_data_ptr();
323
+ }
324
+ loc_tensor_info++;
325
+
326
+ // see above note: [chunking territory]
327
+ const auto numel = tensor_lists[0][tensor_index].numel();
328
+ const auto chunks = numel / kChunkSize + (numel % kChunkSize != 0);
329
+ TORCH_CHECK(chunks > -1);
330
+ for (const auto& chunk : c10::irange(chunks)) {
331
+ tensorListMeta.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
332
+ tensorListMeta.block_to_chunk[loc_block_info] = chunk;
333
+ loc_block_info++;
334
+
335
+ const auto tensor_full =
336
+ (loc_tensor_info == depth_to_max_tensors[depth - 1] &&
337
+ chunk == chunks - 1);
338
+ const auto blocks_full = loc_block_info == depth_to_max_blocks[depth - 1];
339
+
340
+ if (tensor_full || blocks_full) {
341
+ multi_tensor_apply_kernel<<<
342
+ loc_block_info,
343
+ kBlockSize,
344
+ 0,
345
+ at::cuda::getCurrentCUDAStream()>>>(
346
+ tensorListMeta, callable, args...);
347
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
348
+
349
+ // Reset.
350
+ loc_block_info = 0;
351
+ if (chunk == chunks - 1) {
352
+ loc_tensor_info = 0;
353
+ } else {
354
+ tensorListMeta.numel_for_tensor[0] =
355
+ tensorListMeta.numel_for_tensor[loc_tensor_info - 1];
356
+ tensorListMeta.state_steps_addresses[0] =
357
+ tensorListMeta.state_steps_addresses[loc_tensor_info - 1];
358
+ for (const auto& d : c10::irange(depth)) {
359
+ tensorListMeta.addresses[d][0] =
360
+ tensorListMeta.addresses[d][loc_tensor_info - 1];
361
+ }
362
+ loc_tensor_info = 1;
363
+ }
364
+ }
365
+ }
366
+ }
367
+
368
+ // see above note: [finishing what we started]
369
+ if (loc_block_info != 0) {
370
+ multi_tensor_apply_kernel<<<
371
+ loc_block_info,
372
+ kBlockSize,
373
+ 0,
374
+ at::cuda::getCurrentCUDAStream()>>>(tensorListMeta, callable, args...);
375
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
376
+ }
377
+ }
378
+
379
+ } // namespace at::native
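A host-side sketch of the chunking bookkeeping shared by the multi_tensor_apply overloads above: each non-empty tensor contributes ceil(numel / kChunkSize) chunks, and a kernel launch is flushed whenever the per-launch block table fills up. The tensor sizes below are made-up example values, and only the launch counting is modeled, not the metadata resets.

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const int64_t kChunkSize = 65536;   // matches the constant above
  const int max_blocks = 320;         // depth_to_max_blocks[...] for every depth

  std::vector<int64_t> numels = {100, 65536, 65537, 10000000};
  int blocks = 0, launches = 0;
  for (int64_t numel : numels) {
    const int64_t chunks = numel / kChunkSize + (numel % kChunkSize != 0);
    for (int64_t c = 0; c < chunks; ++c) {
      if (++blocks == max_blocks) { ++launches; blocks = 0; }  // table full: flush
    }
  }
  if (blocks != 0) ++launches;        // "finishing what we started"

  // 1 + 1 + 2 + 153 = 157 chunks, which fits in a single 157-block launch.
  assert(launches == 1 && blocks == 157);
  return 0;
}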
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/PersistentSoftmax.cuh ADDED
@@ -0,0 +1,401 @@
1
+ #pragma once
2
+
3
+ #include <cfloat>
4
+ #include <limits>
5
+ #include <stdint.h>
6
+ #include <cuda_fp16.h>
7
+ #include <c10/macros/Macros.h>
8
+
9
+ #include <ATen/cuda/DeviceUtils.cuh>
10
+
11
+ namespace {
12
+
13
+ int log2_ceil(int value) {
14
+ int log2_value = 0;
15
+ while ((1 << log2_value) < value) ++log2_value;
16
+ return log2_value;
17
+ }
18
+
19
+ template<typename T>
20
+ struct Add {
21
+ __device__ __forceinline__ T operator()(T a, T b) const {
22
+ return a + b;
23
+ }
24
+ };
25
+
26
+ template<typename T>
27
+ struct Max {
28
+ __device__ __forceinline__ T operator()(T a, T b) const {
29
+ return a < b ? b : a;
30
+ }
31
+ };
32
+
33
+ template <typename acc_t, int WARP_BATCH, int WARP_SIZE, template<typename> class ReduceOp>
34
+ __device__ __forceinline__ void warp_reduce(acc_t* sum) {
35
+ ReduceOp<acc_t> r;
36
+ #pragma unroll
37
+ for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
38
+ #pragma unroll
39
+ for (int i = 0; i < WARP_BATCH; ++i) {
40
+ acc_t b = WARP_SHFL_XOR(sum[i], offset, WARP_SIZE);
41
+ sum[i] = r(sum[i], b);
42
+ }
43
+ }
44
+ }
45
+
46
+ // The softmax_warp_* methods perform softmax forward and backward propagation on samples spanning the fast dimension.
47
+ // Each sample contains element_count scalar elements. element_count can be any integer value <= 1024.
48
+ // The template arguments have the following meaning:
49
+ // One "WARP" works on one "BATCH". One "BATCH" contains "WARP_BATCH" samples.
50
+ // WARP_BATCH is equal to 1 when element_count is large, and > 1 when element_count is small.
51
+ // A "WARP" contains "C10_WARP_SIZE" threads; these threads are guaranteed to belong to the same warp.
52
+ // This is important because it means only __shfl_ instructions are required for reductions.
53
+ // Note that this means WARP_SIZE must be a power of two and <= architecture warp size.
54
+ // CUDA warp size is 32 for all existing GPU architectures, but there is no guarantee this will not change for future arch.
55
+ // ROCm warp size is 64 for all currently ROCm-supported GPU architectures, but this may change for future archs.
56
+ // is_log_softmax is a flag indicating whether SoftMax or LogSoftMax should be computed.
57
+ // is_masked is a flag indicating whether SoftMax or MaskedSoftMax should be computed.
58
+ // The template can be instantiated with any floating point type for the type arguments input_t, output_t and acc_t.
59
+ // This allows SoftMax to be fused with a cast immediately following the SoftMax.
60
+ // The mask should have the same shape as input, with a boolean indicating whether the value is masked.
61
+ // The head_chunk_size is only used for transformer mask softmax and equals H * D * D.
62
+ // For instance:
63
+ // input_t=half, acc_t=float, output_t=half => read half tensor, float accumulators, write half tensor.
64
+ // input_t=half, acc_t=float, output_t=float => read half tensor, float accumulators, write float tensor.
65
+ // input_t=float, acc_t=float, output_t=half => read float tensor, float accumulators, write half tensor.
66
+
67
+ template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax, bool is_masked>
68
+ __global__ void softmax_warp_forward(output_t *dst, const input_t *src, int batch_size, int stride, int element_count, const bool *mask = nullptr, const int head_chunk_size = -1, bool is_transformer_mask = false)
69
+ {
70
+ // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and warp_size of method warp_softmax_forward_kernel.
71
+ constexpr int next_power_of_two = 1 << log2_elements;
72
+ constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
73
+ constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
74
+ constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
75
+
76
+ int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
77
+
78
+ // batch_size might not be a multiple of WARP_BATCH. Check how
79
+ // many batches have to be computed within this WARP.
80
+ int local_batches = batch_size - first_batch;
81
+ if (local_batches > WARP_BATCH)
82
+ local_batches = WARP_BATCH;
83
+
84
+ // there might be multiple batches per warp. compute the index within the batch
85
+ int local_idx = threadIdx.x;
86
+ int idx_offset = first_batch * stride + local_idx;
87
+
88
+ src += idx_offset;
89
+ dst += idx_offset;
90
+
91
+ if (is_transformer_mask) {
92
+ mask += ((first_batch * stride) / head_chunk_size) * stride + local_idx;
93
+ } else {
94
+ mask += idx_offset;
95
+ }
96
+ // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop,
97
+ // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep
98
+ // the nested loops.
99
+ // This should have no impact on performance because the loops are unrolled anyway.
100
+
101
+ // load data from global memory
102
+ acc_t elements[WARP_BATCH][WARP_ITERATIONS];
103
+ for (int i = 0; i < WARP_BATCH; ++i) {
104
+ int batch_element_count = (i >= local_batches) ? 0 : element_count;
105
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
106
+ int element_index = local_idx + it * WARP_SIZE;
107
+ if (element_index < batch_element_count) {
108
+ elements[i][it] = src[i*element_count+it*WARP_SIZE];
109
+ } else {
110
+ elements[i][it] = -std::numeric_limits<acc_t>::infinity();
111
+ }
112
+ }
113
+ }
114
+
115
+ // compute max_value
116
+ acc_t max_value[WARP_BATCH];
117
+ #pragma unroll
118
+ for (int i = 0; i < WARP_BATCH; ++i) {
119
+ int batch_element_count = (i >= local_batches) ? 0 : element_count;
120
+ bool is_meaningful_max = false;
121
+ max_value[i] = elements[i][0];
122
+ #pragma unroll
123
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
124
+ if (is_masked) {
125
+ int idx = it*WARP_SIZE;
126
+ if ((idx + local_idx) < batch_element_count) {
127
+ if (!is_transformer_mask) {
128
+ idx += i*element_count;
129
+ }
130
+ if (!mask[idx]) {
131
+ max_value[i] = (is_meaningful_max && max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
132
+ is_meaningful_max = true;
133
+ }
134
+ }
135
+ } else {
136
+ max_value[i] = max_value[i] > elements[i][it] ? max_value[i] : elements[i][it];
137
+ }
138
+ }
139
+ if (is_masked) {
140
+ if (!is_meaningful_max) {
141
+ max_value[i] = -std::numeric_limits<acc_t>::infinity();
142
+ }
143
+ }
144
+ }
145
+ warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);
146
+
147
+ acc_t sum[WARP_BATCH] { 0.0f };
148
+ #pragma unroll
149
+ for (int i = 0; i < WARP_BATCH; ++i) {
150
+ int batch_element_count = (i >= local_batches) ? 0 : element_count;
151
+ #pragma unroll
152
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
153
+ if (!is_masked) {
154
+ if (is_log_softmax) {
155
+ sum[i] += std::exp(elements[i][it] - max_value[i]);
156
+ } else {
157
+ elements[i][it] = std::exp(elements[i][it] - max_value[i]);
158
+ sum[i] += elements[i][it];
159
+ }
160
+ } else {
161
+ int idx = it*WARP_SIZE;
162
+ bool valid = (idx + local_idx) < batch_element_count;
163
+ if (!is_transformer_mask) {
164
+ idx += i*element_count;
165
+ }
166
+ if (valid) {
167
+ if (!mask[idx]) {
168
+ if (is_log_softmax) {
169
+ sum[i] += std::exp(elements[i][it] - max_value[i]);
170
+ } else {
171
+ elements[i][it] = std::exp(elements[i][it] - max_value[i]);
172
+ sum[i] += elements[i][it];
173
+ }
174
+ } else {
175
+ if (!is_log_softmax) {
176
+ // Masked values are treated as -infinity, and std::exp(-infinity) is 0.
177
+ elements[i][it] = 0;
178
+ }
179
+ }
180
+ } else {
181
+ if (!is_log_softmax) {
182
+ elements[i][it] = 0.;
183
+ }
184
+ }
185
+ }
186
+ }
187
+ }
188
+ warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
189
+
190
+ // store result
191
+ #pragma unroll
192
+ for (int i = 0; i < WARP_BATCH; ++i) {
193
+ if (i >= local_batches)
194
+ break;
195
+ if (is_log_softmax) sum[i] = std::log(sum[i]);
196
+ #pragma unroll
197
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
198
+ int element_index = local_idx + it * WARP_SIZE;
199
+ if (element_index < element_count) {
200
+ if (is_log_softmax) {
201
+ dst[i*element_count+it*WARP_SIZE] = elements[i][it] - max_value[i] - sum[i];
202
+ } else if (sum[i] == 0) {
203
+ dst[i*element_count+it*WARP_SIZE] = std::numeric_limits<acc_t>::quiet_NaN();
204
+ } else {
205
+ dst[i*element_count+it*WARP_SIZE] = elements[i][it] / sum[i];
206
+ }
207
+ } else {
208
+ break;
209
+ }
210
+ }
211
+ }
212
+ }
213
+
214
+ template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax, bool is_masked>
215
+ __global__ void softmax_warp_backward(output_t *gradInput, const input_t *grad, const input_t *output, int batch_size, int stride, int element_count, const bool *mask = nullptr)
216
+ {
217
+ // WARP_SIZE and WARP_BATCH must match the warp_size and batches_per_warp values computed in dispatch_softmax_backward below.
218
+ constexpr int next_power_of_two = 1 << log2_elements;
219
+ constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
220
+ constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
221
+ constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
222
+
223
+ int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
224
+
225
+ // batch_size might not be a multiple of WARP_BATCH. Check how
226
+ // many batches have to be computed within this WARP.
227
+ int local_batches = batch_size - first_batch;
228
+ if (local_batches > WARP_BATCH)
229
+ local_batches = WARP_BATCH;
230
+
231
+ // there might be multiple batches per warp. compute the index within the batch
232
+ int local_idx = threadIdx.x % WARP_SIZE;
233
+
234
+ // the first element to process by the current thread
235
+ int thread_offset = first_batch * stride + local_idx;
236
+ grad += thread_offset;
237
+ output += thread_offset;
238
+ gradInput += thread_offset;
239
+ if (is_masked) {
240
+ mask += thread_offset;
241
+ }
242
+
243
+ // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop,
244
+ // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep
245
+ // the nested loops.
246
+ // This should have no impact on performance because the loops are unrolled anyway.
247
+
248
+ // load data from global memory
249
+ acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS];
250
+ acc_t output_reg[WARP_BATCH][WARP_ITERATIONS];
251
+ for (int i = 0; i < WARP_BATCH; ++i) {
252
+ int batch_element_count = (i >= local_batches) ? 0 : element_count;
253
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
254
+ int element_index = local_idx + it * WARP_SIZE;
255
+ if (element_index < batch_element_count) {
256
+ grad_reg[i][it] = grad[i*element_count+it*WARP_SIZE];
257
+ output_reg[i][it] = output[i*element_count+it*WARP_SIZE];
258
+ } else {
259
+ grad_reg[i][it] = acc_t(0);
260
+ output_reg[i][it] = acc_t(0);
261
+ }
262
+ }
263
+ }
264
+
265
+ acc_t sum[WARP_BATCH] { 0.0f };
266
+ #pragma unroll
267
+ for (int i = 0; i < WARP_BATCH; ++i) {
268
+ #pragma unroll
269
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
270
+ if (!is_masked || !mask[i*element_count+it*WARP_SIZE]) {
271
+ sum[i] += grad_reg[i][it];
272
+ }
273
+ }
274
+ }
275
+ warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
276
+
277
+ // store result
278
+ #pragma unroll
279
+ for (int i = 0; i < WARP_BATCH; ++i) {
280
+ if (i >= local_batches)
281
+ break;
282
+ #pragma unroll
283
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
284
+ int element_index = local_idx + it * WARP_SIZE;
285
+ if (element_index < element_count) {
286
+ if (is_masked && mask[i*element_count+it*WARP_SIZE]) {
287
+ gradInput[i*element_count+it*WARP_SIZE] = 0;
288
+ }
289
+ // compute gradients
290
+ else if (is_log_softmax) {
291
+ gradInput[i*element_count+it*WARP_SIZE] = (grad_reg[i][it] - std::exp(output_reg[i][it]) * sum[i]);
292
+ } else {
293
+ gradInput[i*element_count+it*WARP_SIZE] = (grad_reg[i][it] - output_reg[i][it] * sum[i]);
294
+ }
295
+ }
296
+ }
297
+ }
298
+ }
299
+
300
+ } // end of anonymous namespace
301
+
302
+ template<typename input_t, typename output_t, typename acc_t, bool is_log_softmax, bool is_masked>
303
+ void dispatch_softmax_forward(output_t *dst, const input_t *src, int softmax_elements, int softmax_elements_stride, int batch_count, const bool *mask = nullptr, int chunk_size = -1, bool is_transformer_mask = false)
304
+ {
305
+ TORCH_INTERNAL_ASSERT( softmax_elements >= 0 && softmax_elements <= 1024 );
306
+ if (softmax_elements == 0) {
307
+ return;
308
+ } else {
309
+ int log2_elements = log2_ceil(softmax_elements);
310
+ const int next_power_of_two = 1 << log2_elements;
311
+
312
+ // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
313
+ int warp_size = at::cuda::warp_size();
314
+ warp_size = (next_power_of_two < warp_size) ? next_power_of_two : warp_size;
315
+
316
+ // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
317
+ int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
318
+
319
+ // use 128 threads per block to maximize gpu utilization
320
+ constexpr int threads_per_block = 128;
321
+
322
+ int warps_per_block = (threads_per_block / warp_size);
323
+ int batches_per_block = warps_per_block * batches_per_warp;
324
+ int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
325
+ dim3 threads(warp_size, warps_per_block, 1);
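+ // Worked example of this launch configuration (assuming a CUDA warp size of 32):
+ //   softmax_elements = 512, batch_count = 1000
+ //   => log2_elements = 9, next_power_of_two = 512, warp_size = 32, batches_per_warp = 1,
+ //      warps_per_block = 128 / 32 = 4, batches_per_block = 4,
+ //      blocks = (1000 + 4 - 1) / 4 = 250, threads = dim3(32, 4, 1).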
326
+ // Launch code would be more elegant if C++ supported FOR CONSTEXPR
327
+ switch (log2_elements) {
328
+ #define LAUNCH_SOFTMAX_WARP_FORWARD(L2E) case L2E: \
329
+ softmax_warp_forward<input_t, output_t, acc_t, L2E, is_log_softmax, is_masked> \
330
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, \
331
+ src, batch_count, softmax_elements_stride, softmax_elements, mask, chunk_size, is_transformer_mask); \
332
+ C10_CUDA_KERNEL_LAUNCH_CHECK(); \
333
+ break;
334
+
335
+ LAUNCH_SOFTMAX_WARP_FORWARD(0); // 1
336
+ LAUNCH_SOFTMAX_WARP_FORWARD(1); // 2
337
+ LAUNCH_SOFTMAX_WARP_FORWARD(2); // 4
338
+ LAUNCH_SOFTMAX_WARP_FORWARD(3); // 8
339
+ LAUNCH_SOFTMAX_WARP_FORWARD(4); // 16
340
+ LAUNCH_SOFTMAX_WARP_FORWARD(5); // 32
341
+ LAUNCH_SOFTMAX_WARP_FORWARD(6); // 64
342
+ LAUNCH_SOFTMAX_WARP_FORWARD(7); // 128
343
+ LAUNCH_SOFTMAX_WARP_FORWARD(8); // 256
344
+ LAUNCH_SOFTMAX_WARP_FORWARD(9); // 512
345
+ LAUNCH_SOFTMAX_WARP_FORWARD(10); // 1024
346
+ default:
347
+ break;
348
+ }
349
+ }
350
+ }
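+ // A minimal, hypothetical host-side call of the dispatcher above, assuming float in/out,
+ // no masking, and contiguous rows of length dim_size (dst, src, dim_size and outer_size
+ // are illustrative names, not part of this header):
+ //
+ //   dispatch_softmax_forward<float, float, float, /*is_log_softmax=*/false, /*is_masked=*/false>(
+ //       dst, src, /*softmax_elements=*/dim_size, /*softmax_elements_stride=*/dim_size,
+ //       /*batch_count=*/outer_size);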
351
+
352
+ template<typename input_t, typename output_t, typename acc_t, bool is_log_softmax, bool is_masked>
353
+ void dispatch_softmax_backward(output_t *grad_input, const input_t *grad, const input_t *output, int softmax_elements, int softmax_elements_stride, int batch_count, const bool *mask = nullptr)
354
+ {
355
+ TORCH_INTERNAL_ASSERT( softmax_elements >= 0 && softmax_elements <= 1024 );
356
+ if (softmax_elements == 0) {
357
+ return;
358
+ } else {
359
+ int log2_elements = log2_ceil(softmax_elements);
360
+ const int next_power_of_two = 1 << log2_elements;
361
+
362
+ // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward.
363
+ int warp_size = at::cuda::warp_size();
364
+ warp_size = (next_power_of_two < warp_size) ? next_power_of_two : warp_size;
365
+
366
+ // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward.
367
+ int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
368
+
369
+ // use 128 threads per block to maximize gpu utilization
370
+ constexpr int threads_per_block = 128;
371
+
372
+ int warps_per_block = (threads_per_block / warp_size);
373
+ int batches_per_block = warps_per_block * batches_per_warp;
374
+ int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
375
+ dim3 threads(warp_size, warps_per_block, 1);
376
+ // Launch code would be more elegant if C++ supported FOR CONSTEXPR
377
+ switch (log2_elements) {
378
+ #define LAUNCH_SOFTMAX_WARP_BACKWARD(L2E) case L2E: \
379
+ softmax_warp_backward<input_t, output_t, acc_t, L2E, is_log_softmax, is_masked> \
380
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> \
381
+ (grad_input, grad, output, batch_count, softmax_elements_stride, \
382
+ softmax_elements, mask); \
383
+ C10_CUDA_KERNEL_LAUNCH_CHECK(); \
384
+ break;
385
+
386
+ LAUNCH_SOFTMAX_WARP_BACKWARD(0); // 1
387
+ LAUNCH_SOFTMAX_WARP_BACKWARD(1); // 2
388
+ LAUNCH_SOFTMAX_WARP_BACKWARD(2); // 4
389
+ LAUNCH_SOFTMAX_WARP_BACKWARD(3); // 8
390
+ LAUNCH_SOFTMAX_WARP_BACKWARD(4); // 16
391
+ LAUNCH_SOFTMAX_WARP_BACKWARD(5); // 32
392
+ LAUNCH_SOFTMAX_WARP_BACKWARD(6); // 64
393
+ LAUNCH_SOFTMAX_WARP_BACKWARD(7); // 128
394
+ LAUNCH_SOFTMAX_WARP_BACKWARD(8); // 256
395
+ LAUNCH_SOFTMAX_WARP_BACKWARD(9); // 512
396
+ LAUNCH_SOFTMAX_WARP_BACKWARD(10); // 1024
397
+ default:
398
+ break;
399
+ }
400
+ }
401
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/Sorting.h ADDED
@@ -0,0 +1,18 @@
1
+ #pragma once
2
+ #include <cstdint>
3
+
4
+ namespace at {
5
+ class TensorBase;
6
+ }
7
+
8
+ namespace at {
9
+ namespace native {
10
+
11
+ void launch_kthvalue_kernel(
12
+ const TensorBase &values, const TensorBase &indices,
13
+ const TensorBase &self, int64_t dim, int64_t k);
14
+ void launch_median_kernel(
15
+ const TensorBase &vals, const TensorBase &inds,
16
+ const TensorBase &in, int64_t dim, bool ignore_nan);
17
+
18
+ }} // namespace at::native
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/SortingCommon.cuh ADDED
@@ -0,0 +1,193 @@
1
+ #pragma once
2
+ #include <ATen/core/TensorBase.h>
3
+ #include <ATen/ceil_div.h>
4
+ #include <ATen/NumericUtils.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <stdlib.h>
7
+ #include <ATen/cuda/detail/IndexUtils.cuh>
8
+ #include <ATen/cuda/detail/TensorInfo.cuh>
9
+
10
+ namespace at {
11
+ namespace native {
12
+
13
+ // Is this questionable namespace pollution?
14
+ #if defined(USE_ROCM)
15
+ constexpr int MAX_BLOCK_SIZE = 256;
16
+
17
+ #else
18
+ constexpr int MAX_BLOCK_SIZE = 1024;
19
+ #endif
20
+
21
+ // Maximum size per grid dimension that we assume (compute capability >= 2.0)
22
+ constexpr int64_t MAX_GRID_SIZE = 65535LL;
23
+
24
+ static bool getGridFromTiles(int64_t gridTiles, dim3& grid) {
25
+ if (gridTiles > MAX_GRID_SIZE * MAX_GRID_SIZE * MAX_GRID_SIZE) {
26
+ return false;
27
+ }
28
+
29
+ int64_t gridX = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
30
+ int64_t gridY = 1;
31
+ int64_t gridZ = 1;
32
+
33
+ if (gridTiles > MAX_GRID_SIZE) {
34
+ gridTiles = ceil_div(gridTiles, MAX_GRID_SIZE);
35
+ gridY = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
36
+
37
+ if (gridTiles > MAX_GRID_SIZE) {
38
+ gridTiles = ceil_div(gridTiles, MAX_GRID_SIZE);
39
+ gridZ = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
40
+ }
41
+ }
42
+
43
+ grid = dim3(gridX, gridY, gridZ);
44
+ return true;
45
+ }
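+ // For example, gridTiles = 100000 with MAX_GRID_SIZE = 65535 yields
+ // gridX = 65535, then ceil_div(100000, 65535) = 2, so grid = dim3(65535, 2, 1).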
46
+
47
+ template <typename scalar_t, bool handleNaN = false>
48
+ struct GTOp {
49
+ __device__ bool operator()(const scalar_t& lhs, const scalar_t& rhs) const {
50
+ return (handleNaN && at::_isnan(lhs) && !at::_isnan(rhs)) || (lhs > rhs);
51
+ }
52
+ };
53
+
54
+ template <typename scalar_t, bool handleNaN = false>
55
+ struct LTOp {
56
+ __device__ bool operator()(const scalar_t& lhs, const scalar_t& rhs) const {
57
+ return (handleNaN && at::_isnan(rhs) && !at::_isnan(lhs)) || (lhs < rhs);
58
+ }
59
+ };
60
+
61
+ template <typename index_t>
62
+ __device__ __forceinline__ index_t getLinearBlockId() {
63
+ return blockIdx.z * gridDim.y * gridDim.x + blockIdx.y * gridDim.x +
64
+ blockIdx.x;
65
+ }
66
+
67
+ // For slice sorting in Thrust; extracts a slice index from a linear
68
+ // index and uses that for comparison
69
+ struct SliceComp {
70
+ SliceComp(int64_t size) : sliceSize(size) {}
71
+
72
+ __device__ bool operator()(const int64_t& a, const int64_t& b) const {
73
+ // Since the slices are guaranteed to be innermost,
74
+ // the segment index is obtained via int64_t division by sliceSize
75
+ int64_t segA = a / sliceSize;
76
+ int64_t segB = b / sliceSize;
77
+ return segA < segB;
78
+ }
79
+
80
+ const int64_t sliceSize;
81
+ };
82
+
83
+ // For sorting in Thrust; extracts a within-slice index from a linear index
84
+ struct GlobalIndexToPerSliceIndex {
85
+ GlobalIndexToPerSliceIndex(int64_t size) : sliceSize(size) {}
86
+
87
+ __device__ inline void operator()(int64_t& v) const {
88
+ v = v % sliceSize;
89
+ }
90
+
91
+ const int64_t sliceSize;
92
+ };
93
+
94
+ // Returns 2^(ceil(lg(n))), from the Stanford bit twiddling hacks
95
+ static uint64_t nextHighestPowerOf2(uint64_t n) {
96
+ n--;
97
+ n |= n >> 1;
98
+ n |= n >> 2;
99
+ n |= n >> 4;
100
+ n |= n >> 8;
101
+ n |= n >> 16;
102
+ #ifndef _MSC_VER
103
+ n |= n >> 32;
104
+ #endif
105
+ n++;
106
+
107
+ return n;
108
+ }
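+ // For example, nextHighestPowerOf2(5): n-- gives 4 (0b100); the shift-or cascade
+ // fills in the lower bits to give 7 (0b111); n++ then yields 8. Powers of two map to
+ // themselves, e.g. nextHighestPowerOf2(8) == 8.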
109
+
110
+
111
+ // WARNING: This function assumes input tensors are contiguous
112
+ template <typename scalar_t, typename index_t, typename Launcher>
113
+ void run_launcher(
114
+ const TensorBase &values,
115
+ const TensorBase &indices,
116
+ const TensorBase &self,
117
+ int64_t dim,
118
+ Launcher l) {
119
+ auto self_info = cuda::detail::getTensorInfo<const scalar_t, index_t>(self);
120
+ auto values_info = cuda::detail::getTensorInfo<scalar_t, index_t>(values);
121
+ auto indices_info = cuda::detail::getTensorInfo<int64_t, index_t>(indices);
122
+
123
+ int64_t slice_size = self.size(dim);
124
+ /* We use these structures solely to find the offset to */
125
+ /* each slice we are operating on */
126
+ self_info.reduceDim(dim);
127
+ values_info.reduceDim(dim);
128
+ indices_info.reduceDim(dim);
129
+
130
+ /* Collapse all other dims */
131
+ int collapse_self_dim = self_info.collapseDims(dim);
132
+ int collapse_values_dim = values_info.collapseDims(dim);
133
+ int collapse_indices_dim = indices_info.collapseDims(dim);
134
+
135
+ int64_t num_slices = 1;
136
+ for (int i = 0; i < self_info.dims; ++i) {
137
+ num_slices *= self_info.sizes[i];
138
+ }
139
+
140
+ /* This is used as a template parameter to calculate indices. */
141
+ /* We only specialize it if all collapsed dim sizes are the */
142
+ /* same; otherwise, we use -1 which is the specialization */
143
+ /* parameter for arbitrary dimensions */
144
+ int all_dims = self_info.dims;
145
+ if (values_info.dims != all_dims || indices_info.dims != all_dims) {
146
+ all_dims = -1;
147
+ }
148
+
149
+ if (all_dims == 1) {
150
+ l.template launch<scalar_t, index_t, 1>(
151
+ values_info,
152
+ collapse_values_dim,
153
+ indices_info,
154
+ collapse_indices_dim,
155
+ self_info,
156
+ collapse_self_dim,
157
+ num_slices,
158
+ slice_size);
159
+ } else if (all_dims == 2) {
160
+ l.template launch<scalar_t, index_t, 2>(
161
+ values_info,
162
+ collapse_values_dim,
163
+ indices_info,
164
+ collapse_indices_dim,
165
+ self_info,
166
+ collapse_self_dim,
167
+ num_slices,
168
+ slice_size);
169
+ } else if (all_dims == 3) {
170
+ l.template launch<scalar_t, index_t, 3>(
171
+ values_info,
172
+ collapse_values_dim,
173
+ indices_info,
174
+ collapse_indices_dim,
175
+ self_info,
176
+ collapse_self_dim,
177
+ num_slices,
178
+ slice_size);
179
+ } else {
180
+ l.template launch<scalar_t, index_t, -1>(
181
+ values_info,
182
+ collapse_values_dim,
183
+ indices_info,
184
+ collapse_indices_dim,
185
+ self_info,
186
+ collapse_self_dim,
187
+ num_slices,
188
+ slice_size);
189
+ }
190
+ }
191
+
192
+ } // namespace native
193
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/fused_adamw_amsgrad_impl.cuh ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+
4
+ namespace at {
5
+ namespace native {
6
+
7
+ void _fused_adamw_amsgrad_cuda_impl_(
8
+ at::TensorList params,
9
+ at::TensorList grads,
10
+ at::TensorList exp_avgs,
11
+ at::TensorList exp_avg_sqs,
12
+ at::TensorList max_exp_avg_sqs,
13
+ at::TensorList state_steps,
14
+ const double lr,
15
+ const double beta1,
16
+ const double beta2,
17
+ const double weight_decay,
18
+ const double eps,
19
+ const bool maximize,
20
+ const c10::optional<at::Tensor>& grad_scale,
21
+ const c10::optional<at::Tensor>& found_inf);
22
+
23
+ void _fused_adamw_amsgrad_cuda_impl_(
24
+ at::TensorList params,
25
+ at::TensorList grads,
26
+ at::TensorList exp_avgs,
27
+ at::TensorList exp_avg_sqs,
28
+ at::TensorList max_exp_avg_sqs,
29
+ at::TensorList state_steps,
30
+ const at::Tensor& lr,
31
+ const double beta1,
32
+ const double beta2,
33
+ const double weight_decay,
34
+ const double eps,
35
+ const bool maximize,
36
+ const c10::optional<at::Tensor>& grad_scale,
37
+ const c10::optional<at::Tensor>& found_inf);
38
+
39
+ } // namespace native
40
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/fused_adamw_impl.cuh ADDED
@@ -0,0 +1,38 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+
4
+ namespace at {
5
+ namespace native {
6
+
7
+ void _fused_adamw_cuda_impl_(
8
+ at::TensorList params,
9
+ at::TensorList grads,
10
+ at::TensorList exp_avgs,
11
+ at::TensorList exp_avg_sqs,
12
+ at::TensorList state_steps,
13
+ const double lr,
14
+ const double beta1,
15
+ const double beta2,
16
+ const double weight_decay,
17
+ const double eps,
18
+ const bool maximize,
19
+ const c10::optional<at::Tensor>& grad_scale,
20
+ const c10::optional<at::Tensor>& found_inf);
21
+
22
+ void _fused_adamw_cuda_impl_(
23
+ at::TensorList params,
24
+ at::TensorList grads,
25
+ at::TensorList exp_avgs,
26
+ at::TensorList exp_avg_sqs,
27
+ at::TensorList state_steps,
28
+ const at::Tensor& lr,
29
+ const double beta1,
30
+ const double beta2,
31
+ const double weight_decay,
32
+ const double eps,
33
+ const bool maximize,
34
+ const c10::optional<at::Tensor>& grad_scale,
35
+ const c10::optional<at::Tensor>& found_inf);
36
+
37
+ } // namespace native
38
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/reduction_template.cuh ADDED
@@ -0,0 +1,680 @@
1
+ namespace at {
2
+ namespace cuda {
3
+ //windows doesn't like large string literals, so split in two
4
+ const std::string reduction_template_0 = R"ESCAPE(
5
+ #define C10_HOST_DEVICE __host__ __device__
6
+ #define C10_DEVICE __device__
7
+ #if defined(__clang__) && defined(__HIP__)
8
+ #ifndef __forceinline__
9
+ #define __forceinline__ inline __attribute__((always_inline))
10
+ #endif
11
+ // until ROCm support for kernel asserts is restored
12
+ #define assert(expr) (static_cast<void>(0))
13
+ #endif
14
+
15
+ template <typename T>
16
+ __device__ __forceinline__ T WARP_SHFL_DOWN(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
17
+ {
18
+ #if defined(__clang__) && defined(__HIP__)
19
+ return __shfl_down(value, delta, width);
20
+ #else
21
+ return __shfl_down_sync(mask, value, delta, width);
22
+ #endif
23
+ }
24
+
25
+
26
+ #if ${complex}
27
+ template <typename T>
28
+ __device__ __forceinline__ std::complex<T> WARP_SHFL_DOWN(std::complex<T> value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
29
+ {
30
+ return std::complex<T>(
31
+ #if defined(__clang__) && defined(__HIP__)
32
+ __shfl_down(value.real(), delta, width),
33
+ __shfl_down(value.imag(), delta, width));
34
+ #else
35
+ __shfl_down_sync(mask, value.real(), delta, width),
36
+ __shfl_down_sync(mask, value.imag(), delta, width));
37
+ #endif
38
+ }
39
+ #endif
40
+
41
+ // aligned vector generates vectorized load/store on CUDA
42
+ template<typename scalar_t, int vec_size>
43
+ struct alignas(sizeof(scalar_t) * vec_size) aligned_vector {
44
+ scalar_t val[vec_size];
45
+ };
46
+
47
+
48
+ C10_HOST_DEVICE static void reduce_fraction(size_t &numerator, size_t &denominator) {
49
+ // get GCD of num and denom using Euclid's algorithm.
50
+ // Can replace this with std::gcd if we ever support c++17.
51
+ size_t a = denominator;
52
+ size_t b = numerator;
53
+ while (b != 0) {
54
+ a %= b;
55
+ // swap(a,b)
56
+ size_t tmp = a;
57
+ a = b;
58
+ b = tmp;
59
+ }
60
+
61
+ // a is now the GCD
62
+ numerator /= a;
63
+ denominator /= a;
64
+ }
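+ // For example, with numerator = sizeof(arg_t) = 8 and denominator = sizeof(out_scalar_t) = 4
+ // (say a double accumulator and float output), the GCD is 4 and the fraction reduces to 2 / 1,
+ // which is how the acc_buf offset in run() is rescaled from output-byte to accumulator-byte units.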
65
+
66
+
67
+
68
+
69
+ struct ReduceConfig {
70
+ //has to match host-side ReduceConfig in the eager code
71
+ static constexpr int BLOCK_X = 0;
72
+ static constexpr int BLOCK_Y = 1;
73
+ static constexpr int CTA = 2;
74
+
75
+ static constexpr int input_vec_size = 4;
76
+ int element_size_bytes;
77
+ int num_inputs;
78
+ int num_outputs;
79
+ int step_input = 1;
80
+ int step_output = 1;
81
+ int ctas_per_output = 1;
82
+ int input_mult[3] = {0, 0, 0};
83
+ int output_mult[2] = {0, 0};
84
+
85
+ int block_width;
86
+ int block_height;
87
+ int num_threads;
88
+
89
+ bool vectorize_input = false;
90
+ int output_vec_size = 1;
91
+
92
+ C10_HOST_DEVICE bool should_block_x_reduce() const {
93
+ return input_mult[BLOCK_X] != 0;
94
+ }
95
+
96
+ C10_HOST_DEVICE bool should_block_y_reduce() const {
97
+ return input_mult[BLOCK_Y] != 0;
98
+ }
99
+
100
+ C10_HOST_DEVICE bool should_global_reduce() const {
101
+ return input_mult[CTA] != 0;
102
+ }
103
+
104
+ C10_DEVICE bool should_store(int output_idx) const {
105
+ return output_idx < num_outputs &&
106
+ (!should_block_x_reduce() || threadIdx.x == 0) &&
107
+ (!should_block_y_reduce() || threadIdx.y == 0);
108
+ }
109
+
110
+ C10_DEVICE bool should_reduce_tail() const {
111
+ return (!should_block_y_reduce() || threadIdx.y == 0) &&
112
+ (!should_global_reduce() || blockIdx.y == 0);
113
+ }
114
+
115
+ C10_HOST_DEVICE int input_idx() const {
116
+ int lane = threadIdx.x;
117
+ int warp = threadIdx.y;
118
+ int cta2 = blockIdx.y;
119
+ return (lane * input_mult[BLOCK_X] +
120
+ warp * input_mult[BLOCK_Y] +
121
+ cta2 * input_mult[CTA]);
122
+ }
123
+
124
+ template <int output_vec_size>
125
+ C10_HOST_DEVICE int output_idx() const {
126
+ int lane = threadIdx.x;
127
+ int warp = threadIdx.y;
128
+ int cta1 = blockIdx.x;
129
+ return (lane * output_mult[BLOCK_X] +
130
+ warp * output_mult[BLOCK_Y] +
131
+ cta1 * step_output) * output_vec_size;
132
+ }
133
+
134
+ C10_DEVICE int shared_memory_offset(int offset) const {
135
+ return threadIdx.x + (threadIdx.y + offset) * blockDim.x;
136
+ }
137
+
138
+ C10_DEVICE int staging_memory_offset(int cta2) const {
139
+ int offset = cta2 + blockIdx.x * gridDim.y;
140
+ if (!should_block_x_reduce()) {
141
+ offset = threadIdx.x + offset * blockDim.x;
142
+ }
143
+ return offset;
144
+ }
145
+
146
+
147
+ };
148
+
149
+
150
+ //TODO this will need to be different for more generic reduction functions
151
+ namespace reducer {
152
+
153
+ using scalar_t = ${scalar_type};
154
+ using arg_t = ${reduction_accum_type};
155
+ using out_scalar_t = ${result_type};
156
+
157
+
158
+ inline __device__ ${functor}
159
+
160
+ inline __device__ out_scalar_t project(arg_t arg) {
161
+ return (out_scalar_t) arg;
162
+ }
163
+
164
+ inline __device__ arg_t warp_shfl_down(arg_t arg, int offset) {
165
+ return WARP_SHFL_DOWN(arg, offset);
166
+ }
167
+
168
+ inline __device__ arg_t translate_idx(arg_t acc, int64_t /*idx*/) {
169
+ return acc;
170
+ }
171
+
172
+ // wrap a normal reduction that ignores the index
173
+ inline __device__ arg_t reduce(arg_t acc, arg_t val, int64_t idx) {
174
+ return combine(acc, val);
175
+ }
176
+ }
177
+
178
+
179
+ struct ReduceJitOp {
180
+ using scalar_t = ${scalar_type};
181
+ using arg_t = ${reduction_accum_type};
182
+ using out_scalar_t = ${result_type};
183
+
184
+ using InputCalculator = OffsetCalculator<1>;
185
+ using OutputCalculator = OffsetCalculator<2>;
186
+
187
+ // static constexpr bool can_accumulate_in_output =
188
+ // std::is_convertible<arg_t, out_scalar_t>::value
189
+ // && std::is_convertible<out_scalar_t, arg_t>::value;
190
+
191
+ static constexpr int input_vec_size = ReduceConfig::input_vec_size;
192
+
193
+ arg_t ident;
194
+ ReduceConfig config;
195
+ InputCalculator input_calc;
196
+ OutputCalculator output_calc;
197
+ const void* src;
198
+ const char* dst[2]; //it accepts at most two destinations
199
+ // acc_buf used for accumulation among sub Tensor Iterator when accumulation on
200
+ // output is not permissible
201
+ void* acc_buf;
202
+ // cta_buf used for accumulation between blocks during global reduction
203
+ void* cta_buf;
204
+ int* semaphores;
205
+ int64_t base_idx;
206
+ bool accumulate;
207
+ bool final_output;
208
+ int noutputs;
209
+
210
+
211
+ C10_DEVICE void run() const {
212
+ extern __shared__ char shared_memory[];
213
+ uint32_t output_idx = config.output_idx<${output_vec_size}>();
214
+ uint32_t input_idx = config.input_idx();
215
+ auto base_offsets1 = output_calc.get(output_idx)[1];
216
+
217
+ using arg_vec_t = Array<arg_t, ${output_vec_size}>;
218
+ arg_vec_t value;
219
+
220
+ if (output_idx < config.num_outputs && input_idx < config.num_inputs) {
221
+ const scalar_t* input_slice = (const scalar_t*)((const char*)src + base_offsets1);
222
+
223
+ value = thread_reduce<${output_vec_size}>(input_slice);
224
+ }
225
+
226
+ if (config.should_block_y_reduce()) {
227
+ value = block_y_reduce<${output_vec_size}>(value, shared_memory);
228
+ }
229
+ if (config.should_block_x_reduce()) {
230
+ value = block_x_reduce<${output_vec_size}>(value, shared_memory);
231
+ }
232
+
233
+ using out_ptr_vec_t = Array<out_scalar_t*, ${output_vec_size}>;
234
+ using offset_vec_t = Array<uint32_t, ${output_vec_size}>;
235
+ offset_vec_t base_offsets;
236
+ out_ptr_vec_t out;
237
+
238
+ #pragma unroll
239
+ for (int i = 0; i < ${output_vec_size}; i++) {
240
+ base_offsets[i] = output_calc.get(output_idx + i)[0];
241
+ out[i] = (out_scalar_t*)((char*)dst[0] + base_offsets[i]);
242
+ }
243
+
244
+ arg_vec_t* acc = nullptr;
245
+ if (acc_buf != nullptr) {
246
+ size_t numerator = sizeof(arg_t);
247
+ size_t denominator = sizeof(out_scalar_t);
248
+ reduce_fraction(numerator, denominator);
249
+ acc = (arg_vec_t*)((char*)acc_buf + (base_offsets[0] * numerator / denominator));
250
+ }
251
+
252
+ if (config.should_global_reduce()) {
253
+ value = global_reduce<${output_vec_size}>(value, acc, shared_memory);
254
+ } else if (config.should_store(output_idx)) {
255
+ if (accumulate) {
256
+ #pragma unroll
257
+ for (int i = 0; i < ${output_vec_size}; i++) {
258
+ value[i] = reducer::translate_idx(value[i], base_idx);
259
+ }
260
+ }
261
+
262
+ if (acc == nullptr) {
263
+ if (accumulate) {
264
+ value = accumulate_in_output<${output_vec_size}>(out, value);
265
+ }
266
+ if (final_output) {
267
+ set_results_to_output<${output_vec_size}>(value, base_offsets);
268
+ } else {
269
+ #pragma unroll
270
+ for (int i = 0; i < ${output_vec_size}; i++) {
271
+ *(out[i]) = get_accumulated_output(out[i], value[i]);
272
+ }
273
+ }
274
+ } else {
275
+ if (accumulate) {
276
+ #pragma unroll
277
+ for (int i = 0; i < ${output_vec_size}; i++) {
278
+ value[i] = reducer::combine((*acc)[i], value[i]);
279
+ }
280
+ }
281
+ if (final_output) {
282
+ set_results_to_output<${output_vec_size}>(value, base_offsets);
283
+ } else {
284
+ *acc = value;
285
+ }
286
+ }
287
+ }
288
+ }
289
+
290
+ template <int output_vec_size>
291
+ C10_DEVICE Array<arg_t, output_vec_size> thread_reduce(const scalar_t* data) const {
292
+ if (config.vectorize_input) {
293
+ assert(output_vec_size == 1);
294
+ // reduce at the head of input_slice where memory is not aligned,
295
+ // so that thread_reduce will have aligned memory to work on.
296
+ return {input_vectorized_thread_reduce_impl(data)};
297
+ } else {
298
+ uint32_t element_stride = input_calc.strides_[0][0] / sizeof(scalar_t);
299
+ bool is_contiguous = (input_calc.dims == 1 && element_stride == 1);
300
+ if (is_contiguous) {
301
+ return thread_reduce_impl<output_vec_size>(data, [](uint32_t idx) { return idx; });
302
+ } else if (input_calc.dims == 1) {
303
+ return thread_reduce_impl<output_vec_size>(data, [&](uint32_t idx) { return idx * element_stride; });
304
+ } else {
305
+ return thread_reduce_impl<output_vec_size>(data, [&](uint32_t idx) { return input_calc.get(idx)[0] / sizeof(scalar_t); });
306
+ }
307
+ }
308
+ }
309
+
310
+ C10_DEVICE arg_t input_vectorized_thread_reduce_impl(const scalar_t* data) const {
311
+ uint32_t end = config.num_inputs;
312
+
313
+ // Handle the head of input slice where data is not aligned
314
+ arg_t value = ident;
315
+ constexpr int align_bytes = alignof(aligned_vector<scalar_t, input_vec_size>);
316
+ constexpr int align_elements = align_bytes / sizeof(scalar_t);
317
+ int shift = ((int64_t)data) % align_bytes / sizeof(scalar_t);
318
+ if (shift > 0) {
319
+ data -= shift;
320
+ end += shift;
321
+ if(threadIdx.x >= shift && threadIdx.x < align_elements && config.should_reduce_tail()){
322
+ value = reducer::reduce(value, data[threadIdx.x], threadIdx.x - shift);
323
+ }
324
+ end -= align_elements;
325
+ data += align_elements;
326
+ shift = align_elements - shift;
327
+ }
328
+
329
+ // Do the vectorized reduction
330
+ using load_t = aligned_vector<scalar_t, input_vec_size>;
331
+
332
+ uint32_t idx = config.input_idx();
333
+ const uint32_t stride = config.step_input;
334
+
335
+ // Multiple accumulators to remove dependency between unrolled loops.
336
+ arg_t value_list[input_vec_size];
337
+ value_list[0] = value;
338
+
339
+ #pragma unroll
340
+ for (int i = 1; i < input_vec_size; i++) {
341
+ value_list[i] = ident;
342
+ }
343
+
344
+ scalar_t values[input_vec_size];
345
+
346
+ load_t *values_vector = reinterpret_cast<load_t*>(&values[0]);
347
+
348
+ while (idx * input_vec_size + input_vec_size - 1 < end) {
349
+ *values_vector = reinterpret_cast<const load_t*>(data)[idx];
350
+ #pragma unroll
351
+ for (uint32_t i = 0; i < input_vec_size; i++) {
352
+ value_list[i] = reducer::reduce(value_list[i], values[i], shift + idx * input_vec_size + i);
353
+ }
354
+ idx += stride;
355
+ }
356
+
357
+ // tail
358
+ uint32_t tail_start = end - end % input_vec_size;
359
+ if (config.should_reduce_tail()) {
360
+ int idx = tail_start + threadIdx.x;
361
+ if (idx < end) {
362
+ value_list[0] = reducer::reduce(value_list[0], data[idx], idx + shift);
363
+ }
364
+ }
365
+
366
+ // combine accumulators
367
+ #pragma unroll
368
+ for (int i = 1; i < input_vec_size; i++) {
369
+ value_list[0] = reducer::combine(value_list[0], value_list[i]);
370
+ }
371
+ return value_list[0];
372
+ }
373
+
374
+ template <int output_vec_size, typename offset_calc_t>
375
+ C10_DEVICE Array<arg_t, output_vec_size> thread_reduce_impl(const scalar_t* data_, offset_calc_t calc) const {
376
+ uint32_t idx = config.input_idx();
377
+ const uint32_t end = config.num_inputs;
378
+ const uint32_t stride = config.step_input;
379
+ const int vt0=${vt0};
380
+
381
+ using arg_vec_t = Array<arg_t, output_vec_size>;
382
+ using load_t = aligned_vector<scalar_t, output_vec_size>;
383
+ const load_t* data = reinterpret_cast<const load_t*>(data_);
384
+
385
+ // Multiple accumulators to remove dependency between unrolled loops.
386
+ arg_vec_t value_list[vt0];
387
+
388
+ #pragma unroll
389
+ for (int i = 0; i < vt0; i++) {
390
+ #pragma unroll
391
+ for (int j = 0; j < output_vec_size; j++) {
392
+ value_list[i][j] = ident;
393
+ }
394
+ }
395
+
396
+ load_t values[vt0];
397
+
398
+ while (idx + (vt0 - 1) * stride < end) {
399
+ #pragma unroll
400
+ for (uint32_t i = 0; i < vt0; i++) {
401
+ values[i] = data[calc(idx + i * stride) / output_vec_size];
402
+ }
403
+ #pragma unroll
404
+ for (uint32_t i = 0; i < vt0; i++) {
405
+ #pragma unroll
406
+ for (uint32_t j = 0; j < output_vec_size; j++) {
407
+ value_list[i][j] = reducer::reduce(value_list[i][j], values[i].val[j], idx + i * stride);
408
+ }
409
+ }
410
+ idx += stride * vt0;
411
+ }
412
+
413
+ // tail
414
+ int idx_ = idx;
415
+ #pragma unroll
416
+ for (uint32_t i = 0; i < vt0; i++) {
417
+ if (idx >= end) {
418
+ break;
419
+ }
420
+ values[i] = data[calc(idx) / output_vec_size];
421
+ idx += stride;
422
+ }
423
+ idx = idx_;
424
+ #pragma unroll
425
+ for (uint32_t i = 0; i < vt0; i++) {
426
+ if (idx >= end) {
427
+ break;
428
+ }
429
+ #pragma unroll
430
+ for (uint32_t j = 0; j < output_vec_size; j++) {
431
+ value_list[i][j] = reducer::reduce(value_list[i][j], values[i].val[j], idx);
432
+ }
433
+ idx += stride;
434
+ }
435
+
436
+ // combine accumulators
437
+ #pragma unroll
438
+ for (int i = 1; i < vt0; i++) {
439
+ #pragma unroll
440
+ for (uint32_t j = 0; j < output_vec_size; j++) {
441
+ value_list[0][j] = reducer::combine(value_list[0][j], value_list[i][j]);
442
+ }
443
+ }
444
+ return value_list[0];
445
+ }
446
+ template <int output_vec_size>
447
+ C10_DEVICE Array<arg_t, output_vec_size> block_x_reduce(Array<arg_t, output_vec_size> value, char* shared_memory) const {
448
+ using args_vec_t = Array<arg_t, output_vec_size>;
449
+ int dim_x = blockDim.x;
450
+ args_vec_t* shared = (args_vec_t*)shared_memory;
451
+ if (dim_x > warpSize) {
452
+ int address_base = threadIdx.x + threadIdx.y*blockDim.x;
453
+ shared[address_base] = value;
454
+ for (int offset = dim_x/2; offset >= warpSize; offset >>= 1) {
455
+ __syncthreads();
456
+ if (threadIdx.x < offset && threadIdx.x + offset < blockDim.x) {
457
+ args_vec_t other = shared[address_base + offset];
458
+ #pragma unroll
459
+ for (int i = 0; i < output_vec_size; i++) {
460
+ value[i] = reducer::combine(value[i], other[i]);
461
+ }
462
+ shared[address_base] = value;
463
+ }
464
+ }
465
+ dim_x = warpSize;
466
+ }
467
+
468
+ __syncthreads();
469
+
470
+ for (int offset = 1; offset < dim_x; offset <<= 1) {
471
+ #pragma unroll
472
+ for (int i = 0; i < output_vec_size; i++) {
473
+ arg_t other = reducer::warp_shfl_down(value[i], offset);
474
+ value[i] = reducer::combine(value[i], other);
475
+ }
476
+ }
477
+ return value;
478
+ }
479
+
480
+ template <int output_vec_size>
481
+ C10_DEVICE Array<arg_t, output_vec_size> block_y_reduce(Array<arg_t, output_vec_size> value, char* shared_memory) const {
482
+ using args_vec_t = Array<arg_t, output_vec_size>;
483
+ args_vec_t* shared = (args_vec_t*)shared_memory;
484
+ shared[config.shared_memory_offset(0)] = value;
485
+ for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
486
+ __syncthreads();
487
+ if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
488
+ args_vec_t other = shared[config.shared_memory_offset(offset)];
489
+ #pragma unroll
490
+ for (int i = 0; i < output_vec_size; i++) {
491
+ value[i] = reducer::combine(value[i], other[i]);
492
+ }
493
+ shared[config.shared_memory_offset(0)] = value;
494
+ }
495
+ }
496
+ return value;
497
+ }
498
+ )ESCAPE";
499
+
500
+ const std::string reduction_template_1 = R"ESCAPE(
501
+
502
+ C10_DEVICE bool mark_block_finished() const {
503
+ __shared__ bool is_last_block_done_shared;
504
+
505
+ __syncthreads();
506
+ if (threadIdx.x == 0 && threadIdx.y == 0) {
507
+ int prev_blocks_finished = atomicAdd(&semaphores[blockIdx.x], 1);
508
+ is_last_block_done_shared = (prev_blocks_finished == gridDim.y - 1);
509
+ }
510
+
511
+ __syncthreads();
512
+
513
+ return is_last_block_done_shared;
514
+ }
515
+
516
+ template <int output_vec_size>
517
+ C10_DEVICE Array<arg_t, output_vec_size> accumulate_in_output(
518
+ Array<out_scalar_t*, output_vec_size> out,
519
+ Array<arg_t, output_vec_size> value
520
+ ) const {
521
+ Array<arg_t, output_vec_size> ret;
522
+ #pragma unroll
523
+ for (int i = 0; i < output_vec_size; i++) {
524
+ ret[i] = reducer::combine(*(out[i]), value[i]);
525
+ }
526
+ return ret;
527
+ }
528
+
529
+
530
+ C10_DEVICE out_scalar_t get_accumulated_output(
531
+ out_scalar_t* out, arg_t value
532
+ ) const {
533
+ assert(!final_output);
534
+ return (out_scalar_t)value;
535
+ }
536
+
537
+ template<class T>
538
+ C10_DEVICE void set_results(const T x, const uint32_t base_offset) const {
539
+ assert(noutputs == 1);
540
+ auto res = (out_scalar_t*)((char*)dst[0] + base_offset);
541
+ *res = x;
542
+ }
543
+
544
+ //TODO - multi-output reduction - we won't be able to use thrust::pair
545
+ //just explicitly specify typed output reads/writes
546
+ //Currently implemented for max of two outputs
547
+ // template<class T1, class T2>
548
+ // C10_DEVICE void set_results(const thrust::pair<T1, T2> x, const index_t base_offset) const {
549
+ // if (noutputs >= 1) {
550
+ // auto res0 = (T1*)((char*)dst[0] + base_offset);
551
+ // *res0 = x.first;
552
+ // }
553
+ // if (noutputs >= 2) {
554
+ // // base offset is computed assuming element size being sizeof(T1), so we need to make a
555
+ // // correction to obtain the correct base offset
556
+ // auto res1 = (T2*) ((char *) dst[1] + base_offset / sizeof(T1) * sizeof(T2));
557
+ // *res1 = x.second;
558
+ // }
559
+ // }
560
+
561
+ template <int output_vec_size>
562
+ C10_DEVICE void set_results_to_output(Array<arg_t, output_vec_size> value, Array<uint32_t, output_vec_size> base_offset) const {
563
+ assert(final_output);
564
+ #pragma unroll
565
+ for (int i = 0; i < output_vec_size; i++) {
566
+ set_results(reducer::project(value[i]), base_offset[i]);
567
+ }
568
+ }
569
+
570
+ template <int output_vec_size>
571
+ C10_DEVICE Array<arg_t, output_vec_size> global_reduce(Array<arg_t, output_vec_size> value, Array<arg_t, output_vec_size> *acc, char* shared_memory) const {
572
+ using arg_vec_t = Array<arg_t, output_vec_size>;
573
+ using out_ptr_vec_t = Array<out_scalar_t*, output_vec_size>;
574
+ using offset_vec_t = Array<uint32_t, output_vec_size>;
575
+
576
+ arg_vec_t* reduce_buffer = (arg_vec_t*)cta_buf;
577
+ uint32_t output_idx = config.output_idx<output_vec_size>();
578
+ offset_vec_t base_offsets;
579
+ out_ptr_vec_t out;
580
+
581
+ #pragma unroll
582
+ for (int i = 0; i < output_vec_size; i++) {
583
+ base_offsets[i] = output_calc.get(output_idx + i)[0];
584
+ out[i] = (out_scalar_t*)((char*)dst[0] + base_offsets[i]);
585
+ }
586
+
587
+ bool should_store = config.should_store(output_idx);
588
+ if (should_store) {
589
+ uint32_t offset = config.staging_memory_offset(blockIdx.y);
590
+ reduce_buffer[offset] = value;
591
+ }
592
+
593
+ __threadfence(); // make sure writes are globally visible
594
+ __syncthreads(); // if multiple warps in this block wrote to staging, make sure they're all done
595
+ bool is_last_block_done = mark_block_finished();
596
+
597
+ if (is_last_block_done) {
598
+ value = ident;
599
+ if (config.should_block_x_reduce()) {
600
+ uint32_t input_offset = threadIdx.x + threadIdx.y * blockDim.x;
601
+ uint32_t step = blockDim.x * blockDim.y;
602
+ for (; input_offset < config.ctas_per_output; input_offset += step) {
603
+ uint32_t idx = config.staging_memory_offset(input_offset);
604
+ arg_vec_t next = reduce_buffer[idx];
605
+ #pragma unroll
606
+ for (int i = 0; i < output_vec_size; i++) {
607
+ value[i] = reducer::combine(value[i], next[i]);
608
+ }
609
+ }
610
+ } else {
611
+ uint32_t input_offset = threadIdx.y;
612
+ uint32_t step = blockDim.y;
613
+ for (; input_offset < config.ctas_per_output; input_offset += step) {
614
+ uint32_t idx = config.staging_memory_offset(input_offset);
615
+ arg_vec_t next = reduce_buffer[idx];
616
+ #pragma unroll
617
+ for (int i = 0; i < output_vec_size; i++) {
618
+ value[i] = reducer::combine(value[i], next[i]);
619
+ }
620
+ }
621
+ }
622
+ value = block_y_reduce(value, shared_memory);
623
+ if (config.should_block_x_reduce()) {
624
+ value = block_x_reduce<output_vec_size>(value, shared_memory);
625
+ }
626
+ if (should_store) {
627
+ if (accumulate) {
628
+ #pragma unroll
629
+ for (int i = 0; i < output_vec_size; i++) {
630
+ value[i] = reducer::translate_idx(value[i], base_idx);
631
+ }
632
+ }
633
+
634
+ if (acc == nullptr) {
635
+ if (accumulate) {
636
+ value = accumulate_in_output<output_vec_size>(out, value);
637
+ }
638
+ if (final_output) {
639
+ set_results_to_output<output_vec_size>(value, base_offsets);
640
+ } else {
641
+ #pragma unroll
642
+ for (int i = 0; i < output_vec_size; i++) {
643
+ *(out[i]) = get_accumulated_output(out[i], value[i]);
644
+ }
645
+ }
646
+ } else {
647
+ if (accumulate) {
648
+ #pragma unroll
649
+ for (int i = 0; i < output_vec_size; i++) {
650
+ value[i] = reducer::combine((*acc)[i], value[i]);
651
+ }
652
+ }
653
+ if (final_output) {
654
+ set_results_to_output<output_vec_size>(value, base_offsets);
655
+ } else {
656
+ *acc = value;
657
+ }
658
+ }
659
+ }
660
+ }
661
+
662
+ return value;
663
+ }
664
+ };
665
+
666
+ extern "C"
667
+ __launch_bounds__(${max_threads_lb}, 4)
668
+ __global__ void reduction_${name}_kernel(ReduceJitOp r){
669
+ r.run();
670
+ }
671
+ )ESCAPE";
672
+
673
+ const std::string reduction_template = reduction_template_0 + reduction_template_1;
674
+
675
+
676
+ const std::string &get_reduction_template() {
677
+ return reduction_template;
678
+ }
679
+
680
+ }}
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/thread_constants.h ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+ #include <c10/macros/Macros.h>
3
+
4
+ // Marks a lambda as executable on both the host and device. The __host__
5
+ // attribute is important so that we can access static type information from
6
+ // the host, even if the function is typically only executed on the device.
7
+ #ifndef GPU_LAMBDA
8
+ #define GPU_LAMBDA __host__ __device__
9
+ #endif
10
+
11
+ #if defined(USE_ROCM)
12
+ constexpr int num_threads() {
13
+ return 256;
14
+ }
15
+ #else
16
+ constexpr uint32_t num_threads() {
17
+ return C10_WARP_SIZE * 4;
18
+ }
19
+ #endif
20
+
21
+ constexpr int thread_work_size() { return 4; }
22
+ constexpr int block_work_size() { return thread_work_size() * num_threads(); }
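+ // For example, on CUDA (C10_WARP_SIZE == 32) num_threads() == 128 and thread_work_size() == 4,
+ // so block_work_size() == 512 elements are processed per block; on ROCm num_threads() == 256,
+ // giving a block_work_size() of 1024.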
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/cuda/vol2col.cuh ADDED
@@ -0,0 +1,263 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #include <ATen/cuda/detail/KernelUtils.h>
5
+ #include <ATen/cuda/detail/IndexUtils.cuh>
6
+ #include <ATen/cuda/detail/TensorInfo.cuh>
7
+
8
+ #include <c10/macros/Macros.h>
9
+
10
+ namespace at {
11
+ namespace native {
12
+
13
+ using namespace at::cuda::detail;
14
+
15
+ // Kernel for fast unfold+copy on volumes
16
+ template <typename T>
17
+ __global__ void vol2col_kernel(
18
+ const int64_t n,
19
+ const T* data_vol,
20
+ const int depth,
21
+ const int height,
22
+ const int width,
23
+ const int ksize_t,
24
+ const int ksize_h,
25
+ const int ksize_w,
26
+ const int pad_t,
27
+ const int pad_h,
28
+ const int pad_w,
29
+ const int stride_t,
30
+ const int stride_h,
31
+ const int stride_w,
32
+ const int dilation_t,
33
+ const int dilation_h,
34
+ const int dilation_w,
35
+ const int depth_col,
36
+ const int height_col,
37
+ const int width_col,
38
+ T* data_col) {
39
+ CUDA_KERNEL_LOOP(index, n) {
40
+ auto w_out = index % width_col;
41
+ index /= width_col;
42
+ auto h_out = index % height_col;
43
+ index /= height_col;
44
+ auto t_out = index % depth_col;
45
+ auto channel_in = index / depth_col;
46
+ auto channel_out = channel_in * ksize_t * ksize_h * ksize_w;
47
+ auto t_in = t_out * stride_t - pad_t;
48
+ auto h_in = h_out * stride_h - pad_h;
49
+ auto w_in = w_out * stride_w - pad_w;
50
+ data_col +=
51
+ ((channel_out * depth_col + t_out) * height_col + h_out) * width_col +
52
+ w_out;
53
+ data_vol += ((channel_in * depth + t_in) * height + h_in) * width + w_in;
54
+ for (int i = 0; i < ksize_t; ++i) {
55
+ for (int j = 0; j < ksize_h; ++j) {
56
+ for (int k = 0; k < ksize_w; ++k) {
57
+ auto t = t_in + i * dilation_t;
58
+ auto h = h_in + j * dilation_h;
59
+ auto w = w_in + k * dilation_w;
60
+ *data_col = (t >= 0 && h >= 0 && w >= 0 && t < depth && h < height &&
61
+ w < width)
62
+ ? data_vol
63
+ [i * dilation_t * height * width + j * dilation_h * width +
64
+ k * dilation_w]
65
+ : static_cast<T>(0);
66
+ data_col += depth_col * height_col * width_col;
67
+ }
68
+ }
69
+ }
70
+ }
71
+ }
72
+
73
+ template <typename T>
74
+ void vol2col(
75
+ cudaStream_t stream,
76
+ const T* data_vol,
77
+ const int channels,
78
+ const int depth,
79
+ const int height,
80
+ const int width,
81
+ const int depth_col,
82
+ const int height_col,
83
+ const int width_col,
84
+ const int ksize_t,
85
+ const int ksize_h,
86
+ const int ksize_w,
87
+ const int pad_t,
88
+ const int pad_h,
89
+ const int pad_w,
90
+ const int stride_t,
91
+ const int stride_h,
92
+ const int stride_w,
93
+ const int dilation_t,
94
+ const int dilation_h,
95
+ const int dilation_w,
96
+ T* data_col) {
97
+ // We are going to launch channels * depth_col * height_col * width_col
98
+ // kernels, each kernel responsible for copying a single-channel grid.
99
+ // We cast an operand to int64 so that the product will not overflow
100
+ const auto num_kernels = static_cast<int64_t>(channels) * depth_col * height_col * width_col;
101
+ // Launch
102
+ vol2col_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
103
+ num_kernels,
104
+ data_vol,
105
+ depth,
106
+ height,
107
+ width,
108
+ ksize_t,
109
+ ksize_h,
110
+ ksize_w,
111
+ pad_t,
112
+ pad_h,
113
+ pad_w,
114
+ stride_t,
115
+ stride_h,
116
+ stride_w,
117
+ dilation_t,
118
+ dilation_h,
119
+ dilation_w,
120
+ depth_col,
121
+ height_col,
122
+ width_col,
123
+ data_col);
124
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
125
+ }
126
+
127
+ template <typename T, typename accT>
128
+ __global__ void vol2im_kernel(
129
+ const int64_t n,
130
+ const T* data_col,
131
+ const unsigned depth,
132
+ const unsigned height,
133
+ const unsigned width,
134
+ const unsigned channels,
135
+ const unsigned kernel_t,
136
+ const unsigned kernel_h,
137
+ const unsigned kernel_w,
138
+ const unsigned pad_t,
139
+ const unsigned pad_h,
140
+ const unsigned pad_w,
141
+ const unsigned stride_t,
142
+ const unsigned stride_h,
143
+ const unsigned stride_w,
144
+ const unsigned dilation_t,
145
+ const unsigned dilation_h,
146
+ const unsigned dilation_w,
147
+ const unsigned depth_col,
148
+ const unsigned height_col,
149
+ const unsigned width_col,
150
+ T* data_vol) {
151
+ CUDA_KERNEL_LOOP(index, n) {
152
+ accT val = static_cast<accT>(0);
153
+ const auto w_im = index % width + pad_w;
154
+ const auto h_im = (index / width) % height + pad_h;
155
+ const auto t_im = (index / width / height) % depth + pad_t;
156
+ const auto c_im = index / (width * height * depth);
157
+ auto kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
158
+ auto kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
159
+ auto kernel_extent_t = (kernel_t - 1) * dilation_t + 1;
160
+ // compute the start and end of the output
161
+ const auto w_col_start =
162
+ (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
163
+ const auto w_col_end = std::min(w_im / stride_w + 1, width_col);
164
+ const auto h_col_start =
165
+ (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
166
+ const auto h_col_end = std::min(h_im / stride_h + 1, height_col);
167
+ const auto t_col_start =
168
+ (t_im < kernel_extent_t) ? 0 : (t_im - kernel_extent_t) / stride_t + 1;
169
+ const auto t_col_end = std::min(t_im / stride_t + 1, depth_col);
170
+ // TODO: use LCM of stride and dilation to avoid unnecessary loops
171
+ for (unsigned t_col = t_col_start; t_col < t_col_end; t_col += 1) {
172
+ for (unsigned h_col = h_col_start; h_col < h_col_end; h_col += 1) {
173
+ for (unsigned w_col = w_col_start; w_col < w_col_end; w_col += 1) {
174
+ uint64_t t_k = (t_im - t_col * stride_t);
175
+ uint64_t h_k = (h_im - h_col * stride_h);
176
+ uint64_t w_k = (w_im - w_col * stride_w);
177
+ if (t_k % dilation_t == 0 && h_k % dilation_h == 0 &&
178
+ w_k % dilation_w == 0) {
179
+ t_k /= dilation_t;
180
+ h_k /= dilation_h;
181
+ w_k /= dilation_w;
182
+ const int64_t idx_k =
183
+ ((c_im * kernel_t + t_k) * kernel_h + h_k) * kernel_w + w_k;
184
+ const int64_t data_col_index =
185
+ ((idx_k * depth_col + t_col) *
186
+ height_col + h_col) *
187
+ width_col + w_col;
188
+ val += data_col[data_col_index];
189
+ }
190
+ }
191
+ }
192
+ }
193
+ data_vol[index] = static_cast<T>(val);
194
+ }
195
+ }
196
+
197
+ template <typename T, typename accT>
198
+ void col2vol(
199
+ cudaStream_t stream,
200
+ const T* data_col,
201
+ const int64_t channels,
202
+ const int64_t depth,
203
+ const int64_t height,
204
+ const int64_t width,
205
+ const int64_t output_depth,
206
+ const int64_t output_height,
207
+ const int64_t output_width,
208
+ const int64_t patch_t,
209
+ const int64_t patch_h,
210
+ const int64_t patch_w,
211
+ const int64_t pad_t,
212
+ const int64_t pad_h,
213
+ const int64_t pad_w,
214
+ const int64_t stride_t,
215
+ const int64_t stride_h,
216
+ const int64_t stride_w,
217
+ const int64_t dilation_t,
218
+ const int64_t dilation_h,
219
+ const int64_t dilation_w,
220
+ T* data_vol) {
221
+ const auto num_kernels = channels * depth * height * width;
222
+
223
+ auto check_fits_in_unsigned =
224
+ [](int64_t val, const char * name) {
225
+ constexpr auto umax = std::numeric_limits<unsigned>::max();
226
+ TORCH_CHECK(val >= 0 && val <= umax,
227
+ name, " must fit in a 32-bit unsigned value");
228
+ };
229
+ check_fits_in_unsigned(num_kernels, "input size");
230
+ check_fits_in_unsigned(
231
+ channels * patch_t * patch_h * patch_w, "channels x kernel size");
232
+
233
+ // To avoid involving atomic operations, we will launch one kernel per
234
+ // bottom dimension, and then in the kernel add up the top dimensions.
235
+ vol2im_kernel<T, accT>
236
+ <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
237
+ num_kernels,
238
+ data_col,
239
+ depth,
240
+ height,
241
+ width,
242
+ channels,
243
+ patch_t,
244
+ patch_h,
245
+ patch_w,
246
+ pad_t,
247
+ pad_h,
248
+ pad_w,
249
+ stride_t,
250
+ stride_h,
251
+ stride_w,
252
+ dilation_t,
253
+ dilation_h,
254
+ dilation_w,
255
+ output_depth,
256
+ output_height,
257
+ output_width,
258
+ data_vol);
259
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
260
+ }
261
+
262
+ } // namespace native
263
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/mps/Copy.h ADDED
@@ -0,0 +1,15 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+ #include <ATen/core/Tensor.h>
5
+
6
+ namespace at {
7
+ namespace native {
8
+ namespace mps {
9
+
10
+ at::Tensor& mps_copy_(at::Tensor& dst, const at::Tensor& src, bool non_blocking);
11
+ void copy_blit_mps(void* dst, const void* src, size_t size);
12
+
13
+ } // namespace mps
14
+ } // namespace native
15
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/mps/MPSGraphSonomaOps.h ADDED
@@ -0,0 +1,49 @@
+ #pragma once
+
+ #include <MetalPerformanceShadersGraph/MetalPerformanceShadersGraph.h>
+
+ #if !defined(__MAC_14_0) && \
+ (!defined(MAC_OS_X_VERSION_14_0) || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_14_0))
+
+ typedef NS_ENUM(NSUInteger, MPSGraphFFTScalingMode)
+ {
+ MPSGraphFFTScalingModeNone = 0L,
+ MPSGraphFFTScalingModeSize = 1L,
+ MPSGraphFFTScalingModeUnitary = 2L,
+ };
+
+ @interface FakeMPSGraphFFTDescriptor : NSObject<NSCopying>
+ @property (readwrite, nonatomic) BOOL inverse;
+ @property (readwrite, nonatomic) MPSGraphFFTScalingMode scalingMode;
+ @property (readwrite, nonatomic) BOOL roundToOddHermitean;
+ +(nullable instancetype) descriptor;
+ @end
+
+ @compatibility_alias MPSGraphFFTDescriptor FakeMPSGraphFFTDescriptor;
+
+ @interface MPSGraph (SonomaOps)
+ -(MPSGraphTensor * _Nonnull) conjugateWithTensor:(MPSGraphTensor * _Nonnull) tensor
+ name:(NSString * _Nullable) name;
+
+ -(MPSGraphTensor * _Nonnull) fastFourierTransformWithTensor:(MPSGraphTensor * _Nonnull) tensor
+ axes:(NSArray<NSNumber *> * _Nonnull) axes
+ descriptor:(MPSGraphFFTDescriptor * _Nonnull) descriptor
+ name:(NSString * _Nullable) name;
+
+ -(MPSGraphTensor * _Nonnull) realToHermiteanFFTWithTensor:(MPSGraphTensor * _Nonnull) tensor
+ axes:(NSArray<NSNumber *> * _Nonnull) axes
+ descriptor:(MPSGraphFFTDescriptor * _Nonnull) descriptor
+ name:(NSString * _Nullable) name;
+
+ -(MPSGraphTensor * _Nonnull) HermiteanToRealFFTWithTensor:(MPSGraphTensor * _Nonnull) tensor
+ axes:(NSArray<NSNumber *> * _Nonnull) axes
+ descriptor:(MPSGraphFFTDescriptor * _Nonnull) descriptor
+ name:(NSString * _Nullable) name;
+ @end
+
+ // define BFloat16 enums for MacOS13
+ #define MPSDataTypeBFloat16 ((MPSDataType) (MPSDataTypeAlternateEncodingBit | MPSDataTypeFloat16))
+
+ // define Metal version
+ #define MTLLanguageVersion3_1 ((MTLLanguageVersion) ((3 << 16) + 1))
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/mps/MPSGraphVenturaOps.h ADDED
@@ -0,0 +1,197 @@
1
+ #pragma once
2
+ #include <MetalPerformanceShadersGraph/MetalPerformanceShadersGraph.h>
3
+
4
+ // TODO: Remove me when moved to MacOS 13
5
+ #if !defined(__MAC_13_2) && \
6
+ (!defined(MAC_OS_X_VERSION_13_2) || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_13_2))
7
+
8
+ @interface FakeMPSGraphConvolution3DOpDescriptor : NSObject<NSCopying>
9
+
10
+ @property (readwrite, nonatomic) NSUInteger strideInX;
11
+ @property (readwrite, nonatomic) NSUInteger strideInY;
12
+ @property (readwrite, nonatomic) NSUInteger strideInZ;
13
+ @property (readwrite, nonatomic) NSUInteger dilationRateInX;
14
+ @property (readwrite, nonatomic) NSUInteger dilationRateInY;
15
+ @property (readwrite, nonatomic) NSUInteger dilationRateInZ;
16
+
17
+ @property (readwrite, nonatomic) NSUInteger paddingLeft;
18
+ @property (readwrite, nonatomic) NSUInteger paddingRight;
19
+ @property (readwrite, nonatomic) NSUInteger paddingTop;
20
+ @property (readwrite, nonatomic) NSUInteger paddingBottom;
21
+ @property (readwrite, nonatomic) NSUInteger paddingFront;
22
+ @property (readwrite, nonatomic) NSUInteger paddingBack;
23
+
24
+ @property (readwrite, nonatomic) MPSGraphPaddingStyle paddingStyle;
25
+ @property (readwrite, nonatomic) MPSGraphTensorNamedDataLayout dataLayout;
26
+ @property (readwrite, nonatomic) MPSGraphTensorNamedDataLayout weightsLayout;
27
+
28
+ @property (readwrite, nonatomic) NSUInteger groups;
29
+
30
+ @end
31
+
32
+ @compatibility_alias MPSGraphConvolution3DOpDescriptor FakeMPSGraphConvolution3DOpDescriptor;
33
+
34
+ #endif
35
+
36
+ @interface MPSGraph (VenturaOps)
37
+
38
+ #if !defined(__MAC_13_0) && \
39
+ (!defined(MAC_OS_X_VERSION_13_0) || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_13_0))
40
+
41
+ typedef NS_ENUM(NSUInteger, MPSGraphResizeNearestRoundingMode)
42
+ {
43
+ MPSGraphResizeNearestRoundingModeRoundPreferCeil = 0L,
44
+ MPSGraphResizeNearestRoundingModeRoundPreferFloor = 1L,
45
+ MPSGraphResizeNearestRoundingModeCeil = 2L,
46
+ MPSGraphResizeNearestRoundingModeFloor = 3L,
47
+ MPSGraphResizeNearestRoundingModeRoundToEven = 4L,
48
+ MPSGraphResizeNearestRoundingModeRoundToOdd = 5L,
49
+ };
50
+
51
+ // Define complex enums for MacOS 12
52
+ #define MPSDataTypeComplexBit 0x01000000
53
+ #define MPSDataTypeComplexFloat32 ((MPSDataType) (MPSDataTypeFloatBit | MPSDataTypeComplexBit | 64))
54
+ #define MPSDataTypeComplexFloat16 ((MPSDataType) (MPSDataTypeFloatBit | MPSDataTypeComplexBit | 32))
55
+ #endif
56
+
57
+ - (MPSGraphTensor * _Nonnull) convolution3DWithSourceTensor:(MPSGraphTensor * _Nonnull) source
58
+ weightsTensor:(MPSGraphTensor * _Nonnull) weights
59
+ descriptor:(MPSGraphConvolution3DOpDescriptor * _Nonnull) descriptor
60
+ name:(NSString * _Nullable) name;
61
+
62
+ - (MPSGraphTensor * _Nonnull) convolution3DDataGradientWithIncomingGradientTensor:(MPSGraphTensor * _Nonnull) incomingGradient
63
+ weightsTensor:(MPSGraphTensor * _Nonnull) weights
64
+ outputShape:(MPSShape * _Nonnull) outputShape
65
+ forwardConvolutionDescriptor:(MPSGraphConvolution3DOpDescriptor * _Nonnull) forwardConvolutionDescriptor
66
+ name:(NSString * _Nullable) name;
67
+
68
+ - (MPSGraphTensor * _Nonnull) convolution3DWeightsGradientWithIncomingGradientTensor:(MPSGraphTensor * _Nonnull) incomingGradient
69
+ sourceTensor:(MPSGraphTensor * _Nonnull) source
70
+ outputShape:(MPSShape * _Nonnull) outputShape
71
+ forwardConvolutionDescriptor:(MPSGraphConvolution3DOpDescriptor * _Nonnull) forwardConvolutionDescriptor
72
+ name:(NSString * _Nullable) name;
73
+
74
+ - (MPSGraphTensor * _Nonnull)cumulativeSumWithTensor:(MPSGraphTensor * _Nonnull)tensor
75
+ axis:(NSInteger)axis
76
+ name:(NSString * _Nullable)name;
77
+
78
+ - (MPSGraphTensor * _Nonnull)sortWithTensor:(MPSGraphTensor * _Nonnull)tensor
79
+ axis:(NSInteger)axis
80
+ name:(NSString * _Nullable)name;
81
+
82
+ - (MPSGraphTensor * _Nonnull) sortWithTensor:(MPSGraphTensor * _Nonnull) tensor
83
+ axis:(NSInteger) axis
84
+ descending:(BOOL) descending
85
+ name:(NSString * _Nullable) name;
86
+
87
+ - (MPSGraphTensor * _Nonnull) sortWithTensor:(MPSGraphTensor * _Nonnull) tensor
88
+ axisTensor:(MPSGraphTensor * _Nonnull) axisTensor
89
+ descending:(BOOL) descending
90
+ name:(NSString * _Nullable) name;
91
+
92
+ - (MPSGraphTensor * _Nonnull) sortWithTensor:(MPSGraphTensor * _Nonnull) tensor
93
+ axisTensor:(MPSGraphTensor * _Nonnull) axisTensor
94
+ name:(NSString * _Nullable) name;
95
+
96
+ - (MPSGraphTensor * _Nonnull)argSortWithTensor:(MPSGraphTensor * _Nonnull)tensor
97
+ axis:(NSInteger)axis
98
+ name:(NSString * _Nullable)name;
99
+
100
+ - (MPSGraphTensor * _Nonnull) argSortWithTensor:(MPSGraphTensor * _Nonnull) tensor
101
+ axis:(NSInteger) axis
102
+ descending:(BOOL) descending
103
+ name:(NSString * _Nullable) name;
104
+
105
+ - (MPSGraphTensor * _Nonnull) argSortWithTensor:(MPSGraphTensor * _Nonnull) tensor
106
+ axisTensor:(MPSGraphTensor * _Nonnull) axisTensor
107
+ descending:(BOOL) descending
108
+ name:(NSString * _Nullable) name;
109
+
110
+ - (MPSGraphTensor * _Nonnull) argSortWithTensor:(MPSGraphTensor * _Nonnull) tensor
111
+ axisTensor:(MPSGraphTensor * _Nonnull) axisTensor
112
+ name:(NSString * _Nullable) name;
113
+
114
+ - (MPSGraphTensor * _Nonnull)inverseOfTensor:(MPSGraphTensor * _Nonnull) inputTensor
115
+ name:(NSString * _Nullable)name;
116
+
117
+ - (MPSGraphTensor * _Nonnull) resizeNearestWithTensor:(MPSGraphTensor * _Nonnull) imagesTensor
118
+ sizeTensor:(MPSGraphTensor * _Nonnull) size
119
+ nearestRoundingMode:(MPSGraphResizeNearestRoundingMode) nearestRoundingMode
120
+ centerResult:(BOOL) centerResult
121
+ alignCorners:(BOOL) alignCorners
122
+ layout:(MPSGraphTensorNamedDataLayout) layout
123
+ name:(NSString * _Nullable) name;
124
+
125
+ - (MPSGraphTensor * _Nonnull) resizeNearestWithTensor:(MPSGraphTensor * _Nonnull) imagesTensor
126
+ sizeTensor:(MPSGraphTensor * _Nonnull) size
127
+ scaleOffsetTensor:(MPSGraphTensor * _Nonnull) scaleOffset
128
+ nearestRoundingMode:(MPSGraphResizeNearestRoundingMode) nearestRoundingMode
129
+ layout:(MPSGraphTensorNamedDataLayout) layout
130
+ name:(NSString * _Nullable) name;
131
+
132
+ - (MPSGraphTensor * _Nonnull) resizeBilinearWithTensor:(MPSGraphTensor * _Nonnull) imagesTensor
133
+ sizeTensor:(MPSGraphTensor * _Nonnull) size
134
+ centerResult:(BOOL) centerResult
135
+ alignCorners:(BOOL) alignCorners
136
+ layout:(MPSGraphTensorNamedDataLayout) layout
137
+ name:(NSString * _Nullable) name;
138
+
139
+ - (MPSGraphTensor * _Nonnull) resizeBilinearWithTensor:(MPSGraphTensor * _Nonnull) imagesTensor
140
+ sizeTensor:(MPSGraphTensor * _Nonnull) size
141
+ scaleOffsetTensor:(MPSGraphTensor * _Nonnull) scaleOffset
142
+ layout:(MPSGraphTensorNamedDataLayout) layout
143
+ name:(NSString * _Nullable) name;
144
+
145
+ - (MPSGraphTensor * _Nonnull) resizeNearestWithGradientTensor:(MPSGraphTensor * _Nonnull) gradient
146
+ input:(MPSGraphTensor * _Nonnull) input
147
+ nearestRoundingMode:(MPSGraphResizeNearestRoundingMode) nearestRoundingMode
148
+ centerResult:(BOOL) centerResult
149
+ alignCorners:(BOOL) alignCorners
150
+ layout:(MPSGraphTensorNamedDataLayout) layout
151
+ name:(NSString * _Nullable) name;
152
+
153
+ - (MPSGraphTensor * _Nonnull) resizeNearestWithGradientTensor:(MPSGraphTensor * _Nonnull) gradient
154
+ input:(MPSGraphTensor * _Nonnull) input
155
+ scaleOffsetTensor:(MPSGraphTensor * _Nonnull) scaleOffset
156
+ nearestRoundingMode:(MPSGraphResizeNearestRoundingMode) nearestRoundingMode
157
+ layout:(MPSGraphTensorNamedDataLayout) layout
158
+ name:(NSString * _Nullable) name;
159
+
160
+ - (MPSGraphTensor * _Nonnull) resizeBilinearWithGradientTensor:(MPSGraphTensor * _Nonnull) gradient
161
+ input:(MPSGraphTensor * _Nonnull) input
162
+ centerResult:(BOOL) centerResult
163
+ alignCorners:(BOOL) alignCorners
164
+ layout:(MPSGraphTensorNamedDataLayout) layout
165
+ name:(NSString * _Nullable) name;
166
+
167
+ - (MPSGraphTensor * _Nonnull) resizeBilinearWithGradientTensor:(MPSGraphTensor * _Nonnull) gradient
168
+ input:(MPSGraphTensor * _Nonnull) input
169
+ scaleOffsetTensor:(MPSGraphTensor * _Nonnull) scaleOffset
170
+ layout:(MPSGraphTensorNamedDataLayout) layout
171
+ name:(NSString * _Nullable) name;
172
+
173
+ - (MPSGraphTensor * _Nonnull) sampleGridWithSourceTensor:(MPSGraphTensor * _Nonnull) source
174
+ coordinateTensor:(MPSGraphTensor * _Nonnull) coordinates
175
+ layout:(MPSGraphTensorNamedDataLayout) layout
176
+ normalizeCoordinates:(BOOL) normalizeCoordinates
177
+ relativeCoordinates:(BOOL) relativeCoordinates
178
+ alignCorners:(BOOL) alignCorners
179
+ paddingMode:(MPSGraphPaddingMode) paddingMode
180
+ samplingMode:(MPSGraphResizeMode) samplingMode
181
+ constantValue:(double) constantValue
182
+ name:(NSString * _Nullable) name;
183
+
184
+ - (MPSGraphTensor * _Nonnull) sampleGridWithSourceTensor:(MPSGraphTensor * _Nonnull) source
185
+ coordinateTensor:(MPSGraphTensor * _Nonnull) coordinates
186
+ layout:(MPSGraphTensorNamedDataLayout) layout
187
+ normalizeCoordinates:(BOOL) normalizeCoordinates
188
+ relativeCoordinates:(BOOL) relativeCoordinates
189
+ alignCorners:(BOOL) alignCorners
190
+ paddingMode:(MPSGraphPaddingMode) paddingMode
191
+ nearestRoundingMode:(MPSGraphResizeNearestRoundingMode) nearestRoundingMode
192
+ constantValue:(double) constantValue
193
+ name:(NSString * _Nullable) name;
194
+ - (MPSGraphTensor * _Nonnull) truncateWithTensor:(MPSGraphTensor * _Nonnull) tensor
195
+ name:(NSString * _Nullable) name;
196
+
197
+ @end
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/mps/OperationUtils.h ADDED
@@ -0,0 +1,394 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
6
+ #include <ATen/Tensor.h>
7
+ #include <ATen/Utils.h>
8
+ #include <ATen/mps/MPSStream.h>
9
+ #include <ATen/native/mps/TensorFactory.h>
10
+ #include <c10/util/Optional.h>
11
+ #include <c10/core/ScalarType.h>
12
+ #include <torch/library.h>
13
+ #include <exception>
14
+ #include <unordered_map>
15
+
16
+ #ifndef AT_PER_OPERATOR_HEADERS
17
+ #include <ATen/Functions.h>
18
+ #include <ATen/NativeFunctions.h>
19
+ #else
20
+ #include <ATen/ops/empty.h>
21
+ #include <ATen/ops/empty_like.h>
22
+ #include <ATen/ops/zeros.h>
23
+ #include <ATen/ops/zeros_like.h>
24
+ #endif
25
+
26
+ #include <MetalPerformanceShaders/MetalPerformanceShaders.h>
27
+
28
+ // Fwd declarations
29
+ namespace at {
30
+ struct TensorIteratorBase;
31
+ }
32
+ using namespace at::mps;
33
+
34
+ namespace at::native::mps {
35
+
36
+ void dispatch_sync_with_rethrow(dispatch_queue_t queue, void (^block)());
37
+
38
+ struct MPSScalar {
39
+ id<MTLBuffer> getMTLBuffer() const { return __builtin_bit_cast(id<MTLBuffer>, buffer.get()); }
40
+
41
+ size_t size = 0;
42
+ ScalarType type = ScalarType::Undefined;
43
+ c10::DataPtr buffer; // stores MTLBuffer (frees buffer if MPSScalar instance goes out of scope)
44
+ union {
45
+ float f; // MPS doesn't support 'double'
46
+ at::Half h;
47
+ int64_t i;
48
+ bool b;
49
+ c10::complex<float> cf;
50
+ c10::complex<at::Half> ch;
51
+ at::BFloat16 bf16;
52
+ } value {};
53
+ };
54
+
55
+ void runMPSGraph(MPSStream* mpsStream,
56
+ MPSGraph* mpsGraph,
57
+ NSDictionary* feeds,
58
+ NSDictionary* results);
59
+
60
+ MPSDataType getMPSDataType(ScalarType scalar_type);
61
+ static inline MPSDataType getMPSDataType(const Tensor& t) {
62
+ return getMPSDataType(t.scalar_type());
63
+ }
64
+ MPSDataType getMPSScalarType(ScalarType scalar_type);
65
+ static inline MPSDataType getMPSScalarType(const Tensor& t) {
66
+ return getMPSScalarType(t.scalar_type());
67
+ }
68
+ MPSScalar getMPSScalar(const Scalar& scalar, ScalarType type);
69
+ std::string getMPSTypeString(ScalarType scalar_type, bool short_name = false);
70
+ static inline std::string getMPSTypeString(const Tensor& t, bool short_name = false) {
71
+ return getMPSTypeString(t.scalar_type(), short_name);
72
+ }
73
+ std::string scalarToMetalTypeString(const c10::ScalarType& scalar_type);
74
+ NSArray<NSNumber*>* getTensorAxes(const Tensor& t);
75
+ NSArray<NSNumber*>* getTensorAxes(const IntArrayRef& sizes, at::OptionalIntArrayRef dim);
76
+ std::string getMPSShapeString(MPSShape* shape);
77
+ std::string getTensorsStringKey(const TensorList& tensors, bool short_dtype = true);
78
+ std::string getArrayRefString(const IntArrayRef s);
79
+ // use has_storage() on the returned tensor to determine if src actually is a view
80
+ Tensor gatherViewTensor(const at::Tensor& src, at::Tensor& dst);
81
+ Tensor& scatterViewTensor(const at::Tensor& src, at::Tensor& output);
82
+ bool canSliceViewTensor(const Tensor& src, MPSShape *mpsShape);
83
+ MPSGraphTensorData* getMPSGraphTensorDataForView(const Tensor& src, MPSShape *mpsShape, const MPSDataType mpsDataType);
84
+ MPSGraphTensor* castToIHFTypes(MPSGraph* mpsGraph, MPSGraphTensor* inputTensor, const Tensor& input, bool includesInt64 = false);
85
+ MPSGraphTensor* castFromIHFTypes(MPSGraph* mpsGraph, MPSGraphTensor* inputTensor, const Tensor& input, bool includesInt64 = false);
86
+
87
+ // The MPSShape could vary based on memory format
88
+ MPSShape* getMPSShape(const Tensor& t, c10::MemoryFormat memory_format = MemoryFormat::Contiguous);
89
+ MPSShape* getMPSShape(IntArrayRef sizes, c10::MemoryFormat memory_format = MemoryFormat::Contiguous);
90
+
91
+ static inline id<MTLBuffer> getMTLBufferStorage(const at::Tensor& tensor) {
92
+ return __builtin_bit_cast(id<MTLBuffer>, tensor.storage().data());
93
+ }
94
+
95
+ class Placeholder {
96
+ public:
97
+ Placeholder() : _placeholder(nullptr), _value(nullptr), _tensor(Tensor()) {}
98
+ Placeholder(MPSGraphTensor* mpsGraphTensor) : _placeholder(mpsGraphTensor), _value(nullptr), _tensor(Tensor()) {}
99
+ Placeholder(MPSGraphTensor* mpsGraphTensor, const Tensor& self, MPSShape *mpsShape = nullptr,
100
+ bool gatherTensorData = true, MPSDataType dataType = MPSDataTypeInvalid);
101
+ MPSGraphTensor* getMPSGraphTensor() {
102
+ return _placeholder;
103
+ }
104
+ MPSGraphTensorData* getMPSGraphTensorData() {
105
+ return _value;
106
+ }
107
+ bool isIntermediate() {
108
+ return _value == nullptr;
109
+ }
110
+
111
+ private:
112
+ MPSGraphTensor* _placeholder;
113
+ MPSGraphTensorData* _value;
114
+ Tensor _tensor;
115
+ };
116
+
117
+ void resize_tensor(Tensor* output);
118
+ Tensor wrapped_scalar_tensor_mps(const Scalar& scalar, const Device device);
119
+ MPSGraphTensor* trunc_tensor(MPSGraph* mpsGraph, MPSGraphTensor* inputTensor);
120
+ MPSGraphTensor* convertNHWCtoNCHW(MPSGraph *mpsGraph, MPSGraphTensor* tensor);
121
+ MPSGraphTensor* castMPSTensor(MPSGraph *mpsGraph, MPSGraphTensor* tensor, ScalarType toType);
122
+ MPSGraphTensor* castMPSTensor(MPSGraph *mpsGraph, MPSGraphTensor* tensor, MPSDataType toType);
123
+ MPSGraphTensorData *getMPSGraphTensorData(MPSGraph* mpsGraph, MPSStream* mpsStream, const Tensor& tensor);
124
+ MPSGraphTensorData* getMPSGraphTensorFromScalar(MPSStream* mpsStream, MPSScalar& scalar);
125
+
126
+ MPSGraph* make_mps_graph();
127
+ void printTensorNDArray(const Tensor& t);
128
+ MPSNDArray* ndArrayFromTensor(const Tensor& tensor, MPSShape *shape, MPSDataType mpsType);
129
+
130
+ MPSGraphTensor* mpsGraphUnrankedPlaceHolder(MPSGraph *mpsGraph, MPSDataType dataType);
131
+ MPSGraphTensor* mpsGraphRankedPlaceHolder(MPSGraph *mpsGraph, MPSDataType dataType, MPSShape* mpsShape);
132
+ MPSGraphTensor* mpsGraphRankedPlaceHolder(MPSGraph *mpsGraph, const Tensor& tensor);
133
+ MPSGraphTensor* mpsGraphScalarPlaceHolder(MPSGraph *mpsGraph, MPSDataType dataType);
134
+ MPSGraphTensor* mpsGraphScalarPlaceHolder(MPSGraph *mpsGraph, const Scalar& scalar);
135
+
136
+ string get_mem_format_string(c10::MemoryFormat memory_format);
137
+
138
+ using MPSCacheKey = uint64_t;
139
+
140
+ // derive this class to cache a graph and its inputs/outputs
141
+ // can be used to store any NSObject
142
+ struct MPSCachedGraph
143
+ {
144
+ MPSCachedGraph(NSObject *object) : _object([object retain]) {}
145
+ virtual ~MPSCachedGraph() {
146
+ [_object release];
147
+ _object = nullptr;
148
+ }
149
+
150
+ template<typename T>
151
+ inline T* as() {
152
+ return static_cast<T*>(this);
153
+ }
154
+
155
+ MPSGraph *graph() const { return (MPSGraph *)_object; }
156
+ NSObject *object() const { return _object; }
157
+ private:
158
+ NSObject *_object = nullptr;
159
+ };
160
+
161
+ struct MPSUnaryCachedGraph : public MPSCachedGraph
162
+ {
163
+ MPSUnaryCachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
164
+ MPSGraphTensor *inputTensor_ = nil;
165
+ MPSGraphTensor *outputTensor_ = nil;
166
+ };
167
+
168
+ struct MPSUnaryGradCachedGraph : public MPSCachedGraph
169
+ {
170
+ MPSUnaryGradCachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
171
+ MPSGraphTensor *gradOutputTensor_ = nil;
172
+ MPSGraphTensor *inputTensor_ = nil;
173
+ MPSGraphTensor *outputTensor_ = nil; // some backward input is actually the forward's output
174
+ MPSGraphTensor *gradInputTensor_ = nil;
175
+ };
176
+
177
+ struct MPSBinaryCachedGraph : public MPSCachedGraph
178
+ {
179
+ MPSBinaryCachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
180
+ MPSGraphTensor *inputTensor_ = nil;
181
+ MPSGraphTensor *otherTensor_ = nil;
182
+ MPSGraphTensor *outputTensor_ = nil;
183
+ };
184
+
185
+ struct MPSBinaryGradCachedGraph : public MPSCachedGraph
186
+ {
187
+ MPSBinaryGradCachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
188
+ MPSGraphTensor *gradOutputTensor_ = nil;
189
+ MPSGraphTensor *inputTensor_ = nil;
190
+ MPSGraphTensor *otherTensor_ = nil;
191
+ MPSGraphTensor *gradInputTensor_ = nil;
192
+ };
193
+
194
+ // TODO: Improve the overall design of MPSGraphCache.
195
+ // https://github.com/pytorch/pytorch/issues/77176
196
+ // Cache holding various keys mapped to graphs
197
+ struct MPSGraphCache
198
+ {
199
+ typedef MPSCachedGraph * (^CreateCachedGraphBlock)();
200
+
201
+ struct CacheEntry {
202
+ CacheEntry(const std::string& key, MPSCachedGraph *cachedGraph) : cachedGraph_(cachedGraph), key_(key) {}
203
+ MPSCachedGraph* cachedGraph_ = nullptr;
204
+ std::string key_;
205
+ };
206
+
207
+ public:
208
+
209
+ static MPSGraphCache* getInstance() {
210
+ if(_instance_cache == nullptr) {
211
+ _instance_cache = new MPSGraphCache();
212
+ }
213
+ return _instance_cache;
214
+ }
215
+
216
+ ~MPSGraphCache() {
217
+ dispatch_release(serialQueue_);
218
+
219
+ for (const auto& i : cache_) {
220
+ delete i.second.cachedGraph_;
221
+ }
222
+ }
223
+
224
+ // Disallow the copy constructor and operator= functions
225
+ MPSGraphCache(const MPSGraphCache&) = delete;
226
+ void operator=(const MPSGraphCache&) = delete;
227
+
228
+ MPSCachedGraph* CreateCachedGraph(const std::string& key, CreateCachedGraphBlock createCacheBlock) {
229
+
230
+ __block MPSCachedGraph* cachedGraph = nil;
231
+
232
+ MPSCacheKey hash = std::hash<std::string>{}(key);
233
+
234
+ dispatch_sync_with_rethrow(serialQueue_, ^() {
235
+ // verify the cached entry doesn't already exist
236
+ if (cache_.count(hash) != 0) {
237
+ auto& entry = cache_.at(hash);
238
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(key == entry.key_, "Key collision in the MPS cached graph!\n");
239
+ cachedGraph = entry.cachedGraph_;
240
+ } else {
241
+ cachedGraph = createCacheBlock();
242
+ CacheEntry entry(key, cachedGraph);
243
+ cache_.emplace(hash, entry);
244
+ profileCachedGraph(entry);
245
+ }
246
+ });
247
+ return cachedGraph;
248
+ }
249
+
250
+ template<typename T>
251
+ inline T* CreateCachedGraphAs(const std::string& key, CreateCachedGraphBlock createCacheBlock) {
252
+ return static_cast<T *>(CreateCachedGraph(key, createCacheBlock));
253
+ }
254
+
255
+ MPSCachedGraph* LookUp(const std::string& key) const {
256
+
257
+ __block MPSCachedGraph* cachedGraph = nullptr;
258
+
259
+ MPSCacheKey hash = std::hash<std::string>{}(key);
260
+
261
+ dispatch_sync(serialQueue_, ^() {
262
+
263
+ if (cache_.count(hash) != 0) {
264
+ auto& entry = cache_.at(hash);
265
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(key == entry.key_, "Key collision in the MPS cached graph!\n");
266
+ cachedGraph = entry.cachedGraph_;
267
+ profileCachedGraph(entry);
268
+ }
269
+ });
270
+ return cachedGraph;
271
+ }
272
+
273
+ template<typename T>
274
+ inline T* LookUpAs(const std::string& key) const {
275
+ return static_cast<T *>(LookUp(key));
276
+ }
277
+
278
+ private:
279
+ MPSGraphCache() {
280
+ serialQueue_ = dispatch_queue_create("cache queue", DISPATCH_QUEUE_SERIAL);
281
+ }
282
+ // this is defined in OperationUtils.mm to not include
283
+ // MPSProfiler.h in header OperationUtils.h
284
+ void profileCachedGraph(const CacheEntry& cacheEntry) const;
285
+
286
+ static MPSGraphCache* _instance_cache;
287
+ std::unordered_map<MPSCacheKey, CacheEntry> cache_;
288
+ dispatch_queue_t serialQueue_ = nullptr;
289
+
290
+ };
291
+
292
+ // Common template for creating graph with a specified cache if missing
293
+ template<typename T>
294
+ inline T* LookUpOrCreateCachedGraph(const std::string& key, std::function<void(MPSGraph*, T*)> instantiate) {
295
+ auto cache_ = MPSGraphCache::getInstance();
296
+ if (auto rc = cache_->LookUpAs<T>(key)) {
297
+ return rc;
298
+ }
299
+ return cache_->CreateCachedGraphAs<T>(key, ^mps::MPSCachedGraph*() {
300
+ T* newCachedGraph = nil;
301
+ @autoreleasepool {
302
+ // Initialize graph
303
+ auto mpsGraph = mps::make_mps_graph();
304
+ newCachedGraph = new T(mpsGraph);
305
+ instantiate(mpsGraph, newCachedGraph);
306
+ }
307
+ return newCachedGraph;
308
+ });
309
+ }
310
+
311
+ // Common math operations
312
+ MPSGraphTensor* log1p(MPSGraph* mpsGraph, MPSGraphTensor* inputTensor);
313
+
314
+ #define MPS_CHECK_INT64_OP_SUPPORTED(input_tensor, mac_os_13_3_plus, op_name) \
315
+ if (!mac_os_13_3_plus && input_tensor.scalar_type() == kLong) { \
316
+ TORCH_WARN_ONCE("MPS: no support for int64 for ", op_name, \
317
+ ", downcasting to a smaller data type (int32/float32). Native support for int64 has been added in macOS 13.3."); \
318
+ }
319
+
320
+ /**
321
+ * Returns distance from lowest to highest element offset in given tensor.
322
+ */
323
+ size_t compute_storage_numel_distance(const at::Tensor& t);
324
+
325
+ /**
326
+ * Checks whether tensor is mapped to a contiguous area in the storage.
327
+ */
328
+ inline bool is_dense_in_storage(const at::Tensor& t) {
329
+ return compute_storage_numel_distance(t) == static_cast<size_t>(t.numel());
330
+ }
331
+
332
+ static inline void mtl_setBuffer(id<MTLComputeCommandEncoder> encoder, const Tensor& t, unsigned idx) {
333
+ [encoder setBuffer:getMTLBufferStorage(t)
334
+ offset:t.storage_offset() * t.element_size()
335
+ atIndex:idx];
336
+ }
337
+
338
+ static inline void mtl_dispatch1DJob(id<MTLComputeCommandEncoder> encoder,
339
+ id<MTLComputePipelineState> cplState,
340
+ uint32_t length) {
341
+ const uint32_t maxThreadsPerGroup = [cplState maxTotalThreadsPerThreadgroup];
342
+ auto size = MTLSizeMake(length, 1, 1);
343
+ auto threadGroupSize = MTLSizeMake(std::min(maxThreadsPerGroup, length), 1, 1);
344
+ [encoder dispatchThreads:size threadsPerThreadgroup:threadGroupSize];
345
+ }
346
+
347
+ id<MTLBuffer> generateKernelDataOffsets(id<MTLComputeCommandEncoder> commandEncoder, const TensorIteratorBase& iter, bool use_64bit_index = false);
348
+
349
+ inline NSDictionary* dictionaryFromPlaceholders(Placeholder& p1) {
350
+ return @{ p1.getMPSGraphTensor(): p1.getMPSGraphTensorData() };
351
+ }
352
+
353
+ inline NSDictionary* dictionaryFromPlaceholders(Placeholder& p1, Placeholder& p2) {
354
+ return @{
355
+ p1.getMPSGraphTensor(): p1.getMPSGraphTensorData(),
356
+ p2.getMPSGraphTensor(): p2.getMPSGraphTensorData(),
357
+ };
358
+ }
359
+
360
+ inline NSDictionary* dictionaryFromPlaceholders(Placeholder& p1, Placeholder& p2, Placeholder& p3) {
361
+ return @{
362
+ p1.getMPSGraphTensor(): p1.getMPSGraphTensorData(),
363
+ p2.getMPSGraphTensor(): p2.getMPSGraphTensorData(),
364
+ p3.getMPSGraphTensor(): p3.getMPSGraphTensorData(),
365
+ };
366
+ }
367
+
368
+ inline NSDictionary* dictionaryFromPlaceholders(Placeholder& p1, Placeholder& p2, Placeholder& p3, Placeholder& p4) {
369
+ return @{
370
+ p1.getMPSGraphTensor(): p1.getMPSGraphTensorData(),
371
+ p2.getMPSGraphTensor(): p2.getMPSGraphTensorData(),
372
+ p3.getMPSGraphTensor(): p3.getMPSGraphTensorData(),
373
+ p4.getMPSGraphTensor(): p4.getMPSGraphTensorData(),
374
+ };
375
+ }
376
+
377
+ inline void runMPSGraph(MPSStream* stream, MPSGraph* graph, NSDictionary* feeds, Placeholder& result) {
378
+ runMPSGraph(stream, graph, feeds, dictionaryFromPlaceholders(result));
379
+ }
380
+
381
+ inline bool supportsComplex() {
382
+ return is_macos_13_or_newer(MacOSVersion::MACOS_VER_14_0_PLUS);
383
+ }
384
+
385
+ // MPS yet to support double types, but starting from MacOS 14, supports bfloat16
386
+ inline bool supportedFloatingType(ScalarType dtype) {
387
+ return dtype == kFloat || dtype == kHalf || dtype == kBFloat16;
388
+ }
389
+
390
+ inline bool supportedFloatingType(const Tensor& t) {
391
+ return supportedFloatingType(t.scalar_type());
392
+ }
393
+
394
+ } // namespace at::native::mps
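The MPSGraphCache above keys cached graphs by a string, hashes it to an MPSCacheKey, and either returns the existing entry (verifying the stored key to catch hash collisions) or builds a new one. Below is a simplified, platform-neutral C++ sketch of that lookup-or-create pattern; the GraphCache and CachedGraph names are hypothetical, and the serial dispatch queue, Objective-C blocks, profiling hook and ownership handling of the real cache are deliberately omitted.

// Simplified sketch of the lookup-or-create pattern used by MPSGraphCache
// (hypothetical names; cleanup/ownership elided for brevity).
#include <cassert>
#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>

struct CachedGraph {            // stand-in for MPSCachedGraph
  std::string debug_name;
};

class GraphCache {
 public:
  using Create = std::function<CachedGraph*()>;

  CachedGraph* lookup_or_create(const std::string& key, const Create& create) {
    const uint64_t hash = std::hash<std::string>{}(key);   // MPSCacheKey
    auto it = cache_.find(hash);
    if (it != cache_.end()) {
      assert(it->second.key == key && "key collision in cached graph");
      return it->second.graph;                              // cache hit
    }
    CachedGraph* graph = create();                          // build on miss
    cache_.emplace(hash, Entry{key, graph});
    return graph;
  }

 private:
  struct Entry { std::string key; CachedGraph* graph; };
  std::unordered_map<uint64_t, Entry> cache_;
};

int main() {
  GraphCache cache;
  auto* g1 = cache.lookup_or_create("add:f32", [] { return new CachedGraph{"add"}; });
  auto* g2 = cache.lookup_or_create("add:f32", [] { return new CachedGraph{"add"}; });
  assert(g1 == g2);  // second call hits the cache
}

In the real header, the creation block runs under dispatch_sync_with_rethrow on a serial queue, so concurrent callers cannot create the same graph twice.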
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/mps/TensorFactory.h ADDED
@@ -0,0 +1,12 @@
+ // Copyright © 2022 Apple Inc.
+
+ #define AT_DISPATCH_MPS_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH( \
+ TYPE, NAME, \
+ AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__))
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/mps/UnaryConstants.h ADDED
@@ -0,0 +1,43 @@
+ #pragma once
+
+ const char* UNARY_KERNEL_TEMPLATE = R"METAL(
+ #include <metal_stdlib>
+ using namespace metal;
+
+ constant float a[4] = {{0.886226899, -1.645349621, 0.914624893, -0.140543331}};
+ constant float b[4] = {{-2.118377725, 1.442710462, -0.329097515, 0.012229801}};
+ constant float c[4] = {{-1.970840454, -1.624906493, 3.429567803, 1.641345311}};
+ constant float d[2] = {{3.543889200, 1.637067800}};
+
+ kernel void erfinv_mps_kernel( device {0} *output [[buffer(0)]],
+ device {1} *input [[buffer(1)]],
+ uint index [[thread_position_in_grid]]) {{
+
+ float y = input[index];
+ float x, z, num, dem; /*working variables */
+ /* coefficients in rational expansion */
+
+ float y_abs = abs(y);
+ if(y_abs > 1.0f){{
+ output[index] = NAN;
+ return;
+ }}
+ if(y_abs == 1.0f){{
+ output[index] = copysign(INFINITY, y);
+ return;
+ }}
+ if(y_abs <= 0.7f) {{
+ z = y * y;
+ num = (((a[3]*z + a[2])*z + a[1])*z + a[0]);
+ dem = ((((b[3]*z + b[2])*z + b[1])*z +b[0]) * z + 1.0f);
+ x = y * num / dem;
+ }}
+ else{{
+ z = sqrt(-1.0f*log((1.0-y_abs)/2.0));
+ num = ((c[3]*z + c[2])*z + c[1]) * z + c[0];
+ dem = (d[1]*z + d[0])*z + 1.0f;
+ x = copysign(num, y) / dem;
+ }}
+
+ output[index] = x;
+ }})METAL";
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/nested/NestedTensorBinaryOps.h ADDED
@@ -0,0 +1,16 @@
+ #pragma once
+
+ #include <ATen/core/ATen_fwd.h>
+ #include <ATen/native/DispatchStub.h>
+
+ namespace at {
+ namespace native {
+
+ enum class NESTED_DENSE_OP: uint8_t {ADD, MUL};
+
+ using nested_dense_elementwise_fn = void (*)(Tensor& result, const Tensor & self, const Tensor & other, const NESTED_DENSE_OP& op);
+
+ DECLARE_DISPATCH(nested_dense_elementwise_fn, nested_dense_elementwise_stub);
+
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/nested/NestedTensorFactories.h ADDED
@@ -0,0 +1,7 @@
+ #pragma once
+
+ namespace at {
+ namespace native {
+
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/nested/NestedTensorMath.h ADDED
@@ -0,0 +1,81 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/ATen_fwd.h>
4
+ #include <ATen/NestedTensorImpl.h>
5
+ #include <c10/macros/Macros.h>
6
+
7
+ namespace at {
8
+ namespace native {
9
+
10
+ TORCH_API Tensor NestedTensor_to_padded_tensor_generic(
11
+ const Tensor& t,
12
+ double padding,
13
+ OptionalIntArrayRef output_size);
14
+
15
+ template <typename Func>
16
+ Tensor map_nt(const Tensor& nt, Func f) {
17
+ auto* nt_impl = get_nested_tensor_impl(nt);
18
+ const auto& sizes = nt_impl->get_nested_sizes();
19
+ return at::detail::make_tensor<NestedTensorImpl>(f(nt_impl->get_buffer()), sizes);
20
+ }
21
+ template <typename Func>
22
+ Tensor map_nt_binary(const Tensor& nt_1, const Tensor& nt_2, Func f){
23
+ auto* nt_impl_1 = get_nested_tensor_impl(nt_1);
24
+ auto* nt_impl_2 = get_nested_tensor_impl(nt_2);
25
+ const auto& sizes = nt_impl_1->get_nested_sizes();
26
+ return at::detail::make_tensor<NestedTensorImpl>(f(nt_impl_1->get_buffer(), nt_impl_2->get_buffer()), sizes);
27
+ }
28
+
29
+ C10_ALWAYS_INLINE std::pair<int64_t, int64_t> _check_nested_layer_norm_inputs(
30
+ const NestedTensorImpl& input,
31
+ IntArrayRef normalized_shape,
32
+ const Tensor& weight /* optional */,
33
+ const Tensor& bias /* optional */) {
34
+
35
+ const size_t normalized_ndim = normalized_shape.size();
36
+ TORCH_CHECK(
37
+ normalized_ndim >= 1,
38
+ "Expected normalized_shape to be at least 1-dimensional, i.e., ",
39
+ "containing at least one element, but got normalized_shape = ",
40
+ normalized_shape);
41
+ TORCH_CHECK(
42
+ !weight.defined() || weight.sizes().equals(normalized_shape),
43
+ "Expected weight to be of same shape as normalized_shape, but got ",
44
+ "weight of shape ",
45
+ weight.sizes(),
46
+ " and normalized_shape = ",
47
+ normalized_shape);
48
+ TORCH_CHECK(
49
+ !bias.defined() || bias.sizes().equals(normalized_shape),
50
+ "Expected bias to be of same shape as normalized_shape, but got ",
51
+ "bias of shape ",
52
+ bias.sizes(),
53
+ " and normalized_shape = ",
54
+ normalized_shape);
55
+
56
+ // Check that the normalized_shape has the exact same sizes as the last dimensions from the NestedTensor input
57
+ // Also, compute M and N considering the idiosyncracies of NestedTensors
58
+ int64_t N = 1;
59
+ for (const auto i: c10::irange(normalized_ndim)) {
60
+ TORCH_CHECK(
61
+ input.opt_size(-normalized_ndim + i) != c10::nullopt,
62
+ "normalized_shape extends into irregular dimensions for the nested tensor"
63
+ );
64
+ TORCH_CHECK(
65
+ normalized_shape[i] == *input.opt_size(-normalized_ndim + i),
66
+ "The shape at dimension ",
67
+ i,
68
+ "of normalized_shape doesn't match the input"
69
+ );
70
+ N *= normalized_shape[i];
71
+ }
72
+
73
+ const int64_t M = input.numel() / N;
74
+
75
+ return std::make_pair(M, N);
76
+ }
77
+
78
+ Tensor reshape_nested(const Tensor& self, IntArrayRef proposed_shape);
79
+
80
+ } // namespace native
81
+ } // namespace at
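_check_nested_layer_norm_inputs above reduces the layer-norm geometry to a pair (M, N): N is the number of elements covered by normalized_shape, and M is input.numel() / N. A self-contained sketch of that arithmetic for one hypothetical nested input follows; the sequence lengths and the 256-wide trailing dimension are made up for illustration and are not taken from the header.

// Minimal sketch of the (M, N) computation performed by
// _check_nested_layer_norm_inputs: N = product of normalized_shape,
// M = total element count / N.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const std::vector<int64_t> normalized_shape = {256};  // normalize last dim only
  // Pretend the nested input holds sequences of lengths 3, 5 and 4, each with a
  // regular trailing dimension of 256, so numel = (3 + 5 + 4) * 256.
  const int64_t numel = (3 + 5 + 4) * 256;

  int64_t N = 1;
  for (int64_t d : normalized_shape) N *= d;
  const int64_t M = numel / N;

  std::printf("M = %lld, N = %lld\n", (long long)M, (long long)N);  // M = 12, N = 256
}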
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/native/nested/NestedTensorTransformerFunctions.h ADDED
@@ -0,0 +1,103 @@
1
+ /**
2
+ * Transformer-specific NestedTensor utility functions.
3
+ *
4
+ * Not co-located with NestedTensor core code yet because they only
5
+ * support specific cases needed in transformers.
6
+ */
7
+ #pragma once
8
+
9
+ #include <vector>
10
+
11
+ #include <c10/macros/Macros.h>
12
+ #include <c10/util/Optional.h>
13
+
14
+ namespace c10 {
15
+ class Scalar;
16
+ } // namespace c10
17
+
18
+ namespace at {
19
+ class Tensor;
20
+ namespace native {
21
+ struct NestedTensorImpl;
22
+
23
+ // Requires that self is a contiguous NestedTensor, other is not a
24
+ // NestedTensor, self.dim() == 3, and other.dim() == 2. Also, self
25
+ // must have a consistent last dimension across its included Tensors
26
+ // and that dimension must match other.size(0).
27
+ Tensor NestedTensor_matmul(const Tensor& self, const Tensor& other);
28
+
29
+ // Requires that mat1 is a contiguous NestedTensor, self & mat2 are
30
+ // not NestedTensors, mat1.dim() == 3, mat2.dim() == 2, and that mat1
31
+ // has a consistent last dimension across its included Tensors that
32
+ // matches mat2.size(0).
33
+ Tensor NestedTensor_times_Tensor_plus_Tensor_addmm(
34
+ const Tensor& self,
35
+ const Tensor& mat1,
36
+ const Tensor& mat2,
37
+ const c10::Scalar& beta,
38
+ const c10::Scalar& alpha,
39
+ c10::optional<bool> use_gelu = c10::nullopt);
40
+
41
+ Tensor NestedTensor_add_NestedTensor_in_place(
42
+ const Tensor& self,
43
+ const Tensor& other);
44
+
45
+ TORCH_API Tensor NestedTensor_batch_offsets_from_size_tensor(
46
+ const Tensor& sizes,
47
+ int64_t extra_elements);
48
+
49
+ Tensor NestedTensor_from_padded_tensor_cpu(
50
+ const Tensor& padded,
51
+ const NestedTensorImpl& nt);
52
+
53
+ Tensor NestedTensor_to_mask(const Tensor& nt, c10::optional<int64_t> mask_dim, c10::optional<int64_t> mask_dim_length);
54
+
55
+ template <typename T>
56
+ void remove_padding_kernelLauncher(
57
+ const T* input,
58
+ T* output,
59
+ const int* offsets,
60
+ const int* input_sizes,
61
+ const int* output_sizes,
62
+ int output_dim,
63
+ const int batch_size);
64
+
65
+ template <typename T>
66
+ void remove_padding_transform0213_kernelLauncher(
67
+ const T* input,
68
+ T* output,
69
+ const int* offsets,
70
+ const int* input_sizes,
71
+ const int* output_sizes,
72
+ int output_dim,
73
+ const int batch_size);
74
+
75
+ template <typename T>
76
+ void add_padding_kernelLauncher(
77
+ T* input,
78
+ T* output,
79
+ T padding_value,
80
+ const int* offsets,
81
+ const int* input_sizes,
82
+ int input_dim,
83
+ const std::vector<int64_t>& output_sizes,
84
+ const int batch_size,
85
+ const int output_batch_size);
86
+
87
+ TORCH_API Tensor flash_attention_helper(
88
+ const Tensor& query,
89
+ const Tensor& key,
90
+ const Tensor& value,
91
+ double dropout_p,
92
+ bool need_attn_weights,
93
+ bool is_causal);
94
+
95
+ TORCH_API std::tuple<Tensor, Tensor> mem_efficient_helper_nested_unpacked(
96
+ const Tensor& query,
97
+ const Tensor& key,
98
+ const Tensor& value,
99
+ double dropout_p,
100
+ bool need_attn_weights,
101
+ bool is_causal);
102
+ } // namespace native
103
+ } // namespace at
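The comment on NestedTensor_matmul above spells out its shape contract: self is a contiguous 3-d NestedTensor whose components share a last dimension equal to other.size(0), and other is a plain 2-d Tensor. The small standalone illustration below walks through that bookkeeping; the component sizes are hypothetical and it does not call the actual function.

// Illustration (plain C++, made-up data) of the documented shape contract for
// NestedTensor_matmul: nested [T_i x K] components times a dense [K x N]
// matrix give nested [T_i x N] components.
#include <array>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Per-component sizes of a 3-d nested "self": {T_i, K} for each component.
  const std::vector<std::array<int64_t, 2>> self_sizes = {{3, 64}, {7, 64}, {5, 64}};
  const std::array<int64_t, 2> other_size = {64, 128};  // dense [K x N]

  for (const auto& sz : self_sizes) {
    assert(sz[1] == other_size[0] && "last dim must match other.size(0)");
  }
  for (const auto& sz : self_sizes) {
    std::printf("component result: [%lld x %lld]\n",
                (long long)sz[0], (long long)other_size[1]);
  }
}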