applied-ai-018 committed
Commit f1d10ec · verified · 1 parent: 76d22f7

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/hf_ckpt/global_step100/pytorch_model.bin +3 -0
  2. ckpts/universal/global_step120/zero/11.input_layernorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  6. ckpts/universal/global_step120/zero/3.attention.query_key_value.weight/fp32.pt +3 -0
  7. venv/lib/python3.10/site-packages/torch/include/ATen/native/Activation.h +98 -0
  8. venv/lib/python3.10/site-packages/torch/include/ATen/native/AdaptivePooling.h +39 -0
  9. venv/lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h +321 -0
  10. venv/lib/python3.10/site-packages/torch/include/ATen/native/BinaryOps.h +119 -0
  11. venv/lib/python3.10/site-packages/torch/include/ATen/native/BucketizationUtils.h +173 -0
  12. venv/lib/python3.10/site-packages/torch/include/ATen/native/CPUFallback.h +45 -0
  13. venv/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h +13 -0
  14. venv/lib/python3.10/site-packages/torch/include/ATen/native/CompositeRandomAccessor.h +34 -0
  15. venv/lib/python3.10/site-packages/torch/include/ATen/native/ConvUtils.h +446 -0
  16. venv/lib/python3.10/site-packages/torch/include/ATen/native/Copy.h +20 -0
  17. venv/lib/python3.10/site-packages/torch/include/ATen/native/Cross.h +14 -0
  18. venv/lib/python3.10/site-packages/torch/include/ATen/native/DilatedConvolutionUtils.h +229 -0
  19. venv/lib/python3.10/site-packages/torch/include/ATen/native/Distance.h +20 -0
  20. venv/lib/python3.10/site-packages/torch/include/ATen/native/DistributionTemplates.h +394 -0
  21. venv/lib/python3.10/site-packages/torch/include/ATen/native/Distributions.h +518 -0
  22. venv/lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h +139 -0
  23. venv/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h +371 -0
  24. venv/lib/python3.10/site-packages/torch/include/ATen/native/FractionalMaxPooling.h +80 -0
  25. venv/lib/python3.10/site-packages/torch/include/ATen/native/FunctionOfAMatrixUtils.h +20 -0
  26. venv/lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h +298 -0
  27. venv/lib/python3.10/site-packages/torch/include/ATen/native/Histogram.h +16 -0
  28. venv/lib/python3.10/site-packages/torch/include/ATen/native/IndexKernel.h +41 -0
  29. venv/lib/python3.10/site-packages/torch/include/ATen/native/IndexingUtils.h +160 -0
  30. venv/lib/python3.10/site-packages/torch/include/ATen/native/Lerp.h +46 -0
  31. venv/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebra.h +18 -0
  32. venv/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebraUtils.h +623 -0
  33. venv/lib/python3.10/site-packages/torch/include/ATen/native/LossMulti.h +72 -0
  34. venv/lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h +71 -0
  35. venv/lib/python3.10/site-packages/torch/include/ATen/native/NonSymbolicBC.h +26 -0
  36. venv/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h +11 -0
  37. venv/lib/python3.10/site-packages/torch/include/ATen/native/Padding.h +62 -0
  38. venv/lib/python3.10/site-packages/torch/include/ATen/native/PointwiseOps.h +28 -0
  39. venv/lib/python3.10/site-packages/torch/include/ATen/native/Pool.h +340 -0
  40. venv/lib/python3.10/site-packages/torch/include/ATen/native/Pow.h +69 -0
  41. venv/lib/python3.10/site-packages/torch/include/ATen/native/RNN.h +53 -0
  42. venv/lib/python3.10/site-packages/torch/include/ATen/native/RangeFactories.h +12 -0
  43. venv/lib/python3.10/site-packages/torch/include/ATen/native/ReduceAllOps.h +16 -0
  44. venv/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOps.h +56 -0
  45. venv/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOpsUtils.h +449 -0
  46. venv/lib/python3.10/site-packages/torch/include/ATen/native/ReductionType.h +40 -0
  47. venv/lib/python3.10/site-packages/torch/include/ATen/native/Repeat.h +48 -0
  48. venv/lib/python3.10/site-packages/torch/include/ATen/native/Resize.h +173 -0
  49. venv/lib/python3.10/site-packages/torch/include/ATen/native/ResizeCommon.h +75 -0
  50. venv/lib/python3.10/site-packages/torch/include/ATen/native/ScatterGatherChecks.h +128 -0
ckpts/hf_ckpt/global_step100/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:236e12db8c1e238ae03573c81df6ecf83d675c0231eb51287fdd11b2e5d612dc
+ size 4857494742
ckpts/universal/global_step120/zero/11.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b98f72270d3a00c1ebf35d22d90615933480a9b60fdea903142860e00d0b2ea
+ size 9387
ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd90a5e77e520ac37a6b0c1d4fd3349152e0ac41fdc22d98b1cb018d9f84726c
+ size 33555612
ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6dc4b3521c5f99fb831ccc7a7c3f500085e7f75db2dab395cf71734044420ee4
+ size 33555627
ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edbb5f72b6b5eb88e2a9ff3af3f5f7854e83e9d6814df26049c3c141816ebdc8
+ size 33555533
ckpts/universal/global_step120/zero/3.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3c413b015e5e33481ea9708fc13da7d58964ca6c305b008fe92de9e72f63fb4
+ size 50332749
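
Note on the ckpts/ entries above: these six diffs add Git LFS pointer files, not the binary payloads themselves. Each pointer records the spec version, the SHA-256 object id of the real blob, and its size in bytes (the pytorch_model.bin shard is about 4.5 GiB). Below is a minimal, self-contained C++ sketch of reading that three-line format; the LfsPointer struct and parse_lfs_pointer helper are hypothetical names for illustration, not part of git-lfs.

// Parse a Git LFS pointer of the form shown above; the three keys
// (version, oid, size) follow https://git-lfs.github.com/spec/v1.
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

struct LfsPointer {            // hypothetical type, for illustration only
  std::string version;         // spec URL, e.g. https://git-lfs.github.com/spec/v1
  std::string oid;             // "sha256:<64 hex chars>"
  uint64_t size = 0;           // byte size of the real object
};

LfsPointer parse_lfs_pointer(std::istream& in) {  // hypothetical helper
  LfsPointer p;
  std::string key, value;
  while (in >> key >> value) {                    // each line is "key value"
    if (key == "version") p.version = value;
    else if (key == "oid") p.oid = value;
    else if (key == "size") p.size = std::stoull(value);
  }
  return p;
}

int main() {
  std::istringstream pointer(
      "version https://git-lfs.github.com/spec/v1\n"
      "oid sha256:236e12db8c1e238ae03573c81df6ecf83d675c0231eb51287fdd11b2e5d612dc\n"
      "size 4857494742\n");
  LfsPointer p = parse_lfs_pointer(pointer);
  std::cout << p.oid << " -> " << p.size << " bytes\n";  // ~4.5 GiB checkpoint shard
}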
venv/lib/python3.10/site-packages/torch/include/ATen/native/Activation.h ADDED
@@ -0,0 +1,98 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/string_view.h>
+
+ namespace c10 {
+ class Scalar;
+ }
+
+ namespace at {
+ struct TensorIterator;
+ struct TensorIteratorBase;
+ class TensorBase;
+ }
+
+ namespace at::native {
+
+ // These constants control the approximation behavior of gelu function.
+ enum class GeluType {
+   None,  // Baseline Gelu
+   Tanh,  // Tanh Gelu Approximation
+   END
+ };
+
+ static GeluType get_gelutype_enum(const c10::string_view approximate) {
+   if (approximate == "none") {
+     return GeluType::None;
+   } else if (approximate == "tanh") {
+     return GeluType::Tanh;
+   } else {
+     TORCH_CHECK(false, "approximate argument must be either none or tanh.");
+   }
+ }
+
+ static std::string gelutype_to_string(const GeluType type) {
+   switch(type) {
+     case GeluType::None: return "none";
+     case GeluType::Tanh: return "tanh";
+     default: TORCH_CHECK(false, "unknown GELU type: ", static_cast<int>(type));
+   }
+ }
+
+ using structured_activation_fn = void (*)(TensorIteratorBase&);
+ using structured_activation_backward_fn = void (*)(TensorIteratorBase&);
+
+ using activation_fn = void (*)(TensorIterator&);
+ using activation_backward_fn = void (*)(TensorIterator&);
+ using softplus_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
+ using softplus_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
+ using threshold_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
+ using hardtanh_backward_fn = void (*)(TensorIterator&, const c10::Scalar&, const c10::Scalar&);
+ using hardsigmoid_fn = void(*)(TensorIteratorBase&);
+ using hardsigmoid_backward_fn = void(*)(TensorIteratorBase&);
+ using hardswish_fn = void(*)(TensorIterator&);
+ using hardswish_backward_fn = void(*)(TensorIterator&);
+ using shrink_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+ using softshrink_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+ using shrink_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+ using elu_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&, const c10::Scalar&);
+ using elu_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&, const c10::Scalar&, bool);
+ using leaky_relu_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+ using leaky_relu_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+ using log_sigmoid_cpu_fn = void (*)(TensorBase&, TensorBase&, const TensorBase&);
+ using gelu_fn = void (*)(TensorIteratorBase&, GeluType);
+ using gelu_backward_fn = void (*)(TensorIteratorBase&, GeluType);
+ using glu_jvp_fn = void (*)(TensorIteratorBase&);
+
+ DECLARE_DISPATCH(elu_fn, elu_stub);
+ DECLARE_DISPATCH(elu_backward_fn, elu_backward_stub);
+ DECLARE_DISPATCH(softplus_fn, softplus_stub);
+ DECLARE_DISPATCH(softplus_backward_fn, softplus_backward_stub);
+ DECLARE_DISPATCH(log_sigmoid_cpu_fn, log_sigmoid_cpu_stub);
+ DECLARE_DISPATCH(activation_backward_fn, log_sigmoid_backward_stub);
+ DECLARE_DISPATCH(threshold_fn, threshold_stub);
+ DECLARE_DISPATCH(gelu_fn, GeluKernel);
+ DECLARE_DISPATCH(gelu_backward_fn, GeluBackwardKernel);
+ DECLARE_DISPATCH(hardtanh_backward_fn, hardtanh_backward_stub);
+ DECLARE_DISPATCH(hardsigmoid_fn, hardsigmoid_stub);
+ DECLARE_DISPATCH(hardsigmoid_backward_fn, hardsigmoid_backward_stub);
+ DECLARE_DISPATCH(hardswish_fn, hardswish_stub);
+ DECLARE_DISPATCH(hardswish_backward_fn, hardswish_backward_stub);
+ DECLARE_DISPATCH(shrink_fn, hardshrink_stub);
+ DECLARE_DISPATCH(softshrink_fn, softshrink_stub);
+ DECLARE_DISPATCH(shrink_backward_fn, shrink_backward_stub);
+ DECLARE_DISPATCH(leaky_relu_fn, leaky_relu_stub);
+ DECLARE_DISPATCH(leaky_relu_backward_fn, leaky_relu_backward_stub);
+ DECLARE_DISPATCH(structured_activation_fn, glu_stub);
+ DECLARE_DISPATCH(activation_backward_fn, glu_backward_stub);
+ DECLARE_DISPATCH(glu_jvp_fn, glu_jvp_stub);
+ DECLARE_DISPATCH(structured_activation_fn, silu_stub);
+ DECLARE_DISPATCH(structured_activation_backward_fn, silu_backward_stub);
+ DECLARE_DISPATCH(structured_activation_fn, mish_stub);
+ DECLARE_DISPATCH(activation_backward_fn, mish_backward_stub);
+ DECLARE_DISPATCH(activation_fn, prelu_stub);
+ DECLARE_DISPATCH(activation_backward_fn, prelu_backward_stub);
+
+ } // namespace at::native
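
Note on Activation.h: get_gelutype_enum() and gelutype_to_string() round-trip the string form of the GELU approximation flag. Below is a standalone sketch of the same mapping, with std::string_view and std::runtime_error standing in for c10::string_view and TORCH_CHECK; it mirrors, but is not, the ATen code.

#include <iostream>
#include <stdexcept>
#include <string>
#include <string_view>

enum class GeluType { None, Tanh, END };

GeluType get_gelutype_enum(std::string_view approximate) {
  // "none" selects the exact erf-based GELU, "tanh" the tanh approximation.
  if (approximate == "none") return GeluType::None;
  if (approximate == "tanh") return GeluType::Tanh;
  throw std::runtime_error("approximate argument must be either none or tanh.");
}

std::string gelutype_to_string(GeluType type) {
  switch (type) {
    case GeluType::None: return "none";
    case GeluType::Tanh: return "tanh";
    default: throw std::runtime_error("unknown GELU type");
  }
}

int main() {
  // Round-trip: string -> enum -> string.
  std::cout << gelutype_to_string(get_gelutype_enum("tanh")) << '\n';  // prints: tanh
}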
venv/lib/python3.10/site-packages/torch/include/ATen/native/AdaptivePooling.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/DispatchStub.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/irange.h>
+ #include <cmath>
+
+ namespace at::native {
+
+ using adaptive_avg_pooling_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
+ using adaptive_avg_pooling_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
+ DECLARE_DISPATCH(adaptive_avg_pooling_fn, adaptive_avg_pool2d_kernel);
+ DECLARE_DISPATCH(adaptive_avg_pooling_backward_fn, adaptive_avg_pool2d_backward_kernel);
+
+ using adaptive_max_pooling_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
+ using adaptive_max_pooling_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
+ DECLARE_DISPATCH(adaptive_max_pooling_fn, adaptive_max_pool2d_kernel);
+ DECLARE_DISPATCH(adaptive_max_pooling_backward_fn, adaptive_max_pool2d_backward_kernel);
+
+ static inline int64_t start_index(int64_t a, int64_t b, int64_t c) {
+   return (a / b) * c + ((a % b) * c) / b;
+ }
+
+ static inline int64_t end_index(int64_t a, int64_t b, int64_t c) {
+   return 1 + ((a + 1) * c - 1) / b;
+ }
+
+ static inline void adaptive_pool_empty_output_check(const Tensor& gradOutput_, const char* arg_name) {
+   int64_t ndim = gradOutput_.ndimension();
+   for (const auto i : c10::irange(1, ndim)) {
+     TORCH_CHECK(gradOutput_.size(i) > 0,
+       arg_name, "(): Expected grad_output to have non-zero size for non-batch dimensions, "
+       "but grad_output has sizes ", gradOutput_.sizes(), " with dimension ", i,
+       " being empty");
+   }
+ }
+
+ } // namespace at::native
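
Note on AdaptivePooling.h: for output position a of b bins over an input of length c, start_index()/end_index() give the half-open input range [start, end) the bin pools over; adjacent bins may overlap so the whole input is always covered. A self-contained worked example for an input of length 5 pooled to 3 outputs, with the formulas copied from the header:

#include <cstdint>
#include <iostream>

static inline int64_t start_index(int64_t a, int64_t b, int64_t c) {
  return (a / b) * c + ((a % b) * c) / b;
}

static inline int64_t end_index(int64_t a, int64_t b, int64_t c) {
  return 1 + ((a + 1) * c - 1) / b;
}

int main() {
  const int64_t output_size = 3, input_size = 5;
  for (int64_t a = 0; a < output_size; ++a) {
    // Prints bin 0: [0,2)  bin 1: [1,4)  bin 2: [3,5)
    std::cout << "bin " << a << ": ["
              << start_index(a, output_size, input_size) << ","
              << end_index(a, output_size, input_size) << ")\n";
  }
}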
venv/lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h ADDED
@@ -0,0 +1,321 @@
+ #pragma once
+
+ #include <c10/util/Optional.h>
+ #include <c10/util/string_view.h>
+ #include <ATen/Config.h>
+ #include <ATen/native/DispatchStub.h>
+
+ // Forward declare TI
+ namespace at {
+ class Tensor;
+ struct TensorIterator;
+
+ namespace native {
+ enum class TransposeType;
+ }
+
+ }
+
+ namespace at::native {
+
+ enum class LapackLstsqDriverType : int64_t { Gels, Gelsd, Gelsy, Gelss};
+
+ #if AT_BUILD_WITH_LAPACK()
+ // Define per-batch functions to be used in the implementation of batched
+ // linear algebra operations
+
+ template <class scalar_t>
+ void lapackCholesky(char uplo, int n, scalar_t *a, int lda, int *info);
+
+ template <class scalar_t>
+ void lapackCholeskyInverse(char uplo, int n, scalar_t *a, int lda, int *info);
+
+ template <class scalar_t, class value_t=scalar_t>
+ void lapackEig(char jobvl, char jobvr, int n, scalar_t *a, int lda, scalar_t *w, scalar_t* vl, int ldvl, scalar_t *vr, int ldvr, scalar_t *work, int lwork, value_t *rwork, int *info);
+
+ template <class scalar_t>
+ void lapackGeqrf(int m, int n, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info);
+
+ template <class scalar_t>
+ void lapackOrgqr(int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info);
+
+ template <class scalar_t>
+ void lapackOrmqr(char side, char trans, int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *c, int ldc, scalar_t *work, int lwork, int *info);
+
+ template <class scalar_t, class value_t = scalar_t>
+ void lapackSyevd(char jobz, char uplo, int n, scalar_t* a, int lda, value_t* w, scalar_t* work, int lwork, value_t* rwork, int lrwork, int* iwork, int liwork, int* info);
+
+ template <class scalar_t>
+ void lapackGels(char trans, int m, int n, int nrhs,
+     scalar_t *a, int lda, scalar_t *b, int ldb,
+     scalar_t *work, int lwork, int *info);
+
+ template <class scalar_t, class value_t = scalar_t>
+ void lapackGelsd(int m, int n, int nrhs,
+     scalar_t *a, int lda, scalar_t *b, int ldb,
+     value_t *s, value_t rcond, int *rank,
+     scalar_t* work, int lwork,
+     value_t *rwork, int* iwork, int *info);
+
+ template <class scalar_t, class value_t = scalar_t>
+ void lapackGelsy(int m, int n, int nrhs,
+     scalar_t *a, int lda, scalar_t *b, int ldb,
+     int *jpvt, value_t rcond, int *rank,
+     scalar_t *work, int lwork, value_t* rwork, int *info);
+
+ template <class scalar_t, class value_t = scalar_t>
+ void lapackGelss(int m, int n, int nrhs,
+     scalar_t *a, int lda, scalar_t *b, int ldb,
+     value_t *s, value_t rcond, int *rank,
+     scalar_t *work, int lwork,
+     value_t *rwork, int *info);
+
+ template <LapackLstsqDriverType, class scalar_t, class value_t = scalar_t>
+ struct lapackLstsq_impl;
+
+ template <class scalar_t, class value_t>
+ struct lapackLstsq_impl<LapackLstsqDriverType::Gels, scalar_t, value_t> {
+   static void call(
+       char trans, int m, int n, int nrhs,
+       scalar_t *a, int lda, scalar_t *b, int ldb,
+       scalar_t *work, int lwork, int *info, // Gels flavor
+       int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+       value_t *s, // Gelss flavor
+       int *iwork // Gelsd flavor
+       ) {
+     lapackGels<scalar_t>(
+         trans, m, n, nrhs,
+         a, lda, b, ldb,
+         work, lwork, info);
+   }
+ };
+
+ template <class scalar_t, class value_t>
+ struct lapackLstsq_impl<LapackLstsqDriverType::Gelsy, scalar_t, value_t> {
+   static void call(
+       char trans, int m, int n, int nrhs,
+       scalar_t *a, int lda, scalar_t *b, int ldb,
+       scalar_t *work, int lwork, int *info, // Gels flavor
+       int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+       value_t *s, // Gelss flavor
+       int *iwork // Gelsd flavor
+       ) {
+     lapackGelsy<scalar_t, value_t>(
+         m, n, nrhs,
+         a, lda, b, ldb,
+         jpvt, rcond, rank,
+         work, lwork, rwork, info);
+   }
+ };
+
+ template <class scalar_t, class value_t>
+ struct lapackLstsq_impl<LapackLstsqDriverType::Gelsd, scalar_t, value_t> {
+   static void call(
+       char trans, int m, int n, int nrhs,
+       scalar_t *a, int lda, scalar_t *b, int ldb,
+       scalar_t *work, int lwork, int *info, // Gels flavor
+       int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+       value_t *s, // Gelss flavor
+       int *iwork // Gelsd flavor
+       ) {
+     lapackGelsd<scalar_t, value_t>(
+         m, n, nrhs,
+         a, lda, b, ldb,
+         s, rcond, rank,
+         work, lwork,
+         rwork, iwork, info);
+   }
+ };
+
+ template <class scalar_t, class value_t>
+ struct lapackLstsq_impl<LapackLstsqDriverType::Gelss, scalar_t, value_t> {
+   static void call(
+       char trans, int m, int n, int nrhs,
+       scalar_t *a, int lda, scalar_t *b, int ldb,
+       scalar_t *work, int lwork, int *info, // Gels flavor
+       int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+       value_t *s, // Gelss flavor
+       int *iwork // Gelsd flavor
+       ) {
+     lapackGelss<scalar_t, value_t>(
+         m, n, nrhs,
+         a, lda, b, ldb,
+         s, rcond, rank,
+         work, lwork,
+         rwork, info);
+   }
+ };
+
+ template <LapackLstsqDriverType driver_type, class scalar_t, class value_t = scalar_t>
+ void lapackLstsq(
+     char trans, int m, int n, int nrhs,
+     scalar_t *a, int lda, scalar_t *b, int ldb,
+     scalar_t *work, int lwork, int *info, // Gels flavor
+     int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
+     value_t *s, // Gelss flavor
+     int *iwork // Gelsd flavor
+     ) {
+   lapackLstsq_impl<driver_type, scalar_t, value_t>::call(
+       trans, m, n, nrhs,
+       a, lda, b, ldb,
+       work, lwork, info,
+       jpvt, rcond, rank, rwork,
+       s,
+       iwork);
+ }
+
+ template <class scalar_t>
+ void lapackLuSolve(char trans, int n, int nrhs, scalar_t *a, int lda, int *ipiv, scalar_t *b, int ldb, int *info);
+
+ template <class scalar_t>
+ void lapackLu(int m, int n, scalar_t *a, int lda, int *ipiv, int *info);
+
+ template <class scalar_t>
+ void lapackLdlHermitian(
+     char uplo,
+     int n,
+     scalar_t* a,
+     int lda,
+     int* ipiv,
+     scalar_t* work,
+     int lwork,
+     int* info);
+
+ template <class scalar_t>
+ void lapackLdlSymmetric(
+     char uplo,
+     int n,
+     scalar_t* a,
+     int lda,
+     int* ipiv,
+     scalar_t* work,
+     int lwork,
+     int* info);
+
+ template <class scalar_t>
+ void lapackLdlSolveHermitian(
+     char uplo,
+     int n,
+     int nrhs,
+     scalar_t* a,
+     int lda,
+     int* ipiv,
+     scalar_t* b,
+     int ldb,
+     int* info);
+
+ template <class scalar_t>
+ void lapackLdlSolveSymmetric(
+     char uplo,
+     int n,
+     int nrhs,
+     scalar_t* a,
+     int lda,
+     int* ipiv,
+     scalar_t* b,
+     int ldb,
+     int* info);
+
+ template<class scalar_t, class value_t=scalar_t>
+ void lapackSvd(char jobz, int m, int n, scalar_t *a, int lda, value_t *s, scalar_t *u, int ldu, scalar_t *vt, int ldvt, scalar_t *work, int lwork, value_t *rwork, int *iwork, int *info);
+ #endif
+
+ #if AT_BUILD_WITH_BLAS()
+ template <class scalar_t>
+ void blasTriangularSolve(char side, char uplo, char trans, char diag, int n, int nrhs, scalar_t* a, int lda, scalar_t* b, int ldb);
+ #endif
+
+ using cholesky_fn = void (*)(const Tensor& /*input*/, const Tensor& /*info*/, bool /*upper*/);
+ DECLARE_DISPATCH(cholesky_fn, cholesky_stub);
+
+ using cholesky_inverse_fn = Tensor& (*)(Tensor& /*result*/, Tensor& /*infos*/, bool /*upper*/);
+
+ DECLARE_DISPATCH(cholesky_inverse_fn, cholesky_inverse_stub);
+
+ using linalg_eig_fn = void (*)(Tensor& /*eigenvalues*/, Tensor& /*eigenvectors*/, Tensor& /*infos*/, const Tensor& /*input*/, bool /*compute_eigenvectors*/);
+
+ DECLARE_DISPATCH(linalg_eig_fn, linalg_eig_stub);
+
+ using geqrf_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/);
+ DECLARE_DISPATCH(geqrf_fn, geqrf_stub);
+
+ using orgqr_fn = Tensor& (*)(Tensor& /*result*/, const Tensor& /*tau*/);
+ DECLARE_DISPATCH(orgqr_fn, orgqr_stub);
+
+ using ormqr_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/, const Tensor& /*other*/, bool /*left*/, bool /*transpose*/);
+ DECLARE_DISPATCH(ormqr_fn, ormqr_stub);
+
+ using linalg_eigh_fn = void (*)(
+     const Tensor& /*eigenvalues*/,
+     const Tensor& /*eigenvectors*/,
+     const Tensor& /*infos*/,
+     bool /*upper*/,
+     bool /*compute_eigenvectors*/);
+ DECLARE_DISPATCH(linalg_eigh_fn, linalg_eigh_stub);
+
+ using lstsq_fn = void (*)(
+     const Tensor& /*a*/,
+     Tensor& /*b*/,
+     Tensor& /*rank*/,
+     Tensor& /*singular_values*/,
+     Tensor& /*infos*/,
+     double /*rcond*/,
+     std::string /*driver_name*/);
+ DECLARE_DISPATCH(lstsq_fn, lstsq_stub);
+
+ using triangular_solve_fn = void (*)(
+     const Tensor& /*A*/,
+     const Tensor& /*B*/,
+     bool /*left*/,
+     bool /*upper*/,
+     TransposeType /*transpose*/,
+     bool /*unitriangular*/);
+ DECLARE_DISPATCH(triangular_solve_fn, triangular_solve_stub);
+
+ using lu_factor_fn = void (*)(
+     const Tensor& /*input*/,
+     const Tensor& /*pivots*/,
+     const Tensor& /*infos*/,
+     bool /*compute_pivots*/);
+ DECLARE_DISPATCH(lu_factor_fn, lu_factor_stub);
+
+ using unpack_pivots_fn = void(*)(
+     TensorIterator& iter,
+     const int64_t dim_size,
+     const int64_t max_pivot);
+ DECLARE_DISPATCH(unpack_pivots_fn, unpack_pivots_stub);
+
+ using lu_solve_fn = void (*)(
+     const Tensor& /*LU*/,
+     const Tensor& /*pivots*/,
+     const Tensor& /*B*/,
+     TransposeType /*trans*/);
+ DECLARE_DISPATCH(lu_solve_fn, lu_solve_stub);
+
+ using ldl_factor_fn = void (*)(
+     const Tensor& /*LD*/,
+     const Tensor& /*pivots*/,
+     const Tensor& /*info*/,
+     bool /*upper*/,
+     bool /*hermitian*/);
+ DECLARE_DISPATCH(ldl_factor_fn, ldl_factor_stub);
+
+ using svd_fn = void (*)(
+     const Tensor& /*A*/,
+     const bool /*full_matrices*/,
+     const bool /*compute_uv*/,
+     const c10::optional<c10::string_view>& /*driver*/,
+     const Tensor& /*U*/,
+     const Tensor& /*S*/,
+     const Tensor& /*Vh*/,
+     const Tensor& /*info*/);
+ DECLARE_DISPATCH(svd_fn, svd_stub);
+
+ using ldl_solve_fn = void (*)(
+     const Tensor& /*LD*/,
+     const Tensor& /*pivots*/,
+     const Tensor& /*result*/,
+     bool /*upper*/,
+     bool /*hermitian*/);
+ DECLARE_DISPATCH(ldl_solve_fn, ldl_solve_stub);
+ } // namespace at::native
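
Note on BatchLinearAlgebra.h: the lapackLstsq_impl specializations give the four LAPACK least-squares drivers (gels, gelsd, gelsy, gelss) one superset signature, so lapackLstsq<driver_type>() selects a driver at compile time and each flavor simply ignores the arguments it does not need. A reduced standalone sketch of that dispatch pattern, with toy drivers in place of real LAPACK calls:

#include <iostream>

enum class Driver { Gels, Gelsd };

// Primary template: no definition, only specializations are usable.
template <Driver d>
struct lstsq_impl;

template <>
struct lstsq_impl<Driver::Gels> {
  static void call(int m, int n, double /* rcond: unused by this flavor */) {
    std::cout << "gels: " << m << "x" << n << '\n';
  }
};

template <>
struct lstsq_impl<Driver::Gelsd> {
  static void call(int m, int n, double rcond) {
    std::cout << "gelsd: " << m << "x" << n << " rcond=" << rcond << '\n';
  }
};

// Uniform wrapper with the superset of arguments; the driver is a template
// parameter, so the choice is resolved entirely at compile time.
template <Driver d>
void lstsq(int m, int n, double rcond) {
  lstsq_impl<d>::call(m, n, rcond);
}

int main() {
  lstsq<Driver::Gels>(4, 2, 0.0);    // rcond ignored by this flavor
  lstsq<Driver::Gelsd>(4, 2, 1e-8);
}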
venv/lib/python3.10/site-packages/torch/include/ATen/native/BinaryOps.h ADDED
@@ -0,0 +1,119 @@
+ #pragma once
+
+ #include <ATen/core/TensorBase.h>
+ #include <ATen/native/DispatchStub.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/util/TypeSafeSignMath.h>
+
+
+ namespace at {
+ struct TensorIterator;
+ struct TensorIteratorBase;
+ }
+
+ namespace at::native {
+
+ inline void alpha_check(const ScalarType dtype, const Scalar& alpha) {
+   TORCH_CHECK(!alpha.isBoolean() || dtype == ScalarType::Bool,
+       "Boolean alpha only supported for Boolean results.");
+   TORCH_CHECK(isFloatingType(dtype) || isComplexType(dtype)
+       || alpha.isIntegral(true),
+       "For integral input tensors, argument alpha must not be a floating point number.");
+   TORCH_CHECK(isComplexType(dtype) || !alpha.isComplex(),
+       "For non-complex input tensors, argument alpha must not be a complex number.")
+ }
+
+ // Basic checking for all sub functions.
+ inline void sub_check(const TensorBase& self, const TensorBase& other) {
+   TORCH_CHECK(self.scalar_type() != kBool || other.scalar_type() != kBool,
+       "Subtraction, the `-` operator, with two bool tensors is not supported. "
+       "Use the `^` or `logical_xor()` operator instead.")
+   TORCH_CHECK(self.scalar_type() != kBool && other.scalar_type() != kBool,
+       "Subtraction, the `-` operator, with a bool tensor is not supported. "
+       "If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
+ }
+
+ inline void sub_check(const TensorBase& self, const Scalar& scalar) {
+   TORCH_CHECK(self.scalar_type() != kBool || !scalar.isBoolean(),
+       "Subtraction, the `-` operator, with two bool tensors is not supported. "
+       "Use the `^` or `logical_xor()` operator instead.")
+   TORCH_CHECK(self.scalar_type() != kBool && !scalar.isBoolean(),
+       "Subtraction, the `-` operator, with a bool tensor is not supported. "
+       "If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
+ }
+
+ using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
+ using structured_binary_fn_double = void(*)(TensorIteratorBase&, double);
+ using structured_binary_fn = void(*)(TensorIteratorBase&);
+
+ using binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
+ using binary_fn_double = void(*)(TensorIterator&, double);
+ using binary_fn = void(*)(TensorIterator&);
+ using binary_clamp_fn_alpha =
+     void(*)(TensorIterator&, const Scalar& alpha, const Scalar& min_val, const Scalar& max_val);
+
+ // NB: codegenned
+ DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub);
+
+ DECLARE_DISPATCH(binary_clamp_fn_alpha, add_clamp_stub);
+ DECLARE_DISPATCH(structured_binary_fn_alpha, sub_stub);
+ DECLARE_DISPATCH(structured_binary_fn, mul_stub);
+ DECLARE_DISPATCH(structured_binary_fn, div_true_stub);
+ DECLARE_DISPATCH(structured_binary_fn, div_floor_stub);
+ DECLARE_DISPATCH(structured_binary_fn, div_trunc_stub);
+ DECLARE_DISPATCH(structured_binary_fn, atan2_stub);
+ DECLARE_DISPATCH(structured_binary_fn, remainder_stub);
+ DECLARE_DISPATCH(structured_binary_fn, bitwise_and_stub);
+ DECLARE_DISPATCH(structured_binary_fn, bitwise_or_stub);
+ DECLARE_DISPATCH(structured_binary_fn, bitwise_xor_stub);
+ DECLARE_DISPATCH(structured_binary_fn, lshift_stub);
+ DECLARE_DISPATCH(structured_binary_fn, rshift_stub);
+ DECLARE_DISPATCH(binary_fn, logical_xor_stub);
+ DECLARE_DISPATCH(binary_fn, logical_and_stub);
+ DECLARE_DISPATCH(binary_fn, logical_or_stub);
+ DECLARE_DISPATCH(structured_binary_fn, lt_stub);
+ DECLARE_DISPATCH(structured_binary_fn, le_stub);
+ DECLARE_DISPATCH(structured_binary_fn, gt_stub);
+ DECLARE_DISPATCH(structured_binary_fn, ge_stub);
+ DECLARE_DISPATCH(structured_binary_fn, eq_stub);
+ DECLARE_DISPATCH(structured_binary_fn, ne_stub);
+ DECLARE_DISPATCH(binary_fn, max_elementwise_stub);
+ DECLARE_DISPATCH(binary_fn, min_elementwise_stub);
+ DECLARE_DISPATCH(structured_binary_fn, maximum_stub);
+ DECLARE_DISPATCH(structured_binary_fn, minimum_stub);
+ DECLARE_DISPATCH(structured_binary_fn, fmax_stub);
+ DECLARE_DISPATCH(structured_binary_fn, fmin_stub);
+ DECLARE_DISPATCH(structured_binary_fn_double, smooth_l1_stub);
+ DECLARE_DISPATCH(binary_fn_double, huber_stub);
+ DECLARE_DISPATCH(structured_binary_fn, sigmoid_backward_stub);
+ DECLARE_DISPATCH(binary_fn_alpha, logit_backward_stub);
+ DECLARE_DISPATCH(structured_binary_fn, tanh_backward_stub);
+ DECLARE_DISPATCH(structured_binary_fn, mse_stub);
+ DECLARE_DISPATCH(structured_binary_fn, fmod_stub);
+ DECLARE_DISPATCH(structured_binary_fn, logaddexp_stub);
+ DECLARE_DISPATCH(structured_binary_fn, logaddexp2_stub);
+ DECLARE_DISPATCH(structured_binary_fn, gcd_stub);
+ DECLARE_DISPATCH(structured_binary_fn, lcm_stub);
+ DECLARE_DISPATCH(structured_binary_fn, hypot_stub);
+ DECLARE_DISPATCH(structured_binary_fn, igamma_stub);
+ DECLARE_DISPATCH(structured_binary_fn, igammac_stub);
+ DECLARE_DISPATCH(structured_binary_fn, nextafter_stub);
+ DECLARE_DISPATCH(structured_binary_fn, heaviside_stub);
+ DECLARE_DISPATCH(structured_binary_fn, copysign_stub);
+ DECLARE_DISPATCH(structured_binary_fn, xlogy_stub);
+ DECLARE_DISPATCH(structured_binary_fn, xlog1py_stub);
+ DECLARE_DISPATCH(structured_binary_fn, zeta_stub);
+ DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_t_stub);
+ DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_u_stub);
+ DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_v_stub);
+ DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_w_stub);
+ DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_h_stub);
+ DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_he_stub);
+ DECLARE_DISPATCH(structured_binary_fn, laguerre_polynomial_l_stub);
+ DECLARE_DISPATCH(structured_binary_fn, legendre_polynomial_p_stub);
+ DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_t_stub);
+ DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_u_stub);
+ DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_v_stub);
+ DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_w_stub);
+
+ } // namespace at::native
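
Note on BinaryOps.h: sub_check() rejects subtraction when either operand is boolean, with a dedicated message for the two-bool case (checked first). A standalone sketch of those two rules, with a plain enum and std::invalid_argument standing in for ScalarType and TORCH_CHECK:

#include <stdexcept>

enum class ScalarType { Bool, Long, Float };

void sub_check(ScalarType self, ScalarType other) {
  // Rule 1: both operands bool -> suggest xor instead.
  if (self == ScalarType::Bool && other == ScalarType::Bool)
    throw std::invalid_argument(
        "Subtraction with two bool tensors is not supported. "
        "Use the `^` or `logical_xor()` operator instead.");
  // Rule 2: exactly one operand bool -> suggest logical_not for mask inversion.
  if (self == ScalarType::Bool || other == ScalarType::Bool)
    throw std::invalid_argument(
        "Subtraction with a bool tensor is not supported. "
        "If you are trying to invert a mask, use `~` or `logical_not()`.");
}

int main() {
  sub_check(ScalarType::Float, ScalarType::Long);   // fine: no bools involved
  try {
    sub_check(ScalarType::Bool, ScalarType::Bool);  // throws rule-1 message
  } catch (const std::invalid_argument&) {}
}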
venv/lib/python3.10/site-packages/torch/include/ATen/native/BucketizationUtils.h ADDED
@@ -0,0 +1,173 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/TypeProperties.h>
+ #include <ATen/ScalarOps.h>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/NativeFunctions.h>
+ #else
+ #include <ATen/ops/result_type.h>
+ #endif
+
+ namespace at::native {
+
+ // original values given by raw_*. If an original value is not contiguous, will make a contiguous copy to
+ // the corresponding trimmed_* value. Additionally, if the dtypes of the boundary and input tensor do not
+ // match, will change them to be a common super type so comparisons are done between the same types.
+ // For any trimmed_* tensor, if its outgoing value matches what it was incoming (typically null), then the
+ // corresponding raw_* version should be used since it was already contiguous of the right type.
+ inline void searchsorted_maybe_trim_input_tensors(
+     Tensor& trimmed_input,
+     Tensor& trimmed_boundaries,
+     Tensor& trimmed_sorter,
+     const Tensor& raw_input,
+     const Tensor& raw_boundaries,
+     const Tensor& raw_sorter) {
+   bool in_is_contiguous = raw_input.is_contiguous();
+   bool bd_is_contiguous = raw_boundaries.is_contiguous();
+   bool sort_is_contiguous = raw_sorter.is_contiguous();
+
+   if (!in_is_contiguous) {
+     TORCH_WARN_ONCE("torch.searchsorted(): input value tensor is non-contiguous, this will lower the performance due "
+       "to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous input value "
+       "tensor if possible. This message will only appear once per program.");
+     trimmed_input = raw_input.contiguous();
+   }
+   if (!bd_is_contiguous) {
+     TORCH_WARN_ONCE("torch.searchsorted(): boundary tensor is non-contiguous, this will lower the performance due "
+       "to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous boundary "
+       "tensor if possible. This message will only appear once per program.");
+     trimmed_boundaries = raw_boundaries.contiguous();
+   }
+   if (!sort_is_contiguous) {
+     TORCH_WARN_ONCE("torch.searchsorted(): sorter tensor is non-contiguous, this will lower the performance due "
+       "to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous sorter "
+       "tensor if possible. This message will only appear once per program.");
+     trimmed_sorter = raw_sorter.contiguous();
+   }
+   if (raw_input.dtype() != raw_boundaries.dtype()) {
+     at::native::ResultTypeState state = {};
+     state = at::native::update_result_type_state(raw_boundaries, state);
+     state = at::native::update_result_type_state(raw_input, state);
+     ScalarType common_stype = at::native::result_type(state);
+
+     TORCH_INTERNAL_ASSERT(common_stype != ScalarType::Undefined);
+     if (common_stype != raw_input.scalar_type()) {
+       trimmed_input = in_is_contiguous ? raw_input.to(common_stype) : trimmed_input.to(common_stype);
+     }
+     if (common_stype != raw_boundaries.scalar_type()) {
+       trimmed_boundaries = bd_is_contiguous ? raw_boundaries.to(common_stype) : trimmed_boundaries.to(common_stype);
+     }
+   }
+ }
+
+ /* unused but needed for internal jagged tensor class */
+ inline void searchsorted_maybe_trim_input_tensors(
+     Tensor& trimmed_input,
+     Tensor& trimmed_boundaries,
+     const Tensor& raw_input,
+     const Tensor& raw_boundaries) {
+   Tensor trimmed_sorter;
+   Tensor raw_sorter;
+   return searchsorted_maybe_trim_input_tensors(
+       trimmed_input,
+       trimmed_boundaries,
+       trimmed_sorter,
+       raw_input,
+       raw_boundaries,
+       raw_sorter);
+ }
+
+ inline bool searchsorted_dims_matched_before_last_dim(const Tensor& boundaries, const Tensor& input) {
+   if (boundaries.dim() != input.dim()) {
+     return false;
+   }
+   const auto& dims_bd = boundaries.sizes();
+   const auto& dims_in = input.sizes();
+   for (int64_t dim = 0; dim + 1 < boundaries.dim(); ++dim) {
+     if (dims_bd[dim] != dims_in[dim]) {
+       return false;
+     }
+   }
+   return true;
+ }
+
+ inline Tensor searchsorted_scalar_tensor(const Scalar& scalar, const c10::Device& device) {
+   auto tensor = c10::scalar_to_tensor(scalar, device);
+   // This is to adopt the scalar promotion rules defined in native/TypeProperties.h
+   // So we have the same type promotion rules as binary operations.
+   tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
+   return tensor;
+ }
+
+ inline void searchsorted_pre_check(
+     const Tensor& boundaries,
+     const Tensor& input,
+     const Tensor& output,
+     const bool out_int32,
+     const bool right,
+     const c10::optional<c10::string_view> side_opt,
+     const Tensor& sorter) {
+   if (side_opt) {
+     const c10::string_view side = *side_opt;
+     TORCH_CHECK(side == "left" || side == "right", "torch.searchsorted(): side can only be 'left' or 'right' but ",
+       "got ", side);
+
+     // assume the user has not explicitly set (right=False, side="right")
+     TORCH_CHECK(!right || side == "right", "torch.searchsorted(): side and right can't be set to opposites, got side "
+       "of ", side, " while right was True");
+   }
+
+   TORCH_CHECK(boundaries.device() == input.device(), "torch.searchsorted(): boundaries and input value tensors ",
+     "should have same device type, but got boundaries tensor device type ", boundaries.device(), " and input value ",
+     "tensor device type ", input.device());
+
+   if (sorter.defined()) {
+     TORCH_CHECK(sorter.device() == boundaries.device(), "torch.searchsorted(): sorter and boundary tensors should ",
+       "have same device type, but got sorter tensor device type ", sorter.device(), " and input value tensor ",
+       "device type ", boundaries.device());
+
+     TORCH_CHECK(sorter.sizes() == boundaries.sizes(), "torch.searchsorted(): boundary and sorter must have the same "
+       "size, but got boundary tensor ", boundaries.sizes(), " and got sorter tensor ", sorter.sizes());
+
+     TORCH_CHECK(sorter.scalar_type() == ScalarType::Long, "torch.searchsorted(): sorter must be a tensor of long ",
+       "dtype but got dtype ", sorter.scalar_type());
+
+     if (sorter.numel() > 0) {
+       auto minmax = sorter.aminmax();
+       int64_t vmin = std::get<0>(minmax).item().toLong();
+       int64_t vmax = std::get<1>(minmax).item().toLong();
+       TORCH_CHECK(vmin >= 0 && vmax < sorter.sizes().back(), "torch.searchsorted(): sorter index out of range");
+     }
+   }
+
+   TORCH_CHECK(input.dim() > 0 || (input.dim() == 0 && input.numel() == 1 && boundaries.dim() == 1),
+     "torch.searchsorted(): input value can be a scalar only when boundaries tensor dimension is 1, but we got ",
+     "boundaries tensor dim(", boundaries.dim(), ") and input value's dim(", input.dim(), ") numel(",
+     input.numel(), ")");
+
+   TORCH_CHECK(boundaries.dim() != 0, "torch.searchsorted(): boundaries tensor should have positive dimension, but ",
+     "got 0 dimension");
+
+   TORCH_CHECK(boundaries.dim() == 1 || searchsorted_dims_matched_before_last_dim(boundaries, input),
+     "torch.searchsorted(): boundaries tensor should be 1 dimension or the first N-1 dimensions of boundaries tensor ",
+     "and input value tensor must match, but we got boundaries tensor ", boundaries.sizes(), " and input value tensor ",
+     input.sizes());
+
+   ScalarType output_dtype = output.scalar_type();
+   TORCH_CHECK(
+     (output_dtype == ScalarType::Long && !out_int32) ||
+     (output_dtype == ScalarType::Int && out_int32),
+     "torch.searchsorted(): output tensor's dtype is wrong, it can only be Int(int32) or Long(int64) depending on ",
+     "whether out_int32 flag is True, but we got output tensor's dtype ", output_dtype,
+     " and out_int32 flag is ", (out_int32 ? "True" : "False"));
+
+   if (out_int32) {
+     TORCH_CHECK(boundaries.sizes().back() < INT_MAX,
+       "torch.searchsorted(): the size of boundaries' last dimension should be less than ", INT_MAX, ", but we got ",
+       boundaries.sizes().back());
+   }
+ }
+
+ } // namespace at::native
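
Note on BucketizationUtils.h: searchsorted_dims_matched_before_last_dim() encodes the shape contract of torch.searchsorted, namely that boundaries and input must agree on every dimension except the last (each row of values gets its own row of bins). A standalone sketch of that check, with std::vector<int64_t> standing in for a tensor's sizes():

#include <cstdint>
#include <iostream>
#include <vector>

bool dims_matched_before_last_dim(const std::vector<int64_t>& boundaries,
                                  const std::vector<int64_t>& input) {
  if (boundaries.size() != input.size()) return false;   // rank must match
  for (size_t dim = 0; dim + 1 < boundaries.size(); ++dim)
    if (boundaries[dim] != input[dim]) return false;     // all dims but the last
  return true;
}

int main() {
  // Same leading dims, different last dim: accepted (per-row bins).
  std::cout << dims_matched_before_last_dim({2, 3, 8}, {2, 3, 5}) << '\n';  // 1
  // Mismatched leading dim: rejected.
  std::cout << dims_matched_before_last_dim({2, 4, 8}, {2, 3, 5}) << '\n';  // 0
}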
venv/lib/python3.10/site-packages/torch/include/ATen/native/CPUFallback.h ADDED
@@ -0,0 +1,45 @@
+ #pragma once
+
+ #include <ATen/core/ivalue.h>
+ #include <ATen/core/stack.h>
+ #include <ATen/core/boxing/KernelFunction.h>
+ #include <ATen/core/dispatch/Dispatcher.h>
+ #include <c10/util/Metaprogramming.h>
+ #include <torch/library.h>
+
+ namespace at::native {
+
+ // This function implements a boxed fallback to CPU.
+ // External backends can add their own custom logging on top of it to customize their own CPU fallbacks.
+ TORCH_API void cpu_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool error_on_views = false);
+
+ // This is a helper function that backends can use to directly call their boxed CPU fallback
+ // TODO: update and add a usage example after https://github.com/pytorch/pytorch/pull/58092 lands.
+ template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op, bool symint, class ReturnType, class... ParameterTypes>
+ struct _call_fallback_fn final {};
+
+ template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op, bool symint, class ReturnType, class... ParameterTypes>
+ struct _call_fallback_fn<fallback_fn, Op, symint, ReturnType(ParameterTypes...)> final {
+   static ReturnType call(typename c10::maybe_keep_symint<symint, ParameterTypes>::type... args) {
+     auto op = c10::Dispatcher::singleton()
+         // TODO: figure out how to make compiler happy without dynamic casts
+         .findSchemaOrThrow((const char*) Op::name, (const char*) Op::overload_name)
+         //.findSchemaOrThrow("a", "b")
+         .typed<ReturnType (typename c10::maybe_keep_symint<symint, ParameterTypes>::type...)>();
+     return c10::impl::BoxedKernelWrapper<ReturnType (typename c10::maybe_keep_symint<symint, ParameterTypes>::type...)>::call(
+         c10::BoxedKernel::makeFromFunction<fallback_fn>(),
+         op,
+         c10::DispatchKeySet(), // we know that the cpu_fallback doesn't use the dispatch keyset.
+         // TODO: get std::forward<> to work
+         args...
+     );
+   }
+ };
+
+ template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op>
+ using call_fallback_fn_symint = _call_fallback_fn<fallback_fn, Op, true, typename Op::schema>;
+
+ template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op>
+ using call_fallback_fn = _call_fallback_fn<fallback_fn, Op, false, typename Op::schema>;
+
+ } // namespace at::native
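
Note on CPUFallback.h: per the comment above, external backends can wrap cpu_fallback() with their own logging and install it as a boxed catch-all kernel. A sketch of how an out-of-tree backend might register it; this only compiles inside a PyTorch C++ extension, and the choice of the PrivateUse1 dispatch key is an assumption here, not taken from this commit.

#include <ATen/native/CPUFallback.h>
#include <torch/library.h>

// Two-argument wrapper: cpu_fallback() has a trailing defaulted parameter,
// so it cannot be passed directly where a BoxedKernelFunction is expected.
void my_backend_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
  // Custom logging could go here before deferring to the generic fallback.
  at::native::cpu_fallback(op, stack);
}

// Install the wrapper as the catch-all kernel for every op on this backend.
TORCH_LIBRARY_IMPL(_, PrivateUse1, m) {
  m.fallback(torch::CppFunction::makeFromBoxedFunction<&my_backend_fallback>());
}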
venv/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+ #include <c10/macros/Export.h>
+ #include <limits>
+
+ namespace at {
+ class TensorBase;
+ }
+
+ namespace at::native {
+
+ TORCH_API bool canUse32BitIndexMath(const at::TensorBase &t, int64_t max_elem=std::numeric_limits<int32_t>::max());
+
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/native/CompositeRandomAccessor.h ADDED
@@ -0,0 +1,34 @@
+ #pragma once
+
+ #include <ATen/native/CompositeRandomAccessorCommon.h>
+
+ namespace at::native {
+
+ struct TupleInfoCPU {
+   template <typename ...Types>
+   using tuple = std::tuple<Types...>;
+
+   template <typename ...Types>
+   static constexpr auto tie(Types&... args) noexcept {
+     return std::tie(args...);
+   }
+ };
+
+ template <typename KeyAccessor, typename ValueAccessor>
+ using CompositeRandomAccessorCPU =
+     CompositeRandomAccessor<KeyAccessor, ValueAccessor, TupleInfoCPU>;
+
+ template <typename Values, typename References>
+ void swap(
+     references_holder<Values, References> rh1,
+     references_holder<Values, References> rh2
+ ) {
+   return std::swap(rh1.data(), rh2.data());
+ }
+
+ template <int N, typename Values, typename References>
+ auto get(references_holder<Values, References> rh) -> decltype(std::get<N>(rh.data())) {
+   return std::get<N>(rh.data());
+ }
+
+ } // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/ConvUtils.h ADDED
@@ -0,0 +1,446 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/TensorUtils.h>
4
+ #include <ATen/detail/CUDAHooksInterface.h>
5
+ #include <ATen/native/DispatchStub.h>
6
+ #include <c10/util/env.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ namespace at::native {
10
+
11
+ using conv_depthwise2d_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
12
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
13
+ at::IntArrayRef, at::IntArrayRef, std::array<bool, 2>);
14
+ DECLARE_DISPATCH(conv_depthwise2d_backward_fn, conv_depthwise2d_backward_stub);
15
+ using conv_depthwise3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
16
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
17
+ at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
18
+ DECLARE_DISPATCH(conv_depthwise3d_backward_fn, conv_depthwise3d_backward_stub);
19
+ using cudnn_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
20
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
21
+ at::IntArrayRef, int64_t, bool, bool, bool, std::array<bool,2>);
22
+ DECLARE_DISPATCH(cudnn_convolution_backward_fn, cudnn_convolution_backward_stub);
23
+ using mps_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
24
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
25
+ at::IntArrayRef, int64_t, std::array<bool,3>);
26
+ DECLARE_DISPATCH(mps_convolution_backward_fn, mps_convolution_backward_stub);
27
+ using cudnn_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
28
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
29
+ at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool, std::array<bool,2>);
30
+ DECLARE_DISPATCH(cudnn_convolution_transpose_backward_fn, cudnn_convolution_transpose_backward_stub);
31
+ using miopen_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
32
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
33
+ at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
34
+ DECLARE_DISPATCH(miopen_convolution_backward_fn, miopen_convolution_backward_stub);
35
+ using miopen_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
36
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
37
+ at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
38
+ DECLARE_DISPATCH(miopen_convolution_transpose_backward_fn, miopen_convolution_transpose_backward_stub);
39
+ using miopen_depthwise_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
40
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
41
+ at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
42
+ DECLARE_DISPATCH(miopen_depthwise_convolution_backward_fn, miopen_depthwise_convolution_backward_stub);
43
+ using mkldnn_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
44
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
45
+ at::IntArrayRef, int64_t, std::array<bool,3>);
46
+ DECLARE_DISPATCH(mkldnn_convolution_backward_fn, mkldnn_convolution_backward_stub);
47
+ using mkldnn_convolution_transpose_fn = Tensor(*)(const Tensor&, const Tensor&, const c10::optional<Tensor>&,
48
+ IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t);
49
+ DECLARE_DISPATCH(mkldnn_convolution_transpose_fn, mkldnn_convolution_transpose_stub);
50
+ using mkldnn_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
51
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
52
+ at::IntArrayRef, at::IntArrayRef, int64_t, std::array<bool,3>);
53
+ DECLARE_DISPATCH(mkldnn_convolution_transpose_backward_fn, mkldnn_convolution_transpose_backward_stub);
54
+ using slow_conv_dilated2d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
55
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
56
+ at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
57
+ DECLARE_DISPATCH(slow_conv_dilated2d_backward_fn, slow_conv_dilated2d_backward_stub);
58
+ using slow_conv_dilated3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
59
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
60
+ at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
61
+ DECLARE_DISPATCH(slow_conv_dilated3d_backward_fn, slow_conv_dilated3d_backward_stub);
62
+ using slow_conv_transpose2d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
63
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
64
+ at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, std::array<bool,3>);
65
+ DECLARE_DISPATCH(slow_conv_transpose2d_backward_fn, slow_conv_transpose2d_backward_stub);
66
+ using slow_conv_transpose3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
67
+ const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
68
+ at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, std::array<bool,3>);
69
+ DECLARE_DISPATCH(slow_conv_transpose3d_backward_fn, slow_conv_transpose3d_backward_stub);
70
+
71
+ namespace {
72
+ static bool cudnnv8_heuristic_mode_b = c10::utils::check_env("TORCH_CUDNN_USE_HEURISTIC_MODE_B") == true;
73
+ }
74
+
75
+ static inline bool cudnnv8_enabled_check_debug() {
76
+ static bool cudnnv8_flag = c10::utils::check_env("TORCH_CUDNN_V8_API_DISABLED") != true;
77
+ static bool cudnnv8_debug = c10::utils::check_env("TORCH_CUDNN_V8_API_DEBUG") == true;
78
+ static uint8_t cudnnv8_debugcount = 0;
79
+ if (cudnnv8_debug == 1 && cudnnv8_debugcount < 10) {
80
+ TORCH_WARN("TORCH_CUDNN_V8_DEBUG ON, V8 ON: ", cudnnv8_flag, " TORCH_CUDNN_USE_HEURISTIC_MODE B: ", cudnnv8_heuristic_mode_b);
81
+ cudnnv8_debugcount++;
82
+ }
83
+ return cudnnv8_flag == 1;
84
+ }
85
+
86
+ static inline bool cudnnv8_use_heur_mode_b() {
87
+ return cudnnv8_heuristic_mode_b;
88
+ }
89
+
90
+ // Keep in sync with py::enum_ in Module.cpp
91
+ enum class ConvBackend {
92
+ CudaDepthwise2d,
93
+ CudaDepthwise3d,
94
+ Cudnn,
95
+ CudnnTranspose,
96
+ Empty,
97
+ Miopen,
98
+ MiopenDepthwise,
99
+ MiopenTranspose,
100
+ Mkldnn,
101
+ MkldnnTranspose,
102
+ MkldnnEmpty,
103
+ NnpackSpatial,
104
+ Overrideable,
105
+ Slow2d,
106
+ Slow3d,
107
+ SlowDilated2d,
108
+ SlowDilated3d,
109
+ SlowTranspose2d,
110
+ SlowTranspose3d,
111
+ Winograd3x3Depthwise,
112
+ Xnnpack2d,
113
+ Mps,
114
+ MpsTranspose,
115
+ };
116
+
117
+ // Overload for selecting the convolution backend from the full set of convolution inputs.
118
+ // This overload is exposed to python for testing, etc.
119
+ TORCH_API ConvBackend select_conv_backend(
120
+ const Tensor& input, const Tensor& weight, const c10::optional<Tensor>& bias_opt,
121
+ SymIntArrayRef stride, SymIntArrayRef padding, SymIntArrayRef dilation,
122
+ bool transposed, SymIntArrayRef output_padding, c10::SymInt groups, const at::OptionalSymIntArrayRef bias_sizes_opt);
123
+
124
+ TORCH_API at::MemoryFormat _determine_backend_memory_format(const Tensor& input,
125
+ const Tensor& weight,
126
+ const ConvBackend backend);
127
+
128
+ // ---------------------------------------------------------------------
129
+ //
130
+ // Math
131
+ //
132
+ // ---------------------------------------------------------------------
133
+
134
+ constexpr int input_batch_size_dim = 0; // also grad_input
135
+ constexpr int input_channels_dim = 1;
136
+ constexpr int output_batch_size_dim = 0; // also grad_output
137
+ constexpr int output_channels_dim = 1;
138
+ constexpr int weight_output_channels_dim = 0;
139
+ constexpr int weight_input_channels_dim = 1;
140
+
141
+ // Often written as 2 + max_dim (extra dims for batch size and channels)
+ constexpr int max_dim = 3;
+
+ // ---------------------------------------------------------------------
+ //
+ // Checking
+ //
+ // ---------------------------------------------------------------------
+
+ // Used on pad, stride and dilation
+ static void check_args(CheckedFrom c, IntArrayRef args, size_t expected_size, const char* arg_name)
+ {
+   TORCH_CHECK(args.size() <= expected_size,
+     "Too many ", arg_name, " values (", args.size(), ") supplied, expecting ",
+     expected_size, " (while checking arguments for ", c, ")");
+   TORCH_CHECK(args.size() >= expected_size,
+     "Not enough ", arg_name, " values (", args.size(), ") supplied, expecting ",
+     expected_size, " (while checking arguments for ", c, ")");
+
+   auto num_negative_values = std::count_if(args.begin(), args.end(), [](int x){return x < 0;});
+   if (num_negative_values > 0){
+     std::stringstream ss;
+     ss << arg_name << " should be non-negative but got (";
+     std::copy(args.begin(), args.end() - 1, std::ostream_iterator<int>(ss, ", "));
+     ss << args.back() << ")" << " (while checking arguments for " << c << ")";
+     AT_ERROR(ss.str());
+   }
+ }
+
+
+ // NOTE [ Convolution checks ]
+ //
+ // NB: For many call sites, it is not strictly necessary to check all of
+ // these relationships (for example, for forward convolution, we compute
+ // the size of output ourselves, so we don't actually need to check
+ // output). However, writing a single function that does everything
+ // means we get to reuse it for both forwards and all backwards
+ // variants, even when the set of "real" inputs varies. The magic of
+ // relational computing!
+ //
+ // (There is one downside, which is that it is slightly harder to write
+ // error messages which are able to distinguish between real inputs
+ // (which the user can change) and computed inputs (which the user can
+ // only indirectly affect). It would be an interesting exercise to
+ // come up with a general framework to handle such situations.)
+ static void convolution_shape_check(
+     CheckedFrom c,
+     const TensorGeometryArg& input, const TensorGeometryArg& weight, const TensorGeometryArg& output,
+     IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups)
+ {
+   check_args(c, padding, input->dim() - 2, "padding");
+   check_args(c, stride, padding.size(), "stride");
+   check_args(c, dilation, padding.size(), "dilation");
+
+   // Input
+   checkDimRange(c, input, 3, 6 /* exclusive */);
+   checkSize_symint(c, input, input_channels_dim, weight->size(1) * groups);
+
+   // Weight
+   checkSameDim(c, input, weight);
+
+   // TODO: check that output->size() matches output_sizes
+   // TODO: check that weight matches output->sizes()
+   checkSameDim(c, input, output);
+ }
+
+ // NB: conv_output_size and conv_input_size are not bijections,
+ // as conv_output_size loses information; this is why conv_input_size
+ // takes an extra output_padding argument to resolve the ambiguity.
+
+ template <typename T>
+ static inline std::vector<T> _conv_output_size(
+     ArrayRef<T> input_size, ArrayRef<T> weight_size,
+     ArrayRef<T> padding, ArrayRef<T> stride, ArrayRef<T> dilation = ArrayRef<T>()
+ ) {
+   // ASSERT(input_size.size() > 2)
+   // ASSERT(input_size.size() == weight_size.size())
+   bool has_dilation = !dilation.empty();
+   auto dim = input_size.size();
+   std::vector<T> output_size(dim);
+   output_size[0] = input_size[input_batch_size_dim];
+   output_size[1] = weight_size[weight_output_channels_dim];
+   for (const auto d : c10::irange(2, dim)) {
+     auto dilation_ = has_dilation ? dilation[d - 2] : 1;
+     auto kernel = dilation_ * (weight_size[d] - 1) + 1;
+     output_size[d] = (input_size[d] + (2 * padding[d - 2]) - kernel) / stride[d - 2] + 1;
+   }
+   return output_size;
+ }
+
+ static inline std::vector<int64_t> conv_output_size(
+     IntArrayRef input_size, IntArrayRef weight_size,
+     IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation = IntArrayRef()
+ ) {
+   return _conv_output_size(input_size, weight_size, padding, stride, dilation);
+ }
+
+ static inline std::vector<c10::SymInt> conv_output_size(
+     SymIntArrayRef input_size, SymIntArrayRef weight_size,
+     SymIntArrayRef padding, SymIntArrayRef stride, SymIntArrayRef dilation = SymIntArrayRef()
+ ) {
+   return _conv_output_size(input_size, weight_size, padding, stride, dilation);
+ }
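For orientation, `_conv_output_size` implements the standard convolution arithmetic per spatial dim: out = (in + 2·padding − (dilation·(kernel − 1) + 1)) / stride + 1, with integer (floor) division. A minimal standalone sketch of the same computation, using illustrative sizes rather than values from this diff:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int64_t> input_size  = {1, 3, 32, 32};  // N, C_in, H, W
      std::vector<int64_t> weight_size = {8, 3, 3, 3};    // C_out, C_in, kH, kW
      std::vector<int64_t> padding = {1, 1}, stride = {2, 2}, dilation = {1, 1};
      for (size_t d = 2; d < input_size.size(); ++d) {
        // Same per-dimension formula as _conv_output_size above.
        int64_t kernel = dilation[d - 2] * (weight_size[d] - 1) + 1;
        int64_t out = (input_size[d] + 2 * padding[d - 2] - kernel) / stride[d - 2] + 1;
        std::cout << out << '\n';  // prints 16 twice: a 32x32 input maps to 16x16
      }
    }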
+
+ template <typename T>
+ std::vector<T> _conv_input_size(
+     ArrayRef<T> output_size, ArrayRef<T> weight_size,
+     ArrayRef<T> padding, ArrayRef<T> output_padding, ArrayRef<T> stride, ArrayRef<T> dilation, T groups
+ ) {
+   // ASSERT(output_size.size() > 2)
+   // ASSERT(output_size.size() == weight_size.size())
+   auto dim = output_size.size();
+   std::vector<T> input_size(dim);
+   input_size[0] = output_size[output_batch_size_dim];
+   input_size[1] = weight_size[weight_input_channels_dim] * groups;
+   for (const auto d : c10::irange(2, dim)) {
+     auto kernel = (weight_size[d] - 1) * dilation[d - 2] + 1;
+     input_size[d] = (output_size[d] - 1) * stride[d - 2] - (padding[d - 2] * 2) +
+                     kernel + output_padding[d - 2];
+   }
+   return input_size;
+ }
+
+ static inline std::vector<c10::SymInt> conv_input_size(
+     SymIntArrayRef output_size, SymIntArrayRef weight_size,
+     SymIntArrayRef padding, SymIntArrayRef output_padding, SymIntArrayRef stride, SymIntArrayRef dilation, c10::SymInt groups
+ ) {
+   return _conv_input_size(output_size, weight_size, padding, output_padding, stride, dilation, groups);
+ }
+
+ static inline std::vector<int64_t> conv_input_size(
+     IntArrayRef output_size, IntArrayRef weight_size,
+     IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
+ ) {
+   return _conv_input_size(output_size, weight_size, padding, output_padding, stride, dilation, groups);
+ }
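As the note before `_conv_output_size` says, the forward size map is lossy: with stride 2 both a 31-wide and a 32-wide input produce 16 outputs, and `output_padding` selects between them when inverting. A small standalone check of that ambiguity (illustrative numbers):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t out = 16, stride = 2, pad = 1, kernel = 3;
      // input = (out - 1) * stride - 2 * pad + kernel + output_padding
      const int64_t in0 = (out - 1) * stride - 2 * pad + kernel + 0;  // 31
      const int64_t in1 = (out - 1) * stride - 2 * pad + kernel + 1;  // 32
      // Both candidates round-trip to the same output size forward:
      assert((in0 + 2 * pad - kernel) / stride + 1 == out);
      assert((in1 + 2 * pad - kernel) / stride + 1 == out);
    }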
+
+ template <typename T>
+ std::vector<T> _conv_weight_size(
+     ArrayRef<T> input_size, ArrayRef<T> output_size,
+     ArrayRef<T> padding, ArrayRef<T> output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
+ ) {
+   auto dim = input_size.size();
+   std::vector<T> weight_size(dim);
+   weight_size[0] = output_size[1];
+   weight_size[1] = input_size[1] / groups;
+   for (const auto d : c10::irange(2, dim)) {
+     auto kernel = input_size[d] - (output_size[d] - 1) * stride[d - 2]
+                   + padding[d - 2] * 2 - output_padding[d - 2];
+     weight_size[d] = (kernel - 1) / dilation[d - 2] + 1;
+   }
+   return weight_size;
+ }
+
+ static inline std::vector<c10::SymInt> conv_weight_size(
+     SymIntArrayRef input_size, SymIntArrayRef output_size,
+     SymIntArrayRef padding, SymIntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
+ ) {
+   return _conv_weight_size(input_size, output_size, padding, output_padding, stride, dilation, groups);
+ }
+
+ static inline std::vector<int64_t> conv_weight_size(
+     IntArrayRef input_size, IntArrayRef output_size,
+     IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
+ ) {
+   return _conv_weight_size(input_size, output_size, padding, output_padding, stride, dilation, groups);
+ }
+
+ static inline Tensor reshape_bias(int64_t dim, const Tensor& bias) {
+   std::vector<int64_t> shape(dim, 1);
+   shape[1] = -1;
+   return bias.reshape(shape);
+ }
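`reshape_bias` maps a length-C bias to a shape like {1, C, 1, ..., 1} so it broadcasts over the batch and spatial dims. A hedged usage sketch (assumes an ATen build; sizes are illustrative):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor bias = at::randn({8});             // one value per output channel
      at::Tensor b4 = bias.reshape({1, -1, 1, 1});  // what reshape_bias(4, bias) computes
      // b4 now broadcasts against an NCHW activation of shape {N, 8, H, W}.
    }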
+
+ static inline at::MemoryFormat cudnn_conv_suggest_memory_format(const at::Tensor& input, const at::Tensor& weight) {
+   // disable NHWC for float64 input.
+   if (!at::detail::getCUDAHooks().compiledWithCuDNN() ||
+       input.scalar_type() == at::kDouble ||
+       weight.scalar_type() == at::kDouble) {
+     return at::MemoryFormat::Contiguous;
+   }
+   long cudnn_version = at::detail::getCUDAHooks().versionCuDNN();
+   auto input_memory_format = input.suggest_memory_format();
+   auto weight_memory_format = weight.suggest_memory_format();
+   auto weight_ndim = weight.ndimension();
+
+   bool can_use_cudnn_channels_last_2d = (cudnn_version >= 7603) && (weight_ndim == 4) && (
+     (input_memory_format == at::MemoryFormat::ChannelsLast) ||
+     (weight_memory_format == at::MemoryFormat::ChannelsLast)
+   );
+   if (can_use_cudnn_channels_last_2d) {
+     return at::MemoryFormat::ChannelsLast;
+   }
+
+   bool can_use_cudnn_channels_last_3d = (cudnn_version >= 8005) && (weight_ndim == 5) && (
+     (input_memory_format == at::MemoryFormat::ChannelsLast3d) ||
+     (weight_memory_format == at::MemoryFormat::ChannelsLast3d)
+   );
+   if (can_use_cudnn_channels_last_3d) {
+     return at::MemoryFormat::ChannelsLast3d;
+   }
+
+   return at::MemoryFormat::Contiguous;
+ }
+
+ // controls whether emptyCache will be called following cudnn conv benchmarking
+ TORCH_API void _cudnn_set_conv_benchmark_empty_cache(bool enable);
+ TORCH_API bool _cudnn_get_conv_benchmark_empty_cache();
+
+
+ static inline bool miopen_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
+
+   // disable NHWC for float64 input.
+   if (!at::detail::getCUDAHooks().compiledWithMIOpen() ||
+       input.scalar_type() == at::kDouble ||
+       weight.scalar_type() == at::kDouble) {
+     return false;
+   }
+
+   bool can_use_miopen_channels_last_2d = false;
+ #if defined(USE_ROCM) && (ROCM_VERSION >= 40300)
+   // TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen
+   // See #64427
+   static c10::optional<bool> PYTORCH_MIOPEN_SUGGEST_NHWC = c10::utils::check_env("PYTORCH_MIOPEN_SUGGEST_NHWC");
+
+   auto input_memory_format = input.suggest_memory_format();
+   auto weight_memory_format = weight.suggest_memory_format();
+
+   can_use_miopen_channels_last_2d = PYTORCH_MIOPEN_SUGGEST_NHWC && *PYTORCH_MIOPEN_SUGGEST_NHWC && (
+     ( (input_memory_format == at::MemoryFormat::ChannelsLast) ||
+       (weight_memory_format == at::MemoryFormat::ChannelsLast) )
+   );
+ #endif
+
+   bool can_use_miopen_channels_last_3d = false;
+
+   return can_use_miopen_channels_last_2d || can_use_miopen_channels_last_3d;
+ }
+
+ static inline bool mkldnn_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
+
+   // disable NHWC for float64 input.
+   if (input.scalar_type() == at::kDouble ||
+       weight.scalar_type() == at::kDouble) {
+     return false;
+   }
+
+   // disable NHWC for MkldnnCPU tensor.
+   if (input.is_mkldnn() || weight.is_mkldnn()) {
+     return false;
+   }
+
+   auto input_memory_format = input.suggest_memory_format();
+   auto weight_memory_format = weight.suggest_memory_format();
+
+   bool can_use_mkldnn_channels_last_2d =
+     (input_memory_format == at::MemoryFormat::ChannelsLast) ||
+     (weight_memory_format == at::MemoryFormat::ChannelsLast);
+
+   bool can_use_mkldnn_channels_last_3d =
+     (input_memory_format == at::MemoryFormat::ChannelsLast3d) ||
+     (weight_memory_format == at::MemoryFormat::ChannelsLast3d);
+
+   return can_use_mkldnn_channels_last_2d || can_use_mkldnn_channels_last_3d;
+ }
+
+ static inline bool thnn_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
+
+   auto input_memory_format = input.suggest_memory_format();
+   auto weight_memory_format = weight.suggest_memory_format();
+
+   bool can_use_thnn_channels_last_2d = input.device().is_cpu() && (
+     (input_memory_format == at::MemoryFormat::ChannelsLast) || (
+      weight_memory_format == at::MemoryFormat::ChannelsLast));
+
+   return can_use_thnn_channels_last_2d;
+ }
+
+ static inline bool xpu_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
+
+   // check layout only for xpu tensor.
+   if (!input.is_xpu() || !weight.is_xpu()) {
+     return false;
+   }
+
+   // disable NHWC for float64 input.
+   if (input.scalar_type() == at::kDouble ||
+       weight.scalar_type() == at::kDouble) {
+     return false;
+   }
+
+   auto input_memory_format = input.suggest_memory_format();
+   auto weight_memory_format = weight.suggest_memory_format();
+
+   bool can_use_xpu_channels_last_2d =
+     (input_memory_format == at::MemoryFormat::ChannelsLast) ||
+     (weight_memory_format == at::MemoryFormat::ChannelsLast);
+
+   bool can_use_xpu_channels_last_3d =
+     (input_memory_format == at::MemoryFormat::ChannelsLast3d) ||
+     (weight_memory_format == at::MemoryFormat::ChannelsLast3d);
+
+   return can_use_xpu_channels_last_2d || can_use_xpu_channels_last_3d;
+ }
+
+ } // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/Copy.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+
+ namespace at {
+
+ class Tensor;
+ struct TensorIterator;
+ class TensorBase;
+
+ namespace native {
+
+ using copy_fn = void (*)(TensorIterator&, bool non_blocking);
+
+ DECLARE_DISPATCH(copy_fn, copy_stub);
+
+ TORCH_API void copy_ignoring_overlaps(const TensorBase &dst, const TensorBase &src);
+
+ } // namespace native
+ } // namespace at
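For context on the DECLARE_DISPATCH pattern used in this and the following headers: the stub declared here is typically paired with a DEFINE_DISPATCH in a .cpp file and per-backend REGISTER_DISPATCH registrations in kernel files, and call sites invoke the stub with a device type. A hedged sketch of that wiring (copy_kernel_cpu is a hypothetical name; only copy_stub comes from this header):

    // In a hypothetical Copy.cpp:
    //   DEFINE_DISPATCH(copy_stub);
    // In a hypothetical CPU kernel file:
    //   static void copy_kernel_cpu(TensorIterator& iter, bool non_blocking) { /* ... */ }
    //   REGISTER_DISPATCH(copy_stub, &copy_kernel_cpu);
    // At a call site, the stub dispatches on device type:
    //   copy_stub(iter.device_type(), iter, non_blocking);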
venv/lib/python3.10/site-packages/torch/include/ATen/native/Cross.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+
+ namespace at {
+ class Tensor;
+
+ namespace native {
+
+ using cross_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const int64_t d);
+
+ DECLARE_DISPATCH(cross_fn, cross_stub);
+
+ }} // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/DilatedConvolutionUtils.h ADDED
@@ -0,0 +1,229 @@
+ #pragma once
+
+ #include <algorithm>
+ #include <vector>
+
+ #include <ATen/div_rtn.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/util/irange.h>
+
+ #define TORCH_CHECK_DIM_SIZE(T, DIM, DIM_SIZE, SIZE) \
+   TORCH_CHECK(                                       \
+       T.dim() == DIM && T.size(DIM_SIZE) == SIZE,    \
+       "Need " #T " of dimension ",                   \
+       DIM,                                           \
+       " and " #T ".size[",                           \
+       DIM_SIZE,                                      \
+       "] == ",                                       \
+       SIZE,                                          \
+       " but got input to be of shape ",              \
+       T.sizes())
+
+ namespace at::native::internal {
+ namespace {
+ inline bool all_positive(IntArrayRef& arr) {
+   return std::all_of(
+       arr.begin(), arr.end(), [](int64_t item) { return item > 0; });
+ }
+
+ inline bool all_nonnegative(std::vector<int64_t>& arr) {
+   return std::all_of(
+       arr.begin(), arr.end(), [](int64_t item) { return item >= 0; });
+ }
+
+ } // namespace
+
+ // calculate the rear part of output tensor sizes
+ template <int64_t dim>
+ std::vector<int64_t> get_output_size(
+     const Tensor& input,
+     IntArrayRef kernel_size,
+     IntArrayRef stride_size,
+     IntArrayRef pad_size,
+     IntArrayRef dilation_size) {
+   std::vector<int64_t> sizes;
+   for (const auto index : c10::irange(dim)) {
+     sizes.push_back(
+         div_rtn<int64_t>(
+             input.size(index + input.dim() - dim) + 2 * pad_size[index] -
+                 (dilation_size[index] * (kernel_size[index] - 1) + 1),
+             stride_size[index]) +
+         1);
+   }
+   return sizes;
+ }
+
+ // calculate the sizes of output tensor
+ template <int64_t dim>
+ std::vector<int64_t> get_output_size(
+     const Tensor& input,
+     const Tensor& weight,
+     IntArrayRef kernel_size,
+     IntArrayRef stride_size,
+     IntArrayRef pad_size,
+     IntArrayRef dilation_size) {
+   auto output_size = get_output_size<dim>(
+       input, kernel_size, stride_size, pad_size, dilation_size);
+   output_size.insert(output_size.begin(), weight.size(0));
+   if (input.dim() == dim + 2) {
+     output_size.insert(output_size.begin(), input.size(0));
+   }
+   return output_size;
+ }
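`div_rtn` divides rounding toward negative infinity, so an input too small for the kernel yields a negative size that the `all_nonnegative` check below can reject; plain C++ division, which truncates toward zero, could round the same case up to 0 and slip through. A small standalone illustration (numbers are illustrative):

    #include <cstdint>
    #include <iostream>

    // Local stand-in for ATen's div_rtn: quotient rounded toward -infinity.
    int64_t div_rtn_demo(int64_t x, int64_t y) {
      int64_t q = x / y;  // C++ '/' truncates toward zero
      if ((x % y != 0) && ((x < 0) != (y < 0))) --q;
      return q;
    }

    int main() {
      // input 2, kernel 5, pad 0, dilation 1, stride 2 -> numerator 2 - 5 = -3
      std::cout << div_rtn_demo(-3, 2) + 1 << '\n';  // -1: caught by all_nonnegative
      std::cout << (-3) / 2 + 1 << '\n';             // 0: truncation would pass the check
    }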
+ /*
+   slow_conv_dilated_shape_check - check user-input to dilated convolution
+   forward and backward functions.
+ */
+ template <int64_t dim>
+ void slow_conv_dilated_shape_check(
+     const Tensor& input,
+     const Tensor& weight,
+     const Tensor& bias,
+     const Tensor& grad_output,
+     IntArrayRef kernel_size,
+     IntArrayRef stride_size,
+     IntArrayRef pad_size,
+     IntArrayRef dilation_size) {
+   /*
+     When the following tensors are defined:
+
+       bias, grad_weight, grad_output
+
+     then these are assumed to be contiguous without checking,
+     because these tensors are made contiguous by calling the
+     .contiguous() method or by resizing of zero-sized tensors in
+     forward/backward functions.
+
+     When grad_weight is defined then it is assumed without
+     checking to have the same shape as weight, see backward
+     functions.
+   */
+   // Check size arguments
+   TORCH_CHECK(
+       kernel_size.size() == dim,
+       "kernel sizes length should be ",
+       dim,
+       ", but got ",
+       kernel_size.size());
+   TORCH_CHECK(
+       stride_size.size() == dim,
+       "strides length should be ",
+       dim,
+       ", but got ",
+       stride_size.size());
+   TORCH_CHECK(
+       dilation_size.size() == dim,
+       "dilations length should be ",
+       dim,
+       ", but got ",
+       dilation_size.size());
+   TORCH_CHECK(
+       pad_size.size() == dim,
+       "pads length should be ",
+       dim,
+       ", but got ",
+       pad_size.size());
+
+   TORCH_CHECK(
+       all_positive(kernel_size),
+       "kernel size should be greater than zero, but got ",
+       kernel_size);
+   TORCH_CHECK(
+       all_positive(stride_size),
+       "stride should be greater than zero, but got ",
+       stride_size);
+   TORCH_CHECK(
+       all_positive(dilation_size),
+       "dilation should be greater than zero, but got ",
+       dilation_size);
+
+   // check input
+   TORCH_CHECK(input.defined(), "input must be defined");
+   bool is_batch = input.dim() == dim + 2;
+   int64_t n = (is_batch ? 2 : 1);
+   int64_t ndim = n + dim;
+   if (!is_batch) {
+     // input dim has to be dim + 1 if not batched
+     TORCH_CHECK(
+         input.dim() == dim + 1,
+         "input must be ",
+         dim + 1,
+         "D or ",
+         dim + 2,
+         "D tensor but got ",
+         input.dim(),
+         "D tensor");
+   }
+
+   // check output sizes
+   auto output_size = get_output_size<dim>(
+       input, kernel_size, stride_size, pad_size, dilation_size);
+
+   TORCH_CHECK(
+       all_nonnegative(output_size),
+       "calculated output size ",
+       output_size,
+       " is too small (all sizes must be non-negative)");
+
+   // check weight
+   TORCH_CHECK(weight.defined(), "weight must be defined");
+   TORCH_CHECK(
+       weight.dim() == dim + 2,
+       "weight must be ",
+       dim + 2,
+       "D tensor but got ",
+       weight.dim(),
+       "D tensor dim=",
+       dim);
+   TORCH_CHECK(
+       weight.sizes().slice(2) == kernel_size,
+       "weight[2:] shape ",
+       weight.sizes().slice(2),
+       " must be equal to kernel_size ",
+       kernel_size);
+
+   TORCH_CHECK_DIM_SIZE(input, input.dim(), (is_batch ? 1 : 0), weight.size(1));
+
+   // check bias when present
+   if (bias.defined()) {
+     TORCH_CHECK(
+         bias.dim() == 1,
+         "bias must be 1D tensor but got ",
+         bias.dim(),
+         "D tensor");
+     TORCH_CHECK_DIM_SIZE(bias, 1, 0, weight.size(0));
+   }
+
+   // check grad_output when present
+   if (grad_output.defined()) {
+     TORCH_CHECK(
+         grad_output.dim() == ndim,
+         "grad_output must be ",
+         ndim,
+         "D tensor but got ",
+         grad_output.dim(),
+         "D tensor");
+     if (is_batch) {
+       TORCH_CHECK(
+           grad_output.size(0) == input.size(0),
+           "grad_output.size(0)=",
+           grad_output.size(0),
+           " must be input.size(0)=",
+           input.size(0));
+     }
+     TORCH_CHECK(
+         grad_output.size(n - 1) == weight.size(0),
+         "grad_output.size(",
+         n - 1,
+         ")=",
+         grad_output.size(n - 1),
+         " must be weight.size(0)=",
+         weight.size(0));
+     TORCH_CHECK(
+         grad_output.sizes().slice(n) == output_size,
+         "grad_output[",
+         n,
+         ":] shape ",
+         grad_output.sizes().slice(n),
+         " must be equal to output size ",
+         output_size);
+   }
+ }
+
+ } // namespace at::native::internal
venv/lib/python3.10/site-packages/torch/include/ATen/native/Distance.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+
+ namespace at {
+ class Tensor;
+
+ namespace native {
+
+ using pdist_forward_fn = void(*)(Tensor&, const Tensor&, const double p);
+ using pdist_backward_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const double p, const Tensor&);
+ using cdist_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const double p);
+ using cdist_backward_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const Tensor&, const double p, const Tensor&);
+
+ DECLARE_DISPATCH(pdist_forward_fn, pdist_forward_stub);
+ DECLARE_DISPATCH(pdist_backward_fn, pdist_backward_stub);
+ DECLARE_DISPATCH(cdist_fn, cdist_stub);
+ DECLARE_DISPATCH(cdist_backward_fn, cdist_backward_stub);
+
+ }} // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/DistributionTemplates.h ADDED
@@ -0,0 +1,394 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/Dispatch.h>
+ #include <ATen/Dispatch_v2.h>
+ #include <ATen/Generator.h>
+ #include <ATen/ExpandUtils.h>
+ #include <ATen/Tensor.h>
+ #include <ATen/MemoryOverlap.h>
+ #include <ATen/NamedTensorUtils.h>
+ #include <ATen/native/Resize.h>
+ #include <ATen/native/TensorIterator.h>
+ #include <c10/util/Optional.h>
+ #include <limits>
+ #include <cmath>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/Functions.h>
+ #else
+ #include <ATen/ops/empty_like.h>
+ #include <ATen/ops/empty.h>
+ #include <ATen/ops/full.h>
+ #include <ATen/ops/view_as_real.h>
+ #endif
+
+ namespace at::native::templates {
+
+ // ==================================================== Random ========================================================
+
+ // The purpose of `update_from` and `update_to` is to find the closest valid int64_t number that can be used as actual `from`.
+ // The current implementation of `random_` uses uint64_t arithmetic and casts the result to the target dtype (scalar_t).
+ // This casting can result in generating numbers that happen to be greater than or equal to the `to` value. For instance:
+ //
+ //   auto actual = torch::empty({3, 3}, torch::half);
+ //   actual.random_(0, 65504);
+ //
+ // If random's uint64_t arithmetic produces 65503 as a random value, after casting to torch::half it becomes 65504
+ // and violates the requirement that a random value must be less than `to`. To resolve this issue, `update_from` and `update_to`
+ // move `from` to the right and `to` to the left to the next closest value that won't go outside [from, to) after casting to
+ // the target dtype. For `to` = 65504 it moves left by (1 << (log2(to) - 11 + 1)) = 32 and becomes 65472, which is the previous
+ // available number for the torch::half dtype.
+ template<typename scalar_t>
+ int64_t update_from(int64_t from) {
+   static_assert(
+     std::is_floating_point<scalar_t>::value ||
+     std::is_same<scalar_t, at::Half>::value ||
+     std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
+   const auto from_plus_1 = static_cast<int64_t>(static_cast<scalar_t>(from + 1));
+   if (from_plus_1 < from) {
+     int64_t from_ = std::abs(from + 1);
+     int n = 0;
+     while (from_ >>= 1) ++n;
+     // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
+     from = from_plus_1 + (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
+   }
+   return from;
+ }
+
+ template<typename scalar_t>
+ int64_t update_to(int64_t to) {
+   static_assert(
+     std::is_floating_point<scalar_t>::value ||
+     std::is_same<scalar_t, at::Half>::value ||
+     std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
+   const auto to_minus_1 = static_cast<int64_t>(static_cast<scalar_t>(to - 1));
+   if (to_minus_1 >= to) {
+     int64_t to_ = std::abs(to - 1);
+     int n = 0;
+     while (to_ >>= 1) ++n;
+     // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
+     to = to_minus_1 - (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
+   }
+   return to;
+ }
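A concrete instance of the rounding problem described in the comment above: 65503 is not representable in float16 and rounds up to 65504, escaping the half-open range [from, 65504); `update_to<at::Half>(65504)` therefore steps down to 65472, the previous representable value. A hedged demonstration (assumes an ATen build):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      // 65503 rounds to the nearest float16 value, which is 65504.
      at::Half h = static_cast<at::Half>(65503.0f);
      std::cout << static_cast<int64_t>(static_cast<float>(h)) << '\n';  // 65504
    }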
+
+ // Return early to avoid invoking the kernel on empty tensors.
+ // See https://github.com/pytorch/pytorch/issues/103418 for more details
+ #define CHECK_EMPTY_AND_RETURN(tensor) \
+   if (tensor.numel() == 0) {           \
+     return tensor;                     \
+   }
+
+ template<template<typename> class random_kernel, typename RNG>
+ at::Tensor& random_impl(at::Tensor& self, c10::optional<Generator> generator) {
+   CHECK_EMPTY_AND_RETURN(self);
+   auto iter = at::TensorIterator::borrowing_nullary_op(self);
+   random_kernel<RNG>()(iter, generator);
+   return self;
+ }
+
+ #define CHECK_OUT_OF_BOUNDS(var, name, min, max, dtype) \
+   TORCH_CHECK(var >= min && var <= max, name, " is out of bounds for ", dtype); \
+
+ #define WARN_OUT_OF_BOUNDS(var, name, digits, dtype) \
+   if (var < -(1LL << digits) || var > (1LL << digits)) { \
+     TORCH_WARN(name, " is out of bounds [-(2^", digits, "), 2^", digits, "]. ", \
+       "Due to precision limitations ", dtype, " can support discrete uniform distribution only within this range. ", \
+       "This warning will become an error in version 1.7 release, please fix the code in advance"); \
+   }
+
+ static void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMeta dtype) {
+   const auto scalar_type = typeMetaToScalarType(dtype);
+   if (isFloatingType(scalar_type)) {
+     AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "check_random_fp_bounds", [&] {
+       const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
+       const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
+       CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
+       CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype);
+
+       constexpr auto digits = std::numeric_limits<scalar_t>::digits;
+       WARN_OUT_OF_BOUNDS(from, "from", digits, dtype);
+       WARN_OUT_OF_BOUNDS(to_inc, "to - 1", digits, dtype);
+     });
+   } else if (scalar_type == kUInt64) {
+     // When you do a comparison between int64_t and uint64_t, the usual
+     // arithmetic conversions say that the int64_t value is promoted to
+     // unsigned. But this conversion wraps around: if I had -1 as my int64_t,
+     // then it will promote to 0xFFFFFFFFFFFFFFFF in uint64_t. This is never
+     // the right thing to do.
+     CHECK_OUT_OF_BOUNDS(from, "from", 0, INT64_MAX, dtype);
+     CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", 0, INT64_MAX, dtype);
+   } else if (isIntegralType(scalar_type, /*includeBool=*/true)) {
+     AT_DISPATCH_V2(scalar_type, "check_random_integral_bounds", AT_WRAP([&]() {
+       const auto min = static_cast<int64_t>(std::numeric_limits<scalar_t>::lowest());
+       const auto max = static_cast<int64_t>(std::numeric_limits<scalar_t>::max());
+       CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
+       CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype);
+     }), AT_EXPAND(AT_INTEGRAL_TYPES), kUInt16, kUInt32, kBool);
+   } else {
+     TORCH_CHECK(false, "check_random_bounds handles only integral, floating-point and boolean types");
+   }
+ }
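The kUInt64 branch works around the usual-arithmetic-conversions trap the comment describes; a short standalone demonstration:

    #include <cstdint>
    #include <iostream>

    int main() {
      int64_t from = -1;
      uint64_t limit = 10;
      // Promoting -1 to uint64_t wraps to 0xFFFFFFFFFFFFFFFF, inverting the result:
      std::cout << (static_cast<uint64_t>(from) < limit) << '\n';  // 0: the trap
      std::cout << (from < static_cast<int64_t>(limit)) << '\n';   // 1: intended meaning
    }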
+
+ template<template<typename> class random_from_to_kernel, typename RNG>
+ at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional<int64_t> to_opt, c10::optional<Generator> generator) {
+   uint64_t range = 0;
+   auto iter = at::TensorIterator::borrowing_nullary_op(self);
+   if (to_opt.has_value()) {
+     // [from, to)
+     int64_t to = *to_opt;
+     TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to);
+     if (isFloatingType(iter.dtype())) {
+       AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_update_from_to", [&] {
+         from = update_from<scalar_t>(from);
+         to = update_to<scalar_t>(to);
+         TORCH_CHECK(from < to, "random_ expects 'from' casted to dtype to be less than 'to' casted to dtype, but got from=", from, " >= to=", to);
+       });
+     }
+     check_from_to_in_range(from, to - 1, self.dtype());
+     CHECK_EMPTY_AND_RETURN(self);
+     range = static_cast<uint64_t>(to) - static_cast<uint64_t>(from);
+     random_from_to_kernel<RNG>()(iter, range, from, generator);
+   } else if (from != std::numeric_limits<int64_t>::lowest()) {
+     // [from, std::numeric_limits<int64_t>::max()]
+     int64_t to_inc = 0;
+     if (isFloatingType(iter.dtype())) {
+       AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_from_to_range_calc", [&] {
+         constexpr int64_t scalar_t_max = static_cast<int64_t>(1) << std::numeric_limits<scalar_t>::digits;
+         to_inc = scalar_t_max > std::numeric_limits<int64_t>::max() ? std::numeric_limits<int64_t>::max() : static_cast<int64_t>(scalar_t_max);
+         from = update_from<scalar_t>(from);
+         TORCH_CHECK(from < to_inc, "random_ expects 'from' casted to dtype to be less than or equal to 'to_inc' casted to dtype, but got from=", from, " > to_inc=", to_inc);
+       });
+     } else if (isIntegralType(iter.dtype(), /*includeBool=*/true)) {
+       AT_DISPATCH_V2(self.scalar_type(), "random_from_to_range_calc", AT_WRAP([&] {
+         if constexpr (std::is_same_v<scalar_t, bool>) {
+           to_inc = static_cast<int64_t>(true);
+         } else {
+           to_inc = static_cast<int64_t>(std::numeric_limits<scalar_t>::max());
+         }
+       }), AT_EXPAND(AT_INTEGRAL_TYPES_V2), kBool);
+     } else {
+       TORCH_CHECK(false, "random_from_to_impl handles only integral, floating-point and boolean types");
+     }
+     check_from_to_in_range(from, to_inc, self.dtype());
+     CHECK_EMPTY_AND_RETURN(self);
+     range = static_cast<uint64_t>(to_inc) - static_cast<uint64_t>(from) + 1;
+     random_from_to_kernel<RNG>()(iter, range, from, generator);
+   } else {
+     // [std::numeric_limits<int64_t>::lowest(), std::numeric_limits<int64_t>::max()]
+     // range = 2^64
+     CHECK_EMPTY_AND_RETURN(self);
+     random_from_to_kernel<RNG>()(iter, generator);
+   }
+   return self;
+ }
+
+ // ==================================================== Normal ========================================================
+
+ #define CHECK_NORMAL_TENSOR_STD(std) \
+   do { \
+     TORCH_CHECK( \
+       !std.is_complex(), \
+       "normal expects standard deviation to be non-complex"); \
+     TORCH_CHECK( \
+       std.numel() == 0 || std.is_meta() || std.min().ge(0).item<bool>(), \
+       "normal expects all elements of std >= 0.0"); \
+   } while (0)
+
+ #define CHECK_NORMAL_STD(std) \
+   TORCH_CHECK(std >= 0.0, "normal expects std >= 0.0, but found std ", std);
+
+ template<template<typename> class normal_kernel, typename RNG>
+ Tensor& normal_impl_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
+   CHECK_NORMAL_STD(std);
+   CHECK_EMPTY_AND_RETURN(self);
+
+   if (self.is_complex()) {
+     auto float_tensor = at::view_as_real(self);
+     // variance for normal distribution of the real and imaginary values
+     // is half of the input variance
+     normal_kernel<RNG>()(float_tensor, mean, std/(std::sqrt(2)), gen);
+   } else {
+     normal_kernel<RNG>()(self, mean, std, gen);
+   }
+   return self;
+ }
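Why std/√2 in the complex branch: viewing the complex tensor as real samples the real and imaginary parts independently, and Var(z) = Var(Re z) + Var(Im z) = 2σ′² for per-component deviation σ′, so σ′ = σ/√2 recovers the requested total variance σ².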
+
+ template<template<typename> class normal_kernel, typename RNG>
+ Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, c10::optional<Generator> gen) {
+   CHECK_NORMAL_STD(std);
+   auto std_tensor = at::empty_like(output, MemoryFormat::Contiguous);
+   auto shape = at::infer_size(mean.sizes(), std_tensor.sizes());
+   at::native::resize_output(output, shape);
+   normal_impl_<normal_kernel, RNG>(output, 0, std, gen);
+   output.add_(mean);
+   return output;
+ }
+
+ template<template<typename> class normal_kernel, typename RNG>
+ Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, c10::optional<Generator> gen) {
+   CHECK_NORMAL_TENSOR_STD(std);
+   auto mean_tensor = at::full({}, mean, output.options());
+   auto shape = at::infer_size(mean_tensor.sizes(), std.sizes());
+   at::native::resize_output(output, shape);
+   normal_impl_<normal_kernel, RNG>(output, 0, 1, gen);
+   // CUDA NB: addcmul_out copies the tensor to be added into the output.
+   // The previous function here was addcmul_out(output, mean_tensor, output, std, 1);
+   // The third argument is not a constant reference and hence the samples in output are overwritten.
+   // Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std
+   output.mul_(std).add_(mean_tensor);
+   return output;
+ }
+
+ template<template<typename> class normal_kernel, typename RNG>
+ Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
+   CHECK_NORMAL_TENSOR_STD(std);
+   auto shape = at::infer_size(mean.sizes(), std.sizes());
+   at::native::resize_output(output, shape);
+   normal_impl_<normal_kernel, RNG>(output, 0, 1, gen);
+   // CUDA NB: addcmul_out copies the tensor to be added into the output.
+   // The previous function here was addcmul_out(output, mean, output, std, 1);
+   // The third argument is not a constant reference and hence the samples in output are overwritten.
+   // Consequently, the computation performed is mean + mean * std instead of mean + output * std
+   output.mul_(std).add_(mean);
+   return output;
+ }
+
+ template<template<typename> class normal_kernel, typename RNG>
+ Tensor normal_impl(const Tensor& mean, double std, c10::optional<Generator> gen) {
+   CHECK_NORMAL_STD(std);
+   Tensor ret = at::empty_like(mean, MemoryFormat::Contiguous);
+   normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
+   return ret;
+ }
+
+ template<template<typename> class normal_kernel, typename RNG>
+ Tensor normal_impl(double mean, const Tensor& std, c10::optional<Generator> gen) {
+   CHECK_NORMAL_TENSOR_STD(std);
+   Tensor ret = at::empty_like(std, MemoryFormat::Contiguous);
+   normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
+   return ret;
+ }
+
+ template<template<typename> class normal_kernel, typename RNG>
+ Tensor normal_impl(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
+   CHECK_NORMAL_TENSOR_STD(std);
+   auto shape = at::infer_size(mean.sizes(), std.sizes());
+   Tensor ret = at::empty(shape, mean.options(), MemoryFormat::Contiguous);
+   normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
+   return ret;
+ }
+
+ // ==================================================== Uniform =======================================================
+
+ template<template<typename> class uniform_kernel, typename RNG>
+ at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, c10::optional<Generator> generator) {
+   if (self.is_complex()) {
+     CHECK_EMPTY_AND_RETURN(self);
+     auto float_tensor = at::view_as_real(self);
+     uniform_impl_<uniform_kernel, RNG>(float_tensor, from, to, generator);
+   } else {
+     AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "check_uniform_bounds", [&] {
+       const auto dtype = self.dtype();
+       const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
+       const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
+       CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
+       CHECK_OUT_OF_BOUNDS(to, "to", min, max, dtype);
+       TORCH_CHECK(from <= to, "uniform_ expects to return a [from, to) range, but found from=", from, " > to=", to);
+       TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
+             "uniform_ expects to-from <= std::numeric_limits<", toString(self.scalar_type()),
+             ">::max(), but found to=", to, " and from=", from,
+             " which result in to-from to exceed the limit");
+       from = std::min(std::max(from, min), max);
+       to = std::max(std::min(to, max), min);
+     });
+     CHECK_EMPTY_AND_RETURN(self);
+     auto iter = at::TensorIterator::borrowing_nullary_op(self);
+     uniform_kernel<RNG>()(iter, from, to, generator);
+   }
+   return self;
+ }
+
+ // ================================================== LogNormal =======================================================
+
+ template<template<typename> class log_normal_kernel, typename RNG>
+ at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, c10::optional<Generator> gen) {
+   TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std);
+   CHECK_EMPTY_AND_RETURN(self);
+   auto iter = TensorIterator::borrowing_nullary_op(self);
+   log_normal_kernel<RNG>()(iter, mean, std, gen);
+   return self;
+ }
+
+ // =================================================== Geometric ======================================================
+
+ template<template<typename> class geometric_kernel, typename RNG>
+ Tensor& geometric_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
+   TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p);
+   CHECK_EMPTY_AND_RETURN(self);
+   auto iter = TensorIterator::borrowing_nullary_op(self);
+   geometric_kernel<RNG>()(iter, p, gen);
+   return self;
+ }
+
+ // ================================================== Exponential =====================================================
+
+ template<template<typename> class exponential_kernel, typename RNG>
+ Tensor& exponential_impl_(Tensor& self, double lambda, c10::optional<Generator> gen) {
+   TORCH_CHECK(lambda > 0.0, "exponential_ expects lambda > 0.0, but found lambda=", lambda);
+   CHECK_EMPTY_AND_RETURN(self);
+   auto iter = TensorIterator::borrowing_nullary_op(self);
+   exponential_kernel<RNG>()(iter, lambda, gen);
+   return self;
+ }
+
+ // ==================================================== Cauchy ========================================================
+
+ template<template<typename> class cauchy_kernel, typename RNG>
+ Tensor& cauchy_impl_(Tensor& self, double median, double sigma, c10::optional<Generator> gen) {
+   // TODO: instead of variable name 'sigma', use 'gamma' or 'scale'
+   // the variance, squared sigma, is undefined for cauchy distribution
+   TORCH_CHECK(sigma > 0.0, "cauchy_ expects sigma > 0.0, but found sigma=", sigma);
+   TORCH_CHECK(at::isFloatingType(self.scalar_type()), "Cauchy distribution is a continuous probability distribution. dtype must be a floating point but you specified ", self.dtype());
+   CHECK_EMPTY_AND_RETURN(self);
+   auto iter = TensorIterator::borrowing_nullary_op(self);
+   cauchy_kernel<RNG>()(iter, median, sigma, gen);
+   return self;
+ }
+
+ // ==================================================== Bernoulli =====================================================
+
+ template<template<typename> class bernoulli_tensor_kernel, typename RNG>
+ Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
+   CHECK_EMPTY_AND_RETURN(self);
+   NoNamesGuard guard;
+   at::assert_no_internal_overlap(self);
+   bernoulli_tensor_kernel<RNG>()(self, p_, gen);
+   return self;
+ }
+
+ template<template<typename> class bernoulli_scalar_kernel, typename RNG>
+ Tensor& bernoulli_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
+   TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
+   CHECK_EMPTY_AND_RETURN(self);
+   at::assert_no_internal_overlap(self);
+   bernoulli_scalar_kernel<RNG>()(self, p, gen);
+   return self;
+ }
+
+ template<template<typename> class bernoulli_tensor_kernel, typename RNG>
+ Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, c10::optional<Generator> gen) {
+   // result.resize_as_(self) requires self to have same dtype as result, so we
+   // use resize_ instead.
+   // TODO: Fix resize_as_. See pytorch/pytorch#11665.
+   result.resize_(self.sizes());
+   bernoulli_impl_<bernoulli_tensor_kernel, RNG>(result, self, gen);
+   namedinference::propagate_names(result, self);
+   return result;
+ }
+
+ #undef CHECK_OUT_OF_BOUNDS
+ #undef WARN_OUT_OF_BOUNDS
+
+ } // namespace at::native::templates
venv/lib/python3.10/site-packages/torch/include/ATen/native/Distributions.h ADDED
@@ -0,0 +1,518 @@
+ #pragma once
+
+ #include <ATen/native/Math.h>
+ #include <c10/macros/Macros.h>
+ #include <c10/util/MathConstants.h>
+
+ // ROCM hcc doesn't work well with using std:: in kernel functions
+ #if defined(__CUDA_ARCH__)
+ #include <c10/cuda/CUDAMathCompat.h>
+ #define compat_exp c10::cuda::compat::exp
+ #define compat_ceil c10::cuda::compat::ceil
+ #define compat_floor c10::cuda::compat::floor
+ #define compat_log c10::cuda::compat::log
+ #define compat_pow c10::cuda::compat::pow
+ #define compat_sqrt c10::cuda::compat::sqrt
+ #define compat_tan c10::cuda::compat::tan
+ #define compat_abs c10::cuda::compat::abs
+ #define compat_log1p c10::cuda::compat::log1p
+ #elif defined(__HIPCC__)
+ #include <c10/hip/HIPMathCompat.h>
+ #define compat_exp c10::hip::compat::exp
+ #define compat_ceil c10::hip::compat::ceil
+ #define compat_floor c10::hip::compat::floor
+ #define compat_log c10::hip::compat::log
+ #define compat_pow c10::hip::compat::pow
+ #define compat_sqrt c10::hip::compat::sqrt
+ #define compat_tan c10::hip::compat::tan
+ #define compat_abs c10::hip::compat::abs
+ #define compat_log1p c10::hip::compat::log1p
+ #else
+ #define compat_exp std::exp
+ #define compat_ceil std::ceil
+ #define compat_floor std::floor
+ #define compat_log std::log
+ #define compat_pow std::pow
+ #define compat_sqrt std::sqrt
+ #define compat_tan std::tan
+ #define compat_abs std::abs
+ #define compat_log1p std::log1p
+ #endif
+
+ namespace {
+
+ #if !defined(__CUDA_ARCH__) && !defined(__HIPCC__)
+ // we cannot use std::isnan directly due to some incompatibility of
+ // gcc constexpr'ing and nvcc
+ using std::isnan;
+ #endif
+
+ // Here sampler_t should be function type scalar_t(void). For gpu
+ // "sampler" is a device function, but since ROCM doesn't have an
+ // equivalent to nvstd::function, we use a template type parameter to
+ // capture it.
+ template<typename scalar_t, typename sampler_t>
+ struct BaseSampler {
+   sampler_t sampler;
+   C10_DEVICE BaseSampler(const sampler_t& sampler): sampler(sampler) {}
+   C10_DEVICE scalar_t sample() {
+     return sampler();
+   }
+ };
+
+ // The function `sample_gamma` is adapted from Numpy's distributions.c
+ // implementation. It is MIT licensed, so here is the copyright:
+
+ /* Copyright 2005 Robert Kern ([email protected])
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the
+  * "Software"), to deal in the Software without restriction, including
+  * without limitation the rights to use, copy, modify, merge, publish,
+  * distribute, sublicense, and/or sell copies of the Software, and to
+  * permit persons to whom the Software is furnished to do so, subject to
+  * the following conditions:
+  *
+  * The above copyright notice and this permission notice shall be included
+  * in all copies or substantial portions of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+  */
+
+ template<typename scalar_t, typename accscalar_t, typename uniform_sampler_t, typename normal_sampler_t>
+ C10_DEVICE scalar_t sample_gamma(scalar_t alpha, BaseSampler<accscalar_t, uniform_sampler_t>& standard_uniform, BaseSampler<accscalar_t, normal_sampler_t>& standard_normal) {
+   accscalar_t scale = 1.0f;
+
+   // Boost alpha for higher acceptance probability.
+   if (alpha < 1.0f) {
+     if (alpha == 0.f) return 0.f;
+     scale *= compat_pow(1 - standard_uniform.sample(), 1.0f / alpha);
+     alpha += 1.0f;
+   }
+
+   // This implements the acceptance-rejection method of Marsaglia and Tsang (2000)
+   // doi:10.1145/358407.358414
+   const accscalar_t d = alpha - 1.0f / 3.0f;
+   const accscalar_t c = 1.0f / compat_sqrt(9.0f * d);
+   for (;;) {
+     accscalar_t x, y;
+     do {
+       x = standard_normal.sample();
+       y = 1.0f + c * x;
+     } while (y <= 0);
+     const accscalar_t v = y * y * y;
+     const accscalar_t u = 1 - standard_uniform.sample();
+     const accscalar_t xx = x * x;
+     if (u < 1.0f - 0.0331f * xx * xx)
+       return static_cast<scalar_t>(scale * d * v);
+     if (compat_log(u) < 0.5f * xx + d * (1.0f - v + compat_log(v)))
+       return static_cast<scalar_t>(scale * d * v);
+   }
+ }
+ }
119
+
120
+ /* the functions stirling_approx_tail, binomial_inversion, and btrs are adapted
121
+ * from TensorFlow's random_binomial_op.cc implementation. That code is under
122
+ * copyright: 2019 The TensorFlow Authors.
123
+ *
124
+ * It was released under the Apache License, Version 2.0 (the "License"), available at:
125
+ * http://www.apache.org/licenses/LICENSE-2.0
126
+ */
127
+
128
+ template<typename scalar_t>
129
+ C10_DEVICE scalar_t stirling_approx_tail(scalar_t k) {
130
+ const static scalar_t kTailValues[] = {
131
+ 0.0810614667953272,
132
+ 0.0413406959554092,
133
+ 0.0276779256849983,
134
+ 0.02079067210376509,
135
+ 0.0166446911898211,
136
+ 0.0138761288230707,
137
+ 0.0118967099458917,
138
+ 0.0104112652619720,
139
+ 0.00925546218271273,
140
+ 0.00833056343336287
141
+ };
142
+ if (k <= 9) {
143
+ return kTailValues[static_cast<size_t>(k)];
144
+ }
145
+ scalar_t kp1sq = (k + 1) * (k + 1);
146
+ return (1.0 / 12 - (1.0 / 360 - 1.0 / 1260 / kp1sq) / kp1sq) / (k + 1);
147
+ }
148
+
149
+
150
+ template<typename scalar_t, typename accscalar_t, typename uniform_sampler_t>
151
+ C10_DEVICE scalar_t binomial_inversion(scalar_t count, scalar_t prob, BaseSampler<accscalar_t, uniform_sampler_t>& standard_uniform) {
152
+ accscalar_t U;
153
+ accscalar_t geom_sum = 0;
154
+ scalar_t num_geom = 0;
155
+
156
+ accscalar_t logprob = compat_log1p(-prob);
157
+
158
+ while (1) {
159
+ U = standard_uniform.sample();
160
+ accscalar_t geom = compat_ceil(compat_log(U) / logprob);
161
+ geom_sum += geom;
162
+ if (geom_sum > count) {
163
+ break;
164
+ }
165
+ num_geom = num_geom + 1;
166
+ }
167
+ return num_geom;
168
+ }
169
+
170
+ template<typename scalar_t, typename accscalar_t, typename uniform_sampler_t>
171
+ C10_DEVICE scalar_t btrs(scalar_t count, scalar_t prob, BaseSampler<accscalar_t, uniform_sampler_t>& standard_uniform) {
172
+ scalar_t k;
173
+ accscalar_t U, V, us;
174
+
175
+ // This is spq in the paper.
176
+ const accscalar_t stddev = compat_sqrt(count * prob * (1 - prob));
177
+
178
+ // Other coefficients for Transformed Rejection sampling.
179
+ const accscalar_t b = 1.15 + 2.53 * stddev;
180
+ const accscalar_t a = -0.0873 + 0.0248 * b + 0.01 * prob;
181
+ const accscalar_t c = count * prob + 0.5;
182
+ const accscalar_t v_r = 0.92 - 4.2 / b;
183
+ const accscalar_t r = prob / (1 - prob);
184
+
185
+ const accscalar_t alpha = (2.83 + 5.1 / b) * stddev;
186
+ const accscalar_t m = compat_floor((count + 1) * prob);
187
+
188
+ while (1) {
189
+ U = standard_uniform.sample() - 0.5;
190
+ V = standard_uniform.sample();
191
+
192
+ us = 0.5 - compat_abs(U);
193
+ k = static_cast<scalar_t>(compat_floor((2 * a / us + b) * U + c));
194
+
195
+ // Reject non-sensical answers.
196
+ if (k < 0 || k > count) {
197
+ continue;
198
+ }
199
+ // Region for which the box is tight, and we can return our calculated value.
200
+ // This should happen 0.86 * v_r times. In the limit as n * p is large,
201
+ // the acceptance rate converges to ~79% (and in the lower regime it is ~24%).
202
+ if (us >= 0.07 && V <= v_r) {
203
+ return k;
204
+ }
205
+
206
+ // This deviates from Hormann's BTRS algorithm, as there is a log missing.
207
+ // For all (u, v) pairs outside of the bounding box, this calculates the
208
+ // transformed-reject ratio.
209
+ V = compat_log(V * alpha / (a / (us * us) + b));
210
+ accscalar_t upperbound =
211
+ ((m + 0.5) * compat_log((m + 1) / (r * (count - m + 1))) +
212
+ (count + 1) * compat_log((count - m + 1) / (count - k + 1)) +
213
+ (k + 0.5) * compat_log(r * (count - k + 1) / (k + 1)) +
214
+ stirling_approx_tail<accscalar_t>(m) + stirling_approx_tail<accscalar_t>(count - m) -
215
+ stirling_approx_tail<accscalar_t>(k) - stirling_approx_tail<accscalar_t>(count - k));
216
+
217
+ if (V <= upperbound) {
218
+ return k;
219
+ }
220
+ }
221
+ }
222
+
223
+ template<typename scalar_t, typename accscalar_t, typename uniform_sampler_t>
224
+ C10_DEVICE scalar_t sample_binomial(scalar_t count, scalar_t prob, BaseSampler<accscalar_t, uniform_sampler_t>& standard_uniform) {
225
+ if (count <= 0.0 || prob <= 0.0) {
226
+ return 0;
227
+ } else if (prob >= 1.0) {
228
+ return count;
229
+ } else if (prob <= 0.5) {
230
+ if (count * prob >= 10.0) {
231
+ // btrs
232
+ return btrs<scalar_t, accscalar_t, uniform_sampler_t>(count, prob, standard_uniform);
233
+ } else {
234
+ // binomial inversion
235
+ return binomial_inversion<scalar_t, accscalar_t, uniform_sampler_t>(count, prob, standard_uniform);
236
+ }
237
+ } else if (prob > 0.5) {
238
+ scalar_t qprob = 1.0 - prob;
239
+ if (count * qprob >= 10.0) {
240
+ // btrs
241
+ return count - btrs<scalar_t, accscalar_t, uniform_sampler_t>(count, qprob, standard_uniform);
242
+ } else {
243
+ // count - binomial inversion
244
+ return count - binomial_inversion<scalar_t, accscalar_t, uniform_sampler_t>(count, qprob, standard_uniform);
245
+ }
246
+ } else {
247
+ // prob is nan?
248
+ return static_cast<scalar_t>(NAN);
249
+ }
250
+ }
251
+
252
+ /*
253
+ * This function is derived from the implementation of the digamma function in the Cephes Math Library.
254
+ * See note [3-Clause BSD License for the Cephes Math Library] in ATen/native/Math.h.
255
+ */
256
+ template<typename scalar_t, typename accscalar_t>
257
+ C10_DEVICE static inline scalar_t digamma_one(scalar_t x) {
258
+ constexpr accscalar_t PSI_10 = 2.25175258906672110764;
259
+ if (x == 0) {
260
+ return INFINITY;
261
+ }
262
+ accscalar_t additional_summand = 0;
263
+ int x_is_integer = x == compat_floor(x);
264
+ if (x < 0) {
265
+ if (x_is_integer) {
266
+ return INFINITY;
267
+ }
268
+ // it is more standard to write this as recursion, but
269
+ // nvcc does not like that
270
+ additional_summand = -c10::pi<scalar_t> /
271
+ compat_tan(c10::pi<scalar_t> * x);
272
+ x = 1 - x;
273
+ }
274
+
275
+ // Push x to be >= 10
276
+ accscalar_t result = 0;
277
+ while (x < 10) {
278
+ result -= 1 / x;
279
+ x += 1;
280
+ }
281
+ if (x == 10) {
282
+ return result + PSI_10 + additional_summand;
283
+ }
284
+
285
+ // Compute asymptotic digamma
286
+ static const accscalar_t A[] = {
287
+ 8.33333333333333333333E-2,
288
+ -2.10927960927960927961E-2,
289
+ 7.57575757575757575758E-3,
290
+ -4.16666666666666666667E-3,
291
+ 3.96825396825396825397E-3,
292
+ -8.33333333333333333333E-3,
293
+ 8.33333333333333333333E-2,
294
+ };
295
+
296
+ accscalar_t y = 0;
297
+ if (x < 1.0e17f) {
298
+ accscalar_t z = 1.0 / (x * x);
299
+ y = z * polevl<accscalar_t>(z, A, 6);
300
+ }
301
+ return static_cast<scalar_t>(
302
+ result + compat_log(x) - (0.5f / x) - y + additional_summand);
303
+ }
304
+
305
+ // Computes the reparameterized gradient -(d/dalpha cdf(x;alpha)) / pdf(x;alpha)
306
+ // for random number x drawn from a standard Gamma distribution Gamma(alpha).
307
+ template <typename scalar_t, typename accscalar_t>
308
+ C10_HOST_DEVICE scalar_t standard_gamma_grad_one(scalar_t alpha_, scalar_t x_) {
309
+ // Use a Taylor series expansion for small x.
310
+ accscalar_t x = static_cast<accscalar_t>(x_);
311
+ accscalar_t alpha = static_cast<accscalar_t>(alpha_);
312
+ if (x < 0.8f) {
313
+ accscalar_t numer = 1;
314
+ accscalar_t denom = alpha;
315
+ auto series1 = numer / denom;
316
+ auto series2 = numer / (denom * denom);
317
+ for (int i = 1; i <= 5; ++i) {
318
+ numer *= -x / static_cast<accscalar_t>(i);
319
+ denom += 1;
320
+ series1 += numer / denom;
321
+ series2 += numer / (denom * denom);
322
+ }
323
+ const auto pow_x_alpha = compat_pow(x, alpha);
324
+ const auto gamma_pdf = compat_pow(x, alpha - 1) * compat_exp(-x);
325
+ const auto gamma_cdf = pow_x_alpha * series1;
326
+ const auto gamma_cdf_alpha =
327
+ (compat_log(x) - digamma_one<accscalar_t, accscalar_t>(alpha)) *
328
+ gamma_cdf -
329
+ pow_x_alpha * series2;
330
+ const auto result = -gamma_cdf_alpha / gamma_pdf;
331
+ return isnan(result) ? static_cast<scalar_t>( 0.f ) : static_cast<scalar_t>(result);
332
+ }
333
+
334
+ // Use a Rice saddle point expansion for large alpha.
335
+ if (alpha > 8.0f) {
336
+ if (0.9f * alpha <= x && x <= 1.1f * alpha) {
337
+ const auto numer_1 = 1 + 24 * alpha * (1 + 12 * alpha);
338
+ const auto numer_2 = 1440 * (alpha * alpha) + 6 * x * (53 - 120 * x)
339
+ - 65 * x * x / alpha + alpha * (107 + 3600 * x);
340
+ const auto denom = 1244160 * (alpha * alpha) * (alpha * alpha);
341
+ return static_cast<scalar_t>(numer_1 * numer_2 / denom);
342
+ }
343
+ const auto denom = compat_sqrt(8 * alpha);
344
+ const auto term2 = denom / (alpha - x);
345
+ const auto term3 = compat_pow(
346
+ x - alpha - alpha * compat_log(x / alpha),
347
+ static_cast<accscalar_t>(-1.5));
348
+ const auto term23 = (x < alpha) ? term2 - term3 : term2 + term3;
349
+ const auto term1 = compat_log(x / alpha) * term23 -
350
+ compat_sqrt(2 / alpha) * (alpha + x) / ((alpha - x) * (alpha - x));
351
+ const auto stirling = 1 + 1 / (12 * alpha) * (1 + 1 / (24 * alpha));
352
+ const auto numer = x * term1;
353
+ return static_cast<scalar_t>(-stirling * numer / denom);
354
+ }
355
+
356
+ // Use a bivariate rational approximation to the reparameterized gradient.
357
+ const auto u = compat_log(x / alpha);
358
+ const auto v = compat_log(alpha);
359
+ static const accscalar_t coef_uv[3][8] = {
360
+ {0.16009398, -0.094634809, 0.025146376, -0.0030648343,
361
+ 1, 0.32668115, 0.10406089, 0.0014179084},
362
+ {0.53487893, 0.1298071, 0.065735949, -0.0015649758,
363
+ 0.16639465, 0.020070113, -0.0035938915, -0.00058392623},
364
+ {0.040121004, -0.0065914022, -0.0026286047, -0.0013441777,
365
+ 0.017050642, -0.0021309326, 0.00085092367, -1.5247877e-07},
366
+ };
367
+ accscalar_t coef_v[8];
368
+ for (int i = 0; i < 8; ++ i) {
369
+ coef_v[i] = coef_uv[0][i] + u * (coef_uv[1][i] + u * coef_uv[2][i]);
370
+ }
371
+ const auto p = coef_v[0] + v * (coef_v[1] + v * (coef_v[2] + v * coef_v[3]));
372
+ const auto q = coef_v[4] + v * (coef_v[5] + v * (coef_v[6] + v * coef_v[7]));
373
+ return static_cast<scalar_t>(compat_exp(p / q));
374
+ }
375
+
376
+ // Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha.
377
+ // Assumes x is close to zero and uses a Taylor expansion.
378
+ template <typename scalar_t, typename accscalar_t>
379
+ C10_DEVICE static inline scalar_t _beta_grad_alpha_small(scalar_t x, scalar_t alpha, scalar_t beta) {
380
+ const scalar_t factor = digamma_one<scalar_t, accscalar_t>(alpha)
381
+ - digamma_one<scalar_t, accscalar_t>(alpha + beta) - compat_log(x);
382
+ scalar_t numer = 1;
383
+ scalar_t series = numer / alpha * (factor + 1 / alpha);
384
+ for (int i = 1; i <= 10; ++i) {
385
+ scalar_t casted_i = static_cast<scalar_t>(i);
386
+ numer *= (casted_i - beta) * x / casted_i;
387
+ const scalar_t denom = alpha + casted_i;
388
+ series += numer / denom * (factor + 1 / denom);
389
+ }
390
+ const scalar_t result = x * compat_pow(1 - x, -beta) * series;
391
+ return isnan(result) ? static_cast<scalar_t>( 0.f ) : result;
392
+ }
393
+
394
+ // Approximate reparameterized gradient of Beta(x,alpha,beta) wrt beta.
395
+ // Assumes x is close to zero and uses a Taylor expansion.
396
+ template <typename scalar_t, typename accscalar_t>
397
+ C10_DEVICE static inline scalar_t _beta_grad_beta_small(scalar_t x, scalar_t alpha, scalar_t beta) {
398
+ const scalar_t factor = digamma_one<scalar_t, accscalar_t>(alpha + beta) - digamma_one<scalar_t, accscalar_t>(beta);
399
+ scalar_t numer = 1, betas = 1, dbetas = 0, series = factor / alpha;
400
+ for (int i = 1; i <= 8; ++i) {
401
+ scalar_t casted_i = static_cast<scalar_t>(i);
402
+ numer *= -x / casted_i;
403
+ dbetas = dbetas * (beta - casted_i) + betas;
404
+ betas = betas * (beta - casted_i);
405
+ series += numer / (alpha + casted_i) * (dbetas + factor * betas);
406
+ }
407
+ const scalar_t result = -compat_pow(1 - x, 1 - beta) * series;
408
+ return isnan(result) ? static_cast<scalar_t>(0.f) : result;
409
+ }
410
+
411
+ // Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha.
412
+ // Assumes alpha and beta are both large and uses a Rice saddle point expansion.
413
+ // To ensure numerical stability, this computation is performed at higher precision.
414
+ template<typename scalar_t, typename accscalar_t>
415
+ C10_DEVICE static inline scalar_t _beta_grad_alpha_mid(accscalar_t x, accscalar_t alpha, accscalar_t beta) {
416
+ const accscalar_t total = alpha + beta;
417
+ const accscalar_t mean = alpha / total;
418
+ const accscalar_t std = compat_sqrt(alpha * beta / (total + 1)) / total;
419
+ if (mean - 0.1 * std <= x && x <= mean + 0.1 * std) {
420
+ // Avoid the singularity at x = mean.
421
+ const accscalar_t poly = 47 * x * (beta * beta) * (beta * beta) + alpha * (
422
+ (43 + 20 * (16 + 27 * beta) * x) * (beta * beta) * beta + alpha * (
423
+ 3 * (59 + 180 * beta - 90 * x) * (beta * beta) + alpha * (
424
+ (453 + 1620 * beta * (1 - x) - 455 * x) * beta + alpha * (
425
+ 8 * (1 - x) * (135 * beta - 11)))));
426
+ const accscalar_t prefactor_num = (1 + 12 * alpha) * (1 + 12 * beta) / (total * total);
427
+ const accscalar_t prefactor_den = 12960 * alpha * alpha * alpha * beta * beta * (1 + 12 * total);
428
+ return prefactor_num / (1 - x) * poly / prefactor_den;
429
+ }
430
+ const accscalar_t prefactor = -x / compat_sqrt(2 * alpha * beta / total);
431
+ const accscalar_t stirling = (1 + 1 / (12 * alpha) + 1 / (288 * alpha * alpha))
432
+ * (1 + 1 / (12 * beta) + 1 / (288 * beta * beta))
433
+ / (1 + 1 / (12 * total) + 1 / (288 * total * total));
434
+ const accscalar_t term1_num = 2 * (alpha * alpha) * (x - 1) + alpha * beta * (x - 1) - x * (beta * beta);
435
+ const accscalar_t axbx = alpha * (x - 1) + beta * x;
436
+ const accscalar_t term1_den = compat_sqrt(2 * alpha / beta) * compat_pow(total, static_cast<accscalar_t>(1.5f)) * axbx * axbx;
437
+ const accscalar_t term1 = term1_num / term1_den;
438
+ const accscalar_t term2 = 0.5f * compat_log(alpha / (total * x));
439
+ const accscalar_t term3_num = compat_sqrt(8 * alpha * beta / total);
440
+ const accscalar_t term3_den = beta * x + alpha * (x - 1);
441
+ const accscalar_t term3 = term3_num / term3_den;
442
+ const accscalar_t term4_base = beta * compat_log(beta / (total * (1 - x))) +
443
+ alpha * compat_log(alpha / (total * x));
444
+ const accscalar_t term4 = compat_pow(term4_base, static_cast<accscalar_t>(-1.5f));
445
+ const accscalar_t term1234 = term1 + term2 * (term3 + (x < mean ? term4 : -term4));
446
+ return static_cast<scalar_t>(stirling * prefactor * term1234);
447
+ }
448
+
449
+ // Computes a scaled reparameterized gradient
450
+ // -(d/dalpha cdf(x;alpha,beta)) / pdf(x;alpha,beta) / (1-x)
451
+ // for random number x drawn from a Beta distribution Beta(alpha,beta).
452
+ // This function takes total = alpha + beta as an input to make it easy to implement
453
+ // Dirichlet reparameterized gradients in terms of Betas.
454
+ template<typename scalar_t, typename accscalar_t>
455
+ C10_HOST_DEVICE static inline scalar_t dirichlet_grad_one(scalar_t x, scalar_t alpha, scalar_t total) {
456
+ accscalar_t x_ = static_cast<accscalar_t>(x);
457
+ accscalar_t alpha_ = static_cast<accscalar_t>(alpha);
458
+ accscalar_t total_ = static_cast<accscalar_t>(total);
459
+
460
+ const scalar_t beta = total - alpha;
461
+ const accscalar_t beta_ = total_ - alpha_;
462
+ const scalar_t boundary = total * x * (1 - x);
463
+
464
+ // Use an asymptotic approximation for x close to 0.
465
+ if (x <= 0.5f && boundary < 2.5f) {
466
+ return _beta_grad_alpha_small<scalar_t, accscalar_t>(x, alpha, beta);
467
+ }
468
+
469
+ // Use an asymptotic approximation for x close to 1.
470
+ if (x >= 0.5f && boundary < 0.75f) {
471
+ return -_beta_grad_beta_small<scalar_t, accscalar_t>(1 - x, beta, alpha);
472
+ }
473
+
474
+ // Use an asymptotic approximation when alpha and (total - alpha) are both large.
475
+ if (alpha > 6 && beta > 6) {
476
+ return _beta_grad_alpha_mid<scalar_t, accscalar_t>(x_, alpha_, beta_);
477
+ }
478
+
479
+ // Use a rational correction to an analytic approximation.
480
+ static const accscalar_t c[2][3][3][4] = {
481
+ {{{1.003668233, -0.01061107488, -0.0657888334, 0.01201642863},
482
+ {0.6336835991, -0.3557432599, 0.05486251648, -0.001465281033},
483
+ {-0.03276231906, 0.004474107445, 0.002429354597, -0.0001557569013}},
484
+ {{0.221950385, -0.3187676331, 0.01799915743, 0.01074823814},
485
+ {-0.2951249643, 0.06219954479, 0.01535556598, 0.001550077057},
486
+ {0.02155310298, 0.004170831599, 0.001292462449, 6.976601077e-05}},
487
+ {{-0.05980841433, 0.008441916499, 0.01085618172, 0.002319392565},
488
+ {0.02911413504, 0.01400243777, -0.002721828457, 0.000751041181},
489
+ {0.005900514878, -0.001936558688, -9.495446725e-06, 5.385558597e-05}}},
490
+ {{{1, -0.02924021934, -0.04438342661, 0.007285809825},
491
+ {0.6357567472, -0.3473456711, 0.05454656494, -0.002407477521},
492
+ {-0.03301322327, 0.004845219414, 0.00231480583, -0.0002307248149}},
493
+ {{0.5925320577, -0.1757678135, 0.01505928619, 0.000564515273},
494
+ {0.1014815858, -0.06589186703, 0.01272886114, -0.0007316646956},
495
+ {-0.007258481865, 0.001096195486, 0.0003934994223, -4.12701925e-05}},
496
+ {{0.06469649321, -0.0236701437, 0.002902096474, -5.896963079e-05},
497
+ {0.001925008108, -0.002869809258, 0.0008000589141, -6.063713228e-05},
498
+ {-0.0003477407336, 6.959756487e-05, 1.097287507e-05, -1.650964693e-06}}},
499
+ };
500
+ const accscalar_t u = compat_log(x_);
501
+ const accscalar_t a = compat_log(alpha_) - u;
502
+ const accscalar_t b = compat_log(total_) - a;
503
+ const accscalar_t pow_u[3] = {1, u, u * u};
504
+ const accscalar_t pow_a[3] = {1, a, a * a};
505
+ accscalar_t p = 0.0;
506
+ accscalar_t q = 0.0;
507
+ for (int i = 0; i < 3; ++i) {
508
+ for (int j = 0; j < 3; ++j) {
509
+ const accscalar_t ua = pow_u[i] * pow_a[j];
510
+ p += ua * (c[0][i][j][0] + b * (c[0][i][j][1] + b * (c[0][i][j][2] + b * c[0][i][j][3])));
511
+ q += ua * (c[1][i][j][0] + b * (c[1][i][j][1] + b * (c[1][i][j][2] + b * c[1][i][j][3])));
512
+ }
513
+ }
514
+ const accscalar_t approx = x_ * (digamma_one<scalar_t, accscalar_t>(total_) - digamma_one<scalar_t, accscalar_t>(alpha_)) / beta_;
515
+ return static_cast<scalar_t>(p / q * approx);
516
+ }
517
+
518
+ } // namespace
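The gamma and dirichlet branches above both end in the same numeric pattern: a fitted bivariate rational function evaluated by collapsing one variable with Horner's rule into per-term coefficients, then running Horner's rule again in the second variable. A minimal standalone C++ sketch of that evaluation scheme, using illustrative placeholder coefficients rather than the fitted tables above:

#include <cstdio>

// Evaluate p(u, v) / q(u, v), where p and q are cubic in v and their
// coefficients are quadratic in u -- the same layout as coef_uv above.
static double rational_uv(double u, double v, const double c[3][8]) {
  double cv[8];
  for (int i = 0; i < 8; ++i) {
    cv[i] = c[0][i] + u * (c[1][i] + u * c[2][i]);  // Horner in u
  }
  const double p = cv[0] + v * (cv[1] + v * (cv[2] + v * cv[3]));  // Horner in v
  const double q = cv[4] + v * (cv[5] + v * (cv[6] + v * cv[7]));
  return p / q;
}

int main() {
  // Placeholder coefficients; the real tables are fitted offline.
  const double c[3][8] = {
    {1.0, 0.5, 0.25, 0.125, 1.0, 0.1, 0.01, 0.001},
    {0.2, 0.1, 0.05, 0.025, 0.0, 0.0, 0.0, 0.0},
    {0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0},
  };
  std::printf("%f\n", rational_uv(0.3, -0.7, c));
  return 0;
}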
venv/lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h ADDED
@@ -0,0 +1,139 @@
1
+ #include <ATen/core/Tensor.h>
2
+ #include <ATen/Config.h>
3
+ #include <cstdint>
4
+
5
+ #ifdef USE_FBGEMM
6
+ #include <fbgemm/FbgemmEmbedding.h>
7
+ #endif
8
+
9
+ namespace at::native {
10
+
11
+ void check_arguments(
12
+ const Tensor& weight,
13
+ const Tensor& indices,
14
+ const Tensor& offsets,
15
+ const int64_t mode,
16
+ const c10::optional<Tensor>& per_sample_weights,
17
+ bool include_last_offset);
18
+
19
+ void make_bag_size_out(
20
+ Tensor& bag_size_out,
21
+ const Tensor& offsets,
22
+ const Tensor& indices,
23
+ const int64_t mode,
24
+ const bool include_last_offset,
25
+ const bool requires_grad);
26
+
27
+ void make_max_indices_out(
28
+ Tensor& max_indices_out,
29
+ const Tensor& weight,
30
+ const Tensor& indices,
31
+ const Tensor& offsets,
32
+ const Tensor& bag_size,
33
+ const int64_t mode,
34
+ bool include_last_offset);
35
+
36
+ void make_offset2bag_out(
37
+ Tensor& offset2bag,
38
+ Tensor& output,
39
+ const Tensor& weight,
40
+ const Tensor& indices,
41
+ const Tensor& offsets,
42
+ const int64_t mode,
43
+ const c10::optional<Tensor>& per_sample_weights,
44
+ const int64_t padding_idx = -1);
45
+
46
+ #ifdef USE_FBGEMM
47
+
48
+ template<bool has_weight, typename TIndex, typename TData>
49
+ struct _CallbackAndBlockSize {
50
+ using TCallback = typename fbgemm::EmbeddingSpMDMKernelSignature<TData, TIndex, TIndex, TData>::Type;
51
+
52
+ int64_t blockSize = -1;
53
+ TCallback callback = nullptr;
54
+
55
+ static TCallback generateCallback(int64_t block_size) {
56
+ return fbgemm::GenerateEmbeddingSpMDM<TData, TIndex, TIndex, TData>(
57
+ block_size,
58
+ has_weight,
59
+ /* normalize_by_lengths */false,
60
+ /* prefetch */16,
61
+ /* is_weight_positional */false,
62
+ /* use_offsets */true);
63
+ }
64
+
65
+ _CallbackAndBlockSize() = default;
66
+
67
+ explicit _CallbackAndBlockSize(c10::optional<int64_t> maybe_block_size)
68
+ : blockSize(maybe_block_size.value_or(-1))
69
+ , callback(maybe_block_size.has_value() ? generateCallback(maybe_block_size.value()) : nullptr)
70
+ {}
71
+ };
72
+
73
+ template<typename... StorageMixins>
74
+ struct _EmbeddingBagKernelCacheImpl : private StorageMixins... {
75
+
76
+ _EmbeddingBagKernelCacheImpl() = default;
77
+ // use each of the mixins to store corresponding kernel and block size
78
+ explicit _EmbeddingBagKernelCacheImpl(c10::optional<int64_t> maybe_block_size)
79
+ : StorageMixins(maybe_block_size)...
80
+ {}
81
+
82
+ // this method is thread safe (call sites may call from different threads)
83
+ template<bool has_weight, typename TIndex, typename TData>
84
+ typename _CallbackAndBlockSize<has_weight, TIndex, TData>::TCallback
85
+ getCallback(int64_t block_size) const {
86
+ // if the cache doesn't store the kernel for the incoming block size
87
+ // (so it is different from the one stored in corresponding mixin)
88
+ // regenerate the kernel (not writing it into the cache so we avoid locks)
89
+ if (block_size != _CallbackAndBlockSize<has_weight, TIndex, TData>::blockSize) {
90
+ return _CallbackAndBlockSize<has_weight, TIndex, TData>::generateCallback(block_size);
91
+ }
92
+ // else retrieve the cached kernel from the corresponding mixin
93
+ return _CallbackAndBlockSize<has_weight, TIndex, TData>::callback;
94
+ }
95
+ };
96
+
97
+ // instantiate the cache with the list of storage mixins
98
+ // for each of the 8 _EmbeddingBagKernelCache* usages in the EmbeddingBag.cpp impl file
99
+ using _EmbeddingBagKernelCache = _EmbeddingBagKernelCacheImpl<
100
+ _CallbackAndBlockSize<true, int32_t, float>,
101
+ _CallbackAndBlockSize<false, int32_t, float>,
102
+ _CallbackAndBlockSize<true, int64_t, float>,
103
+ _CallbackAndBlockSize<false, int64_t, float>,
104
+ _CallbackAndBlockSize<true, int32_t, unsigned short>,
105
+ _CallbackAndBlockSize<false, int32_t, unsigned short>,
106
+ _CallbackAndBlockSize<true, int64_t, unsigned short>,
107
+ _CallbackAndBlockSize<false, int64_t, unsigned short>>;
108
+ #else
109
+ struct _EmbeddingBagKernelCache {
110
+ explicit _EmbeddingBagKernelCache(c10::optional<int64_t> /* maybe_block_size */) {}
111
+ };
112
+ #endif
113
+
114
+ void _embedding_bag_cpu_impl_out(Tensor& output, Tensor& offset2bag,
115
+ Tensor& bag_size, Tensor* max_indices,
116
+ const Tensor &weight, const Tensor &indices,
117
+ const Tensor &offsets, const int64_t mode = 0,
118
+ const c10::optional<Tensor>& per_sample_weights = c10::nullopt,
119
+ bool include_last_offset = false,
120
+ int64_t padding_idx = -1,
121
+ _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr);
122
+
123
+ void _embedding_bag_cpu_out(
124
+ at::Tensor& output,
125
+ at::Tensor& offset2bag,
126
+ at::Tensor& bag_size,
127
+ at::Tensor* p_max_indices,
128
+ const at::Tensor& weight,
129
+ const at::Tensor& indices,
130
+ const at::Tensor& offsets,
131
+ const bool scale_grad_by_freq,
132
+ const int64_t mode,
133
+ const bool sparse,
134
+ const c10::optional<at::Tensor>& per_sample_weights,
135
+ const bool include_last_offset,
136
+ const c10::optional<int64_t>& padding_idx,
137
+ _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr);
138
+
139
+ } // namespace at::native
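The cache class above composes one storage mixin per (index type, data type, weighted) combination via private inheritance, and regenerates a kernel on the fly when the requested block size misses the cached one, so lookups stay lock-free. A reduced C++ sketch of that private-mixin idiom; CachedForKey, its toy generate(), and the int/long types are hypothetical stand-ins for the fbgemm callback machinery:

#include <cstdio>

// One mixin caches a single value generated for a single key.
template <int Tag>
struct CachedForKey {
  int key = -1;
  long value = 0;
  static long generate(int key) { return static_cast<long>(key) * 10 + Tag; }
  CachedForKey() = default;
  explicit CachedForKey(int k) : key(k), value(generate(k)) {}
};

template <typename... Mixins>
struct CacheImpl : private Mixins... {
  CacheImpl() = default;
  explicit CacheImpl(int k) : Mixins(k)... {}

  template <int Tag>
  long get(int key) const {
    // Cache miss: regenerate without writing back, avoiding any locking.
    if (key != CachedForKey<Tag>::key) {
      return CachedForKey<Tag>::generate(key);
    }
    // Cache hit: return the value stored in the corresponding mixin.
    return CachedForKey<Tag>::value;
  }
};

int main() {
  CacheImpl<CachedForKey<0>, CachedForKey<1>> cache(7);
  // First call hits the cache (prints 70), second misses and regenerates (80).
  std::printf("%ld %ld\n", cache.get<0>(7), cache.get<0>(8));
  return 0;
}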
venv/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h ADDED
@@ -0,0 +1,371 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Device.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/ScalarType.h>
6
+ #include <ATen/core/Tensor.h>
7
+ #include <ATen/native/utils/ParamsHash.h>
8
+ #include <c10/util/Exception.h>
9
+ #include <c10/util/irange.h>
10
+
11
+ #ifndef AT_PER_OPERATOR_HEADERS
12
+ #include <ATen/NativeFunctions.h>
13
+ #else
14
+ #include <ATen/ops/result_type_native.h>
15
+ #endif
16
+
17
+ #include <unordered_map>
18
+ #include <vector>
19
+
20
+ namespace at::native {
21
+ namespace {
22
+ // Check if tensor list has either a boolean tensor or an integer tensor
23
+ inline bool has_integral_tensor(TensorList tensors, const bool includeBool) {
24
+ return std::any_of(
25
+ tensors.begin(), tensors.end(), [&includeBool](const auto& t) {
26
+ return at::isIntegralType(t.scalar_type(), includeBool);
27
+ });
28
+ }
29
+ // check if tensor list has bool tensors
30
+ inline bool has_bool_tensor(TensorList tensors) {
31
+ return std::any_of(tensors.begin(), tensors.end(), [](const auto& t) -> bool {
32
+ return t.scalar_type() == ScalarType::Bool;
33
+ });
34
+ }
35
+
36
+ // Check foreach API restrictions
37
+ // - Tensor lists must be non-empty.
38
+ // - All TensorLists and ScalarLists must have the same number of elements.
39
+ // - Corresponding tensors must have the same size.
40
+ inline void check_foreach_api_restrictions(TensorList tensors) {
41
+ TORCH_CHECK(!tensors.empty(), "Tensor list must have at least one tensor.");
42
+ }
43
+
44
+ inline void check_foreach_api_restrictions(
45
+ TensorList tensors,
46
+ ArrayRef<Scalar> scalars) {
47
+ check_foreach_api_restrictions(tensors);
48
+ TORCH_CHECK(
49
+ tensors.size() == scalars.size(),
50
+ "Tensor list must have same number of elements as scalar list.");
51
+ }
52
+
53
+ inline void check_foreach_api_restrictions(
54
+ TensorList tensors1,
55
+ TensorList tensors2) {
56
+ TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor.");
57
+ TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor.");
58
+ TORCH_CHECK(
59
+ tensors1.size() == tensors2.size(),
60
+ "Tensor lists must have the same number of tensors, got ",
61
+ tensors1.size(),
62
+ " and ",
63
+ tensors2.size());
64
+ }
65
+
66
+ inline void check_foreach_api_restrictions(
67
+ TensorList tensors1,
68
+ TensorList tensors2,
69
+ TensorList tensors3) {
70
+ TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor.");
71
+ TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor.");
72
+ TORCH_CHECK(!tensors3.empty(), "Tensor list must have at least one tensor.");
73
+ TORCH_CHECK(
74
+ tensors1.size() == tensors2.size(),
75
+ "Tensor lists must have the same number of tensors, got ",
76
+ tensors1.size(),
77
+ " and ",
78
+ tensors2.size());
79
+ TORCH_CHECK(
80
+ tensors1.size() == tensors3.size(),
81
+ "Tensor lists must have the same number of tensors, got ",
82
+ tensors1.size(),
83
+ " and ",
84
+ tensors3.size());
85
+ }
86
+
87
+ inline void check_foreach_api_restrictions(
88
+ TensorList tensors1,
89
+ TensorList tensors2,
90
+ TensorList tensors3,
91
+ ArrayRef<Scalar> scalars) {
92
+ check_foreach_api_restrictions(tensors1, tensors2, tensors3);
93
+ TORCH_CHECK(
94
+ tensors1.size() == scalars.size(),
95
+ "Tensor list must have same number of elements as scalar list, got ",
96
+ tensors1.size(),
97
+ " and ",
98
+ scalars.size());
99
+ }
100
+
101
+ // Helper function called in check_fast_path_restrictions to check whether all
102
+ // corresponding tensors (aligned by index across the tensorLists) share the
103
+ // same device and dtype.
104
+ inline bool _check_tensors_share_device_and_dtype(
105
+ ArrayRef<TensorList> tensorLists) {
106
+ const auto expected_dtype = tensorLists[0][0].dtype();
107
+ const auto expected_device = tensorLists[0][0].device();
108
+
109
+ auto is_tensor_okay = [&](const Tensor& tensor) {
110
+ return tensor.dtype() == expected_dtype &&
111
+ tensor.device() == expected_device && tensor.layout() == at::kStrided &&
112
+ tensor.is_non_overlapping_and_dense();
113
+ };
114
+
115
+ for (const auto& tensorList : tensorLists) {
116
+ for (const auto& tensor : tensorList) {
117
+ if (!is_tensor_okay(tensor)) {
118
+ return false;
119
+ }
120
+ }
121
+ }
122
+
123
+ return true;
124
+ }
125
+
126
+ // Helper function called in check_fast_path_restrictions to check if
127
+ // corresponding tensors in tensor lists have the same sizes and strides.
128
+ inline bool _check_tensors_share_sizes_and_strides(
129
+ ArrayRef<TensorList> tensorLists) {
130
+ for (const auto i : c10::irange(1, tensorLists.size())) {
131
+ for (const auto j : c10::irange(tensorLists[0].size())) {
132
+ if (tensorLists[0][j].sizes() != tensorLists[i][j].sizes() ||
133
+ tensorLists[0][j].strides() != tensorLists[i][j].strides()) {
134
+ return false;
135
+ }
136
+ }
137
+ }
138
+
139
+ return true;
140
+ }
141
+
142
+ // Helper function called in check_fast_path_restrictions to check whether
143
+ // all tensors type promote properly with the scalars in scalarList. This
144
+ // function assumes that _check_tensors_share_device_and_dtype has already been
145
+ // called so that all corresponding tensors in tensorLists have the same dtype.
146
+ // Then, it is sufficient to check the type promotion with just one tensorList.
147
+ inline bool _check_tensors_do_type_promotion_with_scalars(
148
+ TensorList tensorList,
149
+ ArrayRef<Scalar> scalarList = {},
150
+ bool does_op_promote_integer_inputs_to_float = false) {
151
+ for (const auto i : c10::irange(tensorList.size())) {
152
+ // For division, integer inputs will result in float.
153
+ if (does_op_promote_integer_inputs_to_float) {
154
+ if (at::isIntegralType(
155
+ tensorList[i].scalar_type(), /*includeBool*/ true)) {
156
+ return false;
157
+ }
158
+ }
159
+ if (!scalarList.empty()) {
160
+ const auto& scalar =
161
+ scalarList.size() == 1 ? scalarList[0] : scalarList[i];
162
+ const auto& tensor = tensorList[i];
163
+ // note(mkozuki): This check might be responsible for
164
+ // `_foreach_add(bool_tensors, bool_tensors)` being pushed to slow path.
165
+ if (tensor.scalar_type() != at::native::result_type(scalar, tensor)) {
166
+ return false;
167
+ }
168
+ }
169
+ }
170
+
171
+ return true;
172
+ }
173
+
174
+ // To go via 'fast' path, several conditions must be satisfied
175
+ // - All tensors in all lists must have the same dtype.
176
+ // - All tensors must be on the same device
177
+ // - All tensors must have strided layout
178
+ // - All tensors must be non-overlapping and dense
179
+ // - Resulting tensor must have the same dtype as the input one
180
+
181
+ // Please, make sure to call check_foreach_api_restrictions before calling this
182
+ // method. There is a set of preconditions that have to be satisfied.
183
+ inline bool check_fast_path_restrictions(
184
+ ArrayRef<TensorList> tensorLists,
185
+ ArrayRef<Scalar> scalarList = {},
186
+ bool does_op_promote_integer_inputs_to_float = false) {
187
+ return _check_tensors_share_device_and_dtype(tensorLists) &&
188
+ _check_tensors_share_sizes_and_strides(tensorLists) &&
189
+ _check_tensors_do_type_promotion_with_scalars(
190
+ tensorLists[0],
191
+ scalarList,
192
+ does_op_promote_integer_inputs_to_float);
193
+ }
194
+
195
+ inline std::vector<c10::Scalar> convert_tensor_to_scalar_list(
196
+ const Tensor& scalarList_,
197
+ int64_t expect_length) {
198
+ std::vector<c10::Scalar> scalarList;
199
+ TORCH_CHECK(
200
+ scalarList_.device() == c10::kCPU,
201
+ "Expected scalars to be on CPU, got ",
202
+ scalarList_.device(),
203
+ " instead.");
204
+ TORCH_CHECK(
205
+ scalarList_.is_contiguous(), "Expected scalars to be contiguous.");
206
+ TORCH_CHECK(
207
+ scalarList_.dim() == 1,
208
+ "Expected packed scalar Tensor to be of dimension 1. Got ",
209
+ scalarList_.dim(),
210
+ " instead.");
211
+ AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
212
+ kComplexHalf,
213
+ kHalf,
214
+ kBool,
215
+ kBFloat16,
216
+ scalarList_.scalar_type(),
217
+ "convert_tensor_to_scalar_list",
218
+ [&]() {
219
+ const scalar_t* scalar_data = scalarList_.data_ptr<scalar_t>();
220
+ TORCH_CHECK(
221
+ (expect_length == scalarList_.size(0)),
222
+ "Expected length of scalars to match input of length ",
223
+ expect_length,
224
+ " but got ",
225
+ scalarList_.size(0),
226
+ " instead.");
227
+ for (int64_t i = 0; i < scalarList_.size(0); i++) {
228
+ scalarList.emplace_back(scalar_data[i]);
229
+ }
230
+ });
231
+ return scalarList;
232
+ }
233
+
234
+ inline bool can_use_fast_route(
235
+ ArrayRef<TensorList> tensorLists,
236
+ ArrayRef<Scalar> scalarList = {},
237
+ bool does_op_promote_integer_inputs_to_float = false) {
238
+ return check_fast_path_restrictions(
239
+ tensorLists, scalarList, does_op_promote_integer_inputs_to_float);
240
+ }
241
+
242
+ inline bool can_use_fast_route(
243
+ TensorList tensors1,
244
+ TensorList tensors2,
245
+ bool does_op_promote_integer_inputs_to_float = false) {
246
+ return can_use_fast_route(
247
+ {tensors1, tensors2}, {}, does_op_promote_integer_inputs_to_float);
248
+ }
249
+
250
+ using DeviceDtypeKey = std::pair<at::Device, at::ScalarType>;
251
+ using IndicesT = std::vector<size_t>;
252
+ using nested_optional_tensorvec_t =
253
+ std::vector<std::vector<c10::optional<at::Tensor>>>;
254
+ using TensorsAndIndicesT = std::pair<nested_optional_tensorvec_t, IndicesT>;
255
+ using FlatMap = std::unordered_map<
256
+ DeviceDtypeKey,
257
+ TensorsAndIndicesT,
258
+ ParamsHash<DeviceDtypeKey>>;
259
+
260
+ inline FlatMap _group_tensors_by_first_tensors_device_and_dtype(
261
+ const nested_optional_tensorvec_t& nested_tensorlist,
262
+ const bool with_indices) {
263
+ FlatMap grouped_tensors_with_indices;
264
+
265
+ TORCH_CHECK(!nested_tensorlist.empty());
266
+ TORCH_CHECK(!nested_tensorlist[0].empty());
267
+ const auto num_lists = nested_tensorlist.size();
268
+ const auto num_tensors = nested_tensorlist[0].size();
269
+
270
+ TORCH_CHECK(std::all_of(
271
+ nested_tensorlist.cbegin(),
272
+ nested_tensorlist.cend(),
273
+ [&](const auto& tensorlist) -> bool {
274
+ // note(crcrpar): Allow empty tensorlists following
275
+ // ref:
276
+ // https://github.com/pytorch/pytorch/blob/85885301fd3c6adb8b9dc3cf7afadf6945566684/torch/utils/_foreach_utils.py#L21-L24
277
+ return tensorlist.size() == num_tensors || tensorlist.size() == 0;
278
+ }));
279
+
280
+ for (const auto& tensor_index : c10::irange(num_tensors)) {
281
+ const auto key = [&]() -> DeviceDtypeKey {
282
+ const auto t = nested_tensorlist[0][tensor_index];
283
+ TORCH_CHECK(
284
+ t.has_value(),
285
+ "Tensors of the first list of nested Tensor lists are supposed to be defined but ",
286
+ "the ",
287
+ tensor_index,
288
+ "-th Tensor is not.");
289
+ return {t->device(), t->scalar_type()};
290
+ }();
291
+ TORCH_CHECK(
292
+ std::all_of(
293
+ nested_tensorlist.cbegin(),
294
+ nested_tensorlist.cend(),
295
+ [&](const auto& tensorlist) -> bool {
296
+ if (tensorlist.size() == 0) {
297
+ return true;
298
+ }
299
+ const auto& tensor = tensorlist[tensor_index];
300
+ // note(crcrpar): Currently the scope of this function is
301
+ // optimizers so there could be `state_steps` and other scalars
302
+ // whose elements are float tensors no matter what the parameter's
303
+ // dtype is.
304
+ if (!tensor.has_value()) {
305
+ return true;
306
+ } else {
307
+ const auto s = tensor->scalar_type();
308
+ const auto d = tensor->device();
309
+ // Note: `step` or `state_step` is float32 by default.
310
+ if (key.first == d) {
311
+ return key.second == s || s == at::ScalarType::Float ||
312
+ s == at::ScalarType::Double;
313
+ } else if (d.is_cpu()) {
314
+ // note(crcrpar): There are some test cases (e.g.
315
+ // TestOptim::test_adam) where state_steps are on CPU and the
316
+ // others are on CUDA. Currently a state_step Tensor has the
317
+ // dtype of float.
318
+ return s == at::ScalarType::Float ||
319
+ s == at::ScalarType::Double;
320
+ } else {
321
+ return false;
322
+ }
323
+ }
324
+ }),
325
+ "Tensors at the same index must be on the same device and have the same dtype, except for `step` tensors, which may be on CPU with dtype float32/64");
326
+ if (!grouped_tensors_with_indices.count(key)) {
327
+ grouped_tensors_with_indices.insert(
328
+ {key,
329
+ TensorsAndIndicesT{
330
+ [&]() -> nested_optional_tensorvec_t {
331
+ nested_optional_tensorvec_t nested_tensorvec;
332
+ nested_tensorvec.reserve(num_lists);
333
+ for (const auto& i : c10::irange(num_lists)) {
334
+ std::vector<c10::optional<at::Tensor>> tensors;
335
+ if (!nested_tensorlist[i].empty()) {
336
+ // NB: num_tensors is the max possible length for any of
337
+ // the inner lists of tensor references. Reserving the max
338
+ // trades memory for perf. This should not have significant
339
+ // impact.
340
+ tensors.reserve(num_tensors);
341
+ }
342
+ nested_tensorvec.emplace_back(tensors);
343
+ }
344
+ return nested_tensorvec;
345
+ }(),
346
+ [&]() -> IndicesT {
347
+ if (!with_indices) {
348
+ return {};
349
+ } else {
350
+ IndicesT indices;
351
+ indices.reserve(num_tensors);
352
+ return indices;
353
+ }
354
+ }()}});
355
+ }
356
+ for (const auto& list_index : c10::irange(num_lists)) {
357
+ if (!nested_tensorlist[list_index].empty()) {
358
+ grouped_tensors_with_indices[key].first[list_index].emplace_back(
359
+ nested_tensorlist[list_index][tensor_index]);
360
+ }
361
+ }
362
+ if (with_indices) {
363
+ grouped_tensors_with_indices[key].second.emplace_back(tensor_index);
364
+ }
365
+ }
366
+
367
+ return grouped_tensors_with_indices;
368
+ }
369
+
370
+ } // namespace
371
+ } // namespace at::native
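The fast-path gate above boils down to taking the first tensor's (device, dtype) pair as the reference key and verifying that every tensor in every list matches it, short-circuiting as soon as one does not. A self-contained sketch of that check with stand-in types; FakeTensor here is hypothetical, not an ATen type:

#include <algorithm>
#include <cstdio>
#include <vector>

struct FakeTensor { int device; int dtype; };

// Mirrors _check_tensors_share_device_and_dtype: everything must match
// the first tensor of the first list.
static bool share_device_and_dtype(
    const std::vector<std::vector<FakeTensor>>& lists) {
  const int dev = lists[0][0].device;
  const int dt = lists[0][0].dtype;
  return std::all_of(lists.begin(), lists.end(), [&](const auto& l) {
    return std::all_of(l.begin(), l.end(), [&](const FakeTensor& t) {
      return t.device == dev && t.dtype == dt;
    });
  });
}

int main() {
  std::vector<std::vector<FakeTensor>> ok  = {{{0, 6}, {0, 6}}, {{0, 6}}};
  std::vector<std::vector<FakeTensor>> bad = {{{0, 6}, {1, 6}}};
  std::printf("%d %d\n", share_device_and_dtype(ok),
              share_device_and_dtype(bad));  // prints "1 0"
  return 0;
}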
venv/lib/python3.10/site-packages/torch/include/ATen/native/FractionalMaxPooling.h ADDED
@@ -0,0 +1,80 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/TensorUtils.h>
4
+ #include <c10/util/irange.h>
5
+
6
+ namespace at::native {
7
+
8
+ template<typename scalar_t>
9
+ static inline std::vector<int> generate_intervals(
10
+ scalar_t sample,
11
+ int64_t inputSize,
12
+ int64_t outputSize,
13
+ int64_t poolSize) {
14
+ std::vector<int> sequence(outputSize);
15
+ if (outputSize > 1) {
16
+ scalar_t alpha = static_cast<scalar_t>(inputSize - poolSize) /
17
+ static_cast<scalar_t>(outputSize - 1);
18
+
19
+ for (const auto i : c10::irange(outputSize - 1)) {
20
+ sequence[i] =
21
+ static_cast<int>((i + sample) * alpha) - static_cast<int>(sample * alpha);
22
+ }
23
+ }
24
+ if (outputSize > 0) {
25
+ sequence[outputSize - 1] = inputSize - poolSize;
26
+ }
27
+ return sequence;
28
+ }
29
+
30
+ template <int64_t ndim>
31
+ static inline void fractional_max_pool_check_shape(
32
+ const Tensor& input,
33
+ const Tensor& randomSamples) {
34
+
35
+ TORCH_CHECK(
36
+ input.scalar_type() == randomSamples.scalar_type(),
37
+ "Expect _random_samples to have the same dtype as input");
38
+
39
+ int64_t ndimension = randomSamples.ndimension();
40
+ TORCH_CHECK(
41
+ ndimension == 3,
42
+ "Expect _random_samples to have 3 dimensions, got ", ndimension);
43
+
44
+ int64_t N = randomSamples.size(0);
45
+ int64_t C = randomSamples.size(1);
46
+ int64_t D = randomSamples.size(2);
47
+
48
+ int64_t input_batch, input_channel;
49
+ if (ndim == 2) {
50
+ // fractional_max_pool2d
51
+ if (input.ndimension() == 3) {
52
+ input_batch = 1;
53
+ input_channel = input.size(0);
54
+ } else {
55
+ input_batch = input.size(0);
56
+ input_channel = input.size(1);
57
+ }
58
+ } else {
59
+ // fractional_max_pool3d
60
+ if (input.ndimension() == 4) {
61
+ input_batch = 1;
62
+ input_channel = input.size(0);
63
+ } else {
64
+ input_batch = input.size(0);
65
+ input_channel = input.size(1);
66
+ }
67
+ }
68
+
69
+ TORCH_CHECK(
70
+ N >= input_batch,
71
+ "Expect _random_samples.size(0) to be no less than the input batch size.");
72
+ TORCH_CHECK(
73
+ C == input_channel,
74
+ "Expect _random_samples.size(1) to equal the input channel size.");
75
+ TORCH_CHECK(
76
+ D == ndim,
77
+ "Expect _random_samples.size(2) to equal ", ndim, "; got ", D, ".");
78
+ }
79
+
80
+ } // namespace at::native
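As a concrete trace of generate_intervals: for inputSize = 10, outputSize = 4, poolSize = 3, and sample = 0.42, alpha is 7/3 and the window start indices come out as 0, 3, 5, with the last window pinned to inputSize - poolSize = 7. A standalone sketch reproducing that computation:

#include <cstdio>
#include <vector>

// Same logic as generate_intervals above, specialized to double.
static std::vector<int> intervals(double sample, long in, long out, long pool) {
  std::vector<int> seq(out);
  if (out > 1) {
    const double alpha = static_cast<double>(in - pool) / (out - 1);
    for (long i = 0; i < out - 1; ++i) {
      seq[i] = static_cast<int>((i + sample) * alpha) -
               static_cast<int>(sample * alpha);
    }
  }
  if (out > 0) seq[out - 1] = static_cast<int>(in - pool);
  return seq;
}

int main() {
  // Start index of each pooling window; prints "0 3 5 7".
  for (int s : intervals(0.42, 10, 4, 3)) std::printf("%d ", s);
  std::printf("\n");
  return 0;
}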
venv/lib/python3.10/site-packages/torch/include/ATen/native/FunctionOfAMatrixUtils.h ADDED
@@ -0,0 +1,20 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <cstdint>
5
+
6
+ namespace at {
7
+ struct TensorIterator;
8
+
9
+ namespace native {
10
+
11
+ using _compute_linear_combination_fn = void(*)(
12
+ TensorIterator& iter,
13
+ int64_t in_stride,
14
+ int64_t coeff_stride,
15
+ int64_t num_summations
16
+ );
17
+
18
+ DECLARE_DISPATCH(_compute_linear_combination_fn, _compute_linear_combination_stub);
19
+
20
+ }} // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h ADDED
@@ -0,0 +1,298 @@
1
+ #pragma once
2
+
3
+ #include <algorithm>
4
+ #include <cmath>
5
+ #include <cstdint>
6
+ #include <utility>
7
+
8
+ #include <ATen/native/GridSamplerUtils.h>
9
+
10
+ namespace at::native {
11
+
12
+ using detail::GridSamplerInterpolation;
13
+ using detail::GridSamplerPadding;
14
+
15
+ // Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value,
16
+ // where we view each pixel as an area between (idx - 0.5) and (idx + 0.5).
17
+ // if align_corners: -1 and +1 get sent to the centers of the corner pixels
18
+ // -1 --> 0
19
+ // +1 --> (size - 1)
20
+ // scale_factor = (size - 1) / 2
21
+ // if not align_corners: -1 and +1 get sent to the image edges
22
+ // -1 --> -0.5
23
+ // +1 --> (size - 1) + 0.5 == size - 0.5
24
+ // scale_factor = size / 2
25
+ template <typename scalar_t>
26
+ static inline scalar_t grid_sampler_unnormalize(scalar_t coord, int64_t size,
27
+ bool align_corners) {
28
+ if (align_corners) {
29
+ // unnormalize coord from [-1, 1] to [0, size - 1]
30
+ return ((coord + 1) / 2) * (size - 1);
31
+ } else {
32
+ // unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
33
+ return ((coord + 1) * size - 1) / 2;
34
+ }
35
+ }
36
+
37
+ // grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize
38
+ // except that it also returns the `d output / d input` via pointer argument
39
+ // `grad_in`.
40
+ // This is useful in the backward pass of grid_sampler.
41
+ template <typename scalar_t>
42
+ static inline scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int64_t size,
43
+ bool align_corners, scalar_t *grad_in) {
44
+ if (align_corners) {
45
+ // unnormalize coord from [-1, 1] to [0, size - 1]
46
+ *grad_in = static_cast<scalar_t>(size - 1) / 2;
47
+ return ((coord + 1) / 2) * (size - 1);
48
+ } else {
49
+ // unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
50
+ *grad_in = static_cast<scalar_t>(size) / 2;
51
+ return ((coord + 1) * size - 1) / 2;
52
+ }
53
+ }
54
+
55
+ // Clips coordinates to between 0 and clip_limit - 1
56
+ template<typename scalar_t>
57
+ static inline scalar_t clip_coordinates(scalar_t in, int64_t clip_limit) {
58
+ return std::min(static_cast<scalar_t>(clip_limit - 1), std::max(in, static_cast<scalar_t>(0)));
59
+ }
60
+
61
+ // clip_coordinates_set_grad works similarly to clip_coordinates except that
62
+ // it also returns the `d output / d input` via pointer argument `grad_in`.
63
+ // This is useful in the backward pass of grid_sampler.
64
+ template<typename scalar_t>
65
+ static inline scalar_t clip_coordinates_set_grad(scalar_t in, int64_t clip_limit,
66
+ scalar_t *grad_in) {
67
+ // Note that it is important for the gradient calculation that borders
68
+ // are considered out of bounds.
69
+ if (in <= static_cast<scalar_t>(0)) {
70
+ *grad_in = static_cast<scalar_t>(0);
71
+ return static_cast<scalar_t>(0);
72
+ } else {
73
+ scalar_t max = static_cast<scalar_t>(clip_limit - 1);
74
+ if (in >= max) {
75
+ *grad_in = static_cast<scalar_t>(0);
76
+ return max;
77
+ } else {
78
+ *grad_in = static_cast<scalar_t>(1);
79
+ return in;
80
+ }
81
+ }
82
+ }
83
+
84
+ // Reflects coordinates until they fall between low and high (inclusive).
85
+ // The bounds are passed as twice their value so that half-integer values
86
+ // can be represented as ints.
87
+ template<typename scalar_t>
88
+ static inline scalar_t reflect_coordinates(scalar_t in, int64_t twice_low,
89
+ int64_t twice_high) {
90
+ if (twice_low == twice_high) {
91
+ return static_cast<scalar_t>(0);
92
+ }
93
+ scalar_t min = static_cast<scalar_t>(twice_low) / 2;
94
+ scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
95
+ in = std::fabs(in - min);
96
+ // `fmod` returns the same sign as `in`, which is positive after the `fabs` above.
97
+ scalar_t extra = std::fmod(in, span);
98
+ int flips = static_cast<int>(std::floor(in / span));
99
+ if (flips % 2 == 0) {
100
+ return extra + min;
101
+ } else {
102
+ return span - extra + min;
103
+ }
104
+ }
105
+
106
+ // reflect_coordinates_set_grad works similarly to reflect_coordinates except
107
+ // that it also returns the `d output / d input` via pointer argument
108
+ // `grad_in`.
109
+ // This is useful in the backward pass of grid_sampler.
110
+ template<typename scalar_t>
111
+ static inline scalar_t reflect_coordinates_set_grad(scalar_t in, int64_t twice_low,
112
+ int64_t twice_high, scalar_t *grad_in) {
113
+ if (twice_low == twice_high) {
114
+ *grad_in = static_cast<scalar_t>(0);
115
+ return static_cast<scalar_t>(0);
116
+ }
117
+ int grad_in_mult_;
118
+ scalar_t min = static_cast<scalar_t>(twice_low) / 2;
119
+ scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
120
+ in = in - min;
121
+ if (in < static_cast<scalar_t>(0)) {
122
+ grad_in_mult_ = -1;
123
+ in = -in;
124
+ } else {
125
+ grad_in_mult_ = 1;
126
+ }
127
+ // `fmod` returns the same sign as `in`, which is positive after the `if` above.
128
+ scalar_t extra = std::fmod(in, span);
129
+ int flips = static_cast<int>(std::floor(in / span));
130
+ if (flips % 2 == 0) {
131
+ *grad_in = static_cast<scalar_t>(grad_in_mult_);
132
+ return extra + min;
133
+ } else {
134
+ *grad_in = static_cast<scalar_t>(-grad_in_mult_);
135
+ return span - extra + min;
136
+ }
137
+ }
138
+
139
+ // Mapping the out-of-boundary points back into boundary
140
+ // This would only affect padding_mode=border or reflection
141
+ template<typename scalar_t>
142
+ static inline scalar_t compute_coordinates(scalar_t coord, int64_t size,
143
+ GridSamplerPadding padding_mode,
144
+ bool align_corners) {
145
+ if (padding_mode == GridSamplerPadding::Border) {
146
+ // clip coordinates to image borders
147
+ coord = clip_coordinates(coord, size);
148
+ } else if (padding_mode == GridSamplerPadding::Reflection) {
149
+ // reflect coordinates by image borders
150
+ if (align_corners) {
151
+ coord = reflect_coordinates(coord, 0, 2*(size - 1));
152
+ } else {
153
+ coord = reflect_coordinates(coord, -1, 2*size - 1);
154
+ }
155
+ // clip coordinates to image borders
156
+ coord = clip_coordinates(coord, size);
157
+ }
158
+ return coord;
159
+ }
160
+
161
+ // Computes the pixel source index value for a grid coordinate
162
+ template <typename scalar_t>
163
+ static inline scalar_t grid_sampler_compute_source_index(
164
+ scalar_t coord,
165
+ int64_t size,
166
+ GridSamplerPadding padding_mode,
167
+ bool align_corners) {
168
+ coord = grid_sampler_unnormalize(coord, size, align_corners);
169
+ coord = compute_coordinates(coord, size, padding_mode, align_corners);
170
+ return coord;
171
+ }
172
+
173
+ // grid_sampler_compute_source_index_set_grad works similarly to
174
+ // grid_sampler_compute_source_index except that it also returns the
175
+ // `d output / d input` via pointer argument `grad_in`.
176
+ // This is useful in the backward pass of grid_sampler.
177
+ template <typename scalar_t>
178
+ static inline scalar_t grid_sampler_compute_source_index_set_grad(
179
+ scalar_t coord,
180
+ int64_t size,
181
+ GridSamplerPadding padding_mode,
182
+ bool align_corners,
183
+ scalar_t *grad_in) {
184
+ scalar_t grad_clip, grad_refl;
185
+ coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in);
186
+ if (padding_mode == GridSamplerPadding::Border) {
187
+ // clip coordinates to image borders
188
+ coord = clip_coordinates_set_grad(coord, size, &grad_clip);
189
+ *grad_in = (*grad_in) * grad_clip;
190
+ } else if (padding_mode == GridSamplerPadding::Reflection) {
191
+ // reflect coordinates by image borders
192
+ if (align_corners) {
193
+ coord = reflect_coordinates_set_grad(coord, 0, 2*(size - 1), &grad_refl);
194
+ } else {
195
+ coord = reflect_coordinates_set_grad(coord, -1, 2*size - 1, &grad_refl);
196
+ }
197
+ // clip coordinates to image borders
198
+ coord = clip_coordinates_set_grad(coord, size, &grad_clip);
199
+ *grad_in = (*grad_in) * grad_refl * grad_clip;
200
+ }
201
+ return coord;
202
+ }
203
+
204
+ static inline bool within_bounds_2d(int64_t h, int64_t w, int64_t H, int64_t W) {
205
+ return h >= 0 && h < H && w >= 0 && w < W;
206
+ }
207
+
208
+ static inline bool within_bounds_3d(int64_t d, int64_t h, int64_t w, int64_t D, int64_t H, int64_t W) {
209
+ return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W;
210
+ }
211
+
212
+ template<typename scalar_t>
213
+ static inline scalar_t get_value_bounded(
214
+ scalar_t* data,
215
+ scalar_t x,
216
+ scalar_t y,
217
+ int64_t W,
218
+ int64_t H,
219
+ int64_t sW,
220
+ int64_t sH,
221
+ GridSamplerPadding padding_mode,
222
+ bool align_corners) {
223
+
224
+ x = compute_coordinates(x, W, padding_mode, align_corners);
225
+ y = compute_coordinates(y, H, padding_mode, align_corners);
226
+
227
+ int64_t ix = static_cast<int64_t>(x);
228
+ int64_t iy = static_cast<int64_t>(y);
229
+
230
+ if (within_bounds_2d(iy, ix, H, W)) {
231
+ return data[iy * sH + ix * sW];
232
+ }
233
+ return static_cast<scalar_t>(0);
234
+ }
235
+
236
+ template<typename scalar_t>
237
+ static inline void safe_add_2d(scalar_t *data, int64_t h, int64_t w,
238
+ int64_t sH, int64_t sW, int64_t H, int64_t W,
239
+ scalar_t delta) {
240
+ if (within_bounds_2d(h, w, H, W)) {
241
+ data[h * sH + w * sW] += delta;
242
+ }
243
+ }
244
+
245
+ template<typename scalar_t>
246
+ static inline void safe_add_3d(scalar_t *data, int64_t d, int64_t h, int64_t w,
247
+ int64_t sD, int64_t sH, int64_t sW,
248
+ int64_t D, int64_t H, int64_t W,
249
+ scalar_t delta) {
250
+ if (within_bounds_3d(d, h, w, D, H, W)) {
251
+ data[d * sD + h * sH + w * sW] += delta;
252
+ }
253
+ }
254
+
255
+ template<typename scalar_t>
256
+ static inline void add_value_bounded(
257
+ scalar_t* data,
258
+ scalar_t x,
259
+ scalar_t y,
260
+ int64_t W,
261
+ int64_t H,
262
+ int64_t sW,
263
+ int64_t sH,
264
+ scalar_t delta,
265
+ GridSamplerPadding padding_mode,
266
+ bool align_corners) {
267
+
268
+ x = compute_coordinates(x, W, padding_mode, align_corners);
269
+ y = compute_coordinates(y, H, padding_mode, align_corners);
270
+
271
+ int64_t ix = static_cast<int64_t>(x);
272
+ int64_t iy = static_cast<int64_t>(y);
273
+
274
+ safe_add_2d(data, iy, ix, sH, sW, H, W, delta);
275
+ }
276
+
277
+ // Calculate the differential of the cubic convolution, i.e. `d coeff / d x`
278
+ template<typename scalar_t>
279
+ static inline void get_cubic_coefficients_grad(
280
+ scalar_t coeffs[4],
281
+ scalar_t t) {
282
+
283
+ // Must be the same as forward calculation in
284
+ // aten/src/ATen/native/UpSample.h:get_cubic_upsample_coefficients
285
+ scalar_t A = -0.75;
286
+
287
+ scalar_t x;
288
+ x = -1 - t; // 1 < x = |-1 - tx| < 2
289
+ coeffs[0] = (-3 * A * x - 10 * A ) * x - 8 * A;
290
+ x = -t; // x = |0 - tx| <= 1
291
+ coeffs[1] = (-3 * (A + 2) * x - 2 * (A + 3)) * x;
292
+ x = 1 - t; // x = |1 - tx| <= 1
293
+ coeffs[2] = (3 * (A + 2) * x - 2 * (A + 3)) * x;
294
+ x = 2 - t; // 1 < x = |2 - tx| < 2
295
+ coeffs[3] = (3 * A * x - 10 * A) * x + 8 * A;
296
+ }
297
+
298
+ } // namespace at::native
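To make the coordinate pipeline above concrete, the sketch below pushes a normalized coordinate lying outside [-1, 1] through unnormalization, reflection, and the final clip, mirroring grid_sampler_compute_source_index for the Reflection padding mode with align_corners = true (plain doubles stand in for scalar_t):

#include <algorithm>
#include <cmath>
#include <cstdio>

static double unnormalize(double coord, long size, bool align_corners) {
  return align_corners ? ((coord + 1) / 2) * (size - 1)
                       : ((coord + 1) * size - 1) / 2;
}

// Same folding logic as reflect_coordinates above.
static double reflect(double in, long twice_low, long twice_high) {
  if (twice_low == twice_high) return 0;
  const double min = twice_low / 2.0;
  const double span = (twice_high - twice_low) / 2.0;
  in = std::fabs(in - min);
  const double extra = std::fmod(in, span);
  const int flips = static_cast<int>(std::floor(in / span));
  return (flips % 2 == 0) ? extra + min : span - extra + min;
}

int main() {
  const long size = 5;
  const double coord = 1.4;  // deliberately outside [-1, 1]
  double x = unnormalize(coord, size, /*align_corners=*/true);    // 4.8
  x = reflect(x, 0, 2 * (size - 1));                              // 3.2
  x = std::min(static_cast<double>(size - 1), std::max(x, 0.0));  // clip
  std::printf("%f\n", x);  // prints 3.200000
  return 0;
}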
venv/lib/python3.10/site-packages/torch/include/ATen/native/Histogram.h ADDED
@@ -0,0 +1,16 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+
6
+ namespace at::native {
7
+
8
+ using histogramdd_fn = void(*)(const Tensor&, const c10::optional<Tensor>&, bool, Tensor&, const TensorList&);
9
+ using histogramdd_linear_fn = void(*)(const Tensor&, const c10::optional<Tensor>&, bool, Tensor&, const TensorList&, bool);
10
+ using histogram_select_outer_bin_edges_fn = void(*)(const Tensor& input, const int64_t N, std::vector<double> &leftmost_edges, std::vector<double> &rightmost_edges);
11
+
12
+ DECLARE_DISPATCH(histogramdd_fn, histogramdd_stub);
13
+ DECLARE_DISPATCH(histogramdd_linear_fn, histogramdd_linear_stub);
14
+ DECLARE_DISPATCH(histogram_select_outer_bin_edges_fn, histogram_select_outer_bin_edges_stub);
15
+
16
+ } // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/IndexKernel.h ADDED
@@ -0,0 +1,41 @@
1
+ #pragma once
2
+ #include <ATen/native/DispatchStub.h>
3
+ #include <c10/util/ArrayRef.h>
4
+
5
+ namespace at {
6
+ class Tensor;
7
+ class TensorBase;
8
+ struct TensorIterator;
9
+ struct TensorIteratorBase;
10
+ }
11
+
12
+ namespace c10 {
13
+ class Scalar;
14
+ }
15
+
16
+ namespace at::native {
17
+
18
+ using index_fn = void(*)(TensorIteratorBase &, IntArrayRef indexed_sizes, IntArrayRef indexed_strides);
19
+ using index_fill_fn = void(*)(TensorIterator & iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, const Scalar& source);
20
+ using index_copy_fn = void(*)(TensorIterator & iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride);
21
+ using index_put_fn = void(*)(TensorIterator &, IntArrayRef indexed_sizes, IntArrayRef indexed_strides, bool accumulate);
22
+ using put_fn = void(*)(TensorIterator & iter, const TensorBase& self, const bool accumulate);
23
+ using take_fn = void(*)(TensorIterator & iter, const TensorBase& input);
24
+ using flip_fn = void(*)(TensorIterator &, const bool);
25
+ using masked_fill_fn = void(*)(TensorIterator &, const Scalar& scalar);
26
+ using masked_select_fn = void(*)(TensorIterator &, int64_t orig_stride);
27
+ using masked_scatter_fn = void(*)(TensorIterator &, const TensorBase &);
28
+
29
+ DECLARE_DISPATCH(index_fn, index_stub);
30
+ DECLARE_DISPATCH(index_fill_fn, index_fill_stub);
31
+ DECLARE_DISPATCH(index_copy_fn, index_copy_stub);
32
+ DECLARE_DISPATCH(index_put_fn, index_put_stub);
33
+ DECLARE_DISPATCH(put_fn, put_stub);
34
+ DECLARE_DISPATCH(take_fn, take_stub);
35
+ DECLARE_DISPATCH(flip_fn, flip_stub);
36
+ DECLARE_DISPATCH(masked_fill_fn, masked_fill_stub);
37
+ DECLARE_DISPATCH(masked_select_fn, masked_select_serial_stub);
38
+ DECLARE_DISPATCH(masked_select_fn, masked_select_stub);
39
+ DECLARE_DISPATCH(masked_scatter_fn, masked_scatter_stub);
40
+
41
+ } // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/IndexingUtils.h ADDED
@@ -0,0 +1,160 @@
1
+ #pragma once
2
+ #include <ATen/ExpandUtils.h>
3
+ #include <ATen/native/CanUse32BitIndexMath.h>
4
+ #include <ATen/native/TensorIterator.h>
5
+ #include <ATen/core/IListRef.h>
6
+ #include <c10/util/irange.h>
7
+
8
+ namespace at::native {
9
+
10
+ [[noreturn]]
11
+ static void invalid_mask(const Tensor & self, int64_t idx, const Tensor & mask, int64_t maskIdx) {
12
+ TORCH_CHECK_INDEX(false, "The shape of the mask ", mask.sizes(), " at index ", maskIdx,
13
+ " does not match the shape of the indexed tensor ", self.sizes(), " at index ", idx);
14
+ }
15
+
16
+
17
+ static C10_UNUSED std::vector<Tensor> expandTensors(const Tensor & self, IOptTensorListRef indices) {
18
+ // If indices come in as ByteTensor or BoolTensor (masks), expand them into the equivalent indexing by LongTensors
19
+ std::vector<Tensor> result;
20
+ for (const auto& index_opt : indices) {
21
+ if (!index_opt.has_value()) {
22
+ result.emplace_back();
23
+ } else {
24
+ const auto& index = *index_opt;
25
+ if (index.scalar_type() == kByte || index.scalar_type() == kBool) {
26
+ if (index.scalar_type() == kByte) {
27
+ TORCH_WARN("indexing with dtype torch.uint8 is now deprecated," \
28
+ " please use dtype torch.bool instead.");
29
+ }
30
+ // The sizes of the ByteTensor mask or bool tensor must match the sizes of the
31
+ // corresponding dimensions in self
32
+ for (const auto j : c10::irange(index.dim())) {
33
+ int64_t srcIdx = static_cast<int64_t>(result.size() + j);
34
+ if (index.size(j) != self.size(srcIdx)) {
35
+ invalid_mask(self, srcIdx, index, j);
36
+ }
37
+ }
38
+ // Replace with nonzeros
39
+ auto nonzero = index.nonzero();
40
+ for (const auto j : c10::irange(index.dim())) {
41
+ result.emplace_back(nonzero.select(1, j));
42
+ }
43
+ } else {
44
+ result.emplace_back(index);
45
+ }
46
+ }
47
+ }
48
+ return result;
49
+ }
50
+
51
+ static C10_UNUSED void checkIndexTensorTypes(IOptTensorListRef indices, bool allow_int=false) {
52
+ for (const auto& tensor : indices) {
53
+ if (tensor.has_value() && tensor->defined()) {
54
+ auto scalarType = tensor->scalar_type();
55
+ if (allow_int) {
56
+ if (scalarType != kLong && scalarType != kByte && scalarType != kBool && scalarType != kInt) {
57
+ TORCH_CHECK_INDEX(false, "tensors used as indices must be long, int, byte or bool tensors");
58
+ }
59
+ } else {
60
+ if (scalarType != kLong && scalarType != kByte && scalarType != kBool) {
61
+ TORCH_CHECK_INDEX(false, "tensors used as indices must be long, byte or bool tensors");
62
+ }
63
+ }
64
+ }
65
+ }
66
+ }
67
+
68
+ inline torch::List<c10::optional<Tensor>> toListOfOptionalTensors(ArrayRef<Tensor> list) {
69
+ torch::List<c10::optional<Tensor>> result;
70
+ result.reserve(list.size());
71
+ for (const Tensor& a : list) {
72
+ result.push_back(a);
73
+ }
74
+ return result;
75
+ }
76
+
77
+ inline torch::List<c10::optional<Tensor>> toListOfOptionalTensors(ArrayRef<IValue> list) {
78
+ torch::List<c10::optional<Tensor>> result;
79
+ result.reserve(list.size());
80
+ for (const IValue& a : list) {
81
+ result.push_back(a.isTensor() ? c10::optional<Tensor>(a.toTensor()) : c10::optional<Tensor>());
82
+ }
83
+ return result;
84
+ }
85
+
86
+ static C10_UNUSED bool hasContiguousSubspace(TensorList tl) {
87
+ // true if all the non-null tensors are adjacent
88
+ auto isDefined = [](const Tensor & tensor){ return tensor.defined(); };
89
+ auto isNull = [](const Tensor & tensor){ return !tensor.defined(); };
90
+ auto start = std::find_if(tl.begin(), tl.end(), isDefined);
91
+ auto stop = std::find_if(tl.rbegin(), tl.rend(), isDefined);
92
+ auto it = std::find_if(start, stop.base(), isNull);
93
+ return it == stop.base();
94
+ }
95
+
96
+
97
+ // Transposes the tensor and indices together so that all the non-null indices
98
+ // index the first k dimensions of the tensor. Returns the transposed tensor
99
+ // and the reordered indices. For example:
100
+ // transposeToFront(tensor, {nullptr, a, nullptr, b})
101
+ // returns
102
+ // tensor.permute([1, 3, 0, 2]), {a, b, nullptr, nullptr}
103
+ static C10_UNUSED std::tuple<Tensor, std::vector<Tensor>>
104
+ transposeToFront(const Tensor& self, TensorList indices) {
105
+ std::vector<int64_t> dims;
106
+ std::vector<Tensor> transposedIndices;
107
+ dims.reserve(self.dim());
108
+ for (const auto i : c10::irange(self.dim())) {
109
+ if (indices[i].defined()) {
110
+ dims.push_back(i);
111
+ transposedIndices.emplace_back(indices[i]);
112
+ }
113
+ }
114
+ for (const auto i : c10::irange(self.dim())) {
115
+ if (!indices[i].defined()) {
116
+ dims.push_back(i);
117
+ transposedIndices.emplace_back();
118
+ }
119
+ }
120
+ return std::make_tuple(self.permute(dims), std::move(transposedIndices));
121
+ }
122
+
123
+ inline std::tuple<Tensor, std::vector<Tensor>, std::vector<int64_t>>
124
+ transposeToFrontAndInvPerm(const Tensor& self, TensorList indices) {
125
+ std::vector<int64_t> dims;
126
+ std::vector<int64_t> invPerm;
127
+ std::vector<Tensor> transposedIndices;
128
+ dims.reserve(self.dim());
129
+ invPerm.resize(self.dim());
130
+ for (const auto i : c10::irange(self.dim())) {
131
+ if (indices[i].defined()) {
132
+ dims.push_back(i);
133
+ transposedIndices.emplace_back(indices[i]);
134
+ }
135
+ }
136
+ for (const auto i : c10::irange(self.dim())) {
137
+ if (!indices[i].defined()) {
138
+ dims.push_back(i);
139
+ transposedIndices.emplace_back();
140
+ }
141
+ }
142
+ for (const auto i : c10::irange(self.dim())) {
143
+ invPerm[dims[i]] = i;
144
+ }
145
+ return std::make_tuple(self.permute(dims), std::move(transposedIndices), std::move(invPerm));
146
+ }
147
+
148
+ struct AdvancedIndex {
149
+ AdvancedIndex(const Tensor& src, TensorList indices);
150
+
151
+ Tensor src;
152
+ std::vector<Tensor> indices;
153
+ DimVector indexed_sizes;
154
+ DimVector indexed_strides;
155
+ int64_t dims_before;
156
+ int64_t dims_after;
157
+ };
158
+
159
+
160
+ } // namespace at::native
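hasContiguousSubspace reduces to: locate the first and last defined entries, then confirm no undefined entry sits between them. A sketch of the same find_if pattern over std::optional<int>, which stands in for Tensor here:

#include <algorithm>
#include <cstdio>
#include <optional>
#include <vector>

static bool has_contiguous_subspace(const std::vector<std::optional<int>>& tl) {
  auto defined = [](const std::optional<int>& t) { return t.has_value(); };
  auto null = [](const std::optional<int>& t) { return !t.has_value(); };
  auto start = std::find_if(tl.begin(), tl.end(), defined);
  auto stop = std::find_if(tl.rbegin(), tl.rend(), defined);
  // No null entry may sit between the first and last defined ones.
  return std::find_if(start, stop.base(), null) == stop.base();
}

int main() {
  std::vector<std::optional<int>> adjacent = {std::nullopt, 1, 2, std::nullopt};
  std::vector<std::optional<int>> gapped = {1, std::nullopt, 2};
  std::printf("%d %d\n", has_contiguous_subspace(adjacent),
              has_contiguous_subspace(gapped));  // prints "1 0"
  return 0;
}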
venv/lib/python3.10/site-packages/torch/include/ATen/native/Lerp.h ADDED
@@ -0,0 +1,46 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <ATen/OpMathType.h>
5
+ #include <ATen/TensorIterator.h>
6
+ #include <c10/core/Scalar.h>
7
+
8
+ namespace at::native {
9
+
10
+ template <typename scalar_t>
11
+ C10_HOST_DEVICE C10_ALWAYS_INLINE bool is_lerp_weight_small(scalar_t weight) {
12
+ return std::abs(weight) < scalar_t(0.5);
13
+ }
14
+ template <typename scalar_t>
15
+ C10_HOST_DEVICE C10_ALWAYS_INLINE bool is_lerp_weight_small(c10::complex<scalar_t> weight) {
16
+ // Avoid the sqrt in abs(weight)
17
+ return (weight.real() * weight.real() + weight.imag() * weight.imag()) < scalar_t(0.25);
18
+ }
19
+
20
+ template <typename scalar_t, typename weight_t>
21
+ C10_HOST_DEVICE C10_ALWAYS_INLINE scalar_t lerp(scalar_t self_, scalar_t end_, weight_t weight_) {
22
+ using opmath_t = at::opmath_type<scalar_t>;
23
+ using opmath_weight_t = at::opmath_type<weight_t>;
24
+
25
+ opmath_t self = self_;
26
+ opmath_t end = end_;
27
+ opmath_weight_t weight = weight_;
28
+
29
+ // Conditional for better numerical accuracy. This has been discussed in
30
+ // https://github.com/pytorch/pytorch/pull/18871
31
+ return is_lerp_weight_small(weight)
32
+ ? self + weight * (end - self)
33
+ : end - (end - self) * (opmath_t(1) - weight);
34
+ }
35
+
36
+ using lerp_fn_scalar = void (*)(
37
+ at::TensorIteratorBase& iter,
38
+ const Scalar& weight);
39
+
40
+ using lerp_fn_tensor = void (*)(
41
+ at::TensorIteratorBase& iter);
42
+
43
+ DECLARE_DISPATCH(lerp_fn_scalar, lerp_kernel_scalar_weight);
44
+ DECLARE_DISPATCH(lerp_fn_tensor, lerp_kernel_tensor_weight);
45
+
46
+ } // namespace at::native
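The |weight| < 0.5 switch above matters because the two lerp forms, though algebraically equal, round differently: near weight == 1 the `self + weight * (end - self)` form can lose `end` entirely to cancellation, while `end - (end - self) * (1 - weight)` returns `end` exactly. A minimal float sketch of the failure case:

#include <cstdio>

// The form the kernel uses when |weight| is small.
static float lerp_small_w(float a, float b, float w) { return a + w * (b - a); }
// The form the kernel uses when |weight| is large.
static float lerp_large_w(float a, float b, float w) { return b - (b - a) * (1.0f - w); }

int main() {
  const float a = 1e8f, b = 1.0f;
  // At w == 1 the result should be exactly b == 1. Under round-to-nearest,
  // (b - a) rounds to -1e8f, so the small-w form returns 0 instead.
  std::printf("small-w form: %f\n", lerp_small_w(a, b, 1.0f));  // 0.000000
  std::printf("large-w form: %f\n", lerp_large_w(a, b, 1.0f));  // 1.000000
  return 0;
}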
venv/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebra.h ADDED
@@ -0,0 +1,18 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <c10/util/Optional.h>
5
+
6
+ namespace c10 {
7
+ class Scalar;
8
+ }
9
+
10
+ namespace at {
11
+ struct TensorIterator;
12
+ }
13
+
14
+ namespace at::native {
15
+
16
+ using addr_fn = void (*)(TensorIterator &, const Scalar& beta, const Scalar& alpha);
17
+ DECLARE_DISPATCH(addr_fn, addr_stub);
18
+ } // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/LinearAlgebraUtils.h ADDED
@@ -0,0 +1,623 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <c10/util/irange.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/strides.h>
7
+ #include <ATen/core/Tensor.h>
8
+ #include <ATen/ExpandUtils.h>
9
+ #include <ATen/TensorUtils.h>
10
+ #include <ATen/native/TensorIterator.h>
11
+ #include <ATen/native/TransposeType.h>
12
+ #include <limits>
13
+ #include <type_traits>
14
+ #include <sstream>
15
+ #include <cstring>
16
+ #include <cctype>
17
+
18
+ #ifndef AT_PER_OPERATOR_HEADERS
19
+ #include <ATen/Functions.h>
20
+ #else
21
+ #include <ATen/ops/arange.h>
22
+ #include <ATen/ops/empty.h>
23
+ #include <ATen/ops/empty_like.h>
24
+ #include <ATen/ops/empty_strided.h>
25
+ #include <ATen/ops/zeros.h>
26
+ #endif
27
+
28
+ namespace at::native {
29
+
30
+ static inline c10::MaybeOwned<Tensor> expect_resolved_conj(const Tensor& tensor) {
31
+ if (tensor.is_conj()) {
32
+ return c10::MaybeOwned<Tensor>::owned(tensor.resolve_conj());
33
+ } else {
34
+ return c10::MaybeOwned<Tensor>::borrowed(tensor);
35
+ }
36
+ }
37
+
38
+ static inline DimVector batched_matrix_contiguous_strides(
39
+ const IntArrayRef sizes,
40
+ const bool f_contig = false) {
41
+ // f_contig chooses between the strides of a batch of Fortran (F-contiguous)
42
+ // and C-contiguous matrices
43
+ auto strides = c10::contiguous_strides(sizes);
44
+ auto dim = strides.size();
45
+
46
+ if (f_contig && dim >= 2) {
47
+ // Fix the strides of the last two dimensions, so that we return
48
+ // C-contiguous batches of F-contiguous matrices.
49
+ strides[dim - 1] = std::max(sizes[dim - 2], static_cast<int64_t>(1));
50
+ strides[dim - 2] = 1;
51
+ }
52
+ return strides;
53
+ }
54
+
55
+ /*
56
+ * Clones a Tensor so that the following conditions hold:
57
+ * If we think of a Tensor of having size (B, M, N), where B is any number
58
+ * of batch dimensions, then:
59
+ * - Each (M, N) matrix is in column major form
60
+ * - Let Tensor P have size (B, M, N) and Q have size (B, M', N').
61
+ * Then when laid out in memory, the M by N matrix starting at
62
+ * P.data_ptr()[B * M * N] is of the same corresponding batch as the M' by N'
63
+ * matrix starting at Q.data_ptr()[B * M' * N'].
64
+ */
65
+ static inline Tensor cloneBatchedColumnMajor(const Tensor& src) {
66
+ // If src is already in batched column major format, then
67
+ // this will be efficient (no reordering of the data will occur)
68
+ // because the first transpose will make the tensor contiguous,
69
+ // and cloning a contiguous tensor is fast.
70
+ auto result = src.mT().clone(at::MemoryFormat::Contiguous);
71
+ result.transpose_(-2, -1);
72
+ return result;
73
+ }
74
+
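For intuition, here is a minimal sketch (assuming the libtorch C++ frontend is available; the tensor `x` is illustrative) of how the transpose-clone-transpose sequence above yields F-contiguous matrices without changing the logical values:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto x = torch::rand({2, 3, 4});  // a batch of two 3x4 matrices
      // Same trick as cloneBatchedColumnMajor: transpose, clone contiguous, transpose back.
      auto col = x.mT().clone(torch::MemoryFormat::Contiguous).transpose_(-2, -1);
      std::cout << col.strides() << '\n';  // [12, 1, 3]: stride 1 down each column
      std::cout << col.equal(x) << '\n';   // 1: logical values are unchanged
    }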
75
+ /*
76
+ * contig chooses between C-contig (true) and F-contig (false)
77
+ */
78
+ static inline c10::MaybeOwned<Tensor> borrow_else_clone(const bool cond, const Tensor& borrow, const Tensor& clone, const bool contig) {
79
+ return cond ? c10::MaybeOwned<Tensor>::borrowed(borrow)
80
+ : c10::MaybeOwned<Tensor>::owned(contig ? clone.clone(MemoryFormat::Contiguous)
81
+ : cloneBatchedColumnMajor(clone));
82
+ }
83
+
84
+ /*
85
+ * This method is designed to be a faster alternative to
86
+ * `cloneBatchedColumnMajor` with some additional features,
87
+ * namely:
88
+ * 1. It uses `copy` instead of `clone` which could be much faster.
89
+ * 2. The `nrows` parameter can be used to create inputs with more rows
90
+ * than the original input, which is required for some LAPACK/MAGMA methods.
91
+ * 3. `desired_batch_size` is used to create copies with the batch size
92
+ * which is either the original batch size of the input, or its larger
93
+ * broadcasted shape.
94
+ */
95
+ static inline Tensor copyBatchedColumnMajor(const Tensor& src, int64_t nrows = -1,
96
+ at::OptionalIntArrayRef desired_batch_sizes = c10::nullopt) {
97
+ nrows = (nrows == -1) ? src.size(-2) : nrows;
98
+ auto copy_sizes = desired_batch_sizes.has_value()
99
+ ? desired_batch_sizes.value().vec()
100
+ : IntArrayRef(src.sizes().data(), src.dim() - 2).vec();
101
+ copy_sizes.insert(copy_sizes.end(), {nrows, src.size(-1)});
102
+ const auto copy_strides = batched_matrix_contiguous_strides(copy_sizes, /*f-contig*/true);
103
+ auto copy = at::empty_strided(copy_sizes, copy_strides, src.options());
104
+ copy.narrow(-2, 0, src.size(-2)).copy_(src);
105
+ return copy;
106
+ }
107
+
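As a concrete sketch of the layout this produces (libtorch assumed; the sizes are illustrative), the `nrows`-padded, batched F-contiguous copy can be built by hand like so:

    #include <torch/torch.h>
    #include <cassert>
    #include <vector>

    int main() {
      auto src = torch::rand({2, 3, 3});        // two 3x3 matrices
      // nrows = 5: each output matrix gets 2 extra scratch rows, F-contiguous layout.
      std::vector<int64_t> sizes{2, 5, 3};
      std::vector<int64_t> strides{15, 1, 5};   // batch, row, column strides
      auto copy = torch::empty_strided(sizes, strides, src.options());
      copy.narrow(-2, 0, 3).copy_(src);         // fill the original 3 rows
      assert(copy.narrow(-2, 0, 3).equal(src)); // extra rows are scratch space
    }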
108
+ /*
109
+ * Given batches of matrices with arbitrary batch dim,
110
+ * computes the number of batches.
111
+ */
112
+ static inline int64_t batchCount(const Tensor& batched_matrices) {
113
+ int64_t result = 1;
114
+ for (int64_t i = 0; i < batched_matrices.ndimension() - 2; i++) {
115
+ result *= batched_matrices.size(i);
116
+ }
117
+ return result;
118
+ }
119
+
120
+ // Computes the number of elements of a matrix in a batched matrix tensor
121
+ static inline int64_t matrixStride(const Tensor& batched_matrices) {
122
+ return batched_matrices.size(-1) * batched_matrices.size(-2);
123
+ }
124
+
125
+ // Validates input shapes for operations on batches of square matrices (inverse, cholesky, symeig, eig)
126
+ static inline void checkIsMatrix(const Tensor& A, const char* const f_name, const char* const arg_name = "A") {
127
+ TORCH_CHECK(A.dim() >= 2, f_name, ": The input tensor ", arg_name, " must have at least 2 dimensions.");
128
+ }
129
+ static inline void squareCheckInputs(const Tensor& self, const char* const f_name, const char* const arg_name = "A") {
130
+ checkIsMatrix(self, f_name, arg_name);
131
+ TORCH_CHECK(self.sym_size(-1) == self.sym_size(-2),
132
+ f_name,
133
+ ": ", arg_name, " must be batches of square matrices, "
134
+ "but they are ", self.sym_size(-2), " by ", self.sym_size(-1), " matrices");
135
+ }
136
+
137
+ static inline void checkInputsSolver(const Tensor& A,
138
+ const Tensor& B,
139
+ const bool left,
140
+ const char* const f_name) {
141
+ squareCheckInputs(A, f_name, "A");
142
+ checkIsMatrix(B, f_name, "B");
143
+ TORCH_CHECK(left ? A.size(-2) == B.size(-2) : A.size(-1) == B.size(-1),
144
+ f_name, ": Incompatible shapes of A and B for the equation ",
145
+ left ? "AX = B" : "XA = B",
146
+ " (", A.size(-2), "x", A.size(-1), " and ", B.size(-2), "x", B.size(-1), ")");
147
+ }
148
+
149
+ static inline bool is_row_or_column_contiguous(const Tensor& t) {
150
+ // This could be made more general, similar to how it's checked in matmul, which would allow us to
151
+ // elide the copy with strides such as (6, 12, 1, 3) or (3, 1, 9), but this is quite tricky.
152
+ // We choose to be conservative for simplicity
153
+ return t.is_contiguous() || t.transpose(-2, -1).is_contiguous();
154
+ }
155
+
156
+ static inline TransposeType to_transpose_type(const bool contig, const bool conj) {
157
+ if (conj) {
158
+ if (contig) { TORCH_INTERNAL_ASSERT(false, "Invalid transpose type"); }
159
+ else { return TransposeType::ConjTranspose; }
160
+ } else {
161
+ if (contig) { return TransposeType::NoTranspose; }
162
+ else { return TransposeType::Transpose; }
163
+ }
164
+ }
165
+
166
+
167
+ // This function is designed to be used with linear algebra methods that minimize
168
+ // L(ax - b), where L is generally the identity map (`solve`, for example)
169
+ // or the L2 norm (`lstsq`).
170
+ // It is expected that `a` and `b` are contiguous tensors of column-major matrices
171
+ // (so that a.view({-1, a.size(-2), a.size(-1)}) succeeds, same for `b`),
172
+ // with the following additional properties:
173
+ //
174
+ // 1. a.dim() == b.dim()
175
+ // 2. a.shape[:-2] broadcasts over b.shape[:-2]
176
+ // 3. a.size(i) <= b.size(i) for i=0,..., a.dim() - 3 (only for batch dimensions)
177
+ //
178
+ // MAGMA/LAPACK modify tensor `a` in-place, and the main goal of this method
179
+ // is to be memory efficient, which means that if there exists an index i such that
180
+ // a.shape[i] < b.shape[i], 0 <= i <= a.dim() - 3,
181
+ // then instead of materializing copies of `a` in the broadcasted shape, we keep
182
+ // a buffer copy of `a` along with flags that check whether specific batch dimension
183
+ // indices for `a` were already accessed. If they were, we copy the data from the buffer
184
+ // into `a`. The number of copies does not exceed
185
+ // prod(max(a.shape[:-2], b.shape[:-2]) - a.shape[:-2] + 1)
186
+ // and this value is attained by tensors with non-empty batch dimensions.
187
+ //
188
+ // func_t `f` is a callable that is being supplied with
189
+ // scalar_t* a_working_ptr, scalar_t* b_working_ptr, int64_t a_linear_batch_idx.
190
+ // a_working_ptr and b_working_ptr can directly be passed to LAPACK/MAGMA routines,
191
+ // and a_linear_batch_idx is an index in the 3d representation which corresponds to
192
+ // the memory a_working_ptr points to, in other words:
193
+ // a_working_ptr == a.view({-1, a.size(-2), a.size(-1)}).select(0, a_linear_batch_idx).data_ptr<scalar_t>();
194
+ // a_linear_batch_idx is useful to store metadata related to `a`, such as, for example,
195
+ // its rank or singular values (see linalg_lstsq).
196
+ template<typename scalar_t, typename func_t>
197
+ void batch_iterator_with_broadcasting(const Tensor& a, const Tensor& b, const func_t& f) {
198
+ IntArrayRef a_batch_sizes(a.sizes().data(), a.dim() - 2);
199
+ IntArrayRef b_batch_sizes(b.sizes().data(), b.dim() - 2);
200
+
201
+ auto a_linear_batch_idx = at::arange(batchCount(a)).view(a_batch_sizes);
202
+ auto b_linear_batch_idx = at::arange(batchCount(b)).view(b_batch_sizes);
203
+
204
+ TensorIterator iter = TensorIteratorConfig()
205
+ .set_check_mem_overlap(false)
206
+ .check_all_same_dtype(false)
207
+ .resize_outputs(false)
208
+ .add_output(b_linear_batch_idx)
209
+ .add_input(a_linear_batch_idx)
210
+ .build();
211
+
212
+ auto m = a.size(-2);
213
+ auto n = a.size(-1);
214
+ auto a_3d = a.view({batchCount(a), m, n});
215
+ auto b_3d = b.view({batchCount(b), b.size(-2), b.size(-1)});
216
+
217
+ auto a_broadcasts_over_b = (a_batch_sizes != b_batch_sizes);
218
+ Tensor a_buffer, a_was_accessed, a_buffer_3d;
219
+ std::function<void(int64_t)> check_if_copy_needed_for_a
220
+ = [](int64_t /*a_curr_linear_batch_idx*/){};
221
+ if (a_broadcasts_over_b) {
222
+ a_buffer = at::empty_strided(a.sizes(), a.strides(), a.options())
223
+ .copy_(a);
224
+ a_was_accessed = at::zeros(batchCount(a), at::kBool);
225
+ a_buffer_3d = a_buffer.view({batchCount(a), m, n});
226
+ check_if_copy_needed_for_a = [&](int64_t a_curr_linear_batch_idx) {
227
+ auto* a_was_accessed_flag = a_was_accessed
228
+ .select(0, a_curr_linear_batch_idx)
229
+ .data_ptr<bool>();
230
+ if (!(*a_was_accessed_flag)) {
231
+ *a_was_accessed_flag = true;
232
+ }
233
+ else {
234
+ a_3d.select(0, a_curr_linear_batch_idx)
235
+ .copy_(a_buffer_3d.select(0, a_curr_linear_batch_idx));
236
+ }
237
+ };
238
+ }
239
+
240
+ auto loop = [&](char** data, const int64_t* strides, int64_t nelems) {
241
+ auto* b_batch_idx_ptr = data[0];
242
+ auto* a_batch_idx_ptr = data[1];
243
+
244
+ for (const auto elem C10_UNUSED : c10::irange(nelems)) {
245
+ auto b_curr_linear_batch_idx = *reinterpret_cast<int64_t*>(b_batch_idx_ptr);
246
+ auto a_curr_linear_batch_idx = *reinterpret_cast<int64_t*>(a_batch_idx_ptr);
247
+
248
+ check_if_copy_needed_for_a(a_curr_linear_batch_idx);
249
+
250
+ auto* a_working_ptr = a_3d.select(0, a_curr_linear_batch_idx)
251
+ .data_ptr<scalar_t>();
252
+ auto* b_working_ptr = b_3d.select(0, b_curr_linear_batch_idx)
253
+ .data_ptr<scalar_t>();
254
+ f(a_working_ptr, b_working_ptr, a_curr_linear_batch_idx);
255
+
256
+ b_batch_idx_ptr += strides[0];
257
+ a_batch_idx_ptr += strides[1];
258
+ }
259
+ };
260
+ iter.serial_for_each(loop, {0, batchCount(b)});
261
+ }
262
+
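The pairing of linear batch indices that the TensorIterator above iterates over can be previewed with plain tensor ops (libtorch assumed; shapes are illustrative): when `a`'s batch shape broadcasts over `b`'s, each `a` index is visited several times, which is exactly why the buffer-and-flag copy scheme is needed.

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto a_idx = torch::arange(2).view({1, 2});  // `a` batch shape [1, 2]
      auto b_idx = torch::arange(6).view({3, 2});  // `b` batch shape [3, 2]
      // Pair each b batch entry with the a entry it broadcasts against.
      auto pairs = torch::stack({b_idx, a_idx.expand_as(b_idx)}, -1).reshape({-1, 2});
      std::cout << pairs << '\n';  // a indices 0 and 1 are each visited three times
    }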
263
+ // Returns the epsilon value for floating types except half
264
+ static inline double _get_epsilon(const ScalarType& sc_type) {
265
+ switch (sc_type) {
266
+ case at::ScalarType::Float:
267
+ return static_cast<double>(std::numeric_limits<float>::epsilon());
268
+ case at::ScalarType::Double:
269
+ return std::numeric_limits<double>::epsilon();
270
+ default:
271
+ AT_ERROR("This function doesn't handle types other than float and double");
272
+ }
273
+ }
274
+
275
+ // Validates input shapes and devices
276
+ // for linear solve methods (solve, cholesky_solve, lu_solve, triangular_solve)
277
+ static inline void linearSolveCheckInputs(const Tensor& self, const Tensor& A, const char* name) {
278
+ TORCH_CHECK(self.device() == A.device(),
279
+ "Expected b and A to be on the same device, but found b on ",
280
+ self.device(), " and A on ", A.device(), " instead.");
281
+
282
+ TORCH_CHECK(self.scalar_type() == A.scalar_type(),
283
+ "Expected b and A to have the same dtype, but found b of type ",
284
+ self.scalar_type(), " and A of type ", A.scalar_type(), " instead.");
285
+
286
+ TORCH_CHECK(A.size(-1) == A.size(-2),
287
+ "A must be batches of square matrices, "
288
+ "but they are ", A.size(-2), " by ", A.size(-1), " matrices");
289
+
290
+ TORCH_CHECK(A.size(-1) == self.size(-2),
291
+ "Incompatible matrix sizes for ", name, ": each A "
292
+ "matrix is ", A.size(-1), " by ", A.size(-1),
293
+ " but each b matrix is ", self.size(-2), " by ", self.size(-1));
294
+ }
295
+
296
+ static inline void checkFloatingOrComplex(const Tensor& t, const char* const f_name, const bool allow_low_precision_dtypes=true) {
297
+ auto dtype = t.scalar_type();
298
+ TORCH_CHECK((at::isFloatingType(dtype) || at::isComplexType(dtype)),
299
+ f_name, ": Expected a floating point or complex tensor as input. Got ", dtype);
300
+ if (!allow_low_precision_dtypes) {
301
+ TORCH_CHECK(dtype == kFloat || dtype == kDouble || dtype == kComplexFloat || dtype == kComplexDouble,
302
+ f_name, ": Low precision dtypes not supported. Got ", dtype);
303
+ }
304
+ }
305
+
306
+
307
+ // Checks if all the Tensors in a TensorList are of the same dimensions
308
+ static inline void checkAllSameDim(TensorList tensors, int64_t dim) {
309
+ for (auto &t : tensors) {
310
+ TORCH_CHECK(t.dim() == dim, "Tensor dimension is ", t.dim(), ", expected ", dim, " instead.");
311
+ }
312
+ }
313
+
314
+ static inline std::tuple<std::vector<int64_t>, std::vector<int64_t>> _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2) {
315
+ // broadcast the batch dimensions of arg1 and arg2.
316
+ IntArrayRef arg1_batch_sizes(arg1.sizes().data(), arg1.ndimension() - 2);
317
+ IntArrayRef arg2_batch_sizes(arg2.sizes().data(), arg2.ndimension() - 2);
318
+ std::vector<int64_t> expand_batch_portion = infer_size(arg1_batch_sizes, arg2_batch_sizes);
319
+
320
+ std::vector<int64_t> arg1_expand_size({expand_batch_portion});
321
+ arg1_expand_size.insert(arg1_expand_size.end(), { arg1.size(-2), arg1.size(-1) });
322
+
323
+ std::vector<int64_t> arg2_expand_size({expand_batch_portion});
324
+ arg2_expand_size.insert(arg2_expand_size.end(), { arg2.size(-2), arg2.size(-1) });
325
+ return std::make_tuple(std::move(arg1_expand_size), std::move(arg2_expand_size));
326
+ }
327
+
328
+ static inline std::tuple<Tensor,Tensor> _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2, const char* name) {
329
+ // If no name is given, we skip the input error checks
330
+ if (name != nullptr) {
331
+ linearSolveCheckInputs(arg1, arg2, name);
332
+ }
333
+
334
+ auto [arg1_expand_size, arg2_expand_size] = at::native::_linalg_broadcast_batch_dims(arg1, arg2);
335
+
336
+ auto arg1_broadcasted = arg1_expand_size == arg1.sizes() ? arg1 : arg1.expand(arg1_expand_size);
337
+ auto arg2_broadcasted = arg2_expand_size == arg2.sizes() ? arg2 : arg2.expand(arg2_expand_size);
338
+ return std::make_tuple(arg1_broadcasted, arg2_broadcasted);
339
+ }
340
+
341
+ static inline std::vector<int64_t> broadcast_batch_size(const Tensor& t1, const Tensor& t2, int64_t n_batch_dims) {
342
+ IntArrayRef t1_batch_sizes(t1.sizes().data(), n_batch_dims);
343
+ IntArrayRef t2_batch_sizes(t2.sizes().data(), n_batch_dims);
344
+ auto broadcasted_batch_sizes = infer_size(t1_batch_sizes, t2_batch_sizes);
345
+ return broadcasted_batch_sizes;
346
+ }
347
+
348
+ // Return a permutation with the given axes moved to the end.
349
+ static inline Tensor _move_to_end(const Tensor& self, IntArrayRef axes) {
350
+ const std::vector<int64_t> a = axes.vec();
351
+ const int64_t ndim = self.ndimension();
352
+ std::vector<int64_t> perm;
353
+
354
+ for (const auto i : c10::irange(ndim)) {
355
+ auto it = std::find(a.begin(), a.end(), i);
356
+ if (it == a.end()) {
357
+ perm.push_back(i);
358
+ }
359
+ }
360
+ for (auto i : a) {
361
+ perm.push_back(i);
362
+ }
363
+
364
+ TORCH_CHECK((int64_t)perm.size() == ndim,
365
+ "duplicate or invalid axis in 'dim' argument for tensor with ndim==", ndim);
366
+
367
+ return self.permute(perm);
368
+ }
369
+
370
+ // parse the "mode" param in linalg_qr: return a tuple of bools (compute_q, reduced)
371
+ static inline std::tuple<bool, bool> _parse_qr_mode(c10::string_view mode) {
372
+ bool compute_q;
373
+ bool reduced;
374
+ if (mode == "reduced") {
375
+ compute_q = true;
376
+ reduced = true;
377
+ } else if (mode == "complete") {
378
+ compute_q = true;
379
+ reduced = false;
380
+ } else if (mode == "r") {
381
+ compute_q = false;
382
+ reduced = true; // this is actually irrelevant in this mode
383
+ } else {
384
+ TORCH_CHECK(false, "qr received unrecognized mode '", mode,
385
+ "' but expected one of 'reduced' (default), 'r', or 'complete'");
386
+ }
387
+ return std::make_tuple(compute_q, reduced);
388
+ }
389
+
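A plain-std restatement of the mode -> (compute_q, reduced) mapping above (std::string_view in place of c10::string_view, assert in place of TORCH_CHECK), for quick reference:

    #include <cassert>
    #include <string_view>
    #include <tuple>

    static std::tuple<bool, bool> parse_qr_mode(std::string_view mode) {
      if (mode == "reduced")  return {true, true};
      if (mode == "complete") return {true, false};
      if (mode == "r")        return {false, true};  // `reduced` is unused in this mode
      assert(false && "expected one of 'reduced', 'r', or 'complete'");
      return {false, false};
    }

    int main() {
      auto [compute_q, reduced] = parse_qr_mode("complete");
      assert(compute_q && !reduced);
    }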
390
+ // Function to compute sizes, strides and the extra columns for the Q matrix in the QR Decomposition
391
+ static inline std::tuple<DimVector, DimVector, int64_t> _compute_geometry_for_Q(
392
+ const Tensor& input,
393
+ bool reduced) {
394
+ int64_t m = input.size(-2), n = input.size(-1);
395
+ int64_t n_columns_q;
396
+
397
+ // We need to compute the required size of Q based on the `reduced` option
398
+ DimVector q_sizes(input.sizes());
399
+ if (!reduced && m > n) {
400
+ q_sizes[input.dim() - 1] = m;
401
+ n_columns_q = m;
402
+ } else {
403
+ q_sizes[input.dim() - 1] = n;
404
+ n_columns_q = std::min(m, n);
405
+ }
406
+ auto q_strides = batched_matrix_contiguous_strides(q_sizes, /*f-contig*/true);
407
+ return std::make_tuple(q_sizes, q_strides, n_columns_q);
408
+ }
409
+
410
+ static inline bool svd_uses_cusolver(const Tensor& A) {
411
+ // if cuSOLVER is available, it is used unless the MAGMA backend is explicitly preferred
412
+ return A.is_cuda()
413
+ && at::globalContext().hasCuSOLVER()
414
+ && at::globalContext().linalgPreferredBackend() != at::LinalgBackend::Magma;
415
+ }
416
+
417
+
418
+ // Function used instead of .to so that the original strides are retained
419
+ // .to doesn't retain strides and makes the output tensor contiguous
420
+ static inline Tensor same_stride_to(const Tensor& original_tensor, const at::TensorOptions& options) {
421
+ auto strided_to = at::empty_strided(original_tensor.sizes(),
422
+ original_tensor.strides(),
423
+ options);
424
+ strided_to.copy_(original_tensor);
425
+ return strided_to;
426
+ }
427
+
428
+ // Creates a dimension permutation array that can be given to `at::permute()`, which will shift
429
+ // the two specified dimensions to the end of a tensor, without changing the order of
430
+ // the other dimensions. `dim1` will be placed at the very end, and `dim0` will be
431
+ // placed just to the left of it.
432
+ //
433
+ // For instance, given a 4-D tensor, dimensions 1 and 3 can be shifted to the end by
434
+ // calling `create_dim_backshift_permutation(1, 3, 4)`. The resulting vector will
435
+ // be `vec(0, 2, 1, 3)`.
436
+ static inline std::vector<int64_t> create_dim_backshift_permutation(int64_t dim0, int64_t dim1, int64_t ndim) {
437
+ TORCH_CHECK(
438
+ (dim0 != dim1) && (dim0 < ndim) && (dim0 >= 0) && (dim1 < ndim) && (dim1 >= 0),
439
+ "duplicate or invalid dimensions");
440
+ std::vector<int64_t> permutation(ndim);
441
+ int64_t cur_permuted_dim = 0;
442
+ for (const auto dim_ind : c10::irange(ndim)) {
443
+ if ((dim_ind != dim0) && (dim_ind != dim1)) {
444
+ permutation[cur_permuted_dim++] = dim_ind;
445
+ }
446
+ }
447
+ permutation[cur_permuted_dim++] = dim0;
448
+ permutation[cur_permuted_dim] = dim1;
449
+ return permutation;
450
+ }
451
+
452
+ // Creates a dimension permutation array that can be given to `at::permute()`, which
453
+ // will reverse a given permutation.
454
+ // The reverse permutation array is created by swapping the indices and their
455
+ // associated values from the given permutation array.
456
+ static inline std::vector<int64_t> create_reverse_permutation(std::vector<int64_t> permutation) {
457
+ int64_t ndim = permutation.size();
458
+ std::vector<int64_t> reverse_permutation(ndim);
459
+ for (const auto dim_ind : c10::irange(ndim)) {
460
+ reverse_permutation[permutation[dim_ind]] = dim_ind;
461
+ }
462
+ return reverse_permutation;
463
+ }
464
+
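A plain-std sketch of the two permutation helpers, showing that the documented example {0, 2, 1, 3} is its own inverse (the function and variable names here are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Restatement of the backshift permutation above, plus its inverse.
    static std::vector<int64_t> backshift(int64_t dim0, int64_t dim1, int64_t ndim) {
      std::vector<int64_t> perm;
      for (int64_t d = 0; d < ndim; ++d)
        if (d != dim0 && d != dim1) perm.push_back(d);
      perm.push_back(dim0);
      perm.push_back(dim1);
      return perm;
    }

    int main() {
      auto perm = backshift(1, 3, 4);            // {0, 2, 1, 3}, as documented
      std::vector<int64_t> rev(perm.size());
      for (size_t i = 0; i < perm.size(); ++i)
        rev[perm[i]] = static_cast<int64_t>(i);  // swap indices and values
      assert((perm == std::vector<int64_t>{0, 2, 1, 3}));
      assert(rev == perm);                       // this permutation is its own inverse
    }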
465
+ // Compute R-work array size for MAGMA/LAPACK cgesdd/zgesdd
466
+ // See https://github.com/Reference-LAPACK/lapack/blob/122506cd8b6ce050a200920c3d4c0b153b150fd8/SRC/cgesdd.f#L186
467
+ static inline int64_t computeLRWorkDim(const char jobz, int64_t m, int64_t n) {
468
+ auto mn = std::min(m, n);
469
+ auto mx = std::max(m, n);
470
+ if (jobz == 'N') {
471
+ #ifdef __APPLE__
472
+ // According to `vecLib.framework/Headers/clapack.h` Accelerate.framework is based on LAPACK 3.2.1
473
+ return 7 * mn;
474
+ #else
475
+ // This setting is valid for LAPACK 3.6+
476
+ return 5 * mn;
477
+ #endif
478
+ }
479
+ if (mx > 10 * mn) {
480
+ return 5 * mn * mn + 5 * mn;
481
+ }
482
+ return std::max(5 * mn * mn + 5 * mn, 2 * mx * mn + 2 * mn * mn + mn);
483
+ }
484
+
485
+ // This function checks whether the uplo argument input is valid
486
+ // Allowed strings are "u", "U", "l", "L"
487
+ static inline void checkUplo(const c10::string_view uplo) {
488
+ // To use std::toupper safely with plain chars (or signed chars), the argument should first be converted to unsigned char
489
+ char uplo_uppercase = static_cast<char>(std::toupper(static_cast<unsigned char>(uplo[0])));
490
+ TORCH_CHECK(uplo.size() == 1 && (uplo_uppercase == 'U' || uplo_uppercase == 'L'),
491
+ "Expected UPLO argument to be 'L' or 'U', but got ", uplo);
492
+ }
493
+
494
+ static inline void checkSameDevice(const std::string& fn_name, Tensor result, Tensor input, const std::string& result_name = "result") {
495
+ TORCH_CHECK(
496
+ result.device() == input.device(),
497
+ fn_name,
498
+ ": Expected ", result_name, " and input tensors to be on the same device, but got ",
499
+ result_name, " on ", result.device(), " and input on ", input.device());
500
+ }
501
+
502
+ // Check the dtype of result and input tensors (for _out variants).
503
+ // Most linear algebra functions have the same dtype for input and output
504
+ // (either floating or complex type input), so we can check whether input's dtype can be casted to result's dtype.
505
+ // According to https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
506
+ // c10::canCast is used for checking the "safe copy" dtype requirements.
507
+ static inline void checkLinalgCompatibleDtype(const std::string& fn_name, Tensor result, Tensor input, const std::string& result_name = "result") {
508
+ bool can_cast = c10::canCast(input.scalar_type(), result.scalar_type());
509
+ TORCH_CHECK(
510
+ can_cast,
511
+ fn_name,
512
+ ": Expected ", result_name, " to be safely castable from ", input.scalar_type(), " dtype, but got ",
513
+ result_name, " with dtype ", result.scalar_type());
514
+ }
515
+
516
+ // Alternatively, we can check whether the specific expected output type (result_type) can be safely casted to out tensor dtype (out_type)
517
+ static inline void checkLinalgCompatibleDtype(const std::string& fn_name, ScalarType out_type, ScalarType result_type, const std::string& out_name = "result") {
518
+ bool can_cast = c10::canCast(result_type, out_type);
519
+ TORCH_CHECK(
520
+ can_cast,
521
+ fn_name,
522
+ ": Expected ", out_name, " to be safely castable from ", result_type, " dtype, but got ",
523
+ out_name, " with dtype ", out_type);
524
+ }
525
+
526
+ static inline void checkNotComplexTolerance(const Tensor& tol, const c10::string_view f_name, const c10::string_view tol_name) {
527
+ TORCH_CHECK(!at::isComplexType(tol.scalar_type()),
528
+ f_name, ": ", tol_name, " tensor of complex type is not supported. Got ", tol.scalar_type());
529
+ }
530
+
531
+ /*
532
+ Two types of 'other' tensors are supported when solving
533
+ a system of linear equations matmul(input, x) = other:
534
+ * 1-dimensional (1D) tensor or batch of 1D tensors (vector case)
535
+ * 2-dimensional (2D) tensor or batch of 2D tensors (matrix case).
536
+ The original torch.solve supported only the matrix case, while NumPy works for both cases.
537
+ For the batched input we need to be able to distinguish them.
538
+ Let input.shape = (batch_dimensions, m, n), then 'other' is of vector type if other.shape == (batch_dimensions, m).
539
+ This rule is compatible with NumPy, see https://github.com/numpy/numpy/blob/v1.20.0/numpy/linalg/linalg.py#L384-L389
540
+ */
541
+ static inline bool linalg_solve_is_vector_rhs(const Tensor& input, const Tensor& other) {
542
+ auto expected_batched_rhs_shape = SymIntArrayRef(input.sym_sizes().data(), input.dim() - 1); // input.shape[:-1]
543
+ bool vector_case = other.dim() == 1 || (input.dim() - 1 == other.dim() && other.sym_sizes().equals(expected_batched_rhs_shape));
544
+ return vector_case;
545
+ }
546
+
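A small shape-level sketch of the rule (libtorch assumed; the shapes are illustrative): `other` counts as a vector right-hand side exactly when its shape equals `input.shape[:-1]`.

    #include <torch/torch.h>
    #include <cassert>

    int main() {
      auto input = torch::rand({5, 3, 3});  // batch of five 3x3 systems
      auto vec   = torch::rand({5, 3});     // shape == input.shape[:-1] -> vector RHS
      auto mat   = torch::rand({5, 3, 2});  // matrix RHS
      assert(vec.dim() == input.dim() - 1 &&
             vec.sizes() == input.sizes().slice(0, input.dim() - 1));
      assert(mat.dim() == input.dim());     // not a vector RHS
    }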
547
+ /*
548
+ Computes linear indices for a tensor with original_shape to access its elements as if it were a materialized broadcast tensor.
549
+ */
550
+ static inline Tensor get_linear_indices(int64_t numel, IntArrayRef original_shape, IntArrayRef broadcast_shape) {
551
+ TensorOptions options = at::TensorOptions().dtype(at::kLong).device(at::kCPU);
552
+ return at::arange(numel, options).view(original_shape).broadcast_to(broadcast_shape).contiguous();
553
+ }
554
+
555
+ class BroadcastLinearIndices {
556
+ private:
557
+ Tensor linear_indices_;
558
+ bool is_broadcasting_;
559
+
560
+ public:
561
+ BroadcastLinearIndices(
562
+ int64_t numel,
563
+ IntArrayRef original_shape,
564
+ IntArrayRef broadcast_shape) : is_broadcasting_(!original_shape.equals(broadcast_shape)) {
565
+ // The assumption is that the broadcast_shape is a materialized broadcast
566
+ // shape of the original_shape. We need to compute the linear indices
567
+ // compatible with the original_shape to access the elements in the original
568
+ // tensor corresponding to the broadcast tensor.
569
+ if (is_broadcasting_) {
570
+ linear_indices_ =
571
+ get_linear_indices(numel, original_shape, broadcast_shape);
572
+ }
573
+ }
574
+ int64_t operator()(int64_t broadcast_linear_index) {
575
+ return is_broadcasting_
576
+ ? linear_indices_.data_ptr<int64_t>()[broadcast_linear_index]
577
+ : broadcast_linear_index;
578
+ }
579
+ };
580
+
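For intuition about the index table this class wraps (libtorch assumed; the shapes are illustrative): broadcasting [1, 3] to [2, 3] makes the linear indices into the original tensor repeat once per broadcast row.

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto idx = torch::arange(3, torch::kLong)
                     .view({1, 3})
                     .broadcast_to({2, 3})
                     .contiguous();
      std::cout << idx.flatten() << '\n';  // 0 1 2 0 1 2
    }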
581
+ static inline bool is_blas_compatible_column_major_order(const Tensor& input) {
582
+ IntArrayRef input_strides = input.strides();
583
+ IntArrayRef input_sizes = input.sizes();
584
+ auto ndim = input.dim();
585
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2);
586
+ if (ndim > 3) {
587
+ return input.transpose(-2, -1).is_contiguous();
588
+ }
589
+ auto leading_dimension = input_strides[ndim - 1];
590
+ auto rows = input_sizes[ndim - 2];
591
+ bool batch_stride_compatible = true;
592
+ if (ndim == 3) {
593
+ auto cols = input_sizes[ndim - 1];
594
+ batch_stride_compatible =
595
+ input_strides[ndim - 3] >= leading_dimension * cols;
596
+ }
597
+ return (input_strides[ndim - 2] == 1) &&
598
+ (leading_dimension >= std::max<int64_t>(1, rows)) &&
599
+ batch_stride_compatible;
600
+ }
601
+
602
+ static inline bool is_blas_compatible_row_major_order(const Tensor& input) {
603
+ IntArrayRef input_strides = input.strides();
604
+ IntArrayRef input_sizes = input.sizes();
605
+ auto ndim = input.dim();
606
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2);
607
+ if (ndim > 3) {
608
+ return input.is_contiguous();
609
+ }
610
+ auto leading_dimension = input_strides[ndim - 2];
611
+ auto cols = input_sizes[ndim - 1];
612
+ bool batch_stride_compatible = true;
613
+ if (ndim == 3) {
614
+ auto rows = input_sizes[ndim - 2];
615
+ batch_stride_compatible =
616
+ input_strides[ndim - 3] >= leading_dimension * rows;
617
+ }
618
+ return (input_strides[ndim - 1] == 1) &&
619
+ (leading_dimension >= std::max<int64_t>(1, cols)) &&
620
+ batch_stride_compatible;
621
+ }
622
+
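For intuition (libtorch assumed; the tensors are illustrative): a matrix is column-major-compatible in the sense checked above when it has unit stride down a column and a leading dimension at least the number of rows.

    #include <torch/torch.h>
    #include <cassert>

    int main() {
      auto a = torch::rand({4, 3});      // C-contiguous: strides [3, 1]
      auto f = a.t().contiguous().t();   // same 4x3 values, column-major storage
      assert(f.stride(0) == 1);                     // unit stride down a column
      assert(f.stride(1) >= f.size(0));             // leading dimension >= rows
      assert(f.transpose(-2, -1).is_contiguous());  // the predicate used above
    }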
623
+ } // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/LossMulti.h ADDED
@@ -0,0 +1,72 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/AccumulateType.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/TensorUtils.h>
6
+
7
+ namespace at::native {
8
+ namespace {
9
+ static C10_UNUSED void multilabel_margin_loss_shape_check(
10
+ int64_t& nframe,
11
+ int64_t& dim,
12
+ const int64_t& ndims,
13
+ const Tensor& input,
14
+ const Tensor& target) {
15
+ TORCH_CHECK(
16
+ (ndims == 2 && input.size(1) != 0) || (ndims == 1 && input.size(0) != 0) || ndims == 0,
17
+ "Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
18
+ input.sizes());
19
+
20
+ if (ndims <= 1) {
21
+ nframe = 1;
22
+ dim = ndims == 0 ? 1 : input.size(0);
23
+ TORCH_CHECK(
24
+ target.dim() <= 1 && target.numel() == dim,
25
+ "inconsistent target size: ", target.sizes(), " for input of size: ",
26
+ input.sizes());
27
+ } else {
28
+ nframe = input.size(0);
29
+ dim = input.size(1);
30
+ TORCH_CHECK(
31
+ target.dim() == 2 && target.size(0) == nframe &&
32
+ target.size(1) == dim,
33
+ "inconsistent target size: ", target.sizes(), " for input of size: ",
34
+ input.sizes());
35
+ }
36
+ }
37
+
38
+ static C10_UNUSED void multi_margin_loss_shape_check(
39
+ int64_t& nframe,
40
+ int64_t& dim,
41
+ const int64_t& ndims,
42
+ const Tensor& input,
43
+ const Tensor& target,
44
+ const c10::optional<Tensor>& weight) {
45
+ TORCH_CHECK(
46
+ (ndims == 2 && input.size(1) != 0) || (ndims == 1 && input.size(0) != 0) || ndims == 0,
47
+ "Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
48
+ input.sizes());
49
+
50
+ if (ndims <= 1) {
51
+ nframe = 1;
52
+ dim = ndims == 0 ? 1 : input.size(0);
53
+ } else {
54
+ nframe = input.size(0);
55
+ dim = input.size(1);
56
+ }
57
+
58
+ TORCH_CHECK(
59
+ target.dim() <= 1 && target.numel() == nframe,
60
+ "inconsistent target size, expected ", nframe, " but got ",
61
+ target.sizes());
62
+ if (weight && weight->defined()) {
63
+ TORCH_CHECK(
64
+ weight->dim() <= 1 && weight->numel() == dim,
65
+ "inconsistent weight size, expected ", dim, " but got ",
66
+ weight->sizes());
67
+ }
68
+ }
69
+
70
+
71
+ } // anonymous namespace
72
+ } // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h ADDED
@@ -0,0 +1,71 @@
1
+ #pragma once
2
+
3
+ namespace at {
4
+ // views and their in-place version ops
5
+ #define TORCH_VIEW_FNS(m) \
6
+ m.impl("as_strided_", torch::CppFunction::makeFallthrough()); \
7
+ m.impl("detach", torch::CppFunction::makeFallthrough()); \
8
+ m.impl("detach_", torch::CppFunction::makeFallthrough()); \
9
+ m.impl("diagonal", torch::CppFunction::makeFallthrough()); \
10
+ m.impl("expand", torch::CppFunction::makeFallthrough()); \
11
+ m.impl("expand_as", torch::CppFunction::makeFallthrough()); \
12
+ m.impl("movedim.int", torch::CppFunction::makeFallthrough()); \
13
+ m.impl("movedim.intlist", torch::CppFunction::makeFallthrough()); \
14
+ m.impl("narrow", torch::CppFunction::makeFallthrough()); \
15
+ m.impl("permute", torch::CppFunction::makeFallthrough()); \
16
+ m.impl("select.Dimname", torch::CppFunction::makeFallthrough()); \
17
+ m.impl("select.int", torch::CppFunction::makeFallthrough()); \
18
+ m.impl("squeeze", torch::CppFunction::makeFallthrough()); \
19
+ m.impl("squeeze_", torch::CppFunction::makeFallthrough()); \
20
+ m.impl("transpose.int", torch::CppFunction::makeFallthrough()); \
21
+ m.impl("transpose.Dimname", torch::CppFunction::makeFallthrough()); \
22
+ m.impl("transpose_", torch::CppFunction::makeFallthrough()); \
23
+ m.impl("t", torch::CppFunction::makeFallthrough()); \
24
+ m.impl("t_", torch::CppFunction::makeFallthrough()); \
25
+ m.impl("real", torch::CppFunction::makeFallthrough()); \
26
+ m.impl("imag", torch::CppFunction::makeFallthrough()); \
27
+ m.impl("view_as_real", torch::CppFunction::makeFallthrough()); \
28
+ m.impl("unflatten.int", torch::CppFunction::makeFallthrough()); \
29
+ m.impl("unflatten.Dimname", torch::CppFunction::makeFallthrough()); \
30
+ m.impl("unfold", torch::CppFunction::makeFallthrough()); \
31
+ m.impl("unsqueeze", torch::CppFunction::makeFallthrough()); \
32
+ m.impl("unsqueeze_", torch::CppFunction::makeFallthrough()); \
33
+ m.impl("view_as", torch::CppFunction::makeFallthrough()); \
34
+ m.impl("unbind.int", torch::CppFunction::makeFallthrough()); \
35
+ m.impl("unbind.Dimname", torch::CppFunction::makeFallthrough()); \
36
+ m.impl("split.Tensor", torch::CppFunction::makeFallthrough()); \
37
+ m.impl("split_with_sizes", torch::CppFunction::makeFallthrough()); \
38
+ m.impl("swapaxes", torch::CppFunction::makeFallthrough()); \
39
+ m.impl("swapdims", torch::CppFunction::makeFallthrough()); \
40
+ m.impl("chunk", torch::CppFunction::makeFallthrough()); \
41
+ m.impl("reshape", torch::CppFunction::makeFallthrough()); \
42
+ m.impl("alias", torch::CppFunction::makeFallthrough()); \
43
+ m.impl("hsplit.int", torch::CppFunction::makeFallthrough()); \
44
+ m.impl("hsplit.array", torch::CppFunction::makeFallthrough()); \
45
+ m.impl("dsplit.int", torch::CppFunction::makeFallthrough()); \
46
+ m.impl("dsplit.array", torch::CppFunction::makeFallthrough()); \
47
+ m.impl("vsplit.int", torch::CppFunction::makeFallthrough()); \
48
+ m.impl("vsplit.array", torch::CppFunction::makeFallthrough()); \
49
+ m.impl("conj", torch::CppFunction::makeFallthrough()); \
50
+ m.impl("_conj", torch::CppFunction::makeFallthrough()); \
51
+ m.impl("_unsafe_view", torch::CppFunction::makeFallthrough()); \
52
+ m.impl("resize_", torch::CppFunction::makeFallthrough());
53
+
54
+ #define TENSOR_UTILITIES_AND_CONSTRUCTORS(m) \
55
+ m.impl("empty_like", torch::CppFunction::makeFallthrough()); \
56
+ m.impl("empty.memory_format", torch::CppFunction::makeFallthrough()); \
57
+ m.impl("empty.out", torch::CppFunction::makeFallthrough()); \
58
+ m.impl("empty_strided", torch::CppFunction::makeFallthrough()); \
59
+ m.impl("full_like", torch::CppFunction::makeFallthrough()); \
60
+ m.impl("stride.int", torch::CppFunction::makeFallthrough()); \
61
+ m.impl("stride.Dimname", torch::CppFunction::makeFallthrough()); \
62
+ m.impl("size.int", torch::CppFunction::makeFallthrough()); \
63
+ m.impl("size.Dimname", torch::CppFunction::makeFallthrough()); \
64
+ m.impl("is_complex", torch::CppFunction::makeFallthrough()); \
65
+ m.impl("is_floating_point", torch::CppFunction::makeFallthrough()); \
66
+ m.impl("requires_grad_", torch::CppFunction::makeFallthrough());
67
+ }
68
+
69
+ #define TORCH_VIEW_FNS_NATIVE_FN_REGISTRATION(m) \
70
+ m.impl("as_strided", torch::CppFunction::makeFallthrough()); \
71
+ m.impl("view", torch::CppFunction::makeFallthrough());
venv/lib/python3.10/site-packages/torch/include/ATen/native/NonSymbolicBC.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <c10/util/irange.h>
4
+ #include <ATen/core/IListRef.h>
5
+
6
+ namespace at::native {
7
+ // This file contains non-symbolic signatures for ops that we have sym-intified the signature of.
8
+ // However, in certain cases (such as static runtime), we call the native versions of the ops directly.
9
+ // In those cases, we will duplicate the signature here with non-symbolic ints, and also duplicate the C++ implementation.
10
+ TORCH_API at::Tensor reshape(const at::Tensor& self, at::IntArrayRef proposed_shape);
11
+ TORCH_API at::Tensor narrow(const at::Tensor& self, int64_t dim, int64_t start, int64_t length);
12
+ TORCH_API at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt, c10::optional<bool> is_coalesced=c10::nullopt);
13
+ TORCH_API at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor>& weight_opt, int64_t reduction, int64_t ignore_index);
14
+ TORCH_API at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor>& weight_opt, int64_t reduction, int64_t ignore_index);
15
+ // The below ops don't get a duplicated C++ implementation.
16
+ // They are backward ops, which make them very unlikely to be called directly
17
+ // by external code (at::native::trace_backward).
18
+ // They get their own declaration for BC purposes however.
19
+ TORCH_API at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1);
20
+ TORCH_API at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1);
21
+ TORCH_API at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim);
22
+ TORCH_API at::Tensor trace_backward(const at::Tensor & grad, at::IntArrayRef sizes);
23
+ TORCH_API at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index);
24
+ TORCH_API at::Tensor select(const at::Tensor& self, int64_t dim, int64_t index);
25
+ TORCH_API std::vector<Tensor> tensor_split(const Tensor& self, IntArrayRef indices, int64_t dim);
26
+ } // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+
3
+ #include <ATen/TensorIterator.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+
6
+ namespace at::native {
7
+
8
+ using renorm_scale_factor_fn = void (*) (TensorIteratorBase& iter, double maxnorm);
9
+ DECLARE_DISPATCH(renorm_scale_factor_fn, renorm_scale_factor_stub);
10
+
11
+ } // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/Padding.h ADDED
@@ -0,0 +1,62 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+
6
+ namespace at::native {
7
+
8
+ using padding_fn = void (*)(const Tensor&, const Tensor&, IntArrayRef);
9
+
10
+ // reflection padding
11
+ DECLARE_DISPATCH(padding_fn, reflection_pad1d_kernel);
12
+ DECLARE_DISPATCH(padding_fn, reflection_pad1d_backward_kernel);
13
+ DECLARE_DISPATCH(padding_fn, reflection_pad2d_kernel);
14
+ DECLARE_DISPATCH(padding_fn, reflection_pad2d_backward_kernel);
15
+ DECLARE_DISPATCH(padding_fn, reflection_pad3d_kernel);
16
+ DECLARE_DISPATCH(padding_fn, reflection_pad3d_backward_kernel);
17
+
18
+ // replication padding
19
+ DECLARE_DISPATCH(padding_fn, replication_pad1d_kernel);
20
+ DECLARE_DISPATCH(padding_fn, replication_pad1d_backward_kernel);
21
+ DECLARE_DISPATCH(padding_fn, replication_pad2d_kernel);
22
+ DECLARE_DISPATCH(padding_fn, replication_pad2d_backward_kernel);
23
+ DECLARE_DISPATCH(padding_fn, replication_pad3d_kernel);
24
+ DECLARE_DISPATCH(padding_fn, replication_pad3d_backward_kernel);
25
+
26
+ namespace padding {
27
+
28
+ template <int dim>
29
+ static inline void check_valid_input(const Tensor& input, IntArrayRef padding) {
30
+
31
+ TORCH_CHECK(padding.size() == 2 * dim,
32
+ "padding size is expected to be ", 2 * dim,
33
+ ", but got: ", padding.size());
34
+
35
+ int input_dim = input.dim();
36
+
37
+ bool is_batch_mode = input_dim == (dim + 2);
38
+
39
+ bool valid_batch_mode = is_batch_mode;
40
+ bool valid_non_batch_mode = !is_batch_mode;
41
+
42
+ if (is_batch_mode) {
43
+ // allow batch size of 0-dim.
44
+ for (const auto d : c10::irange(1, input_dim)) {
45
+ valid_batch_mode = valid_batch_mode && input.size(d) != 0;
46
+ }
47
+ } else {
48
+ for (const auto d : c10::irange(0, input_dim)) {
49
+ valid_non_batch_mode = valid_non_batch_mode && input.size(d) != 0;
50
+ }
51
+ }
52
+
53
+ // allow empty batch size but not other dimensions.
54
+ TORCH_CHECK(valid_batch_mode || valid_non_batch_mode,
55
+ "Expected ", dim + 1, "D or ", dim + 2,
56
+ "D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ",
57
+ input.sizes());
58
+ }
59
+
60
+ } // namespace padding
61
+
62
+ } // at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/PointwiseOps.h ADDED
@@ -0,0 +1,28 @@
1
+ // Ternary and higher-order pointwise operations
2
+ #pragma once
3
+
4
+ #include <ATen/native/DispatchStub.h>
5
+
6
+ namespace c10 {
7
+ class Scalar;
8
+ }
9
+
10
+ namespace at {
11
+
12
+ struct TensorIterator;
13
+ struct TensorIteratorBase;
14
+
15
+ namespace native {
16
+
17
+ using pointwise_fn = void (*)(TensorIterator&, const Scalar& scalar);
18
+ using structured_pointwise_fn = void (*)(TensorIteratorBase&, const Scalar& scalar);
19
+ using pointwise_fn_double = void (*)(TensorIterator&, const Scalar&, double);
20
+
21
+ DECLARE_DISPATCH(structured_pointwise_fn, addcmul_stub);
22
+ DECLARE_DISPATCH(structured_pointwise_fn, addcdiv_stub);
23
+ DECLARE_DISPATCH(pointwise_fn_double, smooth_l1_backward_stub);
24
+ DECLARE_DISPATCH(pointwise_fn_double, huber_backward_stub);
25
+ DECLARE_DISPATCH(pointwise_fn, mse_backward_stub);
26
+
27
+ } // namespace native
28
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/native/Pool.h ADDED
@@ -0,0 +1,340 @@
1
+ #include <ATen/core/Tensor.h>
2
+ #include <ATen/div_rtn.h>
3
+ #include <ATen/TensorUtils.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+ #include <c10/util/irange.h>
6
+
7
+ #include <utility>
8
+
9
+ #pragma once
10
+
11
+ namespace at::native {
12
+
13
+ using max_pool2d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input,
14
+ int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH);
15
+ using max_pool2d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
16
+
17
+ DECLARE_DISPATCH(max_pool2d_fn, max_pool2d_kernel);
18
+ DECLARE_DISPATCH(max_pool2d_backward_fn, max_pool2d_backward_kernel);
19
+
20
+ // averge pooling has same signature for forward and backward
21
+ using avg_pool2d_fn = void(*)(const Tensor& output, const Tensor& input, int64_t kW, int64_t kH,
22
+ int64_t dW, int64_t dH, int64_t padW, int64_t padH, bool count_include_pad, c10::optional<int64_t> divisor_override);
23
+ using avg_pool2d_backward_fn = void(*)(const Tensor& output, const Tensor& input, int kW, int kH,
24
+ int dW, int dH, int padW, int padH, bool count_include_pad, c10::optional<int64_t> divisor_override);
25
+
26
+ DECLARE_DISPATCH(avg_pool2d_fn, avg_pool2d_kernel);
27
+ DECLARE_DISPATCH(avg_pool2d_backward_fn, avg_pool2d_backward_kernel);
28
+
29
+ using max_pool3d_fn = void(*)(Tensor& output, Tensor& indices, const Tensor& input,
30
+ int kW, int kH, int kD, int dW, int dH, int dD, int pW, int pH, int pD, int dilationW, int dilationH, int dilationD);
31
+ using max_pool3d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
32
+
33
+ DECLARE_DISPATCH(max_pool3d_fn, max_pool3d_kernel);
34
+ DECLARE_DISPATCH(max_pool3d_backward_fn, max_pool3d_backward_kernel);
35
+ namespace {
36
+
37
+ template <typename dest_t, typename src_t>
38
+ static inline dest_t
39
+ safe_downcast(src_t v)
40
+ {
41
+ TORCH_CHECK(std::numeric_limits<dest_t>::min() <= v && v <= std::numeric_limits<dest_t>::max(),
42
+ "integer out of range");
43
+
44
+ return static_cast<dest_t>(v);
45
+ }
46
+
47
+ template<typename T>
48
+ static inline T pooling_output_shape_pad_lr(
49
+ T inputSize, T kernelSize, T pad_l, T pad_r, T stride, T dilation,
50
+ bool ceil_mode) {
51
+ T outputSize = div_rtn<T>(
52
+ inputSize + pad_l + pad_r - dilation * (kernelSize - 1) - 1 +
53
+ (ceil_mode ? stride - 1 : 0), stride) + 1;
54
+ if (ceil_mode) {
55
+ // ensure that the last pooling starts inside the image
56
+ // needed to avoid problems in ceil mode
57
+ if ((outputSize - 1) * stride >= inputSize + pad_l) {
58
+ --outputSize;
59
+ }
60
+ }
61
+ return outputSize;
62
+ }
63
+
64
+ template<typename T>
65
+ static inline T pooling_output_shape(
66
+ T inputSize, T kernelSize, T pad, T stride, T dilation, bool ceil_mode) {
67
+ TORCH_CHECK(stride != 0, "stride should not be zero");
68
+ TORCH_CHECK(pad >= 0,
69
+ "pad must be non-negative, but got pad: ", pad);
70
+ TORCH_CHECK(pad <= ((kernelSize - 1) * dilation + 1) / 2,
71
+ "pad should be at most half of effective kernel size, but got pad=",
72
+ pad, ", kernel_size=", kernelSize, " and dilation=", dilation)
73
+ return pooling_output_shape_pad_lr(
74
+ inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode);
75
+ }
76
+
77
+ template <typename T>
78
+ std::pair<T, T> _pooling_same_mode_padding_lr(
79
+ T inputSize, T kernelSize, T stride, T dilation) {
80
+ // NOTE: with strides, the output shape is ceil(inputSize/stride)
81
+ auto total_padding = T(dilation) * (kernelSize - 1);
82
+
83
+ // Prefer symmetric padding if possible
84
+ if (stride > 2 && (total_padding % 2 == 1)) {
85
+ // The floor in the output size calculation gives us a little wiggle room
86
+ auto wiggle_room = inputSize % stride - 1;
87
+ if (wiggle_room > 0) {
88
+ total_padding = total_padding - 1;
89
+ }
90
+ }
91
+
92
+ auto left = total_padding / 2;
93
+ return {left, total_padding - left};
94
+ }
95
+
96
+ inline std::pair<int64_t, int64_t> pooling_same_mode_padding_lr(
97
+ int64_t inputSize, int64_t kernelSize, int64_t stride, int64_t dilation) {
98
+ return _pooling_same_mode_padding_lr(inputSize, kernelSize, stride, dilation);
99
+ }
100
+
101
+ inline std::pair<c10::SymInt, c10::SymInt> pooling_same_mode_padding_lr(
102
+ c10::SymInt inputSize, c10::SymInt kernelSize, c10::SymInt stride, c10::SymInt dilation) {
103
+ return _pooling_same_mode_padding_lr(std::move(inputSize), std::move(kernelSize), std::move(stride), std::move(dilation));
104
+ }
105
+
106
+ // AveragePool2d/DilatedMaxPool2d (forward)
107
+ static inline void
108
+ pool2d_shape_check(
109
+ const Tensor& input,
110
+ int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW,
111
+ int64_t nInputPlane,
112
+ int64_t inputHeight, int64_t inputWidth,
113
+ int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format)
114
+ {
115
+ const int64_t ndim = input.ndimension();
116
+ const int64_t nOutputPlane = nInputPlane;
117
+
118
+ TORCH_CHECK(kW > 0 && kH > 0,
119
+ "kernel size should be greater than zero, but got ",
120
+ "kH: ", kH, " kW: ", kW);
121
+ TORCH_CHECK(dW > 0 && dH > 0,
122
+ "stride should be greater than zero, but got "
123
+ "dH: ", dH, " dW: ", dW);
124
+ TORCH_CHECK(dilationH > 0 && dilationW > 0,
125
+ "dilation should be greater than zero, but got ",
126
+ "dilationH: ", dilationH, " dilationW: ", dilationW);
127
+
128
+ bool valid_dims = input.size(1) != 0 && input.size(2) != 0;
129
+ if (memory_format == at::MemoryFormat::ChannelsLast){
130
+ // Expect tensor in NHWC format and allow 0-dim only for N.
131
+ TORCH_CHECK((ndim == 4 && valid_dims && input.size(3) != 0),
132
+ "Expected 4D (batch mode) tensor expected for input with channels_last layout"
133
+ " with optional 0 dim batch size for input, but got: ", input.sizes());
134
+ } else {
135
+ TORCH_CHECK((ndim == 3 && input.size(0) != 0 && valid_dims) ||
136
+ (ndim == 4 && valid_dims && input.size(3) != 0),
137
+ "Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got:",
138
+ input.sizes());
139
+ }
140
+
141
+ TORCH_CHECK(kW/2 >= padW && kH/2 >= padH,
142
+ "pad should be smaller than or equal to half of kernel size, but got ",
143
+ "padW = ", padW, ", padH = ", padH, ", kW = ", kW, ", kH = ", kH);
144
+
145
+ TORCH_CHECK(outputWidth >= 1 && outputHeight >= 1,
146
+ "Given input size: (",
147
+ nInputPlane, "x", inputHeight, "x", inputWidth, "). ",
148
+ "Calculated output size: (",
149
+ nOutputPlane, "x", outputHeight, "x", outputWidth, "). ",
150
+ "Output size is too small");
151
+ }
152
+
153
+ // DilatedMaxPool2d (backward)
154
+ static inline void
155
+ max_pool2d_backward_shape_check(
156
+ const Tensor& input,
157
+ const Tensor& gradOutput,
158
+ const Tensor& indices,
159
+ int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW,
160
+ int64_t nInputPlane,
161
+ int64_t inputHeight, int64_t inputWidth,
162
+ int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format)
163
+ {
164
+ pool2d_shape_check(
165
+ input,
166
+ kH, kW, dH, dW, padH, padW, dilationH, dilationW,
167
+ nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, memory_format);
168
+
169
+ const int64_t ndim = input.ndimension();
170
+ const int64_t nOutputPlane = nInputPlane;
171
+
172
+ check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane);
173
+ check_dim_size(gradOutput, ndim, ndim-2, outputHeight);
174
+ check_dim_size(gradOutput, ndim, ndim-1, outputWidth);
175
+
176
+ check_dim_size(indices, ndim, ndim-3, nOutputPlane);
177
+ check_dim_size(indices, ndim, ndim-2, outputHeight);
178
+ check_dim_size(indices, ndim, ndim-1, outputWidth);
179
+ }
180
+
181
+ // AveragePool2d (backward)
182
+ static inline void
183
+ avg_pool2d_backward_shape_check(
184
+ const Tensor& input,
185
+ const Tensor& gradOutput,
186
+ int64_t /*nbatch*/,
187
+ int kH, int kW, int dH, int dW, int padH, int padW,
188
+ int64_t nInputPlane,
189
+ int64_t inputHeight, int64_t inputWidth,
190
+ int64_t outputHeight, int64_t outputWidth,
191
+ MemoryFormat memory_format)
192
+ {
193
+ pool2d_shape_check(
194
+ input,
195
+ kH, kW, dH, dW, padH, padW, 1, 1,
196
+ nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
197
+ memory_format);
198
+
199
+ const int64_t ndim = input.ndimension();
200
+ const int64_t nOutputPlane = nInputPlane;
201
+
202
+ check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane);
203
+ check_dim_size(gradOutput, ndim, ndim-2, outputHeight);
204
+ check_dim_size(gradOutput, ndim, ndim-1, outputWidth);
205
+ }
206
+
207
+ // AveragePool3d/DilatedMaxPool3d (forward)
208
+ static inline void
209
+ pool3d_shape_check(
210
+ const Tensor& input,
211
+ int64_t nslices,
212
+ int kT, int kH, int kW,
213
+ int dT, int dH, int dW,
214
+ int pT, int pH, int pW,
215
+ int dilationT, int dilationH, int dilationW,
216
+ int64_t itime, int64_t iheight, int64_t iwidth,
217
+ int64_t otime, int64_t oheight, int64_t owidth,
218
+ const char *fn_name,
219
+ bool check_input_size=false)
220
+ {
221
+ const int64_t ndim = input.ndimension();
222
+
223
+ TORCH_CHECK(kT > 0 && kW > 0 && kH > 0,
224
+ "kernel size should be greater than zero, but got ",
225
+ "kT: ", kT, " kH: ", kH, " kW: ", kW);
226
+ TORCH_CHECK(dT > 0 && dW > 0 && dH > 0,
227
+ "stride should be greater than zero, but got ",
228
+ "dT: ", dT, " dH: ", dH, " dW: ", dW);
229
+ TORCH_CHECK(dilationT > 0 && dilationW > 0 && dilationH > 0,
230
+ "dilation should be greater than zero, but got ",
231
+ "dilationT: ", dilationT, " dilationH: ", dilationH, " dilationW: ", dilationW);
232
+
233
+ TORCH_CHECK(ndim == 4 || ndim == 5,
234
+ fn_name, ": Expected 4D or 5D tensor for input, but got: ", input.sizes());
235
+
236
+ for (const auto i : c10::irange(ndim)) {
237
+ if (ndim == 5 && i == 0) {
238
+ // size of batch-dim can be 0.
239
+ continue;
240
+ }
241
+ TORCH_CHECK(
242
+ input.size(i) > 0,
243
+ fn_name,
244
+ ": Expected input's non-batch dimensions to have positive length,"
245
+ " but input has a shape of ",
246
+ input.sizes(),
247
+ " and non-batch dimension ",
248
+ input.size(i),
249
+ " has length zero!")
250
+ }
251
+
252
+ if (check_input_size) { // AveragePool3d
253
+ TORCH_CHECK(itime >= kT && iheight >= kH && iwidth >= kW,
254
+ "input image ", "(T: ", itime, " H: ", iheight, " W: ", iwidth, ") smaller than ",
255
+ "kernel size ", "(kT: ", kT, " kH: ", kH, " kW: ", kW, ")");
256
+ }
257
+
258
+ TORCH_CHECK(kT/2 >= pT && kW/2 >= pW && kH/2 >= pH,
259
+ "pad should be smaller than or equal to half of kernel size, but got "
260
+ "kT: ", kT, " kW: ", kW, " kH: ", kH, " padT: ", pT, " padW: ", pW, " padH: ", pH);
261
+
262
+ TORCH_CHECK(otime >= 1 && owidth >= 1 && oheight >= 1,
263
+ "Given input size: (",
264
+ nslices,"x", itime, "x", iheight, "x", iwidth, "). ",
265
+ "Calculated output size: (",
266
+ nslices, "x", otime, "x", oheight, "x", owidth, "). ",
267
+ "Output size is too small");
268
+ }
269
+
270
+ static inline void
271
+ max_pool3d_backward_shape_check(
272
+ const Tensor& input,
273
+ const Tensor& gradOutput,
274
+ const Tensor& indices,
275
+ int64_t nslices,
276
+ int kT, int kH, int kW,
277
+ int dT, int dH, int dW,
278
+ int pT, int pH, int pW,
279
+ int dilationT, int dilationH, int dilationW,
280
+ int64_t itime, int64_t iheight, int64_t iwidth,
281
+ int64_t otime, int64_t oheight, int64_t owidth,
282
+ const char* fn_name)
283
+ {
284
+ const int64_t ndim = input.ndimension();
285
+
286
+ pool3d_shape_check(
287
+ input,
288
+ nslices,
289
+ kT, kH, kW,
290
+ dT, dH, dW,
291
+ pT, pH, pW,
292
+ dilationT, dilationH, dilationW,
293
+ itime, iheight, iwidth,
294
+ otime, oheight, owidth, fn_name);
295
+
296
+ check_dim_size(gradOutput, ndim, ndim-4, nslices);
297
+ check_dim_size(gradOutput, ndim, ndim-3, otime);
298
+ check_dim_size(gradOutput, ndim, ndim-2, oheight);
299
+ check_dim_size(gradOutput, ndim, ndim-1, owidth);
300
+
301
+ check_dim_size(indices, ndim, ndim-4, nslices);
302
+ check_dim_size(indices, ndim, ndim-3, otime);
303
+ check_dim_size(indices, ndim, ndim-2, oheight);
304
+ check_dim_size(indices, ndim, ndim-1, owidth);
305
+ }
306
+
307
+ static inline void
308
+ avg_pool3d_backward_shape_check(
309
+ const Tensor& input,
310
+ const Tensor& gradOutput,
311
+ int64_t nslices,
312
+ int kT, int kH, int kW,
313
+ int dT, int dH, int dW,
314
+ int pT, int pH, int pW,
315
+ int64_t itime, int64_t iheight, int64_t iwidth,
316
+ int64_t otime, int64_t oheight, int64_t owidth,
317
+ const char *fn_name)
318
+ {
319
+ const int64_t ndim = input.ndimension();
320
+
321
+ pool3d_shape_check(
322
+ input,
323
+ nslices,
324
+ kT, kH, kW,
325
+ dT, dH, dW,
326
+ pT, pH, pW,
327
+ 1, 1, 1,
328
+ itime, iheight, iwidth,
329
+ otime, oheight, owidth,
330
+ fn_name, true);
331
+
332
+ check_dim_size(gradOutput, ndim, ndim-4, nslices);
333
+ check_dim_size(gradOutput, ndim, ndim-3, otime);
334
+ check_dim_size(gradOutput, ndim, ndim-2, oheight);
335
+ check_dim_size(gradOutput, ndim, ndim-1, owidth);
336
+ }
337
+
338
+ } // anonymous namespace
339
+
340
+ } // namespace at::native
venv/lib/python3.10/site-packages/torch/include/ATen/native/Pow.h ADDED
@@ -0,0 +1,69 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+
5
+ namespace c10 {
6
+ class Scalar;
7
+ }
8
+
9
+ namespace at {
10
+
11
+ struct TensorIterator;
12
+ struct TensorIteratorBase;
13
+
14
+ namespace native {
15
+
16
+ #if defined(__CUDACC__) || defined(__HIPCC__)
17
+ #define HOST_DEVICE __host__ __device__
18
+ #else
19
+ #define HOST_DEVICE
20
+ #endif
21
+
22
+ // integral power in pytorch allows for negative exponents, giving truncated integral results.
23
+ // e.g. since 2**-1==0.5, the truncated integral result is zero. 1**negative_exponent is the
24
+ // only non-zero result.
25
+ template <class T,
26
+ typename std::enable_if<std::is_integral<T>::value, T>::type* = nullptr>
27
+ static inline HOST_DEVICE __ubsan_ignore_signed_int_overflow__ T powi_impl(T a, T b) {
28
+ T result = 1;
29
+ while (b) {
30
+ if (b & 1) {
31
+ result *= a;
32
+ }
33
+ b /= 2;
34
+ a *= a;
35
+ }
36
+ return result;
37
+ }
38
+
39
+ template <class T,
40
+ typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, T>::type* = nullptr>
41
+ static inline HOST_DEVICE T powi(T a, T b) {
42
+ return powi_impl(a, b);
43
+ }
44
+
45
+ template <class T,
46
+ typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, T>::type* = nullptr>
47
+ static inline HOST_DEVICE T powi(T a, T b) {
48
+ if ( b < 0 ) {
49
+ if ( a == 1 ) {
50
+ return 1;
51
+ } else if ( a == -1 ) {
52
+ auto negative = (-b) % static_cast<T>(2);
53
+ return negative ? -1 : 1;
54
+ } else {
55
+ return 0;
56
+ }
57
+ }
58
+ return powi_impl(a, b);
59
+ }
60
+
61
+ using pow_tensor_tensor_fn = void (*)(TensorIteratorBase&);
62
+ using pow_tensor_scalar_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
63
+
64
+ DECLARE_DISPATCH(pow_tensor_tensor_fn, pow_tensor_tensor_stub);
65
+ DECLARE_DISPATCH(pow_tensor_scalar_fn, pow_tensor_scalar_stub);
66
+
67
+ } // namespace native
68
+
69
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/native/RNN.h ADDED
@@ -0,0 +1,53 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/DispatchStub.h>
+
+ namespace at::native {
+
+ using lstm_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&, TensorList, TensorList, bool, int64_t, double, bool, bool, bool);
+ using rnn_fn = void(*)(Tensor&, Tensor&, const Tensor&, const Tensor&, TensorList, bool, int64_t, double, bool, bool, bool);
+ using lstm_packed_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&, const Tensor&, TensorList, TensorList, bool, int64_t, double, bool, bool);
+ using rnn_packed_fn = void(*)(Tensor&, Tensor&, const Tensor&, const Tensor&, const Tensor&, TensorList, bool, int64_t, double, bool, bool);
+
+ DECLARE_DISPATCH(lstm_fn, lstm_cudnn_stub);
+ DECLARE_DISPATCH(lstm_fn, lstm_miopen_stub);
+ DECLARE_DISPATCH(lstm_fn, lstm_mkldnn_stub);
+ DECLARE_DISPATCH(rnn_fn, gru_cudnn_stub);
+ DECLARE_DISPATCH(rnn_fn, gru_miopen_stub);
+ DECLARE_DISPATCH(rnn_fn, rnn_tanh_cudnn_stub);
+ DECLARE_DISPATCH(rnn_fn, rnn_tanh_miopen_stub);
+ DECLARE_DISPATCH(rnn_fn, rnn_relu_cudnn_stub);
+ DECLARE_DISPATCH(rnn_fn, rnn_relu_miopen_stub);
+ DECLARE_DISPATCH(lstm_packed_fn, lstm_packed_cudnn_stub);
+ DECLARE_DISPATCH(lstm_packed_fn, lstm_packed_miopen_stub);
+ DECLARE_DISPATCH(rnn_packed_fn, gru_packed_cudnn_stub);
+ DECLARE_DISPATCH(rnn_packed_fn, gru_packed_miopen_stub);
+ DECLARE_DISPATCH(rnn_packed_fn, rnn_tanh_packed_cudnn_stub);
+ DECLARE_DISPATCH(rnn_packed_fn, rnn_tanh_packed_miopen_stub);
+ DECLARE_DISPATCH(rnn_packed_fn, rnn_relu_packed_cudnn_stub);
+ DECLARE_DISPATCH(rnn_packed_fn, rnn_relu_packed_miopen_stub);
+
+ inline void check_attributes(const Tensor& input, const TensorList& params, const TensorList& hiddens, bool check_dtype=false) {
+   auto input_device = input.device();
+   auto input_dtype = input.scalar_type();
+
+   auto check_tensors = [&](const std::string& name, const Tensor& t) {
+     if (!t.defined()) return;
+     auto t_device = t.device();
+     TORCH_CHECK(input_device == t_device,
+                 "Input and ", name, " tensors are not at the same device, found input tensor at ",
+                 input_device, " and ", name, " tensor at ", t_device);
+     if (check_dtype) {
+       auto t_dtype = t.scalar_type();
+       TORCH_CHECK(input_dtype == t_dtype,
+                   "Input and ", name, " tensors are not the same dtype, found input tensor with ",
+                   input_dtype, " and ", name, " tensor with ", t_dtype);
+     }
+   };
+
+   for (const auto& h : hiddens) check_tensors("hidden", h);
+   for (const auto& p : params) check_tensors("parameter", p);
+ }
+
+ } // namespace at::native
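
A hedged usage sketch of check_attributes for callers about to launch a fused RNN kernel; it only validates device (and optionally dtype) agreement, so the shapes below are illustrative and assume a PyTorch source build where this internal header is visible:

    #include <ATen/ATen.h>
    #include <ATen/native/RNN.h>

    int main() {
      // Illustrative single-layer GRU operands: seq 5, batch 4, hidden 8.
      at::Tensor input = at::randn({5, 4, 8});
      at::Tensor hx    = at::randn({1, 4, 8});
      at::Tensor w_ih  = at::randn({24, 8});
      at::Tensor w_hh  = at::randn({24, 8});

      // Throws via TORCH_CHECK if any hidden/parameter tensor lives on a
      // different device than `input`, or differs in dtype when requested.
      at::native::check_attributes(input, {w_ih, w_hh}, {hx}, /*check_dtype=*/true);
    }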
venv/lib/python3.10/site-packages/torch/include/ATen/native/RangeFactories.h ADDED
@@ -0,0 +1,12 @@
+ #include <ATen/native/DispatchStub.h>
+ #include <c10/core/Scalar.h>
+
+ namespace at {
+ struct TensorIterator;
+
+ namespace native {
+
+ DECLARE_DISPATCH(void(*)(TensorIterator&, const Scalar&, const Scalar&, const Scalar&), arange_stub);
+ DECLARE_DISPATCH(void(*)(TensorIterator&, const Scalar&, const Scalar&, int64_t), linspace_stub);
+
+ }} // namespace at::native
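
These declarations are one half of the DispatchStub pattern; the definition and per-device registrations live in .cpp files. A sketch of the full pattern, assuming a PyTorch source build (the kernel name and body here are hypothetical; DEFINE_DISPATCH, REGISTER_DISPATCH, and arange_stub itself are real):

    #include <ATen/native/RangeFactories.h>
    #include <ATen/native/TensorIterator.h>

    namespace at::native {

    // Storage for the stub (normally in RangeFactories.cpp; defining it
    // again in another translation unit would clash at link time).
    DEFINE_DISPATCH(arange_stub);

    // A per-device kernel, normally compiled once per CPU capability.
    static void arange_kernel(TensorIterator& iter, const Scalar& start,
                              const Scalar& steps, const Scalar& step) {
      // ... fill iter's output with start, start + step, ...
    }
    REGISTER_DISPATCH(arange_stub, &arange_kernel);

    } // namespace at::native

    // A call site then dispatches on device type:
    //   at::native::arange_stub(at::kCPU, iter, start, steps, step);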
venv/lib/python3.10/site-packages/torch/include/ATen/native/ReduceAllOps.h ADDED
@@ -0,0 +1,16 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+
+ namespace at {
+ class Tensor;
+ }
+
+ namespace at::native {
+
+ using reduce_all_fn = void (*)(Tensor & result, const Tensor & self);
+ using reduce_min_max_fn = void (*)(Tensor & max_result, Tensor & min_result, const Tensor & self);
+ DECLARE_DISPATCH(reduce_all_fn, min_all_stub);
+ DECLARE_DISPATCH(reduce_all_fn, max_all_stub);
+
+ } // namespace at::native
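
On the call-site side, these stubs are invoked with an explicit device type. A hedged sketch of the pattern only (the real at::min/at::max entry points add more validation, e.g. rejecting empty inputs):

    #include <ATen/ATen.h>
    #include <ATen/native/ReduceAllOps.h>

    // Pattern only: dispatch an all-reduce kernel by device type.
    at::Tensor min_all_sketch(const at::Tensor& self) {
      at::Tensor result = at::empty({}, self.options());
      at::native::min_all_stub(self.device().type(), result, self);
      return result;
    }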
venv/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOps.h ADDED
@@ -0,0 +1,56 @@
+ #pragma once
+
+ #include <ATen/native/DispatchStub.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/Optional.h>
+
+ namespace c10 {
+ class Scalar;
+ }
+
+ namespace at {
+ struct TensorIterator;
+ class Tensor;
+ }
+
+ namespace at::native {
+
+ using reduce_fn = void(*)(TensorIterator &);
+
+ DECLARE_DISPATCH(reduce_fn, sum_stub);
+ DECLARE_DISPATCH(reduce_fn, nansum_stub);
+ DECLARE_DISPATCH(reduce_fn, prod_stub);
+ DECLARE_DISPATCH(reduce_fn, mean_stub);
+ DECLARE_DISPATCH(reduce_fn, and_stub);
+ DECLARE_DISPATCH(reduce_fn, or_stub);
+ DECLARE_DISPATCH(reduce_fn, min_values_stub);
+ DECLARE_DISPATCH(reduce_fn, max_values_stub);
+ DECLARE_DISPATCH(reduce_fn, argmax_stub);
+ DECLARE_DISPATCH(reduce_fn, argmin_stub);
+
+ using reduce_std_var_function =
+     void (*)(TensorIterator&, double correction, bool take_sqrt);
+ DECLARE_DISPATCH(reduce_std_var_function, std_var_stub);
+
+ using reduce_norm_fn =
+     void (*)(Tensor&, const Tensor&, const c10::Scalar&, c10::optional<int64_t>);
+ DECLARE_DISPATCH(reduce_norm_fn, norm_kernel);
+
+ using reduce_fn_flag = void(*)(TensorIterator &, const c10::Scalar&);
+ DECLARE_DISPATCH(reduce_fn_flag, norm_stub);
+
+ using structured_cum_fn = void (*)(const Tensor&, const Tensor&, int64_t);
+ using cum_fn = void (*)(Tensor&, const Tensor&, int64_t);
+ DECLARE_DISPATCH(structured_cum_fn, cumsum_stub);
+ DECLARE_DISPATCH(structured_cum_fn, cumprod_stub);
+ DECLARE_DISPATCH(cum_fn, logcumsumexp_stub);
+
+ DECLARE_DISPATCH(void (*)(const Tensor&, int64_t, bool, Tensor&, Tensor&), aminmax_stub);
+ DECLARE_DISPATCH(void (*)(const Tensor&, Tensor&, Tensor&), aminmax_allreduce_stub);
+
+ // Used in cuda/Normalization.cu
+ TORCH_API std::tuple<Tensor&,Tensor&> var_mean_out(
+     Tensor &result1, Tensor &result2, const Tensor &self, IntArrayRef dim,
+     int64_t correction, bool keepdim);
+
+ } // namespace at::native
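
The `correction` parameter of reduce_std_var_function selects the variance denominator N - correction (0 gives the population variance, 1 the Bessel-corrected sample variance). A standalone numeric sketch of that formula, independent of the stub machinery:

    #include <cassert>
    #include <cmath>
    #include <vector>

    // var = sum((x - mean)^2) / (N - correction); take_sqrt yields std.
    static double std_var_sketch(const std::vector<double>& x,
                                 double correction, bool take_sqrt) {
      const double n = static_cast<double>(x.size());
      double mean = 0.0;
      for (double v : x) mean += v;
      mean /= n;
      double ss = 0.0;
      for (double v : x) ss += (v - mean) * (v - mean);
      const double var = ss / (n - correction);
      return take_sqrt ? std::sqrt(var) : var;
    }

    int main() {
      std::vector<double> x{1.0, 2.0, 3.0, 4.0};
      assert(std_var_sketch(x, /*correction=*/0, false) == 1.25);         // population
      assert(std::abs(std_var_sketch(x, 1, false) - 5.0 / 3.0) < 1e-12);  // sample
    }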
venv/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOpsUtils.h ADDED
@@ -0,0 +1,449 @@
+ #pragma once
+
+ #include <limits>
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/Resize.h>
+ #include <ATen/native/TensorIterator.h>
+ #include <ATen/native/NonEmptyUtils.h>
+ #include <ATen/WrapDimUtilsMulti.h>
+ #include <c10/core/ScalarType.h>
+ #include <c10/util/irange.h>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/Functions.h>
+ #else
+ #include <ATen/ops/empty.h>
+ #include <ATen/ops/scalar_tensor.h>
+ #endif
+
+ namespace at::native {
+
+ // Maximum and minimum possible scalar values, including infinities
+ template <typename scalar_t>
+ constexpr scalar_t upper_bound() {
+   using lim = std::numeric_limits<scalar_t>;
+   return lim::has_infinity ? lim::infinity() : lim::max();
+ }
+
+ template <typename scalar_t>
+ constexpr scalar_t lower_bound() {
+   using lim = std::numeric_limits<scalar_t>;
+   return lim::has_infinity ? -lim::infinity() : lim::lowest();
+ }
+
+ static inline Tensor restride_dim(
+     const Tensor& src, int64_t dim,
+     IntArrayRef replacement_shape
+ ) {
+   auto strides = ensure_nonempty_vec(src.strides().vec());
+   strides[dim] = 0;
+   return src.as_strided(replacement_shape, strides);
+ }
+
+ inline void _dimreduce_setup(const Tensor &result, const Tensor &self,
+                              int64_t dim) {
+   IntArrayRef self_sizes = self.sizes();
+   std::vector<int64_t> result_sizes;
+   result_sizes.insert(result_sizes.end(), self_sizes.begin(), self_sizes.end());
+   result_sizes[dim] = 1;
+   result.resize_(result_sizes);
+ }
+
+ inline bool _dimreduce_return_trivial(const Tensor &result, const Tensor &self,
+                                       const Scalar& ident, int64_t dim, bool keepdim) {
+   if (self.numel() == 1 && self.ndimension() == 0) {
+     result.resize_({});
+     result.fill_(self);
+     return true;
+   }
+   // Return identity
+   if (self.numel() == 0) {
+     _dimreduce_setup(result, self, dim);
+     result.fill_(ident);
+     if (!keepdim) result.squeeze_(dim);
+     return true;
+   }
+   return false;
+ }
+
+ inline bool _dimreduce_return_trivial_no_ident(Tensor &result, const Tensor &self,
+                                                int64_t /*dim*/, bool /*keepdim*/, const char* /*fn_name*/) {
+   if (self.numel() == 1 && self.ndimension() == 0) {
+     result.resize_({});
+     result.fill_(self);
+     return true;
+   }
+
+   return false;
+ }
+
+ inline c10::optional<Tensor> _allreduce_return_trivial(
+     const Tensor& self,
+     const Scalar& ident) {
+   // Return identity
+   if (self.numel() == 0) {
+     return at::scalar_tensor(ident, self.options());
+   }
+   return c10::nullopt;
+ }
+
+ #define OPTION_TYPE_EQUALITY_CHECK(option, out, self) \
+ { \
+   TORCH_CHECK(\
+     out.option() == self.option(),\
+     "expected ", #option, " ",\
+     self.option(),\
+     " but found ", out.option())\
+ }
+
+ static inline void check_scalar_type_device_layout_equal(const Tensor& out, const Tensor& self) {
+   OPTION_TYPE_EQUALITY_CHECK(scalar_type, out, self);
+   OPTION_TYPE_EQUALITY_CHECK(device, out.options(), self.options());
+   OPTION_TYPE_EQUALITY_CHECK(layout, out.options(), self.options());
+ }
+
+ static inline Tensor integer_upcast(const Tensor& self, c10::optional<ScalarType> dtype) {
+   ScalarType scalarType = self.scalar_type();
+   TORCH_CHECK(!isBarebonesUnsignedType(scalarType), "integer upcasting for uint16, uint32 and uint64 is not currently implemented");
+   ScalarType upcast_scalarType = dtype.value_or(at::isIntegralType(scalarType, /*includeBool=*/true) ? ScalarType::Long : scalarType);
+   return self.toType(upcast_scalarType);
+ }
+
+ using DimMask = TensorIterator::DimMask;
+
+ static DimVector make_dim_vector(OptionalIntArrayRef opt_dims, int64_t ndim) {
+   if (opt_dims.has_value()) {
+     return DimVector(opt_dims.value());
+   } else {
+     std::vector<int64_t> all_dims(ndim);
+     std::iota(all_dims.begin(), all_dims.end(), 0);
+     return DimVector(all_dims);
+   }
+ }
+
+ static DimMask make_dim_mask(OptionalIntArrayRef opt_dims, int64_t ndim, bool allow_empty_dims=false) {
+   DimMask mask;
+   if (opt_dims.has_value()) {
+     auto dims = opt_dims.value();
+     if (dims.empty() && !allow_empty_dims) {
+       mask = DimMask().flip();
+     } else {
+       mask = at::dim_list_to_bitset(dims, ndim);
+     }
+   } else {
+     mask = DimMask().flip();
+   }
+   return mask;
+ }
+
+ inline DimVector shape_from_dim_mask(const Tensor& self, DimMask mask, bool keepdim) {
+   auto shape = DimVector(self.sizes());
+   for (int dim = shape.size() - 1; dim >= 0; dim--) {
+     if (mask[dim]) {
+       if (keepdim) {
+         shape[dim] = 1;
+       } else {
+         shape.erase(shape.begin() + dim);
+       }
+     }
+   }
+   return shape;
+ }
+
+ static void resize_reduction_result(
+     Tensor& result, const Tensor& self, DimMask mask, bool keepdim,
+     ScalarType /*dtype*/)
+ {
+   auto shape = shape_from_dim_mask(self, mask, keepdim);
+   TORCH_CHECK(result.defined(), "Cannot create a new tensor inside a reduction op. You likely tried to call an operator with an out argument but the out argument was an undefined tensor.");
+   at::native::resize_output(result, shape);
+ }
+
+ inline Tensor create_reduction_result(
+     const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, ScalarType dtype
+ ) {
+   DimMask mask = make_dim_mask(dim, self.dim());
+   auto shape = shape_from_dim_mask(self, mask, keepdim);
+   return at::empty(shape, self.options().dtype(dtype));
+ }
+
+ static Tensor review_reduce_result(const Tensor& result, int ndim, DimMask mask, bool keepdim) {
+   if (keepdim) {
+     return result;
+   }
+   auto shape = DimVector(result.sizes());
+   auto stride = DimVector(result.strides());
+   for (const auto dim : c10::irange(ndim)) {
+     if (mask[dim]) {
+       shape.insert(shape.begin() + dim, 1);
+       stride.insert(stride.begin() + dim, 0);
+     }
+   }
+   return result.as_strided(shape, stride);
+ }
+
+ static TensorIterator make_reduction(
+     const char* name, Tensor& result, const Tensor& self,
+     at::OptionalIntArrayRef dim_opt,
+     bool keepdim, ScalarType in_dtype, ScalarType out_dtype) {
+   // check that result type and dtype match if provided
+   TORCH_CHECK(
+       !result.defined() || result.scalar_type() == out_dtype,
+       name, ": provided dtype must match dtype of result. Got ",
+       toString(result.scalar_type()),
+       " and ",
+       toString(out_dtype),
+       ".");
+   // dim={} performs an all-reduce, same as dim=None
+   IntArrayRef dim = dim_opt.value_or(IntArrayRef{});
+   int64_t ndim = self.dim();
+   auto mask = make_dim_mask(dim, ndim);
+   resize_reduction_result(result, self, mask, keepdim, out_dtype);
+   auto viewed_result = review_reduce_result(result, ndim, mask, keepdim);
+   namedinference::propagate_names_for_reduction(result, self, dim, keepdim);
+   if (self.scalar_type() == in_dtype) {
+     return TensorIterator::reduce_op(viewed_result, self);
+   }
+   return TensorIterator::reduce_op(viewed_result, self.to(in_dtype));
+ }
+
+ static C10_UNUSED TensorIterator make_reduction(
+     const char* name, Tensor& result, const Tensor& self,
+     at::OptionalIntArrayRef dim, bool keepdim, ScalarType out_dtype) {
+   // special case for type promotion in mixed precision, improves computational
+   // efficiency.
+   // We don't generalize this to common mismatched input/output types to avoid
+   // a cross product of templated kernel launches.
+   const bool gpu_lowp_to_f32 = (
+     self.is_cuda() && (self.scalar_type() == kHalf || self.scalar_type() == kBFloat16) && out_dtype == kFloat);
+   auto in_dtype = gpu_lowp_to_f32 ? self.scalar_type()
+                                   : self.is_complex() ? c10::toComplexType(out_dtype)
+                                                       : out_dtype;
+   return make_reduction(name, result, self, dim, keepdim, in_dtype, out_dtype);
+ }
+
+ static TensorIterator make_reduction(
+     const char* name, Tensor& result1, Tensor& result2, const Tensor& self,
+     at::OptionalIntArrayRef dim_opt, bool keepdim, ScalarType dtype1,
+     ScalarType dtype2) {
+   // check that result type and dtype match if provided
+   TORCH_CHECK(
+     (!result1.defined() || result1.scalar_type() == dtype1) && (!result2.defined() || result2.scalar_type() == dtype2),
+     name, ": provided dtype must match dtype of result. Got ",
+     toString(result1.scalar_type()), toString(result2.scalar_type()),
+     " and ",
+     toString(dtype1), toString(dtype2),
+     ".");
+
+   // dim={} performs an all-reduce, same as dim=None
+   auto dim = dim_opt.value_or(IntArrayRef{});
+   int64_t ndim = self.dim();
+   DimMask mask = make_dim_mask(dim, ndim);
+   resize_reduction_result(result1, self, mask, keepdim, dtype1);
+   auto viewed_result1 = review_reduce_result(result1, ndim, mask, keepdim);
+
+   resize_reduction_result(result2, self, mask, keepdim, dtype2);
+   auto viewed_result2 = review_reduce_result(result2, ndim, mask, keepdim);
+
+   namedinference::propagate_names_for_reduction(result1, self, dim, keepdim);
+   namedinference::propagate_names_for_reduction(result2, self, dim, keepdim);
+
+   // special case for type promotion in mixed precision, improves computational
+   // efficiency.
+   // We don't generalize this to common mismatched input/output types to avoid cross
+   // product of templated kernel launches.
+   if (self.scalar_type() == dtype1 ||
+       (self.is_cuda() && self.scalar_type() == kHalf && dtype1 == kFloat)) {
+     return TensorIterator::reduce_op(viewed_result1, viewed_result2, self);
+   }
+   return TensorIterator::reduce_op(viewed_result1, viewed_result2, self.to(dtype1));
+ }
+
+ static C10_UNUSED TensorIterator make_reduction(
+     const char* name, Tensor& result1, Tensor& result2, const Tensor& self,
+     at::OptionalIntArrayRef dim, bool keepdim, ScalarType dtype) {
+   return make_reduction(name, result1, result2, self, dim, keepdim, dtype, dtype);
+ }
+
+ static void zero_numel_check_dims(const Tensor& self, const int64_t dim, const char *fn_name) {
+   if (self.ndimension() == 0) {
+     TORCH_CHECK_INDEX(dim == 0 || dim == -1, fn_name,
+       ": Expected reduction dim -1 or 0 for scalar but got ", dim);
+   }
+   else {
+     TORCH_CHECK_INDEX(self.size(dim) != 0, fn_name,
+       ": Expected reduction dim ", dim, " to have non-zero size.");
+   }
+ }
+
+ static void zero_numel_check_dims(const Tensor& self, const IntArrayRef dim, const char *fn_name) {
+   TORCH_CHECK(
+     !dim.empty(),
+     fn_name, ": Expected reduction dim to be specified for input.numel() == 0. ",
+     "Specify the reduction dim with the 'dim' argument.");
+   for (const int64_t d : dim) {
+     zero_numel_check_dims(self, d, fn_name);
+   }
+ }
+
+ static std::vector<int64_t> get_zero_numel_tensor_size(
+     const Tensor& self,
+     const int64_t dim,
+     const bool keepdim,
+     const char* fn_name) {
+   TORCH_INTERNAL_ASSERT(self.numel() == 0, fn_name, ": Expected self.numel() == 0.");
+   zero_numel_check_dims(self, dim, fn_name);
+   std::vector<int64_t> sizes;
+   if (keepdim) {
+     sizes = self.sizes().vec();
+     sizes[dim] = 1;
+   }
+   else {
+     for (const auto d : c10::irange(self.dim())) {
+       if (d != dim) {
+         sizes.push_back(self.sizes()[d]);
+       }
+     }
+   }
+   return sizes;
+ }
+
+ // Resize the result tensor and indices when result.numel() == 0 depending on values of
+ // dim and keepdim for returning tensors containing reduction results.
+ // This function should be called when you are reducing a zero-numel tensor and want to
+ // resize the output and return it. This function exists for resizing zero-numel
+ // tensors when the size of the reduction dimension is non-zero.
+ static C10_UNUSED void zero_numel_tensor_resize(Tensor& result, Tensor& result_indices,
+                                                 const Tensor& self, const int64_t dim,
+                                                 const bool keepdim, const char *fn_name) {
+   auto sizes = get_zero_numel_tensor_size(self, dim, keepdim, fn_name);
+   at::native::resize_output(result, sizes);
+   at::native::resize_output(result_indices, sizes);
+ }
+
+ inline ScalarType get_dtype_from_self(
+     const Tensor& self,
+     const c10::optional<ScalarType>& dtype,
+     bool promote_integers) {
+   if (dtype.has_value()) {
+     return dtype.value();
+   }
+   ScalarType src_type = self.scalar_type();
+   if (promote_integers && at::isIntegralType(src_type, /*includeBool=*/true)) {
+     return kLong;
+   }
+   return src_type;
+ }
+
+ inline ScalarType get_dtype_from_result(Tensor& result, c10::optional<ScalarType> dtype) {
+   TORCH_CHECK(result.defined(), "Cannot create a new tensor inside a reduction op. You likely tried to call an operator with an out argument but the out argument was an undefined tensor.");
+   if (dtype.has_value()) {
+     return dtype.value();
+   } else {
+     return result.scalar_type();
+   }
+ }
+
+
+ } // namespace at::native
+
+ namespace at::meta {
+
+ static C10_UNUSED DimVector get_reduction_shape(
+     const Tensor& self,
+     IntArrayRef dims,
+     bool keepdim,
+     bool allow_empty_dims=false) {
+   auto mask = native::make_dim_mask(dims, self.dim(), allow_empty_dims);
+   return native::shape_from_dim_mask(self, mask, keepdim);
+ }
+
+ static void resize_reduction(
+     impl::MetaBase& meta,
+     const Tensor& self,
+     OptionalIntArrayRef opt_dims,
+     bool keepdim,
+     ScalarType out_dtype,
+     bool allow_empty_dims=false) {
+   DimVector dims_ = at::native::make_dim_vector(opt_dims, self.dim());
+   maybe_wrap_dims(dims_, self.dim());
+   auto shape = get_reduction_shape(self, dims_, keepdim, allow_empty_dims);
+   meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype));
+   namedinference::propagate_names_for_reduction(
+       meta.maybe_get_output(), self, dims_, keepdim);
+ }
+
+ static void resize_reduction_with_indices(
+     impl::MetaBase& meta,
+     const Tensor& self,
+     IntArrayRef dims,
+     bool keepdim,
+     ScalarType out_dtype) {
+   DimVector dims_(dims);
+   maybe_wrap_dims(dims_, self.dim());
+   auto shape = get_reduction_shape(self, dims_, keepdim);
+   meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype));
+   meta.set_output_raw_strided(1, shape, {}, self.options().dtype(kLong));
+   namedinference::propagate_names_for_reduction(
+       meta.maybe_get_output(0), self, dims_, keepdim);
+   namedinference::propagate_names_for_reduction(
+       meta.maybe_get_output(1), self, dims_, keepdim);
+ }
+
+ static TensorIterator make_reduction(
+     const Tensor& self,
+     const Tensor& result,
+     OptionalIntArrayRef opt_dims,
+     bool keepdim,
+     ScalarType in_dtype) {
+   int64_t ndim = self.dim();
+   auto mask = at::native::make_dim_mask(opt_dims, ndim);
+   auto viewed_result =
+       at::native::review_reduce_result(result, ndim, mask, keepdim);
+   if (self.scalar_type() == in_dtype) {
+     return TensorIterator::reduce_op(viewed_result, self);
+   }
+   return TensorIterator::reduce_op(viewed_result, self.to(in_dtype));
+ }
+
+ static TensorIterator make_reduction(
+     const Tensor& self,
+     const Tensor& result1,
+     const Tensor& result2,
+     IntArrayRef dims,
+     bool keepdim,
+     ScalarType dtype1,
+     ScalarType /*dtype2*/) {
+   int64_t ndim = self.dim();
+   auto mask = at::native::make_dim_mask(dims, ndim);
+   auto viewed_result1 = at::native::review_reduce_result(result1, ndim, mask, keepdim);
+   auto viewed_result2 = at::native::review_reduce_result(result2, ndim, mask, keepdim);
+   // special case for type promotion in mixed precision, improves computational efficiency.
+   // We don't generalize this to common mismatched input/output types to avoid cross product
+   // of templated kernel launches.
+   if (self.scalar_type() == dtype1 ||
+       (self.is_cuda() && self.scalar_type() == kHalf && dtype1 == kFloat)) {
+     return TensorIterator::reduce_op(viewed_result1, viewed_result2, self);
+   }
+   return TensorIterator::reduce_op(viewed_result1, viewed_result2, self.to(dtype1));
+ }
+
+ static C10_UNUSED TensorIterator make_reduction_from_out_ty(
+     const Tensor& self,
+     const Tensor& result,
+     OptionalIntArrayRef opt_dims,
+     bool keepdim,
+     ScalarType out_dtype) {
+   // special case for type promotion in mixed precision, improves computational
+   // efficiency.
+   // We don't generalize this to common mismatched input/output types to avoid
+   // a cross product of templated kernel launches.
+   const bool gpu_lowp_to_f32 =
+       (self.is_cuda() &&
+        (self.scalar_type() == kHalf || self.scalar_type() == kBFloat16) &&
+        out_dtype == kFloat);
+   auto in_dtype = gpu_lowp_to_f32 ? self.scalar_type() : out_dtype;
+   return make_reduction(self, result, opt_dims, keepdim, in_dtype);
+ }
+
+ } // namespace at::meta
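
The mask/shape helpers above are the core of output-shape inference for reductions. A standalone sketch of the shape_from_dim_mask logic with plain STL types, showing how keepdim affects the result for a [2, 3, 4] input reduced over dim 1:

    #include <bitset>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Mirrors shape_from_dim_mask: reduced dims collapse to 1 under keepdim,
    // otherwise they are erased (back to front, so indices stay valid).
    static std::vector<int64_t> reduced_shape(std::vector<int64_t> shape,
                                              const std::bitset<64>& mask,
                                              bool keepdim) {
      for (int dim = static_cast<int>(shape.size()) - 1; dim >= 0; dim--) {
        if (mask[dim]) {
          if (keepdim) {
            shape[dim] = 1;
          } else {
            shape.erase(shape.begin() + dim);
          }
        }
      }
      return shape;
    }

    int main() {
      std::bitset<64> mask;
      mask.set(1);  // reduce over dim 1
      assert((reduced_shape({2, 3, 4}, mask, /*keepdim=*/true)  == std::vector<int64_t>{2, 1, 4}));
      assert((reduced_shape({2, 3, 4}, mask, /*keepdim=*/false) == std::vector<int64_t>{2, 4}));
    }

review_reduce_result then undoes the erase for the kernel's benefit: a [2, 4] result is re-viewed as [2, 1, 4] with stride 0 in the reduced dimension, so reduction kernels can always assume a keepdim layout.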
venv/lib/python3.10/site-packages/torch/include/ATen/native/ReductionType.h ADDED
@@ -0,0 +1,40 @@
+ #pragma once
+
+ #include <c10/core/Scalar.h>
+
+ namespace at::native {
+
+ enum class ReductionType {MAX, MEAN, MIN, SUM, PROD};
+
+ static inline ReductionType get_reduction_enum(const c10::string_view& reduce) {
+   if (reduce == "max" || reduce == "amax") {
+     return ReductionType::MAX;
+   } else if (reduce == "mean") {
+     return ReductionType::MEAN;
+   } else if (reduce == "min" || reduce == "amin") {
+     return ReductionType::MIN;
+   } else if (reduce == "sum") {
+     return ReductionType::SUM;
+   } else if (reduce == "prod") {
+     return ReductionType::PROD;
+   } else {
+     TORCH_CHECK(false, "reduce argument must be either sum, prod, mean, amax or amin, got ", reduce);
+   }
+ }
+
+ // used for `scatter_reduce`, old options for BC.
+ static inline ReductionType get_operator_enum(const c10::string_view reduce, bool use_new_options) {
+   if (use_new_options) {
+     return get_reduction_enum(reduce);
+   } else {
+     if (reduce == "add") {
+       return ReductionType::SUM;
+     } else if (reduce == "multiply") {
+       return ReductionType::PROD;
+     } else {
+       TORCH_CHECK(false, "reduce argument must be either add or multiply.")
+     }
+   }
+ }
+
+ } // at::native
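
A small usage sketch, assuming a PyTorch source build where this internal header is visible:

    #include <ATen/native/ReductionType.h>
    #include <cassert>

    int main() {
      using at::native::ReductionType;

      assert(at::native::get_reduction_enum("amax") == ReductionType::MAX);
      assert(at::native::get_reduction_enum("mean") == ReductionType::MEAN);

      // Old scatter_reduce spellings, kept for backward compatibility:
      assert(at::native::get_operator_enum("add", /*use_new_options=*/false) == ReductionType::SUM);
      assert(at::native::get_operator_enum("multiply", false) == ReductionType::PROD);
    }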
venv/lib/python3.10/site-packages/torch/include/ATen/native/Repeat.h ADDED
@@ -0,0 +1,48 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/TensorOperators.h>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/Functions.h>
+ #else
+ #include <ATen/ops/empty.h>
+ #include <ATen/ops/empty_like.h>
+ #endif
+
+ namespace at::native {
+
+ template <
+     typename index_t,
+     void compute(index_t*, int64_t*, index_t*, int64_t, int64_t)>
+ static inline Tensor repeat_interleave_common(
+     const Tensor& repeats,
+     c10::optional<int64_t> output_size) {
+   TORCH_CHECK(
+       repeats.dim() == 1, "repeat_interleave only accept 1D vector as repeat");
+   TORCH_CHECK(
+       repeats.scalar_type() == at::kLong || repeats.scalar_type() == at::kInt,
+       "repeats has to be Long or Int tensor");
+   if (repeats.size(0) == 0) {
+     return at::empty_like(repeats, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
+   }
+   Tensor repeats_ = repeats.contiguous();
+   Tensor cumsum = repeats.cumsum(0);
+   int64_t total;
+   if (output_size.has_value()) {
+     total = output_size.value();
+   } else {
+     total = cumsum[-1].item<int64_t>();
+     TORCH_CHECK(
+         (repeats >= 0).all().item<uint8_t>(), "repeats can not be negative");
+   }
+
+   Tensor result = at::empty({total}, repeats.options());
+   index_t* repeat_ptr = repeats_.data_ptr<index_t>();
+   int64_t* cumsum_ptr = cumsum.data_ptr<int64_t>();
+   index_t* result_ptr = result.data_ptr<index_t>();
+   compute(repeat_ptr, cumsum_ptr, result_ptr, repeats.size(0), total);
+   return result;
+ }
+
+ } // namespace at::native
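
repeat_interleave_common leaves the actual fill step to the `compute` template parameter. Its contract: for each i, write the value i into result[cumsum[i-1] .. cumsum[i]). A standalone sketch with plain arrays (helper name hypothetical; the real kernels parallelize this loop and take non-const pointers):

    #include <cassert>
    #include <cstdint>

    // Fills result[cumsum[i-1] .. cumsum[i]) with i, for each i < size.
    static void repeat_compute_sketch(const int64_t* repeats, const int64_t* cumsum,
                                      int64_t* result, int64_t size, int64_t /*total*/) {
      for (int64_t i = 0; i < size; i++) {
        const int64_t start = (i == 0) ? 0 : cumsum[i - 1];
        for (int64_t j = 0; j < repeats[i]; j++) {
          result[start + j] = i;
        }
      }
    }

    int main() {
      const int64_t repeats[] = {2, 0, 3};
      const int64_t cumsum[]  = {2, 2, 5};  // inclusive prefix sums of repeats
      int64_t result[5];
      repeat_compute_sketch(repeats, cumsum, result, 3, 5);
      const int64_t expected[] = {0, 0, 2, 2, 2};
      for (int k = 0; k < 5; k++) assert(result[k] == expected[k]);
    }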
venv/lib/python3.10/site-packages/torch/include/ATen/native/Resize.h ADDED
@@ -0,0 +1,173 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/ResizeCommon.h>
+ #include <ATen/EmptyTensor.h>
+ #include <ATen/TensorUtils.h>
+
+ #include <c10/core/CPUAllocator.h>
+
+ #include <utility>
+
+
+ namespace at::native {
+
+ // TODO: make all operations that resize given outputs use this function
+ // for consistency and maintainability.
+ // Some operations like `cat` might not be able to make the use of
+ // resize_output directly. For more details to understand how it works in `cat`,
+ // see https://github.com/pytorch/pytorch/pull/62560#discussion_r687363362
+ // Resizes outputs
+ // Functions accepting output tensors, like with the "out" kwarg, should
+ // call this function to handle resizing their output tensor.
+ // Issues a warning if the output tensor has one or more elements and
+ // needs resizing
+ // NOTE: In the future the warning will become an error
+ // Returns a bool saying whether or not the resize actually happened or not
+ TORCH_API bool resize_output(const Tensor& output, IntArrayRef shape);
+ // WARNING: Do NOT call this directly. If you are resizing an output and want
+ // to support dynamic shapes call at::resize__symint and resize_output_check_symint.
+ // For more details, see: https://github.com/pytorch/pytorch/pull/111530/files#r1365845272
+ TORCH_API bool resize_output_symint(const Tensor& output, SymIntArrayRef shape);
+
+ // Utility for resize_output
+ // Returns a bool saying resize should happen or not and
+ // raises a warning if resizing for one or more elements
+ TORCH_API bool resize_output_check(const Tensor& output, IntArrayRef shape);
+ TORCH_API bool resize_output_check_symint(const Tensor& output, SymIntArrayRef shape);
+
+ TORCH_API void resize_bytes_cpu(StorageImpl* storage, size_t size_bytes);
+ TORCH_API void resize_bytes_meta(StorageImpl* storage, c10::SymInt size_bytes);
+ TORCH_API void resize_bytes_nocuda(const Storage& storage, c10::SymInt size_bytes);
+
+ static inline void maybe_resize_storage_cpu(TensorImpl* self, size_t new_size_bytes) {
+   // It does not make sense to try to resize a storage
+   // to hold 0 elements, and this can break
+   // if storage_offset is positive but
+   // new_size is 0, so just bail in that case
+   // (same comment is in cuda/Resize.h)
+   if (self->numel() == 0) {
+     return;
+   }
+
+   const Storage& storage = self->unsafe_storage();
+   if (!storage) {
+     auto new_storage = c10::make_intrusive<StorageImpl>(
+         StorageImpl::use_byte_size_t(),
+         new_size_bytes,
+         c10::GetCPUAllocator(),
+         true);
+     self->set_storage_keep_dtype(std::move(new_storage));
+   } else if (new_size_bytes > storage.nbytes()) {
+     resize_bytes_cpu(storage.unsafeGetStorageImpl(), new_size_bytes);
+   }
+ }
+
+ TORCH_API TensorImpl* resize_impl_cpu_(
+     TensorImpl* self,
+     IntArrayRef size,
+     at::OptionalIntArrayRef stride,
+     bool resize_storage = true);
+
+ template <typename T>
+ T maybe_convert_symint(c10::SymInt) = delete;
+
+ template <>
+ inline c10::SymInt maybe_convert_symint(c10::SymInt x) { return x; }
+
+ template <>
+ inline int64_t maybe_convert_symint(c10::SymInt x) { return x.guard_int(__FILE__, __LINE__); }
+
+ template <typename T>
+ static inline void checkInBoundsForStorage(
+     ArrayRef<T> size,
+     ArrayRef<T> stride,
+     T storage_offset,
+     const caffe2::TypeMeta& data_type,
+     const Storage& new_storage) {
+   T storage_size_bytes =
+       at::detail::computeStorageNbytes(size, stride, data_type.itemsize());
+   T storage_offset_bytes = storage_offset * data_type.itemsize();
+   if (storage_size_bytes == 0) {
+     // NB: (a tensor with arbitrary 0 dims)'s storage can have any numel.
+     return;
+   }
+   T new_storage_size_bytes = maybe_convert_symint<T>(new_storage.sym_nbytes());
+   TORCH_CHECK(
+       storage_size_bytes + storage_offset_bytes <= new_storage_size_bytes,
+       "setStorage: sizes ",
+       size,
+       ", strides ",
+       stride,
+       ","
+       " storage offset ",
+       storage_offset,
+       ", and itemsize ",
+       data_type.itemsize(),
+       " requiring a storage size of ",
+       storage_size_bytes + storage_offset_bytes,
+       " are out of bounds for storage of size ",
+       new_storage_size_bytes);
+ }
+
+ template <typename T>
+ static inline void checkSetStorage(Tensor& result, Storage storage, T storage_offset,
+                                    ArrayRef<T> size, ArrayRef<T> stride) {
+   // FIXME: stride should be optional
+   if (stride.data()) {
+     TORCH_CHECK(size.size() == stride.size(), "unequal size length (", size.size(),
+                 ") and stride length (", stride.size(), ")");
+   }
+
+ #ifdef DEBUG
+   TORCH_CHECK(size.size() <= INT_MAX, "size length (", size.size(), ") greater than INT_MAX");
+ #endif
+
+   // storage: note this can't be replaced with result.set_(storage) as the semantics of that
+   // function is to set the tensor size to be equal to the size of the storage.
+   if (!result.storage().is_alias_of(storage)) {
+     // Caffe2 might have tensors whose storages are null, but we
+     // don't allow it in PyTorch.
+     TORCH_INTERNAL_ASSERT(storage);
+     TORCH_INTERNAL_ASSERT(result.storage());
+
+     // We used to allow this, but this breaks device caching.
+     // Let's put an actual error message for this one.
+     TORCH_CHECK(result.storage().device() == storage.device(),
+                 "Attempted to set the storage of a tensor on device \"", result.storage().device(),
+                 "\" to a storage on different device \"", storage.device(),
+                 "\". This is no longer allowed; the devices must match.");
+     result.unsafeGetTensorImpl()->set_storage_keep_dtype(std::move(storage));
+   }
+
+   // storageOffset
+   TORCH_CHECK(storage_offset >= 0, "Tensor: invalid storage offset ", storage_offset);
+ }
+
+ /**
+  * Set self's sizes, strides, and storage_offset.
+  * (size, stride, storage_offset) must be in bounds for self's storage.
+  */
+ template <typename T>
+ inline void setStrided(
+     const Tensor& self,
+     ArrayRef<T> size,
+     ArrayRef<T> stride,
+     T storage_offset) {
+   TORCH_CHECK(size.size() == stride.size(), "mismatch in length of strides and shape");
+   for (const auto& val : stride) {
+     TORCH_CHECK(val >= 0,
+                 "as_strided: Negative strides are not supported at the moment, "
+                 "got strides: ", stride);
+   }
+
+   auto* self_ = self.unsafeGetTensorImpl();
+   checkInBoundsForStorage(
+       size, stride, storage_offset, self_->dtype(), self_->storage());
+
+   /* storage offset */
+   TORCH_CHECK(storage_offset >= 0, "Tensor: invalid storage offset ", storage_offset);
+   self_->set_sizes_and_strides(size, stride, c10::make_optional(storage_offset));
+ }
+
+ } // namespace at::native
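
The bound enforced by checkInBoundsForStorage is simply storage_size_bytes + storage_offset_bytes <= new_storage_nbytes, where the first term comes from the largest linear index any (size, stride) element can reach. A standalone sketch of that arithmetic (helper name hypothetical):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Bytes needed so every element addressed by (size, stride, offset) is in
    // bounds: (offset + 1 + sum((size[d]-1)*stride[d])) * itemsize.
    static int64_t required_nbytes(const std::vector<int64_t>& size,
                                   const std::vector<int64_t>& stride,
                                   int64_t storage_offset, int64_t itemsize) {
      int64_t max_index = 0;
      for (size_t d = 0; d < size.size(); d++) {
        if (size[d] == 0) return 0;  // empty tensor: any storage passes the check
        max_index += (size[d] - 1) * stride[d];
      }
      return (storage_offset + max_index + 1) * itemsize;
    }

    int main() {
      // Contiguous 2x3 float tensor with storage offset 2: the last element
      // sits at linear index 2 + (1*3 + 2*1) = 7, so 8 floats = 32 bytes.
      assert(required_nbytes({2, 3}, {3, 1}, /*storage_offset=*/2, /*itemsize=*/4) == 32);
    }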
venv/lib/python3.10/site-packages/torch/include/ATen/native/ResizeCommon.h ADDED
@@ -0,0 +1,75 @@
+ #pragma once
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/TensorFactories.h>
+ #include <ATen/NamedTensorUtils.h>
+ #include <c10/util/irange.h>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/NativeFunctions.h>
+ #else
+ #include <ATen/ops/empty.h>
+ #endif
+
+ namespace at::native {
+
+ template <typename T>
+ inline T storage_size_for(ArrayRef<T> size, ArrayRef<T> stride) {
+   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(size.size() == stride.size(),
+       "storage_size_for(size, stride) requires that size and stride ",
+       "have the same size as a precondition.");
+   T storage_size = 1;
+   for (const auto dim : c10::irange(size.size())) {
+     if (size[dim] == 0) {
+       storage_size = 0;
+       break;
+     }
+     storage_size += (size[dim] - 1) * stride[dim];
+   }
+   return storage_size;
+ }
+
+ inline const Tensor& resize_named_tensor_(
+     const Tensor& self,
+     IntArrayRef size,
+     c10::optional<MemoryFormat> optional_memory_format) {
+   TORCH_INTERNAL_ASSERT(self.has_names());
+   TORCH_CHECK(
+       self.sizes() == size,
+       "Cannot resize named tensor with resize_ or resize_as_ (tried to resize "
+       "Tensor",
+       self.names(),
+       " with size ",
+       self.sizes(),
+       " to ",
+       size,
+       "). This may be caused by passing a named tensor ",
+       "as an `out=` argument; please ensure that the sizes are the same. ");
+   TORCH_CHECK(
+       !optional_memory_format.has_value(),
+       "Unsupported memory format for named tensor resize ",
+       optional_memory_format.value());
+   return self;
+ }
+
+ // For deterministic output, fill new elements that were added after a storage
+ // resize with NaN or MAX_INT. `old_storage_nbytes` is the size of the storage
+ // before the resize happened.
+ inline const Tensor& fill_resize_deterministic_(const Tensor& tensor, int64_t old_storage_nbytes) {
+   const at::Storage& storage = tensor.unsafeGetTensorImpl()->unsafe_storage();
+   int64_t new_storage_nbytes = storage.nbytes();
+   int64_t old_storage_numel = old_storage_nbytes / tensor.itemsize();
+   int64_t new_storage_numel = new_storage_nbytes / tensor.itemsize();
+   if (new_storage_numel > old_storage_numel) {
+     at::Tensor tensor_view = at::empty({}, at::TensorOptions().dtype(tensor.scalar_type()).device(tensor.device()));
+     tensor_view.set_(
+         storage,
+         /*storage_offset=*/old_storage_numel,
+         /*size=*/{new_storage_numel - old_storage_numel},
+         /*stride=*/{1});
+     at::native::fill_empty_deterministic_(tensor_view);
+   }
+   return tensor;
+ }
+
+ } // namespace at::native
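
A small usage sketch of storage_size_for, assuming a PyTorch source build; it returns the minimal element count backing a (size, stride) pair and short-circuits to 0 when any dimension is empty:

    #include <ATen/native/ResizeCommon.h>
    #include <cassert>

    int main() {
      using at::native::storage_size_for;
      // Contiguous 2x3: 1 + (2-1)*3 + (3-1)*1 = 6 elements.
      assert(storage_size_for<int64_t>({2, 3}, {3, 1}) == 6);
      // Broadcast-style stride-0 rows share one row of storage:
      assert(storage_size_for<int64_t>({4, 3}, {0, 1}) == 3);
      // Any zero-sized dimension means no storage is needed at all:
      assert(storage_size_for<int64_t>({0, 3}, {3, 1}) == 0);
    }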
venv/lib/python3.10/site-packages/torch/include/ATen/native/ScatterGatherChecks.h ADDED
@@ -0,0 +1,128 @@
+ #pragma once
+
+ #include <vector>
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/ReduceOpsUtils.h>
+ #include <c10/util/irange.h>
+
+ namespace at::native {
+
+ namespace {
+
+ // checks whether index.dtype == int64
+ // and self.dtype == src.dtype if src is a Tensor
+ static void scatter_gather_dtype_check(
+   const std::string& method_name,
+   const Tensor& self,
+   const Tensor& index,
+   const c10::optional<Tensor>& src_opt = c10::nullopt
+ ) {
+   if (index.numel() != 0) {
+     TORCH_CHECK(
+       index.scalar_type() == at::ScalarType::Long,
+       method_name, "(): Expected dtype int64 for index"
+     );
+   }
+
+   if (src_opt.has_value()) {
+     const auto& src = src_opt.value();
+     TORCH_CHECK(
+       self.scalar_type() == src.scalar_type(),
+       method_name, "(): Expected self.dtype to be equal to src.dtype"
+     );
+   }
+ }
+
+ // Used for `gather`-like methods
+ // Note: self means the input tensor here
+ // Test:
+ // 1. index.size(d) <= self.size(d) for all d != dim
+ // 2. index.dim() == self.dim()
+ static C10_UNUSED void gather_shape_check(const Tensor& self, int64_t dim,
+   const Tensor& index
+ ) {
+   auto self_dims = ensure_nonempty_dim(self.dim());
+   TORCH_CHECK(self_dims == ensure_nonempty_dim(index.dim()),
+     "Index tensor must have the same number of dimensions as input tensor"
+   );
+
+   for (const auto i : c10::irange(self_dims)) {
+     if (i != dim) {
+       TORCH_CHECK(
+         ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i),
+         "Size does not match at dimension ", i,
+         " expected index ", index.sizes(),
+         " to be smaller than self ", self.sizes(),
+         " apart from dimension ", dim
+       );
+     }
+   }
+ }
+
+ // Used for `scatter` and `scatter_add`
+ // Tests:
+ // 1. index.size(d) <= self.size(d) for all d != dim
+ // 2. index.size(d) <= src.size(d) for all d if src is a Tensor
+ // 3. index.dim() == self.dim() == src.dim()
+ static C10_UNUSED void scatter_shape_check(
+   const Tensor& self, int64_t dim, const Tensor& index,
+   const c10::optional<Tensor>& src_opt = c10::nullopt
+ ) {
+   if (index.numel() == 0) return;
+   TORCH_CHECK(
+     ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
+     "Index tensor must have the same number of dimensions as self tensor"
+   );
+
+   bool is_wrong_shape = false;
+   int64_t self_dims = ensure_nonempty_dim(self.dim());
+
+   // Check: index.size(d) <= self.size(d) for all d != dim
+   for (const auto d : c10::irange(self_dims)) {
+     int64_t index_d_size = ensure_nonempty_size(index, d);
+     if (d == dim) continue;
+     if (index_d_size > ensure_nonempty_size(self, d)) {
+       is_wrong_shape = true;
+       break;
+     }
+   }
+
+   // Check: index.size(d) <= src.size(d) for all d if src is Tensor
+   if (!is_wrong_shape && src_opt.has_value()) {
+     const auto& src = src_opt.value();
+     for (const auto d : c10::irange(self_dims)) {
+       int64_t index_d_size = ensure_nonempty_size(index, d);
+       if (index_d_size > ensure_nonempty_size(src, d)) {
+         is_wrong_shape = true;
+         break;
+       }
+     }
+   }
+
+   if (src_opt.has_value()) {
+     const auto& src = src_opt.value();
+
+     TORCH_CHECK(
+       ensure_nonempty_dim(src.dim()) == ensure_nonempty_dim(index.dim()),
+       "Index tensor must have the same number of dimensions as src tensor"
+     );
+
+     TORCH_CHECK(!is_wrong_shape,
+       "Expected index ", index.sizes(),
+       " to be smaller than self ", self.sizes(),
+       " apart from dimension ", dim,
+       " and to be smaller size than src ", src.sizes()
+     );
+   }
+   else {
+     TORCH_CHECK(!is_wrong_shape,
+       "Expected index ", index.sizes(),
+       " to be smaller than self ", self.sizes(),
+       " apart from dimension ", dim
+     );
+   }
+ }
+
+ } // anonymous namespace
+
+ } // namespace at::native
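
A concrete reading of the gather constraints above: index must have the same rank as self, and index.size(d) <= self.size(d) for every d != dim. A hedged sketch through the public at::gather entry point (which runs gather_shape_check internally), assuming a PyTorch build:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor self = at::randn({4, 5});

      // OK along dim=1: index is [4, 2]; dim 0 satisfies 4 <= 4.
      at::Tensor idx_ok = at::zeros({4, 2}, at::kLong);
      at::Tensor out = at::gather(self, /*dim=*/1, idx_ok);

      // Rejected: index.size(0) = 6 > self.size(0) = 4.
      at::Tensor idx_bad = at::zeros({6, 2}, at::kLong);
      bool threw = false;
      try {
        at::gather(self, 1, idx_bad);
      } catch (const c10::Error&) {
        threw = true;
      }
      return threw ? 0 : 1;
    }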