applied-ai-018 committed
Commit baa5d99 · verified · 1 parent: 7a7f770

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/14.attention.dense.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/14.attention.dense.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step120/zero/14.attention.dense.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step120/zero/18.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step120/zero/18.post_attention_layernorm.weight/fp32.pt +3 -0
  6. ckpts/universal/global_step120/zero/22.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  7. ckpts/universal/global_step120/zero/22.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  8. ckpts/universal/global_step120/zero/7.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  9. ckpts/universal/global_step120/zero/7.attention.query_key_value.weight/fp32.pt +3 -0
  10. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/comm_analysis.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/constant_folding.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/coordinate_descent_tuner.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_utils.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/debug.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/decomposition.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/dependencies.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/freezing.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/metrics.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/ops_handler.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/pattern_matcher.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/quantized_lowerings.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/select_algorithm.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/sizevars.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_case.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_operators.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_helpers.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_heuristics.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/utils.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/virtualized.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__init__.py +0 -0
  33. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/__init__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/common.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp_wrapper_cpu.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp_wrapper_cuda.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cuda_combined_scheduling.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/memory_planning.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/multi_kernel.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_foreach.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_split_scan.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_utils.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/wrapper.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/torch/_inductor/codegen/aoti_runtime/implementation.cpp +87 -0
  47. venv/lib/python3.10/site-packages/torch/_inductor/codegen/aoti_runtime/interface.cpp +354 -0
  48. venv/lib/python3.10/site-packages/torch/_inductor/codegen/common.py +1755 -0
  49. venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py +0 -0
  50. venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h +595 -0
ckpts/universal/global_step120/zero/14.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:672ac62007f4d5b0094db89b6e378aea5f0ed8e3656d9c3d2d2fb43dd1419e4f
size 16778396
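These checkpoint .pt files are stored through Git LFS: the commit only adds a three-line pointer stub (spec version, SHA-256 object id, byte size), and the tensor payload itself lives in LFS storage. A minimal sketch of reading such a pointer stub in Python; the parsing helper is illustrative and not part of the commit:

# Parse a Git LFS pointer stub into its key/value fields (illustrative sketch).
def parse_lfs_pointer(path):
    fields = {}
    with open(path, "r") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value  # e.g. "version", "oid", "size"
    return fields

ptr = parse_lfs_pointer(
    "ckpts/universal/global_step120/zero/14.attention.dense.weight/exp_avg.pt"
)
print(ptr["oid"], ptr["size"])  # sha256:<hash> and the payload size in bytes

Until the LFS objects are fetched, the file on disk contains only this stub, which is why each entry shows a +3 line diff regardless of tensor size.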
ckpts/universal/global_step120/zero/14.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8ae792e91631277c80ce78041c1de2fcff0d3d24293cc53b5035888196cdda01
size 16778411
ckpts/universal/global_step120/zero/14.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0ff19c396df8c65a9592af0c9cd970fbea74a2dccfdd2c23bcc17957de73054d
size 16778317
ckpts/universal/global_step120/zero/18.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:32a8754c2fd57a65223af79547ac66b9a1539edcef28da71f44e191e1ff863e8
size 9387
ckpts/universal/global_step120/zero/18.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d7acc58c0f2d1cb173b4b9b538fca10594ccbdb3b92d1a6fe3e69ff1f7c739b0
size 9293
ckpts/universal/global_step120/zero/22.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2469365250bfaaa7c552d61f32d3d934aab0c1767a61d168e6dc14e3a24b3f67
size 33555612
ckpts/universal/global_step120/zero/22.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:93d5b79cf17f99252750835ee71067869d97b7acffd549659b745bfe0d76c747
size 33555627
ckpts/universal/global_step120/zero/7.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:05f82ef81749678d000d44b89893a9f52f8d0a81e7b6293a47cb3b25859fdbc2
size 50332843
ckpts/universal/global_step120/zero/7.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4bf05127491c52147ca09dff39178f56ad06f239a3d15a6f6713f43488f73017
size 50332749
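The ckpts/universal/global_step120/zero/ layout appears to follow the DeepSpeed universal-checkpoint convention: one directory per parameter, holding the fp32 master weights plus the Adam optimizer moments (exp_avg, exp_avg_sq) as separate .pt files. A minimal sketch of inspecting one parameter after the LFS objects have been pulled; treating each .pt file as a torch.save'd object is an assumption about this layout, so the sketch only reports what it finds:

import torch

base = "ckpts/universal/global_step120/zero/14.attention.dense.weight"

# Assumption: each .pt file deserializes to a single object, typically the raw tensor.
for name in ("fp32", "exp_avg", "exp_avg_sq"):
    obj = torch.load(f"{base}/{name}.pt", map_location="cpu")
    shape = getattr(obj, "shape", None)
    print(name, type(obj).__name__, shape)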
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/comm_analysis.cpython-310.pyc ADDED
Binary file (5.15 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/constant_folding.cpython-310.pyc ADDED
Binary file (6.57 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/coordinate_descent_tuner.cpython-310.pyc ADDED
Binary file (7.26 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_utils.cpython-310.pyc ADDED
Binary file (3.61 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/debug.cpython-310.pyc ADDED
Binary file (19.4 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/decomposition.cpython-310.pyc ADDED
Binary file (16.7 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/dependencies.cpython-310.pyc ADDED
Binary file (18.2 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/freezing.cpython-310.pyc ADDED
Binary file (9.31 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc ADDED
Binary file (223 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/metrics.cpython-310.pyc ADDED
Binary file (9.84 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/ops_handler.cpython-310.pyc ADDED
Binary file (26 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/pattern_matcher.cpython-310.pyc ADDED
Binary file (48.4 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/quantized_lowerings.cpython-310.pyc ADDED
Binary file (489 Bytes)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc ADDED
Binary file (77 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/select_algorithm.cpython-310.pyc ADDED
Binary file (33.3 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/sizevars.cpython-310.pyc ADDED
Binary file (21.5 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_case.cpython-310.pyc ADDED
Binary file (1.91 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/test_operators.cpython-310.pyc ADDED
Binary file (1.27 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_helpers.cpython-310.pyc ADDED
Binary file (7.93 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/triton_heuristics.cpython-310.pyc ADDED
Binary file (33.6 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/utils.cpython-310.pyc ADDED
Binary file (45.2 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/virtualized.cpython-310.pyc ADDED
Binary file (15 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__init__.py ADDED
File without changes (empty file)
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/common.cpython-310.pyc ADDED
Binary file (51.5 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc ADDED
Binary file (119 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp_wrapper_cpu.cpython-310.pyc ADDED
Binary file (54.4 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp_wrapper_cuda.cpython-310.pyc ADDED
Binary file (11.1 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cuda_combined_scheduling.cpython-310.pyc ADDED
Binary file (3.37 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/memory_planning.cpython-310.pyc ADDED
Binary file (26.5 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/multi_kernel.cpython-310.pyc ADDED
Binary file (13.8 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton.cpython-310.pyc ADDED
Binary file (113 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_foreach.cpython-310.pyc ADDED
Binary file (7.96 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_split_scan.cpython-310.pyc ADDED
Binary file (5.63 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_utils.cpython-310.pyc ADDED
Binary file (3.56 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/wrapper.cpython-310.pyc ADDED
Binary file (50.8 kB)
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/aoti_runtime/implementation.cpp ADDED
@@ -0,0 +1,87 @@
// NOTE: Like interface.cpp, this file will be copied into AOTInductor
// generated output. This file is intended to keep implementation
// details separate from the implementation of the AOTI public
// interface. Note also that #includes should go into interface.cpp
// for simplicity of maintenance.

namespace torch {
namespace aot_inductor {
template <typename T>
void convert_output_to_handle(
    const ArrayRefTensor<T>& output,
    AtenTensorHandle& handle) {
  handle = output.expensiveCopyToTensor();
}

template <typename... Ts, std::size_t... Is>
void convert_outputs_to_handles_helper(
    const std::tuple<ArrayRefTensor<Ts>...>& outputs,
    AtenTensorHandle* output_handles,
    std::index_sequence<Is...>) {
  (convert_output_to_handle(std::get<Is>(outputs), output_handles[Is]), ...);
}
template <typename... Ts>
void convert_outputs_to_handles(
    const std::tuple<ArrayRefTensor<Ts>...>& outputs,
    AtenTensorHandle* output_handles) {
  convert_outputs_to_handles_helper(
      outputs, output_handles, std::make_index_sequence<sizeof...(Ts)>());
}

template <typename T>
void convert_handle_to_arrayref_tensor(
    AtenTensorHandle handle,
    ArrayRefTensor<T>& input) {
  void* data_ptr;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr(handle, &data_ptr));
  int64_t dim;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_dim(handle, &dim));
  int64_t numel;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_numel(handle, &numel));
  int64_t* sizes;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_sizes(handle, &sizes));
  int64_t* strides;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_strides(handle, &strides));
  int32_t dtype;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_dtype(handle, &dtype));
  int32_t device_type;
  AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_device_type(handle, &device_type));
  int32_t device_index;
  AOTI_TORCH_ERROR_CODE_CHECK(
      aoti_torch_get_device_index(handle, &device_index));

  input = ArrayRefTensor<T>(
      MiniArrayRef<T>(reinterpret_cast<T*>(data_ptr), numel),
      MiniArrayRef<const int64_t>(sizes, dim),
      MiniArrayRef<const int64_t>(strides, dim),
      device_type,
      device_index);
}

template <typename... Ts, std::size_t... Is>
void convert_handles_to_inputs_helper(
    AtenTensorHandle* input_handles,
    std::tuple<ArrayRefTensor<Ts>...>& inputs,
    std::index_sequence<Is...>) {
  (convert_handle_to_arrayref_tensor(input_handles[Is], std::get<Is>(inputs)),
   ...);
}

template <typename... Ts>
void convert_handles_to_inputs(
    AtenTensorHandle* input_handles,
    std::tuple<ArrayRefTensor<Ts>...>& inputs) {
  convert_handles_to_inputs_helper(
      input_handles, inputs, std::make_index_sequence<sizeof...(Ts)>());
}

template <typename T>
void assert_numel(const ArrayRefTensor<T>& tensor, int64_t numel) {
  if (tensor.numel() != numel) {
    std::stringstream err;
    err << "incorrect numel for input tensor. expected " << numel << ", got " << tensor.numel();
    throw std::runtime_error(err.str());
  }
}
} // namespace aot_inductor
} // namespace torch

venv/lib/python3.10/site-packages/torch/_inductor/codegen/aoti_runtime/interface.cpp ADDED
@@ -0,0 +1,354 @@
#include <torch/csrc/inductor/aoti_runtime/arrayref_tensor.h>
#include <torch/csrc/inductor/aoti_runtime/interface.h>
#include <torch/csrc/inductor/aoti_runtime/model_container.h>
#include <torch/csrc/inductor/aoti_runtime/scalar_to_tensor.h>
#include <torch/csrc/inductor/aoti_runtime/thread_local.h>

#include <iostream>
#include <sstream>
#include <stdexcept>
#include <vector>

#define CONVERT_EXCEPTION_TO_ERROR_CODE(...)                 \
  try {                                                      \
    __VA_ARGS__                                              \
  } catch (const std::exception& e) {                        \
    std::cerr << "Error: " << e.what() << std::endl;         \
    return AOTI_RUNTIME_FAILURE;                             \
  } catch (...) {                                            \
    std::cerr << "Unknown exception occurred." << std::endl; \
    return AOTI_RUNTIME_FAILURE;                             \
  }                                                          \
  return AOTI_RUNTIME_SUCCESS;

#define AOTI_VECTOR_SIZE_CHECK(actual_size, expected_size, name)  \
  do {                                                            \
    AOTI_RUNTIME_CHECK(                                           \
        actual_size == expected_size,                             \
        "expected " + std::string(name) + " vector size to be " + \
            std::to_string(expected_size) + ", but got " +        \
            std::to_string(actual_size));                         \
  } while (0)

// AOTInductor uses at::addmm_out, which doesn't support
// arguments that require gradients. For this reason, we
// enforce a no_grad context for the run APIs.
//
// A RAII, thread local (!) guard that enables or disables grad mode upon
// construction, and sets it back to the original value upon destruction.
struct AOTINoGradGuard {
  AOTINoGradGuard() : prev_mode(aoti_torch_grad_mode_is_enabled()) {
    aoti_torch_grad_mode_set_enabled(false);
  }
  ~AOTINoGradGuard() {
    aoti_torch_grad_mode_set_enabled(prev_mode);
  }
  bool prev_mode;
};

extern "C" {

AOTIRuntimeError AOTInductorModelContainerCreate(
    AOTInductorModelContainerHandle* container_handle,
    size_t num_models,
    bool is_cpu,
    const char* cubin_dir) {
  return AOTInductorModelContainerCreateWithDevice(
      container_handle,
      num_models,
      is_cpu ? "cpu" : "cuda",
      cubin_dir);
}

AOTIRuntimeError AOTInductorModelContainerCreateWithDevice(
    AOTInductorModelContainerHandle* container_handle,
    size_t num_models,
    const char* device_str,
    const char* cubin_dir) {
  if (num_models == 0) {
    std::cerr << "Error: num_models must be positive, but got 0" << std::endl;
    return AOTI_RUNTIME_FAILURE;
  }
  CONVERT_EXCEPTION_TO_ERROR_CODE({
    std::optional<std::string> cubin_dir_opt;
    if (cubin_dir != nullptr) {
      cubin_dir_opt.emplace(cubin_dir);
    }
    auto* container = new torch::aot_inductor::AOTInductorModelContainer(
        num_models, std::string(device_str), cubin_dir_opt);
    *container_handle =
        reinterpret_cast<AOTInductorModelContainerHandle>(container);
  })
}

AOTIRuntimeError AOTInductorModelContainerDelete(
    AOTInductorModelContainerHandle container_handle) {
  CONVERT_EXCEPTION_TO_ERROR_CODE({
    auto* container =
        reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
            container_handle);
    delete container;
  });
}

AOTIRuntimeError AOTInductorModelContainerRun(
    AOTInductorModelContainerHandle container_handle,
    AtenTensorHandle* input_handles, // array of input AtenTensorHandle; handles
                                     // are stolen; the array itself is borrowed
    size_t num_inputs,
    AtenTensorHandle*
        output_handles, // array for writing output AtenTensorHandle; handles
                        // will be stolen by the caller; the array itself is
                        // borrowed
    size_t num_outputs,
    AOTInductorStreamHandle stream_handle,
    AOTIProxyExecutorHandle proxy_executor_handle) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  AOTI_VECTOR_SIZE_CHECK(num_inputs, container->num_inputs(), "inputs");
  AOTI_VECTOR_SIZE_CHECK(num_outputs, container->num_outputs(), "outputs");

  auto stream =
      reinterpret_cast<torch::aot_inductor::DeviceStreamType>(stream_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE({
    AOTINoGradGuard guard;
    container->run(
        input_handles, output_handles, stream, proxy_executor_handle);
  })
}

AOTIRuntimeError AOTInductorModelContainerGetNumConstants(
    AOTInductorModelContainerHandle container_handle,
    size_t* num_constants) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE(
      { *num_constants = container->num_constants(); })
}

AOTIRuntimeError AOTInductorModelContainerGetConstantName(
    AOTInductorModelContainerHandle container_handle,
    size_t idx,
    const char** name) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE(
      { *name = container->constant_name(idx); })
}

AOTIRuntimeError AOTInductorModelContainerGetConstantOriginalFQN(
    AOTInductorModelContainerHandle container_handle,
    size_t idx,
    const char** original_fqn) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE(
      { *original_fqn = container->constant_original_fqn(idx); })
}

AOTIRuntimeError AOTInductorModelContainerGetConstantFromFolded(
    AOTInductorModelContainerHandle container_handle,
    size_t idx,
    bool* from_folded) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(container_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE({ *from_folded = container->constant_from_folded(idx); })
}

AOTIRuntimeError AOTInductorModelContainerGetConstantDtype(
    AOTInductorModelContainerHandle container_handle,
    size_t idx,
    int32_t* dtype) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE(
      { *dtype = container->constant_dtype(idx); })
}

AOTIRuntimeError AOTInductorModelContainerUpdateConstantBuffer(
    AOTInductorModelContainerHandle container_handle,
    AOTInductorConstantMapHandle constant_map_handle,
    bool use_inactive,
    bool validate_full_update) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  auto input_map = reinterpret_cast<std::unordered_map<std::string, AtenTensorHandle>*>(constant_map_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE({
    container->update_constant_buffer(
        *input_map, use_inactive, validate_full_update);
  })
}

AOTIRuntimeError AOTInductorModelContainerUpdateInactiveConstantBuffer(
    AOTInductorModelContainerHandle container_handle,
    AOTInductorConstantMapHandle constant_map_handle) {
  return AOTInductorModelContainerUpdateConstantBuffer(container_handle,
                                                       constant_map_handle,
                                                       /*use_inactive*/ true,
                                                       /*validate_full_update*/ true);
}

AOTIRuntimeError AOTInductorModelContainerRunConstantFolding(
    AOTInductorModelContainerHandle container_handle,
    bool use_inactive,
    AOTInductorStreamHandle stream_handle,
    AOTIProxyExecutorHandle proxy_executor_handle) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  auto stream =
      reinterpret_cast<torch::aot_inductor::DeviceStreamType>(stream_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE({
    AOTINoGradGuard guard;
    container->run_const_fold(use_inactive, stream, proxy_executor_handle);
  })
}

AOTIRuntimeError AOTInductorModelContainerSwapConstantBuffer(
    AOTInductorModelContainerHandle container_handle) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE({
    container->swap_constant_buffer();
  })
}

AOTIRuntimeError AOTInductorModelContainerGetNumInputs(
    AOTInductorModelContainerHandle container_handle,
    size_t* ret_num_inputs) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE(
      { *ret_num_inputs = container->num_inputs(); })
}

AOTIRuntimeError AOTInductorModelContainerGetInputName(
    AOTInductorModelContainerHandle container_handle,
    size_t input_idx,
    const char** ret_input_names) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE(
      { *ret_input_names = container->input_name(input_idx); })
}

AOTIRuntimeError AOTInductorModelContainerGetNumOutputs(
    AOTInductorModelContainerHandle container_handle,
    size_t* ret_num_outputs) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE(
      { *ret_num_outputs = container->num_outputs(); })
}

AOTIRuntimeError AOTInductorModelContainerGetOutputName(
    AOTInductorModelContainerHandle container_handle,
    size_t output_idx,
    const char** ret_output_names) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE(
      { *ret_output_names = container->output_name(output_idx); })
}

AOTIRuntimeError AOTInductorModelContainerGetCallSpec(
    AOTInductorModelContainerHandle container_handle,
    const char** in_spec,
    const char** out_spec) {
  auto* container =
      reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
          container_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE({
    *in_spec = container->get_in_spec();
    *out_spec = container->get_out_spec();
  })
}

AOTIRuntimeError AOTInductorModelCreate(
    AOTInductorModelHandle* model_handle,
    AOTInductorConstantMapHandle constant_map_handle){
  CONVERT_EXCEPTION_TO_ERROR_CODE({
    auto constant_map = std::make_shared<torch::aot_inductor::ConstantMap>();
    auto constant_array = std::make_shared<std::vector<torch::aot_inductor::ConstantHandle>>();
    auto input_map = reinterpret_cast<std::unordered_map<std::string, AtenTensorHandle>*>(constant_map_handle);

    auto model = new torch::aot_inductor::AOTInductorModel(
        constant_map,
        constant_array,
        "cpu", // device_str is hardcoded, as AOTInductorModelCreate is only used for CPU models
        ""
    );

    if (input_map) {
      for (auto const& kv : *input_map) {
        constant_map->emplace(kv.first, kv.second);
      }
    } else {
      model->load_constants();
    }

    *model_handle = reinterpret_cast<AOTInductorModelHandle>(model);
  })}

AOTIRuntimeError AOTInductorModelRun(
    AOTInductorModelHandle model_handle,
    AtenTensorHandle* input_handles,
    AtenTensorHandle* output_handles) {
  auto model =
      reinterpret_cast<torch::aot_inductor::AOTInductorModel*>(model_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE({
    AOTINoGradGuard guard;
    model->run_impl(
        input_handles,
        output_handles,
        (torch::aot_inductor::DeviceStreamType) nullptr,
        nullptr);
  })
}

AOTIRuntimeError AOTInductorModelDelete(AOTInductorModelHandle model_handle){
  CONVERT_EXCEPTION_TO_ERROR_CODE({
    auto model = reinterpret_cast<torch::aot_inductor::AOTInductorModel*>(
        model_handle);
    delete model;
  })}

AOTIRuntimeError AOTInductorModelGetNumOutputs(
    AOTInductorModelHandle model_handle,
    size_t* ret_num_outputs) {
  CONVERT_EXCEPTION_TO_ERROR_CODE({
    auto model = reinterpret_cast<torch::aot_inductor::AOTInductorModel*>(model_handle);
    *ret_num_outputs = model->num_outputs();
  })
}

AOTIRuntimeError AOTInductorModelUpdateConstantsMap(
    AOTInductorModelHandle model_handle,
    AOTInductorConstantMapHandle constant_map_handle) {
  auto model =
      reinterpret_cast<torch::aot_inductor::AOTInductorModel*>(model_handle);
  CONVERT_EXCEPTION_TO_ERROR_CODE({
    auto constant_map = std::make_shared<torch::aot_inductor::ConstantMap>();
    auto input_map =
        reinterpret_cast<std::unordered_map<std::string, AtenTensorHandle>*>(
            constant_map_handle);

    for (auto const& kv : *input_map) {
      constant_map->emplace(kv.first, kv.second);
    }
    model->update_constants_map(std::move(constant_map));
  })
}

} // extern "C"

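interface.cpp is the extern "C" surface that gets compiled into each AOTInductor model shared library, so the exported symbol names above (AOTInductorModelContainerCreateWithDevice, ...Run, ...Delete, and so on) are what a host process links against. A minimal ctypes sketch of probing such a library from Python; the ./model.so path is hypothetical, the return code check assumes AOTI_RUNTIME_SUCCESS is 0, and in practice the PyTorch-side loaders wrap these calls rather than using ctypes directly:

import ctypes

# Hypothetical path to an AOTInductor-compiled model; point this at a real artifact.
lib = ctypes.CDLL("./model.so")

# Create a container holding one model instance on CPU, with no cubin directory.
handle = ctypes.c_void_p()
err = lib.AOTInductorModelContainerCreateWithDevice(
    ctypes.byref(handle), ctypes.c_size_t(1), b"cpu", None
)
assert err == 0  # assumption: AOTI_RUNTIME_SUCCESS == 0

# Query the input/output arity exposed by the compiled graph.
num_inputs = ctypes.c_size_t()
num_outputs = ctypes.c_size_t()
lib.AOTInductorModelContainerGetNumInputs(handle, ctypes.byref(num_inputs))
lib.AOTInductorModelContainerGetNumOutputs(handle, ctypes.byref(num_outputs))
print(num_inputs.value, num_outputs.value)

lib.AOTInductorModelContainerDelete(handle)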
venv/lib/python3.10/site-packages/torch/_inductor/codegen/common.py ADDED
@@ -0,0 +1,1755 @@
import contextlib
import dataclasses
import functools
import itertools
import logging
import operator
import re
from itertools import chain
from typing import (
    Any,
    Callable,
    ClassVar,
    Dict,
    List,
    NamedTuple,
    Optional,
    Set,
    Tuple,
    TYPE_CHECKING,
    Union,
)

import sympy
from sympy.printing.printer import Printer

import torch
import torch.fx
from torch._prims_common import ELEMENTWISE_TYPE_PROMOTION_KIND
from torch.utils import _pytree as pytree
from torch.utils._sympy.value_ranges import ValueRanges

from .. import config, metrics
from ..utils import (
    DeferredLineBase,
    do_bench,
    free_symbol_startswith,
    IndentedBuffer,
    sympy_dot,
    sympy_index_symbol,
    sympy_subs,
    unique,
)
from ..virtualized import ops, OpsHandler, OpsValue, ReductionType, StoreMode, V

if TYPE_CHECKING:
    from ..ir import TensorBox

schedule_log = torch._logging.getArtifactLogger(__name__, "schedule")


def data_type_logger(msg):
    if schedule_log.isEnabledFor(logging.DEBUG):
        schedule_log.debug("Data type propagation: %s", msg)


@dataclasses.dataclass
class WorkspaceArg:
    """A temporary buffer used for a single kernel, then discarded.

    Not registered as a traditional buffer since there are no users,
    so it would be dead code eliminated.
    """

    nbytes: sympy.Expr
    zero_fill: bool


@dataclasses.dataclass
class TensorArg:
    name: str
    buffer: str
    dtype: torch.dtype
    offset: sympy.Expr = sympy.Integer(0)


@dataclasses.dataclass
class SizeArg:
    name: str
    expr: sympy.Expr


@dataclasses.dataclass
class DeviceCodegen:
    scheduling: type
    wrapper_codegen: type


KernelArgType = Union[WorkspaceArg, TensorArg, SizeArg]

device_codegens: Dict[str, DeviceCodegen] = {}


class DeviceOpOverrides:
    def import_get_raw_stream_as(self, name):
        raise NotImplementedError()

    def set_device(self, device_idx):
        raise NotImplementedError()

    def synchronize(self):
        raise NotImplementedError()

    def device_guard(self, device_idx):
        raise NotImplementedError()


device_op_overrides_dict: Dict[str, DeviceOpOverrides] = {}


# The code generated by Inductor consists of two main parts: kernel code and wrapper code.
# For any new backend looking to integrate with Inductor, customization of these two main
# parts are necessary to generate its specific code.
#
# Kernel code generation is determined by different Scheduling. Consequently, a new
# backend needs to provide a custom Scheduling for its unique kernel code generation. Currently,
# CppScheduling and TritonScheduling serve the C++/OpenMP and Triton backends, respectively.
#
# For the Wrapper, Inductor provides a WrapperCodeGen class to generate the Python wrapper code
# that bridges kernels. This allows out-of-tree backends to inherit from WrapperCodeGen,
# and override specific member functions to create backend-specific Python wrapper code.
#
# Other classes, such as CppKernel and TritonKernel, used for code generation, typically form part
# of the logic for either Scheduling or WrapperCodeGen. So the Scheduling and WrapperCodeGen interfaces
# provide flexibility to the backend. A backend can choose to implement these classes from scratch,
# or reuse them by extending and overriding as necessary. And Inductor provides the registration API,
# register_backend_for_device, to equip a new backend at runtime.
#
# Intel has developed a new backend on top of Triton to support Intel GPUs, leveraging these interfaces.
# This backend can be used as a reference:
# https://github.com/intel/intel-extension-for-pytorch/blob/5dcc9d57e5422cf295e1a1ee97896d6b6a554a85/intel_extension_for_pytorch/_inductor/__init__.py#L9
def register_backend_for_device(
    device: str, device_scheduling: type, device_wrapper_codegen: type
):
    device_codegens[device] = DeviceCodegen(device_scheduling, device_wrapper_codegen)


def get_scheduling_for_device(device: str):
    return device_codegens[device].scheduling if device in device_codegens else None


def get_wrapper_codegen_for_device(device: str):
    return (
        device_codegens[device].wrapper_codegen if device in device_codegens else None
    )


def index_prevent_reordering(index: List[sympy.Expr], index_vars, sizes):
    from ..ir import FlexibleLayout

    # added contiguous index prevents reordering
    return [*index, sympy_dot(index_vars, FlexibleLayout.contiguous_strides(sizes))]


def register_device_op_overrides(device: str, device_op_overrides: DeviceOpOverrides):
    device_op_overrides_dict[device] = device_op_overrides


def get_device_op_overrides(device: str):
    assert isinstance(device, str)

    if not device_op_overrides_dict.keys():
        from .cuda import device_op_overrides  # noqa: F401

    if device in device_op_overrides_dict.keys():
        return device_op_overrides_dict[device]

    return DeviceOpOverrides()


@functools.lru_cache(None)
def boolean_ops():
    return (
        "is_inf",
        "is_nan",
        "bitwise_xor",
        "logical_not",
        "signbit",
        "le",
        "lt",
        "ge",
        "gt",
        "eq",
        "ne",
    )


DTYPE_TO_COMPUTATION_DTYPE = {
    torch.bfloat16: torch.float,
    torch.float16: torch.float,
    **{
        dtype: dtype
        for dtype in [
            torch.bool,
            torch.float32,
            torch.float64,
            torch.int8,
            torch.int16,
            torch.int32,
            torch.int64,
            torch.uint8,
            torch.uint16,
            torch.uint32,
            torch.uint64,
        ]
    },
}


class DataTypePropagation:
    def __init__(self, body) -> None:
        self.body = body
        self.graphs: Dict[Union[Callable[..., Any], str], Any] = {
            "root": body.root_block.graph
        }
        for k, v in body.subblocks.items():
            self.graphs[k] = v.graph

    def deduce_node_dtype_by_inputs(self, node: torch.fx.Node):
        inputs = node.all_input_nodes
        input_nodes = [
            n for n in inputs if isinstance(n, torch.fx.Node) and n.op != "placeholder"
        ]
        if len(input_nodes) == 0:
            return None

        all_input_nodes_propogated = all(
            OptimizationContext.key in n.meta
            and n.meta[OptimizationContext.key].dtype is not None
            for n in input_nodes
        )
        if not all_input_nodes_propogated:
            return None

        return functools.reduce(
            torch.promote_types,
            [n.meta[OptimizationContext.key].dtype for n in input_nodes],
        )

    def deduce_node_dtype_by_subgraph(self, node: torch.fx.Node):
        sub_graph = self.graphs[node.target]
        dtype = self.propagate_graph(sub_graph)
        assert dtype
        return dtype

    def deduce_node_dtype(self, node: torch.fx.Node):
        if node.target in boolean_ops():
            return torch.bool

        if node.op == "placeholder":
            return None

        if node.target == "output":
            # we can infer the output node's dtype only if it has exactly 1 arg
254
+ if len(node.args) != 1:
255
+ return None
256
+
257
+ if node.target in (
258
+ "to_dtype",
259
+ "index_expr",
260
+ ):
261
+ return node.args[-1]
262
+
263
+ if node.target in (
264
+ "rand",
265
+ "randn",
266
+ ):
267
+ return torch.float
268
+
269
+ if node.target in (
270
+ "get_index",
271
+ "index_expr",
272
+ ):
273
+ return torch.int64
274
+
275
+ if node.target in (
276
+ "load",
277
+ "store",
278
+ "store_reduction",
279
+ ):
280
+ buf_name = node.args[1]
281
+ return V.graph.get_dtype(buf_name) # type: ignore[arg-type]
282
+
283
+ if node.target == operator.getitem:
284
+ return self.deduce_node_dtype(node.args[0]) # type: ignore[arg-type]
285
+
286
+ assert isinstance(node.target, str)
287
+
288
+ if node.target == "reduction":
289
+ return node.args[1]
290
+
291
+ if node.target == "constant":
292
+ return DTYPE_TO_COMPUTATION_DTYPE[node.args[-1]] # type: ignore[index]
293
+
294
+ if node.target.startswith("masked_subblock"):
295
+ return self.deduce_node_dtype_by_subgraph(node)
296
+
297
+ return self.deduce_node_dtype_by_inputs(node)
298
+
299
+ def propagate_graph(self, graph: torch.fx.Graph):
300
+ assert graph.nodes
301
+ graph_dtype = None
302
+ # For masked_subblock, we use output's dtype to represent
303
+ # the dtype of this subgraph. For other cases, graph_dtype
304
+ # might be None
305
+ for node in graph.nodes:
306
+ if OptimizationContext.key in node.meta:
307
+ opt_ctx = node.meta[OptimizationContext.key]
308
+ else:
309
+ opt_ctx = OptimizationContext()
310
+
311
+ opt_ctx.dtype = self.deduce_node_dtype(node)
312
+ node.meta[OptimizationContext.key] = opt_ctx
313
+ if node.target == "output":
314
+ graph_dtype = opt_ctx.dtype
315
+ return graph_dtype
316
+
317
+ def propagate(self):
318
+ self.propagate_graph(self.graphs["root"])
319
+
320
+ @classmethod
321
+ def propagate_loopbody(cls, body):
322
+ return cls(body).propagate()
323
+
324
+ @classmethod
325
+ def propagate_scheduler_node(cls, node):
326
+ from ..ir import LoopBody
327
+ from ..scheduler import SchedulerNode
328
+
329
+ assert isinstance(node, SchedulerNode)
330
+ assert isinstance(node._body, LoopBody)
331
+ DataTypePropagation.propagate_loopbody(node._body)
332
+
333
+
334
+ class ExprPrinter(Printer):
335
+ @staticmethod
336
+ def paren(string):
337
+ def all_in_parens(string):
338
+ if string[0] != "(" or len(string) < 2:
339
+ return False
340
+ count = 1
341
+ for i, char in enumerate(string[1:]):
342
+ if char == "(":
343
+ count += 1
344
+ elif char == ")":
345
+ count -= 1
346
+ if count == 0 and i != len(string) - 2:
347
+ return False
348
+ assert count == 0
349
+ return True
350
+
351
+ if (
352
+ isinstance(string, CSEVariable)
353
+ or re.match(r"^[a-z0-9_.]+$", string, re.I)
354
+ or re.match(r"^\([^)]*\)$", string, re.I)
355
+ or string == ""
356
+ ):
357
+ return string
358
+ # don't put extra parens for strings that are already wrapped in parens
359
+ if all_in_parens(string):
360
+ return string
361
+ return f"({string})"
362
+
363
+ def _print_Infinity(self, expr):
364
+ return "math.inf"
365
+
366
+ def _print_NegativeInfinity(self, expr):
367
+ return "-math.inf"
368
+
369
+ def _print_Relational(self, expr):
370
+ return f" {expr.rel_op} ".join(map(self.paren, map(self._print, expr.args)))
371
+
372
+ def _print_Mul(self, expr):
373
+ return "*".join(map(self.paren, map(self._print, expr.args)))
374
+
375
+ def _print_Add(self, expr):
376
+ return " + ".join(map(self.paren, map(self._print, expr.args)))
377
+
378
+ def _print_Mod(self, expr):
379
+ return " % ".join(map(self.paren, map(self._print, expr.args)))
380
+
381
+ def _print_FloorDiv(self, expr):
382
+ raise NotImplementedError(f"_print_FloorDiv not implemented for {type(self)}")
383
+
384
+ def _print_CleanDiv(self, expr):
385
+ return self._print_FloorDiv(expr)
386
+
387
+ def _print_GreaterThan(self, expr):
388
+ # GreaterThan: >=
389
+ # StrictlyGreaterThan: >
390
+ # Go figure...
391
+ return " >= ".join(map(self.paren, map(self._print, expr.args)))
392
+
393
+ def _print_align(self, expr):
394
+ assert len(expr.args) == 1
395
+ return f"align({self._print(expr.args[0])})"
396
+
397
+
398
+ class PythonPrinter(ExprPrinter):
399
+ def _print_ModularIndexing(self, expr):
400
+ x, div, mod = expr.args
401
+ x = self.paren(self.doprint(x))
402
+ div = self.paren(self.doprint(div))
403
+ mod = self.paren(self.doprint(mod))
404
+ if div != "1":
405
+ x = f"({x} // {div})"
406
+ return f"{x} % {mod}"
407
+
408
+ def _print_FloorDiv(self, expr):
409
+ x, div = expr.args
410
+ x = self.paren(self.doprint(x))
411
+ div = self.paren(self.doprint(div))
412
+ return f"({x} // {div})"
413
+
414
+ def _helper_sqrt(self, expr):
415
+ return f"math.sqrt({self._print(expr)})"
416
+
417
+ def _print_Pow(self, expr):
418
+ # Pow() confuses triton
419
+ base, exp = expr.args
420
+ # NB: Remember this is sizevar computation! You don't typically
421
+ # expect to have to do floating point computation including exponents
422
+ # in sizevar compute. Instead of adding support for floating
423
+ # point pow, you should make upstream retranslate the Sympy expression
424
+ # into Tensor expressions earlier and do that instead.
425
+ if exp == 0.5:
426
+ return self._helper_sqrt(base)
427
+ elif exp == -0.5:
428
+ return "1/" + self._helper_sqrt(base)
429
+ base = self._print(base)
430
+ assert exp == int(exp), exp
431
+ exp = int(exp)
432
+ if exp > 0:
433
+ return "*".join([self.paren(base)] * exp)
434
+ elif exp < 0:
435
+ return "1/" + self.paren("*".join([self.paren(base)] * abs(exp)))
436
+ else: # exp == 0
437
+ return "1"
438
+
439
+ def _print_floor(self, expr):
440
+ assert len(expr.args) == 1
441
+ return f"math.floor({self._print(expr.args[0])})"
442
+
443
+ def _print_ceiling(self, expr):
444
+ assert len(expr.args) == 1
445
+ return f"math.ceil({self._print(expr.args[0])})"
446
+
447
+ def _print_Abs(self, expr):
448
+ assert len(expr.args) == 1
449
+ return f"abs({self._print(expr.args[0])})"
450
+
451
+ def _print_Max(self, expr):
452
+ assert len(expr.args) >= 2
453
+ return f"max({', '.join(map(self._print, expr.args))})"
454
+
455
+ def _print_Min(self, expr):
456
+ assert len(expr.args) >= 2
457
+ return f"min({', '.join(map(self._print, expr.args))})"
458
+
459
+ def _print_cos(self, expr):
460
+ assert len(expr.args) == 1
461
+ return f"math.cos({self._print(expr.args[0])})"
462
+
463
+ def _print_cosh(self, expr):
464
+ assert len(expr.args) == 1
465
+ return f"math.cosh({self._print(expr.args[0])})"
466
+
467
+ def _print_acos(self, expr):
468
+ assert len(expr.args) == 1
469
+ return f"math.acos({self._print(expr.args[0])})"
470
+
471
+ def _print_sin(self, expr):
472
+ assert len(expr.args) == 1
473
+ return f"math.sin({self._print(expr.args[0])})"
474
+
475
+ def _print_sinh(self, expr):
476
+ assert len(expr.args) == 1
477
+ return f"math.sinh({self._print(expr.args[0])})"
478
+
479
+ def _print_asin(self, expr):
480
+ assert len(expr.args) == 1
481
+ return f"math.asin({self._print(expr.args[0])})"
482
+
483
+ def _print_tan(self, expr):
484
+ assert len(expr.args) == 1
485
+ return f"math.tan({self._print(expr.args[0])})"
486
+
487
+ def _print_tanh(self, expr):
488
+ assert len(expr.args) == 1
489
+ return f"math.tanh({self._print(expr.args[0])})"
490
+
491
+ def _print_atan(self, expr):
492
+ assert len(expr.args) == 1
493
+ return f"math.atan({self._print(expr.args[0])})"
494
+
495
+ def _print_Round(self, expr):
496
+ assert len(expr.args) == 1
497
+ return f"round({self._print(expr.args[0])})"
498
+
499
+ def _print_RoundDecimal(self, expr):
500
+ assert len(expr.args) == 2
501
+ number, ndigits = expr.args
502
+ assert isinstance(ndigits, sympy.Integer)
503
+ return f"round({self._print(number)}, {ndigits})"
504
+
505
+
506
+ class OpOverrides:
507
+ def __init__(self, parent):
508
+ super().__init__()
509
+ self._parent = parent
510
+
511
+ def __getattr__(self, item):
512
+ return getattr(self._parent, item)
513
+
514
+ @staticmethod
515
+ def identity(value):
516
+ # used to trigger cse
517
+ return value
518
+
519
+ @staticmethod
520
+ def constant(value, dtype):
521
+ return repr(value)
522
+
523
+ @staticmethod
524
+ def reciprocal(x):
525
+ return ops.truediv("1", x)
526
+
527
+ @staticmethod
528
+ def square(x):
529
+ return ops.mul(x, x)
530
+
531
+ @staticmethod
532
+ def bitwise_not(x):
533
+ return f"~{ExprPrinter.paren(x)}"
534
+
535
+ @staticmethod
536
+ def logical_not(a):
537
+ return f"{ExprPrinter.paren(a)} == 0"
538
+
539
+ @staticmethod
540
+ def bitwise_and(x, y):
541
+ return f"{ExprPrinter.paren(x)} & {ExprPrinter.paren(y)}"
542
+
543
+ @staticmethod
544
+ def bitwise_or(x, y):
545
+ return f"{ExprPrinter.paren(x)} | {ExprPrinter.paren(y)}"
546
+
547
+ @staticmethod
548
+ def bitwise_xor(x, y):
549
+ return f"{ExprPrinter.paren(x)} ^ {ExprPrinter.paren(y)}"
550
+
551
+ @staticmethod
552
+ def bitwise_left_shift(x, y):
553
+ return f"{ExprPrinter.paren(x)} << {ExprPrinter.paren(y)}"
554
+
555
+ @staticmethod
556
+ def bitwise_right_shift(x, y):
557
+ return f"{ExprPrinter.paren(x)} >> {ExprPrinter.paren(y)}"
558
+
559
+ @staticmethod
560
+ def remainder(a, b):
561
+ r = ops.mod(a, b)
562
+ return ops.where(f"(({r} != 0) & (({r} < 0) != ({b} < 0)))", ops.add(r, b), r)
563
+
564
+ @staticmethod
565
+ def load_seed(name, offset):
566
+ return ops.load(name, sympy.Integer(offset))
567
+
568
+ @classmethod
569
+ def _initialize_pointwise_overrides(cls, target):
570
+ assert target in {"triton", "cpp", "cppvec"}, target
571
+
572
+ def pointwise_factory_1(impl):
573
+ def func(x):
574
+ return impl.format(x=x)
575
+
576
+ return func
577
+
578
+ def pointwise_factory_2(impl):
579
+ def func(x, y):
580
+ return impl.format(x=x, y=y)
581
+
582
+ return func
583
+
584
+ for funcname, data in pointwise_overrides_data.items():
585
+ impl = getattr(data, target)
586
+ if isinstance(impl, str):
587
+ nof_args = 2 if "{y}" in impl else 1
588
+ # extend the following dictionary with factory
589
+ # functions for a specific number of arguments as
590
+ # needed:
591
+ factory = {1: pointwise_factory_1, 2: pointwise_factory_2}[nof_args]
592
+ setattr(cls, funcname, staticmethod(factory(impl)))
593
+
594
+
595
+ @dataclasses.dataclass
596
+ class OverridesData:
597
+ name: str
598
+ cpp: str
599
+ triton: Optional[str] = None # None when not impl in libdevice/triton
600
+ cppvec: Optional[str] = None # None when not impl in aten/.../vec
601
+ type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND = (
602
+ ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
603
+ )
604
+
605
+
606
+ pointwise_overrides_data: Dict[str, OverridesData] = dict(
607
+ airy_ai=OverridesData(
608
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
609
+ cpp="airy_ai_forward({x})",
610
+ name="special_airy_ai",
611
+ ),
612
+ bessel_j0=OverridesData(
613
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
614
+ cpp="bessel_j0_forward({x})",
615
+ triton="libdevice.j0({x})",
616
+ name="special_bessel_j0",
617
+ ),
618
+ bessel_j1=OverridesData(
619
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
620
+ cpp="bessel_j1_forward({x})",
621
+ triton="libdevice.j1({x})",
622
+ name="special_bessel_j1",
623
+ ),
624
+ bessel_y0=OverridesData(
625
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
626
+ cpp="bessel_y0_forward({x})",
627
+ triton="libdevice.y0({x})",
628
+ name="special_bessel_y0",
629
+ ),
630
+ bessel_y1=OverridesData(
631
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
632
+ cpp="bessel_y1_forward({x})",
633
+ triton="libdevice.y1({x})",
634
+ name="special_bessel_y1",
635
+ ),
636
+ digamma=OverridesData(
637
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
638
+ cpp="calc_digamma({x})",
639
+ cppvec="{x}.digamma()",
640
+ name="digamma",
641
+ ),
642
+ # no cpp nor triton implementation for entr, it is defined as decomposition
643
+ # erf, erfc
644
+ erfcx=OverridesData(
645
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
646
+ cpp="calc_erfcx({x})",
647
+ triton="libdevice.erfcx({x})",
648
+ name="special_erfcx",
649
+ ),
650
+ # erfinv, exp2, expit, gammaln
651
+ igamma=OverridesData(
652
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
653
+ cpp="calc_igamma({x}, {y})",
654
+ name="igamma",
655
+ ),
656
+ igammac=OverridesData(
657
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
658
+ cpp="calc_igammac({x}, {y})",
659
+ name="igammac",
660
+ ),
661
+ gammainc=OverridesData(
662
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
663
+ cpp="calc_igamma({x}, {y})",
664
+ name="special_gammainc",
665
+ ),
666
+ gammaincc=OverridesData(
667
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
668
+ cpp="calc_igammac({x}, {y})",
669
+ name="special_gammaincc",
670
+ ),
671
+ i0=OverridesData(
672
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
673
+ cpp="calc_i0({x})",
674
+ triton="libdevice.cyl_bessel_i0({x})",
675
+ cppvec="{x}.i0()",
676
+ name="i0",
677
+ ),
678
+ i0e=OverridesData(
679
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
680
+ cpp="calc_i0e({x})",
681
+ cppvec="{x}.i0e()",
682
+ name="special_i0e",
683
+ ),
684
+ i1=OverridesData(
685
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
686
+ cpp="calc_i1({x})",
687
+ triton="libdevice.cyl_bessel_i1({x})",
688
+ name="special_i1",
689
+ ),
690
+ i1e=OverridesData(
691
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
692
+ cpp="calc_i1e({x})",
693
+ name="special_i1e",
694
+ ),
695
+ log_ndtr=OverridesData(
696
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
697
+ cpp="calc_log_ndtr({x})",
698
+ name="special_log_ndtr",
699
+ ),
700
+ # logit
701
+ modified_bessel_i0=OverridesData(
702
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
703
+ cpp="modified_bessel_i0_forward({x})",
704
+ triton="libdevice.cyl_bessel_i0({x})",
705
+ name="special_modified_bessel_i0",
706
+ ),
707
+ modified_bessel_i1=OverridesData(
708
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
709
+ cpp="modified_bessel_i1_forward({x})",
710
+ triton="libdevice.cyl_bessel_i1({x})",
711
+ name="special_modified_bessel_i1",
712
+ ),
713
+ modified_bessel_k0=OverridesData(
714
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
715
+ cpp="modified_bessel_k0_forward({x})",
716
+ name="special_modified_bessel_k0",
717
+ ),
718
+ modified_bessel_k1=OverridesData(
719
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
720
+ cpp="modified_bessel_k1_forward({x})",
721
+ name="special_modified_bessel_k1",
722
+ ),
723
+ # multigamma
724
+ ndtr=OverridesData(
725
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
726
+ cpp="calc_ndtr({x})",
727
+ name="special_ndtr",
728
+ ),
729
+ ndtri=OverridesData(
730
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
731
+ cpp="calc_ndtri({x})",
732
+ name="special_ndtri",
733
+ ),
734
+ polygamma=OverridesData(
735
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
736
+ cpp="calc_polygamma({y}, {x})",
737
+ name="polygamma",
738
+ ),
739
+ # psi - alias to digamma
740
+ # round
741
+ scaled_modified_bessel_k0=OverridesData(
742
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
743
+ cpp="scaled_modified_bessel_k0_forward({x})",
744
+ name="special_scaled_modified_bessel_k0",
745
+ ),
746
+ scaled_modified_bessel_k1=OverridesData(
747
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
748
+ cpp="scaled_modified_bessel_k1_forward({x})",
749
+ name="special_scaled_modified_bessel_k1",
750
+ ),
751
+ # sinc
752
+ spherical_bessel_j0=OverridesData(
753
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
754
+ cpp="spherical_bessel_j0_forward({x})",
755
+ name="special_spherical_bessel_j0",
756
+ ),
757
+ zeta=OverridesData(
758
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
759
+ cpp="zeta({x}, {y})",
760
+ name="special_zeta",
761
+ ),
762
+ chebyshev_polynomial_t=OverridesData(
763
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
764
+ cpp="chebyshev_polynomial_t_forward({x}, {y})",
765
+ name="special_chebyshev_polynomial_t",
766
+ ),
767
+ chebyshev_polynomial_u=OverridesData(
768
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
769
+ cpp="chebyshev_polynomial_u_forward({x}, {y})",
770
+ name="special_chebyshev_polynomial_u",
771
+ ),
772
+ chebyshev_polynomial_v=OverridesData(
773
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
774
+ cpp="chebyshev_polynomial_v_forward({x}, {y})",
775
+ name="special_chebyshev_polynomial_v",
776
+ ),
777
+ chebyshev_polynomial_w=OverridesData(
778
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
779
+ cpp="chebyshev_polynomial_w_forward({x}, {y})",
780
+ name="special_chebyshev_polynomial_w",
781
+ ),
782
+ legendre_polynomial_p=OverridesData(
783
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
784
+ cpp="legendre_polynomial_p_forward({x}, {y})",
785
+ name="special_legendre_polynomial_p",
786
+ ),
787
+ shifted_chebyshev_polynomial_t=OverridesData(
788
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
789
+ cpp="shifted_chebyshev_polynomial_t_forward({x}, {y})",
790
+ name="special_shifted_chebyshev_polynomial_t",
791
+ ),
792
+ shifted_chebyshev_polynomial_u=OverridesData(
793
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
794
+ cpp="shifted_chebyshev_polynomial_u_forward({x}, {y})",
795
+ name="special_shifted_chebyshev_polynomial_u",
796
+ ),
797
+ shifted_chebyshev_polynomial_v=OverridesData(
798
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
799
+ cpp="shifted_chebyshev_polynomial_v_forward({x}, {y})",
800
+ name="special_shifted_chebyshev_polynomial_v",
801
+ ),
802
+ shifted_chebyshev_polynomial_w=OverridesData(
803
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
804
+ cpp="shifted_chebyshev_polynomial_w_forward({x}, {y})",
805
+ name="special_shifted_chebyshev_polynomial_w",
806
+ ),
807
+ hermite_polynomial_h=OverridesData(
808
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
809
+ cpp="hermite_polynomial_h_forward({x}, {y})",
810
+ name="special_hermite_polynomial_h",
811
+ ),
812
+ hermite_polynomial_he=OverridesData(
813
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
814
+ cpp="hermite_polynomial_he_forward({x}, {y})",
815
+ name="special_hermite_polynomial_he",
816
+ ),
817
+ laguerre_polynomial_l=OverridesData(
818
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
819
+ cpp="laguerre_polynomial_l_forward({x}, {y})",
820
+ name="special_laguerre_polynomial_l",
821
+ ),
822
+ )
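Editor's note: the entries above are plain placeholder templates keyed by op name; the `{x}`/`{y}` slots stand for the operand expressions. A minimal sketch (hypothetical variable names, shown only to make the placeholders concrete) of how one such template expands:

```python
cpp_template = "calc_igamma({x}, {y})"          # the igamma entry above
print(cpp_template.format(x="tmp0", y="tmp1"))  # -> calc_igamma(tmp0, tmp1)
```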
823
+
824
+
825
+ # Use mypy to check protocol implemented correctly
826
+ def _typecheck_OpOverrides(h: OpOverrides) -> OpsHandler[str]:
827
+ return h
828
+
829
+
830
+ class DeferredLine(DeferredLineBase):
831
+ """A line that can be 'unwritten' by adding name to V.graph.removed_buffers"""
832
+
833
+ def __init__(self, name, line):
834
+ super().__init__(line)
835
+ self.name = name
836
+ assert not isinstance(line, DeferredLineBase)
837
+
838
+ def __call__(self):
839
+ if all(
840
+ self.name not in x
841
+ for x in (
842
+ V.graph.removed_buffers,
843
+ V.kernel.removed_buffers,
844
+ V.graph.inplaced_to_remove,
845
+ V.kernel.inplaced_to_remove,
846
+ )
847
+ ):
848
+ return self.line
849
+ return None
850
+
851
+ def _new_line(self, line):
852
+ return DeferredLine(self.name, line)
853
+
854
+
855
+ class BracesBuffer(IndentedBuffer):
856
+ def indent(self, offset=1):
857
+ @contextlib.contextmanager
858
+ def ctx():
859
+ for _ in range(offset):
860
+ self.writeline("{")
861
+ self._indent += 1
862
+ for _ in range(-offset):
863
+ self._indent -= 1
864
+ self.writeline("}")
865
+ yield
866
+ for _ in range(-offset):
867
+ self.writeline("{")
868
+ self._indent += 1
869
+ for _ in range(offset):
870
+ self._indent -= 1
871
+ self.writeline("}")
872
+
873
+ return ctx()
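Editor's note: a minimal standalone sketch (hypothetical `TinyBracesBuffer`, not the class above) of what the positive-offset path of `BracesBuffer.indent()` does: entering the context emits `{` and bumps the indent level, and exiting emits the matching `}`. The negative-offset path, which closes braces on entry and reopens them on exit, is omitted here.

```python
import contextlib

class TinyBracesBuffer:
    """Hypothetical sketch of the positive-offset behaviour of BracesBuffer.indent()."""

    def __init__(self):
        self.lines = []
        self._indent = 0

    def writeline(self, line):
        self.lines.append("    " * self._indent + line)

    def indent(self, offset=1):
        @contextlib.contextmanager
        def ctx():
            for _ in range(offset):   # open one brace per offset step on enter
                self.writeline("{")
                self._indent += 1
            yield
            for _ in range(offset):   # close them again on exit
                self._indent -= 1
                self.writeline("}")
        return ctx()

buf = TinyBracesBuffer()
with buf.indent():
    buf.writeline("int x = 0;")
print("\n".join(buf.lines))   # prints: {  /      int x = 0;  /  }
```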
874
+
875
+
876
+ class InplacedBuffer(NamedTuple):
877
+ inner_name: str
878
+ other_names: List[str]
879
+
880
+
881
+ class KernelArgs:
882
+ @staticmethod
883
+ def _lookup(prefix, odict, name):
884
+ assert isinstance(name, (str, sympy.Symbol))
885
+ if name not in odict:
886
+ odict[name] = f"{prefix}{len(odict)}"
887
+ return odict[name]
888
+
889
+ def __init__(self, sizevars=None):
890
+ self.input_buffers = dict()
891
+ self.output_buffers = dict()
892
+ self.inplace_buffers = dict()
893
+ self.sizevars = sizevars or dict()
894
+ self.workspace_arg = None
895
+
896
+ def __repr__(self):
897
+ return "KernelArgs({})".format(
898
+ ", ".join(
899
+ map(
900
+ repr,
901
+ [
902
+ self.input_buffers,
903
+ self.output_buffers,
904
+ self.inplace_buffers,
905
+ self.sizevars,
906
+ ],
907
+ )
908
+ )
909
+ )
910
+
911
+ def _buffer_is_marked_removed(self, name):
912
+ return isinstance(name, str) and name.startswith("REMOVED")
913
+
914
+ def input(self, name):
915
+ if V.graph.scheduler:
916
+ name = V.graph.scheduler.mutation_real_name.get(name, name)
917
+ assert name not in V.graph.removed_buffers, name
918
+ if name in self.output_buffers:
919
+ return self.output_buffers[name]
920
+ if name in self.inplace_buffers:
921
+ return self.inplace_buffers[name].inner_name
922
+ if name.startswith("seed"):
923
+ return self._lookup("seed", self.input_buffers, name)
924
+ return self._lookup("in_ptr", self.input_buffers, name)
925
+
926
+ def output(self, name):
927
+ if V.graph.scheduler:
928
+ name = V.graph.scheduler.mutation_real_name.get(name, name)
929
+ assert name not in V.graph.removed_buffers, name
930
+ if name in self.inplace_buffers:
931
+ return self.inplace_buffers[name].inner_name
932
+ return self._lookup("out_ptr", self.output_buffers, name)
933
+
934
+ def make_inplace(self, input_name, output_name):
935
+ assert output_name not in self.inplace_buffers
936
+ if input_name in self.inplace_buffers:
937
+ buf = self.inplace_buffers[input_name]
938
+ buf.other_names.append(output_name)
939
+ self.inplace_buffers[output_name] = buf
940
+ else:
941
+ buf = InplacedBuffer(
942
+ f"in_out_ptr{len(unique(self.inplace_buffers.values()))}",
943
+ [input_name, output_name],
944
+ )
945
+ self.inplace_buffers[input_name] = buf
946
+ self.inplace_buffers[output_name] = buf
947
+
948
+ def workspace(self, nbytes: sympy.Expr, zero_fill: bool):
949
+ if self.workspace_arg is None:
950
+ self.workspace_arg = WorkspaceArg(nbytes, zero_fill)
951
+ return "ws_ptr", 0
952
+
953
+ offset = self.workspace_arg.nbytes
954
+ zero_fill = zero_fill or self.workspace_arg.zero_fill
955
+ self.workspace_arg = WorkspaceArg(offset + nbytes, zero_fill)
956
+ return "ws_ptr", offset
957
+
958
+ def seed_offset(self, name, value):
959
+ if value in self.sizevars:
960
+ return self.sizevars[value]
961
+ if name in self.sizevars.values():
962
+ name = (
963
+ f"{name}{sum(1 for v in self.sizevars.values() if v.startswith(name))}"
964
+ )
965
+ self.sizevars[value] = name
966
+ return name
967
+
968
+ def size(self, name):
969
+ if str(name) == "seed":
970
+ self.sizevars["seed"] = "seed"
971
+ return "seed"
972
+ return self._lookup("ks", self.sizevars, name)
973
+
974
+ def call_names(self):
975
+ return chain(
976
+ self.input_buffers.keys(), self.output_buffers.keys(), self.sizevars.keys()
977
+ )
978
+
979
+ def wrap_ptr_arg(self, buf, dtype):
980
+ return buf
981
+
982
+ def wrap_size_arg(self, size):
983
+ return str(size)
984
+
985
+ def cpp_argdefs(self):
986
+ from .cpp import DTYPE_TO_CPP, INDEX_TYPE
987
+
988
+ call_args = []
989
+ arg_defs = []
990
+ arg_types = []
991
+ for inplaced in unique(self.inplace_buffers.values()):
992
+ if self._buffer_is_marked_removed(inplaced):
993
+ continue
994
+ outer = inplaced.other_names[-1]
995
+ inner = inplaced.inner_name
996
+ dtype = V.graph.get_dtype(outer)
997
+ cpp_dtype = DTYPE_TO_CPP[dtype]
998
+ arg_defs.append(f"{cpp_dtype}* {inner}")
999
+ call_args.append(self.wrap_ptr_arg(outer, dtype))
1000
+ arg_types.append(f"{cpp_dtype}*")
1001
+ for outer, inner in self.input_buffers.items():
1002
+ if outer in self.inplace_buffers:
1003
+ continue
1004
+ dtype = V.graph.get_dtype(outer)
1005
+ cpp_dtype = DTYPE_TO_CPP[dtype]
1006
+ arg_defs.append(f"const {cpp_dtype}* {inner}")
1007
+ call_args.append(self.wrap_ptr_arg(outer, dtype))
1008
+ arg_types.append(f"const {cpp_dtype}*")
1009
+ for outer, inner in self.output_buffers.items():
1010
+ if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner):
1011
+ continue
1012
+ dtype = V.graph.get_dtype(outer)
1013
+ cpp_dtype = DTYPE_TO_CPP[dtype]
1014
+ arg_defs.append(f"{cpp_dtype}* {inner}")
1015
+ call_args.append(self.wrap_ptr_arg(outer, dtype))
1016
+ arg_types.append(f"{cpp_dtype}*")
1017
+ for outer, inner in self.sizevars.items():
1018
+ arg_defs.append(f"const {INDEX_TYPE} {inner}")
1019
+ call_args.append(self.wrap_size_arg(outer))
1020
+ arg_types.append(f"const {INDEX_TYPE}")
1021
+ if V.graph.wrapper_code:
1022
+ V.graph.wrapper_code.ensure_size_computed(outer)
1023
+ assert self.workspace_arg is None, "Workspace not supported on CPU "
1024
+ return arg_defs, call_args, arg_types
1025
+
1026
+ def python_argdefs(self):
1027
+ arg_defs = []
1028
+ call_args = []
1029
+ precompile_args: List[Union[TensorArg, SizeArg, WorkspaceArg]] = []
1030
+ for inplaced in unique(self.inplace_buffers.values()):
1031
+ if self._buffer_is_marked_removed(inplaced):
1032
+ continue
1033
+ arg_defs.append(inplaced.inner_name)
1034
+ call_args.append(inplaced.other_names[-1])
1035
+ precompile_args.append(
1036
+ TensorArg(
1037
+ name=inplaced.inner_name,
1038
+ buffer=inplaced.other_names[-1],
1039
+ dtype=V.graph.get_dtype(inplaced.other_names[-1]),
1040
+ )
1041
+ )
1042
+ for outer, inner in chain(
1043
+ self.input_buffers.items(), self.output_buffers.items()
1044
+ ):
1045
+ if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner):
1046
+ continue
1047
+ arg_defs.append(inner)
1048
+ call_args.append(outer)
1049
+ precompile_args.append(
1050
+ TensorArg(
1051
+ name=inner,
1052
+ buffer=outer,
1053
+ dtype=V.graph.get_dtype(outer),
1054
+ )
1055
+ )
1056
+ for outer, inner in self.sizevars.items():
1057
+ arg_defs.append(inner)
1058
+ call_args.append(outer)
1059
+ precompile_args.append(SizeArg(inner, outer))
1060
+ if V.graph.wrapper_code:
1061
+ V.graph.wrapper_code.ensure_size_computed(outer)
1062
+ if self.workspace_arg is not None:
1063
+ arg_defs.append("ws_ptr")
1064
+ call_args.append("workspace")
1065
+ precompile_args.append(self.workspace_arg)
1066
+
1067
+ return arg_defs, call_args, precompile_args
1068
+
1069
+ def aliases(self):
1070
+ for inplaced in unique(self.inplace_buffers.values()):
1071
+ if self._buffer_is_marked_removed(inplaced):
1072
+ continue
1073
+ for other in inplaced.other_names:
1074
+ if (
1075
+ other in V.graph.inplaced_to_remove
1076
+ or other in V.kernel.inplaced_to_remove
1077
+ ):
1078
+ continue
1079
+ if other in self.input_buffers:
1080
+ yield self.input_buffers[other], inplaced.inner_name
1081
+ if other in self.output_buffers:
1082
+ yield self.output_buffers[other], inplaced.inner_name
1083
+
1084
+ def is_removed(self, name):
1085
+ def _is_removed(name, buffers):
1086
+ return name not in buffers or self._buffer_is_marked_removed(buffers[name])
1087
+
1088
+ return _is_removed(name, self.output_buffers) and _is_removed(
1089
+ name, self.inplace_buffers
1090
+ )
1091
+
1092
+ # Includes inplace buffers, excludes removed buffers. Essentially,
1093
+ # after you do a call into this kernel, which buffers actually contain
1094
+ # updated data? Modeled off of python_argdefs.
1095
+ def live_output_buffers(self):
1096
+ live_outs = set()
1097
+ for inplaced in unique(self.inplace_buffers.values()):
1098
+ if self._buffer_is_marked_removed(inplaced):
1099
+ continue
1100
+ live_outs.add(inplaced.other_names[-1])
1101
+ for outer, inner in self.output_buffers.items():
1102
+ if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner):
1103
+ continue
1104
+ live_outs.add(outer)
1105
+ return live_outs
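Editor's note: a small, self-contained sketch (assumed helper name, not the class above) of the `_lookup` naming convention that `KernelArgs.input`/`output`/`size` rely on: the first time an outer buffer name is seen it is assigned a positional inner name such as `in_ptr0`, and later lookups return the cached name.

```python
def lookup(prefix, table, name):
    # Mirrors the _lookup idea: assign "<prefix><position>" on first sight,
    # then return the cached inner name on every later call.
    if name not in table:
        table[name] = f"{prefix}{len(table)}"
    return table[name]

input_buffers = {}
assert lookup("in_ptr", input_buffers, "buf0") == "in_ptr0"
assert lookup("in_ptr", input_buffers, "buf3") == "in_ptr1"
assert lookup("in_ptr", input_buffers, "buf0") == "in_ptr0"  # cached
```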
1106
+
1107
+
1108
+ class CSEVariable:
1109
+ """A CSEVariable is just a name for an expression, but it is useful to be able to annotate it on a backend-dependent basis.
1110
+ To do so, the backends can simply overload `Kernel.create_cse_var`.
1111
+ The "CSEVariable.update_on_args" method gives you a hook for annotations.
1112
+ See the example of TritonCSEVariable in triton.py.
1113
+ """
1114
+
1115
+ def __init__(self, name, bounds: ValueRanges[Any]):
1116
+ assert isinstance(bounds, ValueRanges)
1117
+ self.name = name
1118
+ self.bounds = bounds
1119
+
1120
+ def __str__(self):
1121
+ return self.name
1122
+
1123
+ def __hash__(self) -> int:
1124
+ return hash(self.name)
1125
+
1126
+ def __eq__(self, other) -> bool:
1127
+ return type(other) == type(self) and other.name == self.name
1128
+
1129
+ def update_on_args(self, name, args, kwargs):
1130
+ pass
1131
+
1132
+
1133
+ class CppWrapperKernelArgs(KernelArgs):
1134
+ def wrap_ptr_arg(self, buf, dtype):
1135
+ from .cpp import DTYPE_TO_CPP
1136
+
1137
+ if config.abi_compatible:
1138
+ # In the abi_compatible mode, we just return the buf here.
1139
+ # We will form correct call args later in wrapper.generate_kernel_all.
1140
+ return buf
1141
+ else:
1142
+ return f"({DTYPE_TO_CPP[dtype]}*)({buf}.data_ptr())"
1143
+
1144
+ def wrap_size_arg(self, size):
1145
+ return f"{size}"
1146
+
1147
+
1148
+ class CSE:
1149
+ """Common subexpression elimination"""
1150
+
1151
+ def __init__(
1152
+ self,
1153
+ prefix="",
1154
+ suffix="",
1155
+ name_prefix="tmp",
1156
+ iter_buffers=None,
1157
+ store_cache=None,
1158
+ reduction_cache=None,
1159
+ varname_map=None,
1160
+ ):
1161
+ self.prefix = prefix
1162
+ self.suffix = suffix
1163
+ self.cache = {}
1164
+ self.name_prefix = name_prefix
1165
+ self.store_cache = store_cache or {}
1166
+ self.reduction_cache = reduction_cache or {}
1167
+ self.iter_buffer_ids = iter_buffers or itertools.count()
1168
+ self.invalidated_stores = set()
1169
+ self.varname_map = varname_map or {}
1170
+
1171
+ def invalidate(self, keep_vars: Set[str]):
1172
+ for name, tmp in list(self.store_cache.items()):
1173
+ if tmp not in keep_vars:
1174
+ del self.store_cache[name]
1175
+ self.invalidated_stores.add(name)
1176
+ self.cache = {k: v for k, v in self.cache.items() if v in keep_vars}
1177
+
1178
+ def clone(self):
1179
+ # Note(fdrocha): reduction_cache is not being cloned, not sure if this is intentional
1180
+ return CSE(
1181
+ prefix=self.prefix,
1182
+ suffix=self.suffix,
1183
+ name_prefix=self.name_prefix,
1184
+ iter_buffers=self.iter_buffer_ids,
1185
+ store_cache=self.store_cache,
1186
+ varname_map=self.varname_map,
1187
+ )
1188
+
1189
+ def generate(
1190
+ self,
1191
+ buffer: IndentedBuffer,
1192
+ expr: Union[str, CSEVariable, OpsValue, IndentedBuffer],
1193
+ *,
1194
+ bounds: ValueRanges[Any] = ValueRanges.unknown(),
1195
+ write=True,
1196
+ assignment=True,
1197
+ ) -> CSEVariable:
1198
+ if isinstance(expr, OpsValue):
1199
+ expr = expr.value
1200
+
1201
+ assert isinstance(expr, (str, CSEVariable, IndentedBuffer)), type(expr)
1202
+ assert write or assignment
1203
+ if isinstance(expr, CSEVariable):
1204
+ # If the expressions were always created with all the information, we could
1205
+ # assert expr.bounds == bounds, but sometimes the expression is created
1206
+ # with the loose ValueRanges.unknown(), so we need to tighten the bounds
1207
+ expr.bounds = expr.bounds.tighten(bounds)
1208
+ return expr
1209
+ cache_key = expr.getvalue() if isinstance(expr, IndentedBuffer) else expr
1210
+ var = self.cache.get(cache_key, None)
1211
+ if not var:
1212
+ var = self.newvar(bounds) if assignment else None
1213
+ self.cache[cache_key] = var
1214
+ if write:
1215
+ if V.kernel.current_node:
1216
+ V.kernel.current_node.codegen_originating_info(
1217
+ buffer, only_once=True
1218
+ )
1219
+ if isinstance(expr, IndentedBuffer):
1220
+ if assignment:
1221
+ buffer.writeline(f"{self.prefix}{var} =")
1222
+ buffer.splice(expr)
1223
+ buffer.writeline(self.suffix)
1224
+ else:
1225
+ if assignment:
1226
+ line = f"{self.prefix}{var} = {expr}{self.suffix}"
1227
+ else:
1228
+ line = f"{expr}{self.suffix}"
1229
+ buffer.writeline(line)
1230
+ else:
1231
+ var.bounds = var.bounds.tighten(bounds)
1232
+
1233
+ return var
1234
+
1235
+ def newvar(self, bounds: ValueRanges[Any] = ValueRanges.unknown()) -> CSEVariable:
1236
+ var_name = f"{self.name_prefix}{next(self.iter_buffer_ids)}"
1237
+ var = V.kernel.create_cse_var(var_name, bounds)
1238
+ self.varname_map[var_name] = var
1239
+ return var
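Editor's note: a rough illustration of the caching behaviour of `CSE.generate` (a hypothetical standalone sketch, not the real class, which also tracks bounds, store caches and deferred writes): two requests for the same expression string yield the same temporary, and the assignment is emitted only once.

```python
import itertools

class TinyCSE:
    """Hypothetical sketch of the expression-deduplication idea in CSE.generate."""

    def __init__(self):
        self.cache = {}                  # expression text -> variable name
        self.counter = itertools.count()
        self.lines = []                  # emitted assignments

    def generate(self, expr: str) -> str:
        if expr not in self.cache:
            var = f"tmp{next(self.counter)}"
            self.lines.append(f"{var} = {expr}")
            self.cache[expr] = var
        return self.cache[expr]

cse = TinyCSE()
a = cse.generate("tl.load(in_ptr0 + x0)")
b = cse.generate("tl.load(in_ptr0 + x0)")   # cache hit: no new line emitted
assert a == b == "tmp0" and len(cse.lines) == 1
```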
1240
+
1241
+
1242
+ class IndirectAssertLine(DeferredLineBase):
1243
+ def __init__(self, line, assert_fn, var, mask, size_map):
1244
+ self.var = var
1245
+ self.mask = mask
1246
+ self.line = line
1247
+ self.assert_fn = assert_fn
1248
+ self.size_map = size_map
1249
+
1250
+ def __call__(self):
1251
+ size, size_str = self.size_map[(self.var, self.mask)]
1252
+
1253
+ # We assert if we've not been able to prove the bound
1254
+ assert_min = (self.var.bounds.lower >= 0) != sympy.true
1255
+ assert_max = (self.var.bounds.upper < size) != sympy.true
1256
+
1257
+ # FooBar interview question
1258
+ if not (assert_min or assert_max):
1259
+ return None
1260
+ elif assert_min and assert_max:
1261
+ # The conditions need to be in parens because of Python's operator precedence.
1262
+ # It'd be less error-prone to use and/or/not, which is supported by Triton
1263
+ cond = f"(0 <= {self.var}) & ({self.var} < {size_str})"
1264
+ cond_print = f"0 <= {self.var} < {size_str}"
1265
+ elif assert_min:
1266
+ cond = f"0 <= {self.var}"
1267
+ cond_print = cond
1268
+ else:
1269
+ assert assert_max
1270
+ cond = f"{self.var} < {size_str}"
1271
+ cond_print = cond
1272
+
1273
+ if self.mask:
1274
+ cond = f"({cond}) | ~{self.mask}"
1275
+ return self.line.format(
1276
+ assert_fn=self.assert_fn, cond=cond, cond_print=cond_print
1277
+ )
1278
+
1279
+ def _new_line(self, line):
1280
+ return IndirectAssertLine(
1281
+ line, self.assert_fn, self.var, self.mask, self.size_map
1282
+ )
1283
+
1284
+
1285
+ class CodeGen:
1286
+ def __init__(self):
1287
+ super().__init__()
1288
+ self.exit_stack = contextlib.ExitStack()
1289
+
1290
+ def __enter__(self):
1291
+ self.exit_stack.__enter__()
1292
+ return self
1293
+
1294
+ def __exit__(self, exc_type, exc_val, exc_tb):
1295
+ self.exit_stack.__exit__(exc_type, exc_val, exc_tb)
1296
+
1297
+
1298
+ class Kernel(CodeGen):
1299
+ newvar_prefix = ""
1300
+ suffix = ""
1301
+ overrides: Optional[Callable[[OpsHandler[Any]], OpsHandler[Any]]] = None
1302
+ # TODO: these look dead, but with all the getattr it's hard to tell...
1303
+ load_format: None = None
1304
+ store_format: None = None
1305
+
1306
+ def __init__(self, args=None, increase_kernel_count=True):
1307
+ super().__init__()
1308
+ if increase_kernel_count:
1309
+ metrics.generated_kernel_count += 1
1310
+ self.args = args or KernelArgs()
1311
+ self.loads = IndentedBuffer()
1312
+ self.compute = IndentedBuffer()
1313
+ self.stores = IndentedBuffer()
1314
+ self.cse: CSE = CSE(self.newvar_prefix, self.suffix)
1315
+ self.must_keep_buffers = set()
1316
+ self.store_buffer_names = set()
1317
+ self._load_mask = None
1318
+ # set in set_current_node
1319
+ self.current_node = None
1320
+ self.node_to_bounds: Optional[Dict[torch.fx.Node, ValueRanges[Any]]] = None
1321
+ # Upper bounds for indirect_indexing and their str representation
1322
+ # NB: None, None is never stored in map, but it is the assumed
1323
+ # "not set" value for the dict
1324
+ self.indirect_max_sizes: Dict[
1325
+ Tuple[CSEVariable, str], Union[Tuple[sympy.Expr, str], Tuple[None, None]]
1326
+ ] = {}
1327
+
1328
+ self.removed_buffers = set()
1329
+ self.inplaced_to_remove = set()
1330
+
1331
+ # key: the buffer to write
1332
+ # value: the buffer to read and whose memory can be reused for
1333
+ # the buffer specified by key
1334
+ self.inplace_update_buffers = dict()
1335
+ # Set minimum number of elements processed per thread.
1336
+ self.min_elem_per_thread = 1
1337
+ self.kernel_name = None
1338
+
1339
+ @contextlib.contextmanager
1340
+ def set_current_node(self, node):
1341
+ prior = self.current_node
1342
+ self.current_node = node
1343
+ self.node_to_bounds = node._body.bounds().get_bounds()
1344
+ try:
1345
+ yield
1346
+ finally:
1347
+ self.current_node = prior
1348
+
1349
+ @contextlib.contextmanager
1350
+ def swap_buffers(self, lb, cb=None, sb=None):
1351
+ if cb is None:
1352
+ cb = lb
1353
+ loads = self.loads
1354
+ compute = self.compute
1355
+ stores = self.stores
1356
+ cse = self.cse
1357
+ self.loads = lb
1358
+ self.compute = cb
1359
+ self.stores = sb
1360
+ self.cse = cse.clone()
1361
+ try:
1362
+ yield
1363
+ finally:
1364
+ self.loads = loads
1365
+ self.compute = compute
1366
+ self.stores = stores
1367
+ self.cse = cse
1368
+
1369
+ def load(self, name: str, index: sympy.Expr) -> CSEVariable:
1370
+ raise NotImplementedError()
1371
+
1372
+ def indirect_load(self, name: str, index: sympy.Expr):
1373
+ """A load that depends on an index we have read"""
1374
+ prior = self.loads
1375
+ try:
1376
+ # put the load in the compute section as it might have deps
1377
+ self.loads = self.compute
1378
+ return self.load(name, index)
1379
+ finally:
1380
+ self.loads = prior
1381
+
1382
+ def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable):
1383
+ raise NotImplementedError()
1384
+
1385
+ def store(
1386
+ self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None
1387
+ ) -> None:
1388
+ raise NotImplementedError()
1389
+
1390
+ def reduction(
1391
+ self,
1392
+ dtype: torch.dtype,
1393
+ src_dtype: torch.dtype,
1394
+ reduction_type: ReductionType,
1395
+ value: Union[CSEVariable, Tuple[CSEVariable, ...]],
1396
+ ) -> Union[CSEVariable, Tuple[CSEVariable, ...]]:
1397
+ raise NotImplementedError()
1398
+
1399
+ def scan(
1400
+ self,
1401
+ dtype: torch.dtype,
1402
+ combine_fn: Callable[[CSEVariable, CSEVariable], CSEVariable],
1403
+ value: CSEVariable,
1404
+ init: int,
1405
+ ) -> CSEVariable:
1406
+ raise NotImplementedError()
1407
+
1408
+ def bucketize(
1409
+ self,
1410
+ values: CSEVariable,
1411
+ offsets_name: str,
1412
+ offsets_size: sympy.Expr,
1413
+ indexing_dtype: torch.dtype,
1414
+ right: bool,
1415
+ ) -> CSEVariable:
1416
+ """
1417
+ See [Note: Inductor bucketize op]
1418
+ """
1419
+ raise NotImplementedError()
1420
+
1421
+ @property
1422
+ def assert_function(self) -> str:
1423
+ raise NotImplementedError()
1424
+
1425
+ def index_to_str(self, index: sympy.Expr) -> str:
1426
+ raise NotImplementedError()
1427
+
1428
+ def __enter__(self):
1429
+ # TODO: hoist this to top level
1430
+ class CSEProxy:
1431
+ self.name = "CSEProxy"
1432
+
1433
+ @staticmethod
1434
+ def __getattr__(name: str) -> Callable[..., CSEVariable]: # type: ignore[misc]
1435
+ def inner(*args, **kwargs):
1436
+ # TritonTemplateKernel has no current_node
1437
+ buf_bounds = ValueRanges.unknown()
1438
+ if hasattr(V.interpreter, "current_node"):
1439
+ fx_node = V.interpreter.current_node
1440
+ assert isinstance(self.node_to_bounds, dict)
1441
+ buf_bounds = self.node_to_bounds.get(
1442
+ fx_node, ValueRanges.unknown()
1443
+ )
1444
+
1445
+ value = getattr(parent_handler, name)(*args, **kwargs) # type: ignore[has-type]
1446
+
1447
+ def do_cse(v):
1448
+ csevar = self.cse.generate(self.compute, v, bounds=buf_bounds)
1449
+ csevar.update_on_args(name, args, kwargs)
1450
+ return csevar
1451
+
1452
+ return pytree.tree_map(do_cse, value)
1453
+
1454
+ return inner
1455
+
1456
+ @staticmethod
1457
+ def indirect_indexing(
1458
+ var: CSEVariable, size: sympy.Expr, check: bool = True
1459
+ ):
1460
+ # Skip CSE since this doesn't return an expression
1461
+
1462
+ if var.bounds.lower < 0: # type: ignore[operator]
1463
+ new_bounds = ValueRanges.unknown()
1464
+ if var.bounds != ValueRanges.unknown() and isinstance(
1465
+ size, sympy.Number
1466
+ ):
1467
+ # Take the negative part of the bound and add size to it
1468
+ # Then take union of that and the positive part
1469
+ # This is a tighter bound than that of a generic ops.where, as we have info on the cond
1470
+ neg = var.bounds & ValueRanges(-sympy.oo, -1)
1471
+ new_bounds = ValueRanges(neg.lower + size, neg.upper + size)
1472
+ # We don't have a good way of representing the empty range
1473
+ if var.bounds.upper >= 0: # type: ignore[operator]
1474
+ pos = var.bounds & ValueRanges(0, sympy.oo)
1475
+ new_bounds = new_bounds | pos
1476
+
1477
+ stm = ops.add(var, self.rename_indexing(size))
1478
+ # Mixed negative and non-negative
1479
+ if var.bounds.upper >= 0: # type: ignore[operator]
1480
+ lt = ops.lt(var, "0")
1481
+ stm = ops.where(lt, stm, var)
1482
+ new_var = self.cse.generate(self.compute, stm, bounds=new_bounds)
1483
+
1484
+ new_var.update_on_args("index_wrap", (var,), {})
1485
+ var = new_var
1486
+
1487
+ if self.generate_assert(check):
1488
+ mask = self.load_mask(var)
1489
+
1490
+ # An assertion line may have been written already; if so, just
1491
+ # update the max size.
1492
+ map_key = (var, mask)
1493
+ existing_size, _ = self.indirect_max_sizes.get(
1494
+ map_key, (None, None)
1495
+ )
1496
+ if existing_size is not None:
1497
+ size = sympy.Min(size, existing_size)
1498
+ else:
1499
+ line = (
1500
+ '{assert_fn}({cond}, "index out of bounds: {cond_print}")'
1501
+ )
1502
+ self.compute.writeline(
1503
+ IndirectAssertLine(
1504
+ line,
1505
+ self.assert_function,
1506
+ var,
1507
+ mask,
1508
+ self.indirect_max_sizes,
1509
+ )
1510
+ )
1511
+
1512
+ self.indirect_max_sizes[map_key] = (size, self.index_to_str(size))
1513
+ return sympy_index_symbol(str(var))
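Editor's note: stripped of masks and ValueRanges, the wrap-and-assert scheme above amounts to ordinary negative-index normalisation followed by a bounds check; a trivial scalar sketch (hypothetical helper, not the generated kernel code):

```python
def wrap_and_check(idx: int, size: int) -> int:
    # Wrap negative indices (idx < 0 means "count from the end") ...
    wrapped = idx + size if idx < 0 else idx
    # ... then check bounds, like the generated device-side assert.
    assert 0 <= wrapped < size, f"index out of bounds: 0 <= {wrapped} < {size}"
    return wrapped

assert wrap_and_check(-1, 8) == 7
assert wrap_and_check(3, 8) == 3
```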
1514
+
1515
+ @staticmethod
1516
+ def load(name: str, index: sympy.Expr) -> CSEVariable:
1517
+ if name in self.cse.invalidated_stores:
1518
+ # A load from an invalidated store requires us to
1519
+ # keep the actual buffer around
1520
+ V.kernel.must_keep_buffers.add(name)
1521
+ if free_symbol_startswith(index, "tmp"):
1522
+ return self.indirect_load(name, index)
1523
+ store_cache = self.cse.store_cache
1524
+ if name in store_cache:
1525
+ return store_cache[name]
1526
+ return self.load(name, index)
1527
+
1528
+ @staticmethod
1529
+ def store(
1530
+ name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None
1531
+ ) -> None:
1532
+ self.store_buffer_names.add(name)
1533
+ if mode is None:
1534
+ self.cse.store_cache[name] = value
1535
+ if self.current_node:
1536
+ for other_name in self.current_node.get_mutations():
1537
+ self.cse.store_cache[other_name] = value
1538
+ if name not in V.graph.removed_buffers:
1539
+ return self.store(name, index, value, mode=mode)
1540
+ else:
1541
+ return None # type: ignore[return-value]
1542
+
1543
+ @staticmethod
1544
+ def store_reduction(name: str, index: sympy.Expr, value: CSEVariable):
1545
+ self.store_buffer_names.add(name)
1546
+ self.cse.store_cache[name] = value
1547
+ if self.current_node:
1548
+ for other_name in self.current_node.get_mutations():
1549
+ self.cse.store_cache[other_name] = value
1550
+
1551
+ if name not in V.graph.removed_buffers:
1552
+ return self.store_reduction(name, index, value)
1553
+
1554
+ @staticmethod
1555
+ def reduction(
1556
+ dtype: torch.dtype,
1557
+ src_dtype: torch.dtype,
1558
+ reduction_type: ReductionType,
1559
+ value: Union[CSEVariable, Tuple[CSEVariable, ...]],
1560
+ ) -> Union[CSEVariable, Tuple[CSEVariable, ...]]:
1561
+ return self.reduction(dtype, src_dtype, reduction_type, value)
1562
+
1563
+ @staticmethod
1564
+ def scan(
1565
+ dtype: torch.dtype,
1566
+ combine_fn: Callable[[CSEVariable, CSEVariable], CSEVariable],
1567
+ value: CSEVariable,
1568
+ init: int,
1569
+ ) -> CSEVariable:
1570
+ return self.scan(dtype, combine_fn, value, init)
1571
+
1572
+ @staticmethod
1573
+ def bucketize(
1574
+ values: CSEVariable,
1575
+ offsets_name: str,
1576
+ offsets_size: sympy.Expr,
1577
+ indexing_dtype: torch.dtype,
1578
+ right: bool,
1579
+ ) -> CSEVariable:
1580
+ """
1581
+ [Note: Inductor bucketize op]
1582
+
1583
+ Given values (tensor) and offsets_name (reference to the name of a 1D
1584
+ tensor), calculate the bucket that each value belongs to.
1585
+
1586
+ e.g. for values [-1, 0, 1, 2, 3, 4, 5, 9], offsets [0, 4, 4, 8], right=True
1587
+ return = [ 0, 1, 1, 1, 1, 3, 3, 4].
1588
+
1589
+ When right == False, bucket i refers to range (offsets[i], offsets[i+1]].
1590
+ When right == True, bucket i refers to range [offsets[i], offsets[i+1]).
1591
+
1592
+ Offsets must be non-decreasing or the result is undefined.
1593
+ """
1594
+ return self.bucketize(
1595
+ values, offsets_name, offsets_size, indexing_dtype, right
1596
+ )
1597
+
1598
+ # Use mypy to check protocol implemented correctly
1599
+ def _typecheck_CSEProxy(h: CSEProxy) -> OpsHandler[CSEVariable]:
1600
+ return h
1601
+
1602
+ super().__enter__()
1603
+ assert self.overrides
1604
+ parent_handler = self.overrides(V.get_ops_handler())
1605
+ self.exit_stack.enter_context(V.set_ops_handler(CSEProxy()))
1606
+ self.exit_stack.enter_context(V.set_kernel_handler(self))
1607
+ return self
1608
+
1609
+ def __exit__(self, exc_type, exc_val, exc_tb):
1610
+ """
1611
+ Note that V.graph.scheduler can be None when codegening triton template
1612
+ kernels.
1613
+ """
1614
+ if V.graph.scheduler:
1615
+ V.graph.scheduler.remove_kernel_local_buffers()
1616
+ super().__exit__(exc_type, exc_val, exc_tb)
1617
+
1618
+ def generate_assert(self, check):
1619
+ return (check or config.debug_index_asserts) and config.assert_indirect_indexing
1620
+
1621
+ def load_mask(self, var) -> str:
1622
+ # only the Triton kernel requires a mask
1623
+ return ""
1624
+
1625
+ def rename_indexing(self, index) -> sympy.Expr:
1626
+ # adds the necessary kernel args for index expressions
1627
+ # and renames variables in index expressions to kernel arg names
1628
+ if isinstance(index, (list, tuple)):
1629
+ return [self.rename_indexing(x) for x in index] # type: ignore[return-value]
1630
+ index = V.graph.sizevars.simplify(index)
1631
+ sorted_symbols = sorted(index.free_symbols, key=lambda s: s.name)
1632
+ replacements = {
1633
+ x: self.args.size(x)
1634
+ for x in sorted_symbols
1635
+ if x.name.startswith(("s", "u", "ps"))
1636
+ or (x.name.startswith("i") and not x.name.startswith("idx"))
1637
+ }
1638
+ return sympy_subs(index, replacements)
1639
+
1640
+ def create_cse_var(self, *args, **kwargs):
1641
+ return CSEVariable(*args, **kwargs)
1642
+
1643
+
1644
+ @dataclasses.dataclass
1645
+ class OptimizationContext:
1646
+ key: ClassVar[str] = "opt_ctx"
1647
+
1648
+ # Load value as mask
1649
+ is_load_as_mask: bool = False
1650
+
1651
+ dtype: Optional[torch.dtype] = None
1652
+ ops_name: str = ""
1653
+
1654
+ # Load uint8/int8 value as float32
1655
+ is_load_int8_as_float: bool = False
1656
+
1657
+
1658
+ @functools.lru_cache(None)
1659
+ def jinja2_env():
1660
+ try:
1661
+ import jinja2
1662
+
1663
+ return jinja2.Environment(
1664
+ undefined=jinja2.StrictUndefined,
1665
+ )
1666
+ except ImportError:
1667
+ return None
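Editor's note: the `StrictUndefined` setting above is what makes template bugs loud: a variable missing from `render()` raises instead of silently expanding to an empty string. A small sketch (hypothetical template text; requires `jinja2` to be installed, otherwise `jinja2_env()` returns None and templating is unavailable):

```python
import jinja2

env = jinja2.Environment(undefined=jinja2.StrictUndefined)
template = env.from_string("kernel {{ name }}")

print(template.render(name="fused_add_relu"))  # -> "kernel fused_add_relu"
try:
    template.render()                          # 'name' is missing
except jinja2.exceptions.UndefinedError as exc:
    print("caught:", exc)                      # StrictUndefined turns this into an error
```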
1668
+
1669
+
1670
+ PrimitiveInfoType = Union[int, float, bool, str, List[Union[int, str, float, bool]]]
1671
+
1672
+
1673
+ class ChoiceCaller:
1674
+ """
1675
+ Represents a possible choice used in autotune_process.py.
1676
+ During autotuning, self.benchmark() is first called to get the benchmark result,
1677
+ and if this choice is selected, self.output_node() is called to get the output_node.
1678
+
1679
+ Child classes: TritonTemplateCaller, CUDATemplateCaller.
1680
+ """
1681
+
1682
+ def __init__(self, name, input_nodes, layout):
1683
+ super().__init__()
1684
+ self.name = name
1685
+ self.layout = layout
1686
+ self.input_nodes = input_nodes
1687
+
1688
+ def benchmark(self, *args, out) -> float:
1689
+ algo = self.to_callable()
1690
+ return do_bench(lambda: algo(*args, out=out))
1691
+
1692
+ def call_name(self) -> str:
1693
+ raise NotImplementedError()
1694
+
1695
+ def to_callable(self):
1696
+ raise NotImplementedError()
1697
+
1698
+ def hash_key(self) -> str:
1699
+ raise NotImplementedError()
1700
+
1701
+ def output_node(self) -> "TensorBox":
1702
+ raise NotImplementedError()
1703
+
1704
+ def info_dict(self) -> Dict[str, Union[PrimitiveInfoType, List[PrimitiveInfoType]]]:
1705
+ """Information returned here is logged to the autotune log file when that is enabled."""
1706
+ return {}
1707
+
1708
+
1709
+ class KernelTemplate:
1710
+ """
1711
+ Base class for defining kernel templates.
1712
+
1713
+ Child classes: TritonTemplate, CUDATemplate
1714
+ """
1715
+
1716
+ @staticmethod
1717
+ def _template_from_string(source):
1718
+ env = jinja2_env()
1719
+ if env is not None:
1720
+ return env.from_string(source)
1721
+ return None
1722
+
1723
+ @staticmethod
1724
+ def _fake_get_dtype(fake_out):
1725
+ _get_dtype_real = V.graph.get_dtype
1726
+
1727
+ def get_dtype(name):
1728
+ if name == fake_out.get_name():
1729
+ return fake_out.get_dtype()
1730
+ return _get_dtype_real(name)
1731
+
1732
+ return get_dtype
1733
+
1734
+ def __init__(self, name: str):
1735
+ self.name = name
1736
+
1737
+ def maybe_append_choice(self, choices, **kwargs):
1738
+ """
1739
+ Maybe generates a new ChoiceCaller and appends it into existing choices.
1740
+
1741
+ choices: A list of ChoiceCallers.
1742
+ kwargs: Additional kwargs to be passed to self.generate() to generate a new ChoiceCaller.
1743
+ """
1744
+
1745
+ try:
1746
+ choices.append(self.generate(**kwargs))
1747
+ except NotImplementedError:
1748
+ pass
1749
+
1750
+ def generate(self, **kwargs) -> ChoiceCaller:
1751
+ """
1752
+ Generates a ChoiceCaller instance from the given arguments.
1753
+ """
1754
+
1755
+ raise NotImplementedError()
venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h ADDED
@@ -0,0 +1,595 @@
1
+ #pragma once
2
+
3
+ #include <algorithm>
4
+ #include <atomic>
5
+ #include <cmath>
6
+ #include <cstdlib>
7
+ #include <limits>
8
+ #include <omp.h>
9
+
10
+ #include <ATen/NumericUtils.h>
11
+ #include <ATen/core/PhiloxRNGEngine.h>
12
+ #include <ATen/native/Math.h>
13
+
14
+ #include <c10/util/Float8_e4m3fn.h>
15
+ #include <c10/util/Float8_e5m2.h>
16
+ #include <c10/util/BFloat16.h>
17
+ #include <c10/util/BFloat16-math.h>
18
+ #include <c10/util/generic_math.h>
19
+ #include <c10/util/Half.h>
20
+ #include <c10/util/TypeCast.h>
21
+
22
+ #if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_ZVECTOR)
23
+ #define INDUCTOR_USE_VECTOR_TYPES() 1
24
+ #else
25
+ #define INDUCTOR_USE_VECTOR_TYPES() 0
26
+ #endif
27
+
28
+ #if INDUCTOR_USE_VECTOR_TYPES()
29
+ #include <ATen/cpu/vec/functional.h>
30
+ #include <ATen/cpu/vec/vec.h>
31
+ #include <ATen/cpu/vec/vec_n.h>
32
+ #endif
33
+
34
+ typedef at::Half half;
35
+ typedef at::BFloat16 bfloat16;
36
+
37
+ typedef at::Float8_e4m3fn float8_e4m3fn;
38
+ typedef at::Float8_e5m2 float8_e5m2;
39
+
40
+ template <typename T>
41
+ struct Welford {
42
+ T mean = T(0);
43
+ T m2 = T(0);
44
+ T weight = T(0);
45
+ };
46
+
47
+
48
+ template <typename T>
49
+ struct IsVecType: std::false_type {};
50
+
51
+ #if INDUCTOR_USE_VECTOR_TYPES()
52
+ template <typename T>
53
+ struct IsVecType<at::vec::Vectorized<T>>: std::true_type {};
54
+ #endif
55
+
56
+ template <typename T>
57
+ Welford<T> welford_combine(const Welford<T> &a, const Welford<T> &b) {
58
+ if constexpr (!IsVecType<T>::value) {
59
+ if (a.weight == 0) {
60
+ return b;
61
+ }
62
+ if (b.weight == 0) {
63
+ return a;
64
+ }
65
+ }
66
+ auto delta = b.mean - a.mean;
67
+ auto new_weight = a.weight + b.weight;
68
+ auto wb_over_w = b.weight / new_weight;
69
+ if constexpr (IsVecType<T>::value) {
70
+ // Guard against division by zero
71
+ wb_over_w = T::blendv(wb_over_w, T(0), new_weight == T(0));
72
+ }
73
+ auto result = Welford<T>{
74
+ a.mean + delta * wb_over_w,
75
+ a.m2 + b.m2 + delta * delta * a.weight * wb_over_w,
76
+ new_weight
77
+ };
78
+ return result;
79
+ }
80
+
81
+ template <typename T>
82
+ Welford<T> welford_combine(const Welford<T> &acc, T data) {
83
+ // Add a single data point
84
+ auto delta = data - acc.mean;
85
+ auto new_weight = acc.weight + T(1);
86
+ auto new_mean = acc.mean + delta / new_weight;
87
+ auto new_delta = data - new_mean;
88
+ auto result = Welford<T>{
89
+ new_mean,
90
+ acc.m2 + delta * new_delta,
91
+ new_weight
92
+ };
93
+ return result;
94
+ }
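Editor's note: the two `welford_combine` overloads above implement the standard Welford/Chan update rules (restated here, not taken from the file). For accumulators with weights (counts) \(w_a, w_b\), means \(\mu_a, \mu_b\) and second moments \(M_{2,a}, M_{2,b}\):

```latex
% Parallel merge (first overload):
\delta = \mu_b - \mu_a, \qquad w = w_a + w_b, \qquad
\mu = \mu_a + \delta\,\frac{w_b}{w}, \qquad
M_2 = M_{2,a} + M_{2,b} + \delta^2\,\frac{w_a\,w_b}{w}
% Single-sample update (second overload, i.e. w_b = 1):
\delta = x - \mu_a, \qquad \mu = \mu_a + \frac{\delta}{w_a + 1}, \qquad
M_2 = M_{2,a} + \delta\,(x - \mu)
```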
95
+
96
+ // Refer to https://github.com/pytorch/pytorch/blob/b5b36cf0c4e1958f1ff25120f5d4beeef3288187/
97
+ // aten/src/ATen/native/SharedReduceOps.h#L419-L445
98
+ template <typename scalar_t>
99
+ inline bool greater_or_nan(scalar_t a, scalar_t b, int64_t idx_a, int64_t idx_b) {
100
+ // If (a == b), then choose the one with lower idx, else max(a, b)
101
+ if (at::_isnan(a)) {
102
+ if (at::_isnan(b)) {
103
+ return idx_a < idx_b;
104
+ }
105
+ return true;
106
+ }
107
+ return (a == b) ? idx_a < idx_b : (a > b);
108
+ }
109
+
110
+ template <typename scalar_t>
111
+ inline bool less_or_nan(scalar_t a, scalar_t b, int64_t idx_a, int64_t idx_b) {
112
+ // If (a == b), then choose the one with lower idx, else min(a, b)
113
+ if (at::_isnan(a)) {
114
+ if (at::_isnan(b)) {
115
+ return idx_a < idx_b;
116
+ }
117
+ return true;
118
+ }
119
+ return (a == b) ? idx_a < idx_b : (a < b);
120
+ }
121
+
122
+ #if INDUCTOR_USE_VECTOR_TYPES()
123
+ template <typename scalar_t>
124
+ inline at::vec::Vectorized<scalar_t> vec_shuffle_down(at::vec::Vectorized<scalar_t> x, size_t n) {
125
+ using Vec = at::vec::Vectorized<scalar_t>;
126
+ alignas(alignof(Vec)) scalar_t array[Vec::size()];
127
+ x.store(array);
128
+ for (size_t i = 0; i + n < Vec::size(); i += 2 * n) {
129
+ array[i] = array[i + n];
130
+ }
131
+ return Vec::loadu(array);
132
+ }
133
+
134
+ #ifdef CPU_CAPABILITY_AVX2
135
+ inline at::vec::Vectorized<float> vec_shuffle_down(at::vec::Vectorized<float> x, size_t n) {
136
+ using vec_t = at::vec::Vectorized<float>;
137
+ #define SHUFFLE_MASK(z, y, x, w) ((z << 6) | (y << 4) | (x << 2) | w)
138
+ switch (n) {
139
+ case 1:
140
+ return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(1, 1, 3, 3)));
141
+ case 2:
142
+ return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(2, 2, 2, 2)));
143
+ case 4:
144
+ return vec_t(_mm256_permute2f128_ps(x, x, SHUFFLE_MASK(1, 1, 1, 1)));
145
+ }
146
+ TORCH_CHECK(false, "Unhandled vec_shuffle_down value ", n);
147
+ }
148
+ #endif
149
+
150
+ template <typename scalar_t>
151
+ Welford<scalar_t> welford_vec_reduce_all(Welford<at::vec::Vectorized<scalar_t>> acc) {
152
+ using Vec = at::vec::Vectorized<scalar_t>;
153
+ for (size_t n = 1; n < Vec::size(); n *= 2) {
154
+ auto shuffled = Welford<Vec>{
155
+ vec_shuffle_down(acc.mean, n),
156
+ vec_shuffle_down(acc.m2, n),
157
+ vec_shuffle_down(acc.weight, n)
158
+ };
159
+ acc = welford_combine(acc, shuffled);
160
+ }
161
+
162
+ Welford<scalar_t> result;
163
+ alignas(alignof(Vec)) scalar_t array[Vec::size()];
164
+ acc.mean.store(array);
165
+ result.mean = array[0];
166
+
167
+ acc.m2.store(array);
168
+ result.m2 = array[0];
169
+
170
+ acc.weight.store(array);
171
+ result.weight = array[0];
172
+
173
+ return result;
174
+ }
175
+ #endif
176
+
177
+
178
+ template <typename T, typename U> inline typename std::common_type<T, U>::type mod(T a, U b) { return a % b; }
179
+ template <> inline float mod(float a, float b) { return std::fmod(a, b); }
180
+ template <> inline double mod(double a, double b) { return std::fmod(a, b); }
181
+
182
+ template <typename scalar_t>
183
+ inline scalar_t max_propagate_nan(scalar_t a, scalar_t b) {
184
+ if (at::_isnan(a)) {
185
+ return a;
186
+ }
187
+ return a > b ? a : b;
188
+ }
189
+
190
+ template <typename scalar_t>
191
+ inline scalar_t min_propagate_nan(scalar_t a, scalar_t b) {
192
+ if (at::_isnan(a)) {
193
+ return a;
194
+ }
195
+ return a < b ? a : b;
196
+ }
197
+
198
+ constexpr float uint32_to_uniform_float(uint32_t value) {
199
+ // maximum value such that `MAX_INT * scale < 1.0` (with float rounding)
200
+ constexpr float scale = 4.6566127342e-10;
201
+ return static_cast<float>(value & 0x7FFFFFFF) * scale;
202
+ }
203
+
204
+ float normalized_rand_cpu(uint32_t seed, uint32_t offset) {
205
+ return uint32_to_uniform_float(at::Philox4_32(seed, 0, offset)());
206
+ }
207
+
208
+ float randn_cpu(uint32_t seed, uint32_t offset) {
209
+ at::Philox4_32 engine(seed, 0, offset);
210
+ return engine.randn(10);
211
+ }
212
+
213
+ int64_t randint64_cpu(uint32_t seed, uint32_t offset, int64_t low, int64_t high) {
214
+ auto gen = at::Philox4_32(seed, 0, offset);
215
+ uint64_t r0 = gen();
216
+ uint64_t r1 = gen();
217
+ uint64_t result = r0 | (r1 << 32);
218
+ return static_cast<int64_t>(result % (high - low)) + low;
219
+ }
220
+
221
+ template <typename T> struct AsIntegerType { typedef T type; };
222
+ template <> struct AsIntegerType<float> { typedef uint32_t type; };
223
+ template <> struct AsIntegerType<double> { typedef uint64_t type; };
224
+ template <> struct AsIntegerType<bfloat16> { typedef uint16_t type; };
225
+
226
+ template <typename T>
227
+ typename std::enable_if<!std::is_reduced_floating_point<T>::value, T>::type
228
+ inline fetch_value(volatile T *addr) {
229
+ return *addr;
230
+ }
231
+
232
+ template <typename T>
233
+ typename std::enable_if<std::is_reduced_floating_point<T>::value, T>::type
234
+ inline fetch_value(volatile T *addr) {
235
+ return T(addr->x, T::from_bits());
236
+ }
237
+
238
+ template <typename T>
239
+ typename std::enable_if<!std::is_integral<T>::value>::type
240
+ atomic_add(volatile T *addr, T offset) {
241
+ typedef typename AsIntegerType<T>::type alt_type;
242
+
243
+ static_assert(sizeof(std::atomic<alt_type>) == sizeof(T),
244
+ "std::atomic issue");
245
+
246
+ alt_type expected;
247
+
248
+ alt_type desired;
249
+
250
+ std::atomic<alt_type> *atomic_addr = (std::atomic<alt_type> *)addr;
251
+ do {
252
+ T val = fetch_value(addr);
253
+ reinterpret_cast<T *>(&expected)[0] = val;
254
+ reinterpret_cast<T *>(&desired)[0] = val + offset;
255
+ } while (!atomic_addr->compare_exchange_weak(expected, desired,
256
+ std::memory_order_relaxed));
257
+ }
258
+
259
+ // Since C++20 float is supported by fetch_add, but the performance may not
260
+ // better than compare_exchange_weak, which can be checked by microbenchmark
261
+ // inductor_cpu_atomic.py
262
+ template <typename T>
263
+ typename std::enable_if<std::is_integral<T>::value>::type
264
+ atomic_add(volatile T *addr, T offset) {
265
+ static_assert(sizeof(std::atomic<T>) == sizeof(T),
266
+ "std::atomic issue");
267
+ std::atomic<T> *atomic_addr = (std::atomic<T> *)addr;
268
+ atomic_addr->fetch_add(offset, std::memory_order_relaxed);
269
+ }
270
+
271
+ // This function is used to convert bool or uint8 to float mask for
272
+ // vectorization. The caller needs to make sure the src represents TRUE/FALSE
273
+ // correctly.
274
+ template <typename T>
275
+ inline float flag_to_float_scalar(T src) {
276
+ float ret;
277
+ *(uint32_t*)(&ret) = src ? 0xFFFFFFFF : 0;
278
+ return ret;
279
+ }
280
+
281
+ #if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_ZVECTOR)
282
+
283
+ inline at::vec::Vectorized<float> masked_load(const float* src, at::vec::Vectorized<float> mask) {
284
+ # if defined(CPU_CAPABILITY_AVX512)
285
+ at::vec::Vectorized<float> zero_vec(0);
286
+ auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
287
+ auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ);
288
+ return _mm512_mask_loadu_ps(zero_vec, mmask, src);
289
+ # elif defined(CPU_CAPABILITY_AVX2)
290
+ auto all_ones = _mm256_set1_epi32(0xFFFFFFFF);
291
+ auto mmask = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones);
292
+ return _mm256_maskload_ps(src, mmask);
293
+ # elif defined(CPU_CAPABILITY_ZVECTOR)
294
+ auto result = at::vec::Vectorized<float>::loadu(src);
295
+ return (result & mask);
296
+ # else
297
+ # error Unsupported vectorization CPU capability
298
+ # endif
299
+ }
300
+
301
+ template <typename T>
302
+ typename std::enable_if<std::is_same<T, bfloat16>::value || std::is_same<T, half>::value, at::vec::Vectorized<T>>::type
303
+ inline masked_load(const T* src, at::vec::Vectorized<float> mask) {
304
+ # if defined(CPU_CAPABILITY_AVX512)
305
+ auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
306
+ auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ);
307
+ auto zero = _mm256_set1_epi16(0);
308
+ auto temp = _mm256_mask_loadu_epi16(zero, mmask, src);
309
+ return _mm512_inserti32x8(_mm512_castsi256_si512(temp), zero, 1);
310
+ # elif defined(CPU_CAPABILITY_AVX2)
311
+ auto all_ones = _mm256_set1_epi32(0xFFFFFFFF);
312
+ auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones);
313
+ __at_align__ uint32_t mmask[8];
314
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec);
315
+ __at_align__ uint16_t result[16];
316
+ for (auto i = 0; i < 8; i++) {
317
+ result[i] = mmask[i] == 0xFFFFFFFF ? src[i].x: uint16_t(0);
318
+ }
319
+ return at::vec::Vectorized<T>::loadu(result);
320
+ # elif defined(CPU_CAPABILITY_ZVECTOR)
321
+ auto result = at::vec::Vectorized<T>::loadu(src, 8);
322
+ uint32_t maskdata[8] = { 0 };
323
+ uint16_t maskdata_dest[16] = { 0 };
324
+ mask.store(maskdata);
325
+ for (auto i = 0; i < 8; i++) {
326
+ maskdata_dest[i] = (maskdata[i] == 0xFFFFFFFF) ? 0xFFFF: 0;
327
+ }
328
+ auto maskvector = at::vec::Vectorized<T>::loadu(maskdata_dest);
329
+ return (result & maskvector);
330
+ # else
331
+ # error Unsupported vectorization CPU capability
332
+ # endif
333
+ }
334
+
335
+ template <typename T>
336
+ typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, at::vec::Vectorized<T>>::type
337
+ inline masked_load(const T* src, at::vec::Vectorized<float> mask) {
338
+ # if defined(CPU_CAPABILITY_AVX512)
339
+ auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
340
+ auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ);
341
+ auto zero = _mm_set1_epi8(0);
342
+ auto temp = _mm_mask_loadu_epi8(zero, mmask, src);
343
+ return _mm512_inserti64x2(_mm512_set1_epi32(0), temp, 0);
344
+ # elif defined(CPU_CAPABILITY_AVX2)
345
+ auto all_ones = _mm256_set1_epi32(0xFFFFFFFF);
346
+ auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones);
347
+ __at_align__ uint32_t mmask[8];
348
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec);
349
+ __at_align__ T result[32];
350
+ for (auto i = 0; i < 8; i++) {
351
+ result[i] = mmask[i] == 0xFFFFFFFF ? src[i]: T(0);
352
+ }
353
+ return at::vec::Vectorized<T>::loadu(result);
354
+ # elif defined(CPU_CAPABILITY_ZVECTOR)
355
+ auto result = at::vec::Vectorized<T>::loadu(src, 8);
356
+ uint32_t maskdata[8];
357
+ T maskdata_dest[32] = { 0 };
358
+ mask.store(maskdata);
359
+ for (auto i = 0; i < 8; i++) {
360
+ maskdata_dest[i] = (maskdata[i] == 0xFFFFFFFF) ? 0xFF: 0;
361
+ }
362
+ auto maskvector = at::vec::Vectorized<T>::loadu(maskdata_dest);
363
+ return (result & maskvector);
364
+ # else
365
+ # error Unsupported vectorization CPU capability
366
+ # endif
367
+ }
368
+
369
+ template <typename T>
370
+ inline at::vec::Vectorized<float> flag_to_float_vec(const T* src) {
371
+ __at_align__ float dst_tmp[at::vec::Vectorized<float>::size()];
372
+ #pragma unroll
373
+ for (int64_t i = 0; i < at::vec::Vectorized<float>::size(); i++) {
374
+ dst_tmp[i] = flag_to_float_scalar(src[i]);
375
+ }
376
+ return at::vec::Vectorized<float>::loadu(dst_tmp);
377
+ }
378
+
379
+ template <typename scalar_t>
380
+ inline at::vec::Vectorized<float> cvt_lowp_fp_to_fp32(
381
+ at::vec::Vectorized<scalar_t> src) {
382
+ at::vec::Vectorized<float> res_vec1(0);
383
+ at::vec::Vectorized<float> res_vec2(0);
384
+ std::tie(res_vec1, res_vec2) = at::vec::convert_to_float<scalar_t>(src);
385
+ return res_vec1;
386
+ }
387
+
388
+ template <typename scalar_t>
389
+ inline at::vec::Vectorized<scalar_t> cvt_fp32_to_lowp_fp(
390
+ at::vec::Vectorized<float> src) {
391
+ return at::vec::convert_from_float<scalar_t>(src, src);
392
+ }
393
+
394
+ inline at::vec::Vectorized<float> mask_convert_to_float(at::vec::Vectorized<float> src) {
395
+ auto zeros = at::vec::Vectorized<float>(0);
396
+ auto ones = at::vec::Vectorized<float>(1);
397
+ return at::vec::Vectorized<float>::blendv(zeros, ones, src);
398
+ }
399
+
400
+ template <typename scalar_t>
401
+ inline
402
+ typename std::enable_if<std::is_same<scalar_t, bfloat16>::value || std::is_same<scalar_t, half>::value, at::vec::Vectorized<scalar_t>>::type
403
+ mask_convert_to_lowp(at::vec::Vectorized<float> src) {
404
+ auto fp_vec = mask_convert_to_float(src);
405
+ return cvt_fp32_to_lowp_fp<scalar_t>(fp_vec);
406
+ }
407
+
408
+ template <typename SRC>
409
+ inline at::vec::Vectorized<float> vec_convert_to_mask(at::vec::Vectorized<SRC> src) {
410
+ assert(
411
+ at::vec::Vectorized<float>::size() == at::vec::Vectorized<SRC>::size());
412
+ at::vec::Vectorized<float> res_vec(0);
413
+ __at_align__ float dst_tmp[at::vec::Vectorized<float>::size()];
414
+ __at_align__ SRC src_tmp[at::vec::Vectorized<SRC>::size()];
415
+ src.store(src_tmp);
416
+
417
+ #pragma unroll
418
+ for (int i = 0; i < at::vec::Vectorized<float>::size(); i++) {
419
+ *(uint32_t*)(dst_tmp + i) = src_tmp[i] ? 0xFFFFFFFF : 0;
420
+ }
421
+
422
+ return res_vec.loadu(dst_tmp);
423
+ }
424
+
425
+ template <typename SRC>
426
+ inline at::vec::Vectorized<float> to_float_mask(at::vec::Vectorized<SRC> src) {
427
+ return vec_convert_to_mask(src);
428
+ }
429
+
430
+ #if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2)
431
+ template <>
432
+ inline at::vec::Vectorized<float> to_float_mask(at::vec::Vectorized<int> src) {
433
+ #if defined(CPU_CAPABILITY_AVX2)
434
+ return at::vec::Vectorized<float>(_mm256_castsi256_ps(src));
435
+ #else
436
+ return at::vec::Vectorized<float>(_mm512_castsi512_ps(src));
437
+ #endif
438
+ }
439
+ #endif
440
+
441
+ template <>
442
+ inline at::vec::Vectorized<float> to_float_mask(at::vec::Vectorized<float> src) {
443
+ return src;
444
+ }
445
+
446
+ inline at::vec::Vectorized<float> to_float_mask(int src) {
447
+ union {
448
+ float fmask;
449
+ uint32_t imask;
450
+ } mask;
451
+ mask.imask = src ? 0xFFFFFFFF : 0;
452
+ return at::vec::Vectorized<float>(mask.fmask);
453
+ }
454
+
455
+ inline bool all_zero(at::vec::Vectorized<float> src) {
456
+ # if defined(CPU_CAPABILITY_AVX512)
457
+ auto src_int = _mm512_castps_si512(src);
458
+ __mmask16 mask = _mm512_test_epi32_mask(src_int, src_int);
459
+ return mask == 0;
460
+ # elif defined(CPU_CAPABILITY_AVX2)
461
+ return _mm256_testz_ps(src, src);
462
+ # else
463
+ __at_align__ int mask[at::vec::Vectorized<float>::size()];
464
+ src.store(mask);
465
+ for (int i = 0; i < at::vec::Vectorized<float>::size(); i++) {
466
+ if (mask[i] != 0) {
467
+ return false;
468
+ }
469
+ }
470
+ return true;
471
+ # endif
472
+ }
473
+
474
+ inline bool vector_lane_mask_check(at::vec::Vectorized<float> src, int lane) {
475
+ # if defined(CPU_CAPABILITY_AVX512)
476
+ return _mm512_movepi32_mask(_mm512_castps_si512(src)) & (1 << lane);
477
+ # elif defined(CPU_CAPABILITY_AVX2)
478
+ return _mm256_movemask_ps(src) & (1 << lane);
479
+ # else
480
+ __at_align__ int mask[at::vec::Vectorized<float>::size()];
481
+ src.store(mask);
482
+ return mask[lane] != 0;
483
+ # endif
484
+ }
485
+
486
+ inline at::vec::Vectorized<float> cvt_int64_to_fp32(at::vec::VectorizedN<int64_t,2> src) {
487
+ # if defined(CPU_CAPABILITY_AVX512)
488
+ auto low = _mm512_cvtepi64_ps(src[0]);
489
+ auto high = _mm512_cvtepi64_ps(src[1]);
490
+ return _mm512_insertf32x8(_mm512_castps256_ps512(low), high, 1);
491
+ # elif defined(CPU_CAPABILITY_AVX2)
492
+ auto low_double = at::vec::convert_to_fp_of_same_size<double>(src[0]);
493
+ auto low = _mm256_cvtpd_ps(low_double);
494
+ auto high_double = at::vec::convert_to_fp_of_same_size<double>(src[1]);
495
+ auto high = _mm256_cvtpd_ps(high_double);
496
+ return _mm256_insertf128_ps(_mm256_castps128_ps256(low), high, 1);
497
+ # else
498
+ constexpr int float_vec_size = at::vec::Vectorized<float>::size();
499
+ constexpr int int64_vec_size = at::vec::Vectorized<int64_t>::size();
500
+ __at_align__ float result[float_vec_size];
501
+ __at_align__ int64_t src_buf[int64_vec_size];
502
+ for (int i = 0; i < 2; i++) {
503
+ src[i].store(src_buf + i * int64_vec_size);
504
+ for (int j = 0; j < int64_vec_size; j++) {
505
+ result[i * int64_vec_size + j] = static_cast<float>(src_buf[i * int64_vec_size + j]);
506
+ }
507
+ }
508
+ return at::vec::Vectorized<float>::loadu(result);
509
+ # endif
510
+ }
511
+
512
+ inline at::vec::VectorizedN<int64_t,2> cvt_fp32_to_int64(at::vec::Vectorized<float> src) {
513
+ at::vec::VectorizedN<int64_t,2> result;
514
+ # if defined(CPU_CAPABILITY_AVX512)
515
+ result[0] = _mm512_cvt_roundps_epi64(_mm512_castps512_ps256(src), _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC);
516
+ result[1] = _mm512_cvt_roundps_epi64(_mm512_extractf32x8_ps(src, 1), _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC);
517
+ # elif defined(CPU_CAPABILITY_AVX2)
518
+ auto int32_vec = at::vec::convert_to_int_of_same_size(src);
519
+ result[0] = _mm256_cvtepi32_epi64(_mm256_castsi256_si128(int32_vec));
520
+ result[1] = _mm256_cvtepi32_epi64(_mm256_extracti128_si256(int32_vec, 1));
521
+ # else
522
+ constexpr int float_vec_size = at::vec::Vectorized<float>::size();
523
+ constexpr int int64_vec_size = at::vec::Vectorized<int64_t>::size();
524
+ __at_align__ float src_buf[float_vec_size];
525
+ __at_align__ int64_t result_buf[int64_vec_size];
526
+ src.store(src_buf);
527
+ for (int i = 0; i < 2; i++) {
528
+ for (int j = 0; j < int64_vec_size; j++) {
529
+ result_buf[j] = static_cast<int64_t>(src_buf[i * int64_vec_size + j]);
530
+ }
531
+ result[i] = at::vec::Vectorized<int64_t>::loadu(result_buf);
532
+ }
533
+ # endif
534
+ return result;
535
+ }
536
+
537
+ inline at::vec::Vectorized<int32_t> cvt_int64_to_int32(at::vec::VectorizedN<int64_t,2> src) {
538
+ # if defined(CPU_CAPABILITY_AVX512)
539
+ auto low = _mm512_cvtepi64_epi32(src[0]);
540
+ auto high = _mm512_cvtepi64_epi32(src[1]);
541
+ return _mm512_inserti32x8(_mm512_castsi256_si512(low), high, 1);
542
+ # elif defined(CPU_CAPABILITY_AVX2)
543
+ auto low = _mm256_shuffle_epi32(src[0], _MM_SHUFFLE(2, 0, 2, 0));
544
+ auto high = _mm256_shuffle_epi32(src[1], _MM_SHUFFLE(2, 0, 2, 0));
545
+ auto low_perm = _mm256_permute4x64_epi64(low, _MM_SHUFFLE(3, 1, 2, 0));
546
+ auto high_perm = _mm256_permute4x64_epi64(high, _MM_SHUFFLE(3, 1, 2, 0));
547
+ return _mm256_blend_epi32(low_perm, high_perm, 0xF0);
548
+ # else
549
+ constexpr int int32_vec_size = at::vec::Vectorized<int32_t>::size();
550
+ constexpr int int64_vec_size = at::vec::Vectorized<int64_t>::size();
551
+ __at_align__ int32_t result[int32_vec_size];
552
+ __at_align__ int64_t src_buf[int64_vec_size];
553
+ for (int i = 0; i < 2; i++) {
554
+ src[i].store(src_buf + i * int64_vec_size);
555
+ for (int j = 0; j < int64_vec_size; j++) {
556
+ result[i * int64_vec_size + j] = static_cast<int32_t>(src_buf[i * int64_vec_size + j]);
557
+ }
558
+ }
559
+ return at::vec::Vectorized<int32_t>::loadu(result);
560
+ # endif
561
+ }
562
+
563
+ inline at::vec::VectorizedN<int64_t,2> cvt_int32_to_int64(at::vec::Vectorized<int32_t> src) {
564
+ at::vec::VectorizedN<int64_t,2> result;
565
+ # if defined(CPU_CAPABILITY_AVX512)
566
+ result[0] = _mm512_cvtepi32_epi64(_mm512_castsi512_si256(src));
567
+ result[1] = _mm512_cvtepi32_epi64(_mm512_extracti32x8_epi32(src, 1));
568
+ # elif defined(CPU_CAPABILITY_AVX2)
569
+ result[0] = _mm256_cvtepi32_epi64(_mm256_castsi256_si128(src));
570
+ result[1] = _mm256_cvtepi32_epi64(_mm256_extracti128_si256(src, 1));
571
+ #else
572
+ constexpr int int32_vec_size = at::vec::Vectorized<int32_t>::size();
573
+ constexpr int int64_vec_size = at::vec::Vectorized<int64_t>::size();
574
+ __at_align__ int32_t src_buf[int32_vec_size];
575
+ __at_align__ int64_t result_buf[int64_vec_size];
576
+ src.store(src_buf);
577
+ for (int i = 0; i < 2; i++) {
578
+ for (int j = 0; j < int64_vec_size; j++) {
579
+ result_buf[j] = static_cast<int64_t>(src_buf[i * int64_vec_size + j]);
580
+ }
581
+ result[i] = at::vec::Vectorized<int64_t>::loadu(result_buf);
582
+ }
583
+ # endif
584
+ return result;
585
+ }
586
+
587
+ inline at::vec::VectorizedN<int64_t,2> mask_convert_to_int64(at::vec::Vectorized<float> src) {
588
+ return cvt_fp32_to_int64(mask_convert_to_float(src));
589
+ }
590
+
591
+ inline at::vec::Vectorized<float> to_float_mask(at::vec::VectorizedN<int64_t,2> src) {
592
+ return to_float_mask(cvt_int64_to_int32(src));
593
+ }
594
+
595
+ #endif