[Binary and Git LFS additions: LFS pointer files for checkpoint tensors under ckpts/universal/global_step120/zero/ (exp_avg.pt, exp_avg_sq.pt, and fp32.pt shards for 7.attention.query_key_value.weight, 14.attention.dense.weight, 18.post_attention_layernorm.weight, and 22.mlp.dense_4h_to_h.weight), plus compiled __pycache__/*.cpython-310.pyc files for torch/_inductor and torch/_inductor/codegen and an empty torch/_inductor/codegen/__init__.py under venv/lib/python3.10/site-packages/.]

diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/aoti_runtime/implementation.cpp b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/aoti_runtime/implementation.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4869825cadabdd837a03ed8af8c73df6535225ca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/aoti_runtime/implementation.cpp
@@ -0,0 +1,87 @@
+// NOTE: Like interface.cpp, this file will be copied into AOTInductor
+// generated output. This file is intended to keep implementation
+// details separate from the implementation of the AOTI public
+// interface.
Note also that #includes should go into interface.cpp +// for simplicity of maintenance. + +namespace torch { +namespace aot_inductor { +template +void convert_output_to_handle( + const ArrayRefTensor& output, + AtenTensorHandle& handle) { + handle = output.expensiveCopyToTensor(); +} + +template +void convert_outputs_to_handles_helper( + const std::tuple...>& outputs, + AtenTensorHandle* output_handles, + std::index_sequence) { + (convert_output_to_handle(std::get(outputs), output_handles[Is]), ...); +} +template +void convert_outputs_to_handles( + const std::tuple...>& outputs, + AtenTensorHandle* output_handles) { + convert_outputs_to_handles_helper( + outputs, output_handles, std::make_index_sequence()); +} + +template +void convert_handle_to_arrayref_tensor( + AtenTensorHandle handle, + ArrayRefTensor& input) { + void* data_ptr; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr(handle, &data_ptr)); + int64_t dim; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_dim(handle, &dim)); + int64_t numel; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_numel(handle, &numel)); + int64_t* sizes; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_sizes(handle, &sizes)); + int64_t* strides; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_strides(handle, &strides)); + int32_t dtype; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_dtype(handle, &dtype)); + int32_t device_type; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_device_type(handle, &device_type)); + int32_t device_index; + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_get_device_index(handle, &device_index)); + + input = ArrayRefTensor( + MiniArrayRef(reinterpret_cast(data_ptr), numel), + MiniArrayRef(sizes, dim), + MiniArrayRef(strides, dim), + device_type, + device_index); +} + +template +void convert_handles_to_inputs_helper( + AtenTensorHandle* input_handles, + std::tuple...>& inputs, + std::index_sequence) { + (convert_handle_to_arrayref_tensor(input_handles[Is], std::get(inputs)), + ...); +} + +template +void convert_handles_to_inputs( + AtenTensorHandle* input_handles, + std::tuple...>& inputs) { + convert_handles_to_inputs_helper( + input_handles, inputs, std::make_index_sequence()); +} + +template +void assert_numel(const ArrayRefTensor& tensor, int64_t numel) { + if (tensor.numel() != numel) { + std::stringstream err; + err << "incorrect numel for input tensor. expected " << numel << ", got " << tensor.numel(); + throw std::runtime_error(err.str()); + } +} +} // namespace aot_inductor +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/aoti_runtime/interface.cpp b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/aoti_runtime/interface.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7e52dc8f5f46c3ca0b6f9ed858ad7b280da85ea2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/aoti_runtime/interface.cpp @@ -0,0 +1,354 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define CONVERT_EXCEPTION_TO_ERROR_CODE(...) \ + try { \ + __VA_ARGS__ \ + } catch (const std::exception& e) { \ + std::cerr << "Error: " << e.what() << std::endl; \ + return AOTI_RUNTIME_FAILURE; \ + } catch (...) { \ + std::cerr << "Unknown exception occurred." 
<< std::endl; \ + return AOTI_RUNTIME_FAILURE; \ + } \ + return AOTI_RUNTIME_SUCCESS; + +#define AOTI_VECTOR_SIZE_CHECK(actual_size, expected_size, name) \ + do { \ + AOTI_RUNTIME_CHECK( \ + actual_size == expected_size, \ + "expected " + std::string(name) + " vector size to be " + \ + std::to_string(expected_size) + ", but got " + \ + std::to_string(actual_size)); \ + } while (0) + +// AOTInductor uses at::addmm_out, which doesn't supports +// arguments that requires gradient. For this reason, we +// enforce no_grad context for run APIs. +// +// A RAII, thread local (!) guard that enables or disables grad mode upon +// construction, and sets it back to the original value upon destruction. +struct AOTINoGradGuard { + AOTINoGradGuard() : prev_mode(aoti_torch_grad_mode_is_enabled()) { + aoti_torch_grad_mode_set_enabled(false); + } + ~AOTINoGradGuard() { + aoti_torch_grad_mode_set_enabled(prev_mode); + } + bool prev_mode; +}; + +extern "C" { + +AOTIRuntimeError AOTInductorModelContainerCreate( + AOTInductorModelContainerHandle* container_handle, + size_t num_models, + bool is_cpu, + const char* cubin_dir) { + return AOTInductorModelContainerCreateWithDevice( + container_handle, + num_models, + is_cpu ? "cpu" : "cuda", + cubin_dir); +} + +AOTIRuntimeError AOTInductorModelContainerCreateWithDevice( + AOTInductorModelContainerHandle* container_handle, + size_t num_models, + const char* device_str, + const char* cubin_dir) { + if (num_models == 0) { + std::cerr << "Error: num_models must be positive, but got 0" << std::endl; + return AOTI_RUNTIME_FAILURE; + } + CONVERT_EXCEPTION_TO_ERROR_CODE({ + std::optional cubin_dir_opt; + if (cubin_dir != nullptr) { + cubin_dir_opt.emplace(cubin_dir); + } + auto* container = new torch::aot_inductor::AOTInductorModelContainer( + num_models, std::string(device_str), cubin_dir_opt); + *container_handle = + reinterpret_cast(container); + }) +} + +AOTIRuntimeError AOTInductorModelContainerDelete( + AOTInductorModelContainerHandle container_handle) { + CONVERT_EXCEPTION_TO_ERROR_CODE({ + auto* container = + reinterpret_cast( + container_handle); + delete container; + }); +} + +AOTIRuntimeError AOTInductorModelContainerRun( + AOTInductorModelContainerHandle container_handle, + AtenTensorHandle* input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + size_t num_inputs, + AtenTensorHandle* + output_handles, // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed + size_t num_outputs, + AOTInductorStreamHandle stream_handle, + AOTIProxyExecutorHandle proxy_executor_handle) { + auto* container = + reinterpret_cast( + container_handle); + AOTI_VECTOR_SIZE_CHECK(num_inputs, container->num_inputs(), "inputs"); + AOTI_VECTOR_SIZE_CHECK(num_outputs, container->num_outputs(), "outputs"); + + auto stream = + reinterpret_cast(stream_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE({ + AOTINoGradGuard guard; + container->run( + input_handles, output_handles, stream, proxy_executor_handle); + }) +} + +AOTIRuntimeError AOTInductorModelContainerGetNumConstants( + AOTInductorModelContainerHandle container_handle, + size_t* num_constants) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *num_constants = container->num_constants(); }) +} + +AOTIRuntimeError AOTInductorModelContainerGetConstantName( + AOTInductorModelContainerHandle container_handle, + size_t idx, + const char** name) { + auto* container = + 
reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *name = container->constant_name(idx); }) +} + +AOTIRuntimeError AOTInductorModelContainerGetConstantOriginalFQN( + AOTInductorModelContainerHandle container_handle, + size_t idx, + const char** original_fqn) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *original_fqn = container->constant_original_fqn(idx); }) +} + +AOTIRuntimeError AOTInductorModelContainerGetConstantFromFolded( + AOTInductorModelContainerHandle container_handle, + size_t idx, + bool* from_folded) { + auto* container = + reinterpret_cast(container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE({ *from_folded = container->constant_from_folded(idx); }) +} + +AOTIRuntimeError AOTInductorModelContainerGetConstantDtype( + AOTInductorModelContainerHandle container_handle, + size_t idx, + int32_t* dtype) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *dtype = container->constant_dtype(idx); }) +} + +AOTIRuntimeError AOTInductorModelContainerUpdateConstantBuffer( + AOTInductorModelContainerHandle container_handle, + AOTInductorConstantMapHandle constant_map_handle, + bool use_inactive, + bool validate_full_update) { + auto* container = + reinterpret_cast( + container_handle); + auto input_map = reinterpret_cast*>(constant_map_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE({ + container->update_constant_buffer( + *input_map, use_inactive, validate_full_update); + }) +} + +AOTIRuntimeError AOTInductorModelContainerUpdateInactiveConstantBuffer( + AOTInductorModelContainerHandle container_handle, + AOTInductorConstantMapHandle constant_map_handle) { + return AOTInductorModelContainerUpdateConstantBuffer(container_handle, + constant_map_handle, + /*use_inactive*/ true, + /*validate_full_update*/ true); +} + +AOTIRuntimeError AOTInductorModelContainerRunConstantFolding( + AOTInductorModelContainerHandle container_handle, + bool use_inactive, + AOTInductorStreamHandle stream_handle, + AOTIProxyExecutorHandle proxy_executor_handle) { + auto* container = + reinterpret_cast( + container_handle); + auto stream = + reinterpret_cast(stream_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE({ + AOTINoGradGuard guard; + container->run_const_fold(use_inactive, stream, proxy_executor_handle); + }) +} + +AOTIRuntimeError AOTInductorModelContainerSwapConstantBuffer( + AOTInductorModelContainerHandle container_handle) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE({ + container->swap_constant_buffer(); + }) +} + +AOTIRuntimeError AOTInductorModelContainerGetNumInputs( + AOTInductorModelContainerHandle container_handle, + size_t* ret_num_inputs) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *ret_num_inputs = container->num_inputs(); }) +} + +AOTIRuntimeError AOTInductorModelContainerGetInputName( + AOTInductorModelContainerHandle container_handle, + size_t input_idx, + const char** ret_input_names) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *ret_input_names = container->input_name(input_idx); }) +} + +AOTIRuntimeError AOTInductorModelContainerGetNumOutputs( + AOTInductorModelContainerHandle container_handle, + size_t* ret_num_outputs) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *ret_num_outputs = container->num_outputs(); }) +} + +AOTIRuntimeError 
AOTInductorModelContainerGetOutputName( + AOTInductorModelContainerHandle container_handle, + size_t output_idx, + const char** ret_output_names) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE( + { *ret_output_names = container->output_name(output_idx); }) +} + +AOTIRuntimeError AOTInductorModelContainerGetCallSpec( + AOTInductorModelContainerHandle container_handle, + const char** in_spec, + const char** out_spec) { + auto* container = + reinterpret_cast( + container_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE({ + *in_spec = container->get_in_spec(); + *out_spec = container->get_out_spec(); + }) +} + +AOTIRuntimeError AOTInductorModelCreate( + AOTInductorModelHandle* model_handle, + AOTInductorConstantMapHandle constant_map_handle){ + CONVERT_EXCEPTION_TO_ERROR_CODE({ + auto constant_map = std::make_shared(); + auto constant_array = std::make_shared>(); + auto input_map = reinterpret_cast*>(constant_map_handle); + + auto model = new torch::aot_inductor::AOTInductorModel( + constant_map, + constant_array, + "cpu", // device_str is hardcoded, as AOTInductorModelCreate is only use for CPU models + "" + ); + + if (input_map) { + for (auto const& kv : *input_map) { + constant_map->emplace(kv.first, kv.second); + } + } else { + model->load_constants(); + } + + *model_handle = reinterpret_cast(model); + })} + +AOTIRuntimeError AOTInductorModelRun( + AOTInductorModelHandle model_handle, + AtenTensorHandle* input_handles, + AtenTensorHandle* output_handles) { + auto model = + reinterpret_cast(model_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE({ + AOTINoGradGuard guard; + model->run_impl( + input_handles, + output_handles, + (torch::aot_inductor::DeviceStreamType) nullptr, + nullptr); + }) +} + +AOTIRuntimeError AOTInductorModelDelete(AOTInductorModelHandle model_handle){ + CONVERT_EXCEPTION_TO_ERROR_CODE({ + auto model = reinterpret_cast( + model_handle); + delete model; + })} + +AOTIRuntimeError AOTInductorModelGetNumOutputs( + AOTInductorModelHandle model_handle, + size_t* ret_num_outputs) { + CONVERT_EXCEPTION_TO_ERROR_CODE({ + auto model = reinterpret_cast(model_handle); + *ret_num_outputs = model->num_outputs(); + }) +} + +AOTIRuntimeError AOTInductorModelUpdateConstantsMap( + AOTInductorModelHandle model_handle, + AOTInductorConstantMapHandle constant_map_handle) { + auto model = + reinterpret_cast(model_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE({ + auto constant_map = std::make_shared(); + auto input_map = + reinterpret_cast*>( + constant_map_handle); + + for (auto const& kv : *input_map) { + constant_map->emplace(kv.first, kv.second); + } + model->update_constants_map(std::move(constant_map)); + }) +} + +} // extern "C" diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/common.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/common.py new file mode 100644 index 0000000000000000000000000000000000000000..2dfca704b65bf85626ad54e0f922cf594fc1785f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/common.py @@ -0,0 +1,1755 @@ +import contextlib +import dataclasses +import functools +import itertools +import logging +import operator +import re +from itertools import chain +from typing import ( + Any, + Callable, + ClassVar, + Dict, + List, + NamedTuple, + Optional, + Set, + Tuple, + TYPE_CHECKING, + Union, +) + +import sympy +from sympy.printing.printer import Printer + +import torch +import torch.fx +from torch._prims_common import ELEMENTWISE_TYPE_PROMOTION_KIND +from torch.utils import 
_pytree as pytree +from torch.utils._sympy.value_ranges import ValueRanges + +from .. import config, metrics +from ..utils import ( + DeferredLineBase, + do_bench, + free_symbol_startswith, + IndentedBuffer, + sympy_dot, + sympy_index_symbol, + sympy_subs, + unique, +) +from ..virtualized import ops, OpsHandler, OpsValue, ReductionType, StoreMode, V + +if TYPE_CHECKING: + from ..ir import TensorBox + +schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") + + +def data_type_logger(msg): + if schedule_log.isEnabledFor(logging.DEBUG): + schedule_log.debug("Data type propagation: %s", msg) + + +@dataclasses.dataclass +class WorkspaceArg: + """A temporary buffer used for a single kernel, then discarded. + + Not registered as a traditional buffer since there are no users, + so it would be dead code eliminated. + """ + + nbytes: sympy.Expr + zero_fill: bool + + +@dataclasses.dataclass +class TensorArg: + name: str + buffer: str + dtype: torch.dtype + offset: sympy.Expr = sympy.Integer(0) + + +@dataclasses.dataclass +class SizeArg: + name: str + expr: sympy.Expr + + +@dataclasses.dataclass +class DeviceCodegen: + scheduling: type + wrapper_codegen: type + + +KernelArgType = Union[WorkspaceArg, TensorArg, SizeArg] + +device_codegens: Dict[str, DeviceCodegen] = {} + + +class DeviceOpOverrides: + def import_get_raw_stream_as(self, name): + raise NotImplementedError() + + def set_device(self, device_idx): + raise NotImplementedError() + + def synchronize(self): + raise NotImplementedError() + + def device_guard(self, device_idx): + raise NotImplementedError() + + +device_op_overrides_dict: Dict[str, DeviceOpOverrides] = {} + + +# The code generated by Inductor consists of two main parts: kernel code and wrapper code. +# For any new backend looking to integrate with Inductor, customization of these two main +# parts are necessary to generate its specific code. +# +# Kernel code generation is determined by different Scheduling. Consequently, a new +# backend needs to provide a custom Scheduling for its unique kernel code generation. Currently, +# CppScheduling and TritonScheduling serve the C++/OpenMP and Triton backends, respectively. +# +# For the Wrapper, Inductor provides a WrapperCodeGen class to generate the Python wrapper code +# that bridges kernels. This allows out-of-tree backends to inherit from WrapperCodeGen, +# and override specific member functions to create backend-specific Python wrapper code. +# +# Other classes, such as CppKernel and TritonKernel, used for code generation, typically form part +# of the logic for either Scheduling or WrapperCodeGen. So the Scheduling and WrapperCodeGen interfaces +# provide flexibility to the backend. A backend can choose to implement these classes from scratch, +# or reuse them by extending and overriding as necessary. And Inductor provides the registration API, +# register_backend_for_device, to equip a new backend at runtime. +# +# Intel has developed a new backend on top of Triton to support Intel GPUs, leveraging these interfaces. 
+# This backend can be used as a reference: +# https://github.com/intel/intel-extension-for-pytorch/blob/5dcc9d57e5422cf295e1a1ee97896d6b6a554a85/intel_extension_for_pytorch/_inductor/__init__.py#L9 +def register_backend_for_device( + device: str, device_scheduling: type, device_wrapper_codegen: type +): + device_codegens[device] = DeviceCodegen(device_scheduling, device_wrapper_codegen) + + +def get_scheduling_for_device(device: str): + return device_codegens[device].scheduling if device in device_codegens else None + + +def get_wrapper_codegen_for_device(device: str): + return ( + device_codegens[device].wrapper_codegen if device in device_codegens else None + ) + + +def index_prevent_reordering(index: List[sympy.Expr], index_vars, sizes): + from ..ir import FlexibleLayout + + # added contiguous index prevents reordering + return [*index, sympy_dot(index_vars, FlexibleLayout.contiguous_strides(sizes))] + + +def register_device_op_overrides(device: str, device_op_overrides: DeviceOpOverrides): + device_op_overrides_dict[device] = device_op_overrides + + +def get_device_op_overrides(device: str): + assert isinstance(device, str) + + if not device_op_overrides_dict.keys(): + from .cuda import device_op_overrides # noqa: F401 + + if device in device_op_overrides_dict.keys(): + return device_op_overrides_dict[device] + + return DeviceOpOverrides() + + +@functools.lru_cache(None) +def boolean_ops(): + return ( + "is_inf", + "is_nan", + "bitwise_xor", + "logical_not", + "signbit", + "le", + "lt", + "ge", + "gt", + "eq", + "ne", + ) + + +DTYPE_TO_COMPUTATION_DTYPE = { + torch.bfloat16: torch.float, + torch.float16: torch.float, + **{ + dtype: dtype + for dtype in [ + torch.bool, + torch.float32, + torch.float64, + torch.int8, + torch.int16, + torch.int32, + torch.int64, + torch.uint8, + torch.uint16, + torch.uint32, + torch.uint64, + ] + }, +} + + +class DataTypePropagation: + def __init__(self, body) -> None: + self.body = body + self.graphs: Dict[Union[Callable[..., Any], str], Any] = { + "root": body.root_block.graph + } + for k, v in body.subblocks.items(): + self.graphs[k] = v.graph + + def deduce_node_dtype_by_inputs(self, node: torch.fx.Node): + inputs = node.all_input_nodes + input_nodes = [ + n for n in inputs if isinstance(n, torch.fx.Node) and n.op != "placeholder" + ] + if len(input_nodes) == 0: + return None + + all_input_nodes_propogated = all( + OptimizationContext.key in n.meta + and n.meta[OptimizationContext.key].dtype is not None + for n in input_nodes + ) + if not all_input_nodes_propogated: + return None + + return functools.reduce( + torch.promote_types, + [n.meta[OptimizationContext.key].dtype for n in input_nodes], + ) + + def deduce_node_dtype_by_subgraph(self, node: torch.fx.Node): + sub_graph = self.graphs[node.target] + dtype = self.propagate_graph(sub_graph) + assert dtype + return dtype + + def deduce_node_dtype(self, node: torch.fx.Node): + if node.target in boolean_ops(): + return torch.bool + + if node.op == "placeholder": + return None + + if node.target == "output": + # we can infer output node if it only have 1 arg + if len(node.args) != 1: + return None + + if node.target in ( + "to_dtype", + "index_expr", + ): + return node.args[-1] + + if node.target in ( + "rand", + "randn", + ): + return torch.float + + if node.target in ( + "get_index", + "index_expr", + ): + return torch.int64 + + if node.target in ( + "load", + "store", + "store_reduction", + ): + buf_name = node.args[1] + return V.graph.get_dtype(buf_name) # type: ignore[arg-type] + + if node.target 
== operator.getitem: + return self.deduce_node_dtype(node.args[0]) # type: ignore[arg-type] + + assert isinstance(node.target, str) + + if node.target == "reduction": + return node.args[1] + + if node.target == "constant": + return DTYPE_TO_COMPUTATION_DTYPE[node.args[-1]] # type: ignore[index] + + if node.target.startswith("masked_subblock"): + return self.deduce_node_dtype_by_subgraph(node) + + return self.deduce_node_dtype_by_inputs(node) + + def propagate_graph(self, graph: torch.fx.Graph): + assert graph.nodes + graph_dtype = None + # For masked_subblock, we use output's dtype to represent + # the dtype of this subgraph. For other cases, graph_dtype + # might be None + for node in graph.nodes: + if OptimizationContext.key in node.meta: + opt_ctx = node.meta[OptimizationContext.key] + else: + opt_ctx = OptimizationContext() + + opt_ctx.dtype = self.deduce_node_dtype(node) + node.meta[OptimizationContext.key] = opt_ctx + if node.target == "output": + graph_dtype = opt_ctx.dtype + return graph_dtype + + def propagate(self): + self.propagate_graph(self.graphs["root"]) + + @classmethod + def propagate_loopbody(cls, body): + return cls(body).propagate() + + @classmethod + def propagate_scheduler_node(cls, node): + from ..ir import LoopBody + from ..scheduler import SchedulerNode + + assert isinstance(node, SchedulerNode) + assert isinstance(node._body, LoopBody) + DataTypePropagation.propagate_loopbody(node._body) + + +class ExprPrinter(Printer): + @staticmethod + def paren(string): + def all_in_parens(string): + if string[0] != "(" or len(string) < 2: + return False + count = 1 + for i, char in enumerate(string[1:]): + if char == "(": + count += 1 + elif char == ")": + count -= 1 + if count == 0 and i != len(string) - 2: + return False + assert count == 0 + return True + + if ( + isinstance(string, CSEVariable) + or re.match(r"^[a-z0-9_.]+$", string, re.I) + or re.match(r"^\([^)]*\)$", string, re.I) + or string == "" + ): + return string + # don't put extra parens for strings that are already wrapped in parens + if all_in_parens(string): + return string + return f"({string})" + + def _print_Infinity(self, expr): + return "math.inf" + + def _print_NegativeInfinity(self, expr): + return "-math.inf" + + def _print_Relational(self, expr): + return f" {expr.rel_op} ".join(map(self.paren, map(self._print, expr.args))) + + def _print_Mul(self, expr): + return "*".join(map(self.paren, map(self._print, expr.args))) + + def _print_Add(self, expr): + return " + ".join(map(self.paren, map(self._print, expr.args))) + + def _print_Mod(self, expr): + return " % ".join(map(self.paren, map(self._print, expr.args))) + + def _print_FloorDiv(self, expr): + raise NotImplementedError(f"_print_FloorDiv not implemented for {type(self)}") + + def _print_CleanDiv(self, expr): + return self._print_FloorDiv(expr) + + def _print_GreaterThan(self, expr): + # GreaterThan: >= + # StrictlyGreaterThan: > + # Go figure... 
+ return " >= ".join(map(self.paren, map(self._print, expr.args))) + + def _print_align(self, expr): + assert len(expr.args) == 1 + return f"align({self._print(expr.args[0])})" + + +class PythonPrinter(ExprPrinter): + def _print_ModularIndexing(self, expr): + x, div, mod = expr.args + x = self.paren(self.doprint(x)) + div = self.paren(self.doprint(div)) + mod = self.paren(self.doprint(mod)) + if div != "1": + x = f"({x} // {div})" + return f"{x} % {mod}" + + def _print_FloorDiv(self, expr): + x, div = expr.args + x = self.paren(self.doprint(x)) + div = self.paren(self.doprint(div)) + return f"({x} // {div})" + + def _helper_sqrt(self, expr): + return f"math.sqrt({self._print(expr)})" + + def _print_Pow(self, expr): + # Pow() confuses triton + base, exp = expr.args + # NB: Remember this is sizevar computation! You don't typically + # expect to have to do floating point computation including exponents + # in sizevar compute. Instead of adding support for floating + # point pow, you should make upstream retranslate the Sympy expression + # into Tensor expressions earlier and do that instead. + if exp == 0.5: + return self._helper_sqrt(base) + elif exp == -0.5: + return "1/" + self._helper_sqrt(base) + base = self._print(base) + assert exp == int(exp), exp + exp = int(exp) + if exp > 0: + return "*".join([self.paren(base)] * exp) + elif exp < 0: + return "1/" + self.paren("*".join([self.paren(base)] * abs(exp))) + else: # exp == 0 + return "1" + + def _print_floor(self, expr): + assert len(expr.args) == 1 + return f"math.floor({self._print(expr.args[0])})" + + def _print_ceiling(self, expr): + assert len(expr.args) == 1 + return f"math.ceil({self._print(expr.args[0])})" + + def _print_Abs(self, expr): + assert len(expr.args) == 1 + return f"abs({self._print(expr.args[0])})" + + def _print_Max(self, expr): + assert len(expr.args) >= 2 + return f"max({', '.join(map(self._print, expr.args))})" + + def _print_Min(self, expr): + assert len(expr.args) >= 2 + return f"min({', '.join(map(self._print, expr.args))})" + + def _print_cos(self, expr): + assert len(expr.args) == 1 + return f"math.cos({self._print(expr.args[0])})" + + def _print_cosh(self, expr): + assert len(expr.args) == 1 + return f"math.cosh({self._print(expr.args[0])})" + + def _print_acos(self, expr): + assert len(expr.args) == 1 + return f"math.acos({self._print(expr.args[0])})" + + def _print_sin(self, expr): + assert len(expr.args) == 1 + return f"math.sin({self._print(expr.args[0])})" + + def _print_sinh(self, expr): + assert len(expr.args) == 1 + return f"math.sinh({self._print(expr.args[0])})" + + def _print_asin(self, expr): + assert len(expr.args) == 1 + return f"math.asin({self._print(expr.args[0])})" + + def _print_tan(self, expr): + assert len(expr.args) == 1 + return f"math.tan({self._print(expr.args[0])})" + + def _print_tanh(self, expr): + assert len(expr.args) == 1 + return f"math.tanh({self._print(expr.args[0])})" + + def _print_atan(self, expr): + assert len(expr.args) == 1 + return f"math.atan({self._print(expr.args[0])})" + + def _print_Round(self, expr): + assert len(expr.args) == 1 + return f"round({self._print(expr.args[0])})" + + def _print_RoundDecimal(self, expr): + assert len(expr.args) == 2 + number, ndigits = expr.args + assert isinstance(ndigits, sympy.Integer) + return f"round({self._print(number)}, {ndigits})" + + +class OpOverrides: + def __init__(self, parent): + super().__init__() + self._parent = parent + + def __getattr__(self, item): + return getattr(self._parent, item) + + @staticmethod + def 
identity(value): + # used to trigger cse + return value + + @staticmethod + def constant(value, dtype): + return repr(value) + + @staticmethod + def reciprocal(x): + return ops.truediv("1", x) + + @staticmethod + def square(x): + return ops.mul(x, x) + + @staticmethod + def bitwise_not(x): + return f"~{ExprPrinter.paren(x)}" + + @staticmethod + def logical_not(a): + return f"{ExprPrinter.paren(a)} == 0" + + @staticmethod + def bitwise_and(x, y): + return f"{ExprPrinter.paren(x)} & {ExprPrinter.paren(y)}" + + @staticmethod + def bitwise_or(x, y): + return f"{ExprPrinter.paren(x)} | {ExprPrinter.paren(y)}" + + @staticmethod + def bitwise_xor(x, y): + return f"{ExprPrinter.paren(x)} ^ {ExprPrinter.paren(y)}" + + @staticmethod + def bitwise_left_shift(x, y): + return f"{ExprPrinter.paren(x)} << {ExprPrinter.paren(y)}" + + @staticmethod + def bitwise_right_shift(x, y): + return f"{ExprPrinter.paren(x)} >> {ExprPrinter.paren(y)}" + + @staticmethod + def remainder(a, b): + r = ops.mod(a, b) + return ops.where(f"(({r} != 0) & (({r} < 0) != ({b} < 0)))", ops.add(r, b), r) + + @staticmethod + def load_seed(name, offset): + return ops.load(name, sympy.Integer(offset)) + + @classmethod + def _initialize_pointwise_overrides(cls, target): + assert target in {"triton", "cpp", "cppvec"}, target + + def pointwise_factory_1(impl): + def func(x): + return impl.format(x=x) + + return func + + def pointwise_factory_2(impl): + def func(x, y): + return impl.format(x=x, y=y) + + return func + + for funcname, data in pointwise_overrides_data.items(): + impl = getattr(data, target) + if isinstance(impl, str): + nof_args = 2 if "{y}" in impl else 1 + # extend the following dictionary with factory + # functions for a specific number of arguments as + # needed: + factory = {1: pointwise_factory_1, 2: pointwise_factory_2}[nof_args] + setattr(cls, funcname, staticmethod(factory(impl))) + + +@dataclasses.dataclass +class OverridesData: + name: str + cpp: str + triton: Optional[str] = None # None when not impl in libdevice/triton + cppvec: Optional[str] = None # None when not impl in aten/.../vec + type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND = ( + ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ) + + +pointwise_overrides_data: Dict[str, OverridesData] = dict( + airy_ai=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="airy_ai_forward({x})", + name="special_airy_ai", + ), + bessel_j0=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="bessel_j0_forward({x})", + triton="libdevice.j0({x})", + name="special_bessel_j0", + ), + bessel_j1=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="bessel_j1_forward({x})", + triton="libdevice.j1({x})", + name="special_bessel_j1", + ), + bessel_y0=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="bessel_y0_forward({x})", + triton="libdevice.y0({x})", + name="special_bessel_y0", + ), + bessel_y1=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="bessel_y1_forward({x})", + triton="libdevice.y1({x})", + name="special_bessel_y1", + ), + digamma=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_digamma({x})", + cppvec="{x}.digamma()", + name="digamma", + ), + # no cpp nor triton implementation for entr, it is defined as decomposition + # erf, erfc + erfcx=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + 
cpp="calc_erfcx({x})", + triton="libdevice.erfcx({x})", + name="special_erfcx", + ), + # erfinv, exp2, expit, gammaln + igamma=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_igamma({x}, {y})", + name="igamma", + ), + igammac=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_igammac({x}, {y})", + name="igammac", + ), + gammainc=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_igamma({x}, {y})", + name="special_gammainc", + ), + gammaincc=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_igammac({x}, {y})", + name="special_gammaincc", + ), + i0=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_i0({x})", + triton="libdevice.cyl_bessel_i0({x})", + cppvec="{x}.i0()", + name="i0", + ), + i0e=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_i0e({x})", + cppvec="{x}.i0e()", + name="special_i0e", + ), + i1=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_i1({x})", + triton="libdevice.cyl_bessel_i1({x})", + name="special_i1", + ), + i1e=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_i1e({x})", + name="special_i1e", + ), + log_ndtr=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_log_ndtr({x})", + name="special_log_ndtr", + ), + # logit + modified_bessel_i0=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="modified_bessel_i0_forward({x})", + triton="libdevice.cyl_bessel_i0({x})", + name="special_modified_bessel_i0", + ), + modified_bessel_i1=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="modified_bessel_i1_forward({x})", + triton="libdevice.cyl_bessel_i1({x})", + name="special_modified_bessel_i1", + ), + modified_bessel_k0=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="modified_bessel_k0_forward({x})", + name="special_modified_bessel_k0", + ), + modified_bessel_k1=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="modified_bessel_k1_forward({x})", + name="special_modified_bessel_k1", + ), + # multigamma + ndtr=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_ndtr({x})", + name="special_ndtr", + ), + ndtri=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_ndtri({x})", + name="special_ndtri", + ), + polygamma=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="calc_polygamma({y}, {x})", + name="polygamma", + ), + # psi - alias to digamma + # round + scaled_modified_bessel_k0=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="scaled_modified_bessel_k0_forward({x})", + name="special_scaled_modified_bessel_k0", + ), + scaled_modified_bessel_k1=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="scaled_modified_bessel_k1_forward({x})", + name="special_scaled_modified_bessel_k1", + ), + # sinc + spherical_bessel_j0=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="spherical_bessel_j0_forward({x})", + name="special_spherical_bessel_j0", + ), + zeta=OverridesData( + 
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="zeta({x}, {y})", + name="special_zeta", + ), + chebyshev_polynomial_t=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="chebyshev_polynomial_t_forward({x}, {y})", + name="special_chebyshev_polynomial_t", + ), + chebyshev_polynomial_u=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="chebyshev_polynomial_u_forward({x}, {y})", + name="special_chebyshev_polynomial_u", + ), + chebyshev_polynomial_v=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="chebyshev_polynomial_v_forward({x}, {y})", + name="special_chebyshev_polynomial_v", + ), + chebyshev_polynomial_w=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="chebyshev_polynomial_w_forward({x}, {y})", + name="special_chebyshev_polynomial_w", + ), + legendre_polynomial_p=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="legendre_polynomial_p_forward({x}, {y})", + name="special_legendre_polynomial_p", + ), + shifted_chebyshev_polynomial_t=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="shifted_chebyshev_polynomial_t_forward({x}, {y})", + name="special_shifted_chebyshev_polynomial_t", + ), + shifted_chebyshev_polynomial_u=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="shifted_chebyshev_polynomial_u_forward({x}, {y})", + name="special_shifted_chebyshev_polynomial_u", + ), + shifted_chebyshev_polynomial_v=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="shifted_chebyshev_polynomial_v_forward({x}, {y})", + name="special_shifted_chebyshev_polynomial_v", + ), + shifted_chebyshev_polynomial_w=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="shifted_chebyshev_polynomial_w_forward({x}, {y})", + name="special_shifted_chebyshev_polynomial_w", + ), + hermite_polynomial_h=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="hermite_polynomial_h_forward({x}, {y})", + name="special_hermite_polynomial_h", + ), + hermite_polynomial_he=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="hermite_polynomial_he_forward({x}, {y})", + name="special_hermite_polynomial_he", + ), + laguerre_polynomial_l=OverridesData( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + cpp="laguerre_polynomial_l_forward({x}, {y})", + name="special_laguerre_polynomial_l", + ), +) + + +# Use mypy to check protocol implemented correctly +def _typecheck_OpOverrides(h: OpOverrides) -> OpsHandler[str]: + return h + + +class DeferredLine(DeferredLineBase): + """A line that can be 'unwritten' by adding name to V.graph.removed_buffers""" + + def __init__(self, name, line): + super().__init__(line) + self.name = name + assert not isinstance(line, DeferredLineBase) + + def __call__(self): + if all( + self.name not in x + for x in ( + V.graph.removed_buffers, + V.kernel.removed_buffers, + V.graph.inplaced_to_remove, + V.kernel.inplaced_to_remove, + ) + ): + return self.line + return None + + def _new_line(self, line): + return DeferredLine(self.name, line) + + +class BracesBuffer(IndentedBuffer): + def indent(self, offset=1): + @contextlib.contextmanager + def ctx(): + for _ in range(offset): + self.writeline("{") + self._indent += 1 + for _ in range(-offset): + self._indent -= 
1 + self.writeline("}") + yield + for _ in range(-offset): + self.writeline("{") + self._indent += 1 + for _ in range(offset): + self._indent -= 1 + self.writeline("}") + + return ctx() + + +class InplacedBuffer(NamedTuple): + inner_name: str + other_names: List[str] + + +class KernelArgs: + @staticmethod + def _lookup(prefix, odict, name): + assert isinstance(name, (str, sympy.Symbol)) + if name not in odict: + odict[name] = f"{prefix}{len(odict)}" + return odict[name] + + def __init__(self, sizevars=None): + self.input_buffers = dict() + self.output_buffers = dict() + self.inplace_buffers = dict() + self.sizevars = sizevars or dict() + self.workspace_arg = None + + def __repr__(self): + return "KernelArgs({})".format( + ", ".join( + map( + repr, + [ + self.input_buffers, + self.output_buffers, + self.inplace_buffers, + self.sizevars, + ], + ) + ) + ) + + def _buffer_is_marked_removed(self, name): + return isinstance(name, str) and name.startswith("REMOVED") + + def input(self, name): + if V.graph.scheduler: + name = V.graph.scheduler.mutation_real_name.get(name, name) + assert name not in V.graph.removed_buffers, name + if name in self.output_buffers: + return self.output_buffers[name] + if name in self.inplace_buffers: + return self.inplace_buffers[name].inner_name + if name.startswith("seed"): + return self._lookup("seed", self.input_buffers, name) + return self._lookup("in_ptr", self.input_buffers, name) + + def output(self, name): + if V.graph.scheduler: + name = V.graph.scheduler.mutation_real_name.get(name, name) + assert name not in V.graph.removed_buffers, name + if name in self.inplace_buffers: + return self.inplace_buffers[name].inner_name + return self._lookup("out_ptr", self.output_buffers, name) + + def make_inplace(self, input_name, output_name): + assert output_name not in self.inplace_buffers + if input_name in self.inplace_buffers: + buf = self.inplace_buffers[input_name] + buf.other_names.append(output_name) + self.inplace_buffers[output_name] = buf + else: + buf = InplacedBuffer( + f"in_out_ptr{len(unique(self.inplace_buffers.values()))}", + [input_name, output_name], + ) + self.inplace_buffers[input_name] = buf + self.inplace_buffers[output_name] = buf + + def workspace(self, nbytes: sympy.Expr, zero_fill: bool): + if self.workspace_arg is None: + self.workspace_arg = WorkspaceArg(nbytes, zero_fill) + return "ws_ptr", 0 + + offset = self.workspace_arg.nbytes + zero_fill = zero_fill or self.workspace_arg.zero_fill + self.workspace_arg = WorkspaceArg(offset + nbytes, zero_fill) + return "ws_ptr", offset + + def seed_offset(self, name, value): + if value in self.sizevars: + return self.sizevars[value] + if name in self.sizevars.values(): + name = ( + f"{name}{sum(1 for v in self.sizevars.values() if v.startswith(name))}" + ) + self.sizevars[value] = name + return name + + def size(self, name): + if str(name) == "seed": + self.sizevars["seed"] = "seed" + return "seed" + return self._lookup("ks", self.sizevars, name) + + def call_names(self): + return chain( + self.input_buffers.keys(), self.output_buffers.keys(), self.sizevars.keys() + ) + + def wrap_ptr_arg(self, buf, dtype): + return buf + + def wrap_size_arg(self, size): + return str(size) + + def cpp_argdefs(self): + from .cpp import DTYPE_TO_CPP, INDEX_TYPE + + call_args = [] + arg_defs = [] + arg_types = [] + for inplaced in unique(self.inplace_buffers.values()): + if self._buffer_is_marked_removed(inplaced): + continue + outer = inplaced.other_names[-1] + inner = inplaced.inner_name + dtype = 
V.graph.get_dtype(outer) + cpp_dtype = DTYPE_TO_CPP[dtype] + arg_defs.append(f"{cpp_dtype}* {inner}") + call_args.append(self.wrap_ptr_arg(outer, dtype)) + arg_types.append(f"{cpp_dtype}*") + for outer, inner in self.input_buffers.items(): + if outer in self.inplace_buffers: + continue + dtype = V.graph.get_dtype(outer) + cpp_dtype = DTYPE_TO_CPP[dtype] + arg_defs.append(f"const {cpp_dtype}* {inner}") + call_args.append(self.wrap_ptr_arg(outer, dtype)) + arg_types.append(f"const {cpp_dtype}*") + for outer, inner in self.output_buffers.items(): + if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner): + continue + dtype = V.graph.get_dtype(outer) + cpp_dtype = DTYPE_TO_CPP[dtype] + arg_defs.append(f"{cpp_dtype}* {inner}") + call_args.append(self.wrap_ptr_arg(outer, dtype)) + arg_types.append(f"{cpp_dtype}*") + for outer, inner in self.sizevars.items(): + arg_defs.append(f"const {INDEX_TYPE} {inner}") + call_args.append(self.wrap_size_arg(outer)) + arg_types.append(f"const {INDEX_TYPE}") + if V.graph.wrapper_code: + V.graph.wrapper_code.ensure_size_computed(outer) + assert self.workspace_arg is None, "Workspace not supported on CPU " + return arg_defs, call_args, arg_types + + def python_argdefs(self): + arg_defs = [] + call_args = [] + precompile_args: List[Union[TensorArg, SizeArg, WorkspaceArg]] = [] + for inplaced in unique(self.inplace_buffers.values()): + if self._buffer_is_marked_removed(inplaced): + continue + arg_defs.append(inplaced.inner_name) + call_args.append(inplaced.other_names[-1]) + precompile_args.append( + TensorArg( + name=inplaced.inner_name, + buffer=inplaced.other_names[-1], + dtype=V.graph.get_dtype(inplaced.other_names[-1]), + ) + ) + for outer, inner in chain( + self.input_buffers.items(), self.output_buffers.items() + ): + if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner): + continue + arg_defs.append(inner) + call_args.append(outer) + precompile_args.append( + TensorArg( + name=inner, + buffer=outer, + dtype=V.graph.get_dtype(outer), + ) + ) + for outer, inner in self.sizevars.items(): + arg_defs.append(inner) + call_args.append(outer) + precompile_args.append(SizeArg(inner, outer)) + if V.graph.wrapper_code: + V.graph.wrapper_code.ensure_size_computed(outer) + if self.workspace_arg is not None: + arg_defs.append("ws_ptr") + call_args.append("workspace") + precompile_args.append(self.workspace_arg) + + return arg_defs, call_args, precompile_args + + def aliases(self): + for inplaced in unique(self.inplace_buffers.values()): + if self._buffer_is_marked_removed(inplaced): + continue + for other in inplaced.other_names: + if ( + other in V.graph.inplaced_to_remove + or other in V.kernel.inplaced_to_remove + ): + continue + if other in self.input_buffers: + yield self.input_buffers[other], inplaced.inner_name + if other in self.output_buffers: + yield self.output_buffers[other], inplaced.inner_name + + def is_removed(self, name): + def _is_removed(name, buffers): + return name not in buffers or self._buffer_is_marked_removed(buffers[name]) + + return _is_removed(name, self.output_buffers) and _is_removed( + name, self.inplace_buffers + ) + + # Includes inplace buffers, excludes removed buffers. Essentially, + # after you do a call into this kernel, which buffers actually contain + # updated data? Modeled off of python_argdefs. 
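As a side note, the in_ptrN / out_ptrN / ksN naming that cpp_argdefs and python_argdefs rely on can be illustrated with a small standalone sketch. The real KernelArgs consults V.graph for removed and mutated buffers, so the toy class below (ToyKernelArgs, an invented name) only mirrors the bookkeeping with plain dicts.

class ToyKernelArgs:
    """Toy mirror of the KernelArgs naming scheme (illustration only)."""

    def __init__(self):
        self.input_buffers = {}   # graph buffer name -> "in_ptrN"
        self.output_buffers = {}  # graph buffer name -> "out_ptrN"
        self.sizevars = {}        # size symbol -> "ksN"

    @staticmethod
    def _lookup(prefix, table, name):
        # First use of a name claims the next free slot for its prefix.
        if name not in table:
            table[name] = f"{prefix}{len(table)}"
        return table[name]

    def input(self, name):
        return self._lookup("in_ptr", self.input_buffers, name)

    def output(self, name):
        return self._lookup("out_ptr", self.output_buffers, name)

    def size(self, name):
        return self._lookup("ks", self.sizevars, name)


args = ToyKernelArgs()
print(args.input("arg0_1"), args.input("buf0"), args.output("buf1"), args.size("s0"))
# -> in_ptr0 in_ptr1 out_ptr0 ks0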
+ def live_output_buffers(self): + live_outs = set() + for inplaced in unique(self.inplace_buffers.values()): + if self._buffer_is_marked_removed(inplaced): + continue + live_outs.add(inplaced.other_names[-1]) + for outer, inner in self.output_buffers.items(): + if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner): + continue + live_outs.add(outer) + return live_outs + + +class CSEVariable: + """A CSEVariable is just a name for an expression but it is useful to be able to annotate them on a backend dependent basis. + To do so, the backends can simply overload `Kernel.create_cse_var` + The "CSEVariable.update_on_args" method gives you a hook for annotations + See example of TritonCSEVariable in triton.py + """ + + def __init__(self, name, bounds: ValueRanges[Any]): + assert isinstance(bounds, ValueRanges) + self.name = name + self.bounds = bounds + + def __str__(self): + return self.name + + def __hash__(self) -> int: + return hash(self.name) + + def __eq__(self, other) -> bool: + return type(other) == type(self) and other.name == self.name + + def update_on_args(self, name, args, kwargs): + pass + + +class CppWrapperKernelArgs(KernelArgs): + def wrap_ptr_arg(self, buf, dtype): + from .cpp import DTYPE_TO_CPP + + if config.abi_compatible: + # In the abi_compatible model, we just return the buf here. + # We will form correct call args later in wrapper.generate_kernel_all. + return buf + else: + return f"({DTYPE_TO_CPP[dtype]}*)({buf}.data_ptr())" + + def wrap_size_arg(self, size): + return f"{size}" + + +class CSE: + """Common subexpression elimination""" + + def __init__( + self, + prefix="", + suffix="", + name_prefix="tmp", + iter_buffers=None, + store_cache=None, + reduction_cache=None, + varname_map=None, + ): + self.prefix = prefix + self.suffix = suffix + self.cache = {} + self.name_prefix = name_prefix + self.store_cache = store_cache or {} + self.reduction_cache = reduction_cache or {} + self.iter_buffer_ids = iter_buffers or itertools.count() + self.invalidated_stores = set() + self.varname_map = varname_map or {} + + def invalidate(self, keep_vars: Set[str]): + for name, tmp in list(self.store_cache.items()): + if tmp not in keep_vars: + del self.store_cache[name] + self.invalidated_stores.add(name) + self.cache = {k: v for k, v in self.cache.items() if v in keep_vars} + + def clone(self): + # Note(fdrocha): reduction_cache is not being cloned, not sure if this is intentional + return CSE( + prefix=self.prefix, + suffix=self.suffix, + name_prefix=self.name_prefix, + iter_buffers=self.iter_buffer_ids, + store_cache=self.store_cache, + varname_map=self.varname_map, + ) + + def generate( + self, + buffer: IndentedBuffer, + expr: Union[str, CSEVariable, OpsValue, IndentedBuffer], + *, + bounds: ValueRanges[Any] = ValueRanges.unknown(), + write=True, + assignment=True, + ) -> CSEVariable: + if isinstance(expr, OpsValue): + expr = expr.value + + assert isinstance(expr, (str, CSEVariable, IndentedBuffer)), type(expr) + assert write or assignment + if isinstance(expr, CSEVariable): + # If the expressions were always created with all the information, we could + # assert expr.bounds == bounds, but sometimes the expression is created + # with the loose ValueRanges.unknown(), so we need to tighten the bounds + expr.bounds = expr.bounds.tighten(bounds) + return expr + cache_key = expr.getvalue() if isinstance(expr, IndentedBuffer) else expr + var = self.cache.get(cache_key, None) + if not var: + var = self.newvar(bounds) if assignment else None + self.cache[cache_key] = 
var + if write: + if V.kernel.current_node: + V.kernel.current_node.codegen_originating_info( + buffer, only_once=True + ) + if isinstance(expr, IndentedBuffer): + if assignment: + buffer.writeline(f"{self.prefix}{var} =") + buffer.splice(expr) + buffer.writeline(self.suffix) + else: + if assignment: + line = f"{self.prefix}{var} = {expr}{self.suffix}" + else: + line = f"{expr}{self.suffix}" + buffer.writeline(line) + else: + var.bounds = var.bounds.tighten(bounds) + + return var + + def newvar(self, bounds: ValueRanges[Any] = ValueRanges.unknown()) -> CSEVariable: + var_name = f"{self.name_prefix}{next(self.iter_buffer_ids)}" + var = V.kernel.create_cse_var(var_name, bounds) + self.varname_map[var_name] = var + return var + + +class IndirectAssertLine(DeferredLineBase): + def __init__(self, line, assert_fn, var, mask, size_map): + self.var = var + self.mask = mask + self.line = line + self.assert_fn = assert_fn + self.size_map = size_map + + def __call__(self): + size, size_str = self.size_map[(self.var, self.mask)] + + # We assert if we've not been able to prove the bound + assert_min = (self.var.bounds.lower >= 0) != sympy.true + assert_max = (self.var.bounds.upper < size) != sympy.true + + # FooBar interview question + if not (assert_min or assert_max): + return None + elif assert_min and assert_max: + # The conditions need to be in parens because of Python's operator precedence. + # It'd be less error-prone to use and/or/not, which is suported by triton + cond = f"(0 <= {self.var}) & ({self.var} < {size_str})" + cond_print = f"0 <= {self.var} < {size_str}" + elif assert_min: + cond = f"0 <= {self.var}" + cond_print = cond + else: + assert assert_max + cond = f"{self.var} < {size_str}" + cond_print = cond + + if self.mask: + cond = f"({cond}) | ~{self.mask}" + return self.line.format( + assert_fn=self.assert_fn, cond=cond, cond_print=cond_print + ) + + def _new_line(self, line): + return IndirectAssertLine( + line, self.assert_fn, self.var, self.mask, self.size_map + ) + + +class CodeGen: + def __init__(self): + super().__init__() + self.exit_stack = contextlib.ExitStack() + + def __enter__(self): + self.exit_stack.__enter__() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.exit_stack.__exit__(exc_type, exc_val, exc_tb) + + +class Kernel(CodeGen): + newvar_prefix = "" + suffix = "" + overrides: Optional[Callable[[OpsHandler[Any]], OpsHandler[Any]]] = None + # TODO: these look dead, but with all the getattr it's hard to tell... 
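DeferredLine earlier and IndirectAssertLine here share one pattern: a code line is stored as a callable and only rendered, or silently dropped, once the final information (removed buffers, proven bounds) is available. A minimal self-contained sketch of that idea, with invented names:

class ToyDeferredLine:
    """Toy version of the deferred-line idea (illustration only)."""

    def __init__(self, buffer_name, text):
        self.buffer_name = buffer_name
        self.text = text

    def __call__(self, removed_buffers):
        # Returning None "unwrites" the line if its buffer was later removed.
        return None if self.buffer_name in removed_buffers else self.text


pending = [
    ToyDeferredLine("buf0", "out_ptr0[x0] = tmp3;"),
    ToyDeferredLine("buf1", "out_ptr1[x0] = tmp4;"),
]
removed = {"buf1"}  # decided later, e.g. by the scheduler
print("\n".join(line for line in (p(removed) for p in pending) if line is not None))
# -> out_ptr0[x0] = tmp3;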
+ load_format: None = None + store_format: None = None + + def __init__(self, args=None, increase_kernel_count=True): + super().__init__() + if increase_kernel_count: + metrics.generated_kernel_count += 1 + self.args = args or KernelArgs() + self.loads = IndentedBuffer() + self.compute = IndentedBuffer() + self.stores = IndentedBuffer() + self.cse: CSE = CSE(self.newvar_prefix, self.suffix) + self.must_keep_buffers = set() + self.store_buffer_names = set() + self._load_mask = None + # set in set_current_node + self.current_node = None + self.node_to_bounds: Optional[Dict[torch.fx.Node, ValueRanges[Any]]] = None + # Upper bounds for indirect_indexing and their str representation + # NB: None, None is never stored in map, but it is the assumed + # "not set" value for the dict + self.indirect_max_sizes: Dict[ + Tuple[CSEVariable, str], Union[Tuple[sympy.Expr, str], Tuple[None, None]] + ] = {} + + self.removed_buffers = set() + self.inplaced_to_remove = set() + + # key: the buffer to write + # value: the buffer to read and whose memory can be reused for + # the buffer specified by key + self.inplace_update_buffers = dict() + # Set minimum number of elements processed per thread. + self.min_elem_per_thread = 1 + self.kernel_name = None + + @contextlib.contextmanager + def set_current_node(self, node): + prior = self.current_node + self.current_node = node + self.node_to_bounds = node._body.bounds().get_bounds() + try: + yield + finally: + self.current_node = prior + + @contextlib.contextmanager + def swap_buffers(self, lb, cb=None, sb=None): + if cb is None: + cb = lb + loads = self.loads + compute = self.compute + stores = self.stores + cse = self.cse + self.loads = lb + self.compute = cb + self.stores = sb + self.cse = cse.clone() + try: + yield + finally: + self.loads = loads + self.compute = compute + self.stores = stores + self.cse = cse + + def load(self, name: str, index: sympy.Expr) -> CSEVariable: + raise NotImplementedError() + + def indirect_load(self, name: str, index: sympy.Expr): + """A load the depends on an index we have read""" + prior = self.loads + try: + # put the load in the compute section as it might have deps + self.loads = self.compute + return self.load(name, index) + finally: + self.loads = prior + + def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable): + raise NotImplementedError() + + def store( + self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None + ) -> None: + raise NotImplementedError() + + def reduction( + self, + dtype: torch.dtype, + src_dtype: torch.dtype, + reduction_type: ReductionType, + value: Union[CSEVariable, Tuple[CSEVariable, ...]], + ) -> Union[CSEVariable, Tuple[CSEVariable, ...]]: + raise NotImplementedError() + + def scan( + self, + dtype: torch.dtype, + combine_fn: Callable[[CSEVariable, CSEVariable], CSEVariable], + value: CSEVariable, + init: int, + ) -> CSEVariable: + raise NotImplementedError() + + def bucketize( + self, + values: CSEVariable, + offsets_name: str, + offsets_size: sympy.Expr, + indexing_dtype: torch.dtype, + right: bool, + ) -> CSEVariable: + """ + See [Note: Inductor bucketize op] + """ + raise NotImplementedError() + + @property + def assert_function(self) -> str: + raise NotImplementedError() + + def index_to_str(self, index: sympy.Expr) -> str: + raise NotImplementedError() + + def __enter__(self): + # TODO: hoist this to top level + class CSEProxy: + self.name = "CSEProxy" + + @staticmethod + def __getattr__(name: str) -> Callable[..., CSEVariable]: # type: ignore[misc] 
+ def inner(*args, **kwargs): + # TritonTemplateKernel has no current_node + buf_bounds = ValueRanges.unknown() + if hasattr(V.interpreter, "current_node"): + fx_node = V.interpreter.current_node + assert isinstance(self.node_to_bounds, dict) + buf_bounds = self.node_to_bounds.get( + fx_node, ValueRanges.unknown() + ) + + value = getattr(parent_handler, name)(*args, **kwargs) # type: ignore[has-type] + + def do_cse(v): + csevar = self.cse.generate(self.compute, v, bounds=buf_bounds) + csevar.update_on_args(name, args, kwargs) + return csevar + + return pytree.tree_map(do_cse, value) + + return inner + + @staticmethod + def indirect_indexing( + var: CSEVariable, size: sympy.Expr, check: bool = True + ): + # Skip CSE since this doesn't return an expression + + if var.bounds.lower < 0: # type: ignore[operator] + new_bounds = ValueRanges.unknown() + if var.bounds != ValueRanges.unknown() and isinstance( + size, sympy.Number + ): + # Take the negative part of the bound and add size to it + # Then take union of that and the positive part + # This is a tighter bound than that of a generic ops.where, as we have info on the cond + neg = var.bounds & ValueRanges(-sympy.oo, -1) + new_bounds = ValueRanges(neg.lower + size, neg.upper + size) + # We don't have a good way of representing the empty range + if var.bounds.upper >= 0: # type: ignore[operator] + pos = var.bounds & ValueRanges(0, sympy.oo) + new_bounds = new_bounds | pos + + stm = ops.add(var, self.rename_indexing(size)) + # Mixed negative and non-negative + if var.bounds.upper >= 0: # type: ignore[operator] + lt = ops.lt(var, "0") + stm = ops.where(lt, stm, var) + new_var = self.cse.generate(self.compute, stm, bounds=new_bounds) + + new_var.update_on_args("index_wrap", (var,), {}) + var = new_var + + if self.generate_assert(check): + mask = self.load_mask(var) + + # An assertion line may have been written already, if so just + # update the max size. 
+ map_key = (var, mask) + existing_size, _ = self.indirect_max_sizes.get( + map_key, (None, None) + ) + if existing_size is not None: + size = sympy.Min(size, existing_size) + else: + line = ( + '{assert_fn}({cond}, "index out of bounds: {cond_print}")' + ) + self.compute.writeline( + IndirectAssertLine( + line, + self.assert_function, + var, + mask, + self.indirect_max_sizes, + ) + ) + + self.indirect_max_sizes[map_key] = (size, self.index_to_str(size)) + return sympy_index_symbol(str(var)) + + @staticmethod + def load(name: str, index: sympy.Expr) -> CSEVariable: + if name in self.cse.invalidated_stores: + # A load from an invalidated store requires us to + # keep the actual buffer around + V.kernel.must_keep_buffers.add(name) + if free_symbol_startswith(index, "tmp"): + return self.indirect_load(name, index) + store_cache = self.cse.store_cache + if name in store_cache: + return store_cache[name] + return self.load(name, index) + + @staticmethod + def store( + name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None + ) -> None: + self.store_buffer_names.add(name) + if mode is None: + self.cse.store_cache[name] = value + if self.current_node: + for other_name in self.current_node.get_mutations(): + self.cse.store_cache[other_name] = value + if name not in V.graph.removed_buffers: + return self.store(name, index, value, mode=mode) + else: + return None # type: ignore[return-value] + + @staticmethod + def store_reduction(name: str, index: sympy.Expr, value: CSEVariable): + self.store_buffer_names.add(name) + self.cse.store_cache[name] = value + if self.current_node: + for other_name in self.current_node.get_mutations(): + self.cse.store_cache[other_name] = value + + if name not in V.graph.removed_buffers: + return self.store_reduction(name, index, value) + + @staticmethod + def reduction( + dtype: torch.dtype, + src_dtype: torch.dtype, + reduction_type: ReductionType, + value: Union[CSEVariable, Tuple[CSEVariable, ...]], + ) -> Union[CSEVariable, Tuple[CSEVariable, ...]]: + return self.reduction(dtype, src_dtype, reduction_type, value) + + @staticmethod + def scan( + dtype: torch.dtype, + combine_fn: Callable[[CSEVariable, CSEVariable], CSEVariable], + value: CSEVariable, + init: int, + ) -> CSEVariable: + return self.scan(dtype, combine_fn, value, init) + + @staticmethod + def bucketize( + values: CSEVariable, + offsets_name: str, + offsets_size: sympy.Expr, + indexing_dtype: torch.dtype, + right: bool, + ) -> CSEVariable: + """ + [Note: Inductor bucketize op] + + Given values (tensor) and offsets_name (reference to the name of a 1D + tensor), calculate the bucket that each value belongs to. + + e.g. for values [-1, 0, 1, 2, 3, 4, 5, 9], offsets [0, 4, 4, 8], right=True + return = [ 0, 1, 1, 1, 1, 3, 3, 4]. + + When right == False, bucket i refers to range (offsets[i], offsets[i+1]]. + When right == True, bucket i refers to range [offsets[i], offsets[i+1]). + + Offsets must be non-decreasing or the result is undefined. 
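The worked example in this docstring can be reproduced with Python's bisect module; the sketch below is only a reference check of the right=True case, not how the op is actually lowered.

import bisect

def ref_bucketize_right(values, offsets):
    # right=True above: index of the first offset strictly greater than the value.
    return [bisect.bisect_right(offsets, v) for v in values]

print(ref_bucketize_right([-1, 0, 1, 2, 3, 4, 5, 9], [0, 4, 4, 8]))
# -> [0, 1, 1, 1, 1, 3, 3, 4]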
+ """ + return self.bucketize( + values, offsets_name, offsets_size, indexing_dtype, right + ) + + # Use mypy to check protocol implemented correctly + def _typecheck_CSEProxy(h: CSEProxy) -> OpsHandler[CSEVariable]: + return h + + super().__enter__() + assert self.overrides + parent_handler = self.overrides(V.get_ops_handler()) + self.exit_stack.enter_context(V.set_ops_handler(CSEProxy())) + self.exit_stack.enter_context(V.set_kernel_handler(self)) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """ + Note that V.graph.scheduler can be None when codegening triton template + kernels. + """ + if V.graph.scheduler: + V.graph.scheduler.remove_kernel_local_buffers() + super().__exit__(exc_type, exc_val, exc_tb) + + def generate_assert(self, check): + return (check or config.debug_index_asserts) and config.assert_indirect_indexing + + def load_mask(self, var) -> str: + # only the triton kernel requires mask + return "" + + def rename_indexing(self, index) -> sympy.Expr: + # adds the necessary kernel args for index expressions + # and renames variables in index expressions to kernel arg names + if isinstance(index, (list, tuple)): + return [self.rename_indexing(x) for x in index] # type: ignore[return-value] + index = V.graph.sizevars.simplify(index) + sorted_symbols = sorted(index.free_symbols, key=lambda s: s.name) + replacements = { + x: self.args.size(x) + for x in sorted_symbols + if x.name.startswith(("s", "u", "ps")) + or (x.name.startswith("i") and not x.name.startswith("idx")) + } + return sympy_subs(index, replacements) + + def create_cse_var(self, *args, **kwargs): + return CSEVariable(*args, **kwargs) + + +@dataclasses.dataclass +class OptimizationContext: + key: ClassVar[str] = "opt_ctx" + + # Load value as mask + is_load_as_mask: bool = False + + dtype: Optional[torch.dtype] = None + ops_name: str = "" + + # Load uint8/int8 value as float32 + is_load_int8_as_float: bool = False + + +@functools.lru_cache(None) +def jinja2_env(): + try: + import jinja2 + + return jinja2.Environment( + undefined=jinja2.StrictUndefined, + ) + except ImportError: + return None + + +PrimitiveInfoType = Union[int, float, bool, str, List[Union[int, str, float, bool]]] + + +class ChoiceCaller: + """ + Represents a possible choice used in autotune_process.py. + During autotuning, self.benchmark() is first called to get benchmark result, + and if this choice is selected, self.output_node() is called to get the output_node. + + Children classes: TritonTemplateCaller, CUDATemplateCaller. + """ + + def __init__(self, name, input_nodes, layout): + super().__init__() + self.name = name + self.layout = layout + self.input_nodes = input_nodes + + def benchmark(self, *args, out) -> float: + algo = self.to_callable() + return do_bench(lambda: algo(*args, out=out)) + + def call_name(self) -> str: + raise NotImplementedError() + + def to_callable(self): + raise NotImplementedError() + + def hash_key(self) -> str: + raise NotImplementedError() + + def output_node(self) -> "TensorBox": + raise NotImplementedError() + + def info_dict(self) -> Dict[str, Union[PrimitiveInfoType, List[PrimitiveInfoType]]]: + """Information returned here is logged to the autotune log file when that is enabled.""" + return {} + + +class KernelTemplate: + """ + Base class for defining kernel templates. 
+ + Children classes: TritonTemplate, CUDATemplate + """ + + @staticmethod + def _template_from_string(source): + env = jinja2_env() + if env is not None: + return env.from_string(source) + return None + + @staticmethod + def _fake_get_dtype(fake_out): + _get_dtype_real = V.graph.get_dtype + + def get_dtype(name): + if name == fake_out.get_name(): + return fake_out.get_dtype() + return _get_dtype_real(name) + + return get_dtype + + def __init__(self, name: str): + self.name = name + + def maybe_append_choice(self, choices, **kwargs): + """ + Maybe generates a new ChoiceCaller and appends it into existing choices. + + choices: A list of ChoiceCallers. + kwargs: Additional kwargs to be passed to self.generate() to generate a new ChoiceCaller. + """ + + try: + choices.append(self.generate(**kwargs)) + except NotImplementedError: + pass + + def generate(self, **kwargs) -> ChoiceCaller: + """ + Generates a ChoiceCaller instance from the given arguments. + """ + + raise NotImplementedError() diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py new file mode 100644 index 0000000000000000000000000000000000000000..c30a3f6434a3d9f0bb7f2e7383e715207a060442 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py @@ -0,0 +1,4038 @@ +import contextlib +import dataclasses +import functools +import itertools +import logging +import math +import re +import sys +from copy import copy, deepcopy +from enum import Enum +from typing import Any, Dict, List, Optional, Set, Tuple, Union + +import sympy + +import torch +import torch.fx +from torch._inductor import dependencies +from torch._inductor.ir import StorageBox, TensorBox +from torch._prims_common import is_float_dtype +from torch.utils import _pytree as pytree +from torch.utils._sympy.functions import FloorDiv, ModularIndexing +from torch.utils._sympy.value_ranges import bound_sympy, ValueRanges + +from .. 
import codecache, config, ir, metrics +from ..codegen.wrapper import WrapperCodeGen +from ..optimize_indexing import range_expressable_in_32_bits +from ..scheduler import ( + BaseScheduling, + ForeachKernelSchedulerNode, + FusedSchedulerNode, + SchedulerNode, +) +from ..utils import ( + cache_on_self, + get_fused_kernel_name, + is_welford_reduction, + parallel_num_threads, + sympy_index_symbol, + sympy_product, + sympy_subs, +) + +from ..virtualized import ops, OpsValue, V +from .common import ( + BracesBuffer, + CppWrapperKernelArgs, + CSE, + CSEVariable, + DataTypePropagation, + DeferredLine, + DTYPE_TO_COMPUTATION_DTYPE, + ExprPrinter, + IndentedBuffer, + Kernel, + KernelArgs, + OpOverrides, + OptimizationContext, +) + +schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") + +DTYPE_TO_CPP = { + torch.float32: "float", + torch.float64: "double", + torch.float16: "half", + torch.int64: "long", + torch.int32: "int", + torch.int16: "short", + torch.int8: "signed char", + torch.uint64: "unsigned long", + torch.uint32: "unsigned int", + torch.uint16: "unsigned short", + torch.uint8: "unsigned char", + torch.uint32: "unsigned int", + torch.uint64: "unsigned long", + torch.bool: "bool", + torch.bfloat16: "bfloat16", + torch.complex64: "complex64", + torch.float8_e4m3fn: "float8_e4m3fn", + torch.float8_e5m2: "float8_e5m2", +} + +DTYPE_TO_ATEN = { + torch.float32: "at::kFloat", + torch.float64: "at::kDouble", + torch.float16: "at::kHalf", + torch.int64: "at::kLong", + torch.int32: "at::kInt", + torch.int16: "at::kShort", + torch.int8: "at::kChar", + torch.uint64: "at::kUInt64", + torch.uint32: "at::kUInt32", + torch.uint16: "at::kUInt16", + torch.uint8: "at::kByte", + torch.uint32: "at::kUInt32", + torch.uint64: "at::kUInt64", + torch.bool: "at::kBool", + torch.bfloat16: "at::kBFloat16", + torch.complex32: "at::kComplexHalf", + torch.complex64: "at::kComplexFloat", + torch.complex128: "at::kComplexDouble", + torch.float8_e4m3fn: "at::kFloat8_e4m3fn", + torch.float8_e5m2: "at::kFloat8_e5m2", + torch.float8_e4m3fnuz: "at::kFloat8_e4m3fnuz", + torch.float8_e5m2fnuz: "at::kFloat8_e5m2fnuz", +} + +DEVICE_TO_ATEN = { + "cpu": "at::kCPU", + "cuda": "at::kCUDA", +} + +INDEX_TYPE = "long" + +NATIVE_OMP_RTYPES = {"+", "*", "^", "||", "min", "max"} +RTYPE_TO_CPP = { + "sum": "+", + "prod": "*", + "xor_sum": "^", + "min": "min", + "max": "max", + "argmin": "argmin", + "argmax": "argmax", + "any": "||", + "welford_reduce": "welford", + "welford_combine": "welford", +} +VECTORIZABLE_RTYPES = { + "max", + "min", + "sum", + "prod", + "xor_sum", + "welford_reduce", + "welford_combine", +} + +PYTHON_TO_CPP = { + "Tensor": "at::Tensor", + "int": "long", + "float": "double", + "bool": "bool", + "str": "std::string", + "ScalarType": "c10::ScalarType", + "MemoryFormat": "at::MemoryFormat", + "Layout": "at::Layout", + "Device": "at::Device", + "number": "at::Scalar", +} + +CONTAINER_PYTHON_TO_CPP = { + "List": "std::vector", + "Optional": "c10::optional", +} + +DTYPE_LOWP_FP = [ + torch.bfloat16, + torch.float16, +] + + +def value_to_cpp(value, cpp_type): + if value == float("-inf"): + return f"-std::numeric_limits<{cpp_type}>::infinity()" + elif value == float("inf"): + return f"std::numeric_limits<{cpp_type}>::infinity()" + elif isinstance(value, bool): + return f"static_cast<{cpp_type}>({str(value).lower()})" + elif math.isnan(value): + return f"std::numeric_limits<{cpp_type}>::quiet_NaN()" + else: + return f"static_cast<{cpp_type}>({repr(value)})" + + +def reduction_init(reduction_type, dtype): + if 
dtype in DTYPE_LOWP_FP: + # Since load promotes all half-precision inputs to float, the initial + # constant for reduction must be promoted as well + dtype = torch.float32 + if reduction_type in ("xor_sum", "sum", "any"): + return 0 + if reduction_type == "prod": + return 1 + if reduction_type in {"max", "argmax"}: + return ( + f"-std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::infinity()" + if is_float_dtype(dtype) + else f"std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::min()" + ) + if reduction_type in {"min", "argmin"}: + return ( + f"std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::infinity()" + if is_float_dtype(dtype) + else f"std::numeric_limits<{DTYPE_TO_CPP[dtype]}>::max()" + ) + if is_welford_reduction(reduction_type): + return f"Welford<{DTYPE_TO_CPP[dtype]}>()" + raise AssertionError(reduction_type) + + +def reduction_acc_type(reduction_type, dtype): + assert reduction_type not in {"argmin", "argmax"} + scalar_type = DTYPE_TO_CPP[DTYPE_TO_COMPUTATION_DTYPE[dtype]] + if is_welford_reduction(reduction_type): + return f"Welford<{scalar_type}>" + + return scalar_type + + +def reduction_combine(reduction_type, var, next_value): + if reduction_type == "sum": + return f"{var} + {next_value}" + if reduction_type == "prod": + return f"{var} * {next_value}" + if reduction_type == "xor_sum": + return f"{var} ^ {next_value}" + if reduction_type == "any": + return f"{var} || {next_value}" + if reduction_type in ("min", "max"): + return f"{reduction_type}_propagate_nan({var}, {next_value})" + if reduction_type == "welford_reduce": + return f"welford_combine({var}, {next_value})" + if reduction_type == "welford_combine": + if isinstance(next_value, tuple): + mean, m2, weight = next_value + else: + mean, m2, weight = reduction_project(reduction_type, next_value) + return f"welford_combine({var}, {{{mean}, {m2}, {weight}}})" + raise AssertionError(reduction_type) + + +def reduction_project(reduction_type, acc): + if is_welford_reduction(reduction_type): + return f"{acc}.mean", f"{acc}.m2", f"{acc}.weight" + elif reduction_type in {"argmin", "argmax"}: + return f"{acc}.index" + return acc + + +def is_to_lowp_dtype(expr): + to_exprs = ["cvt_fp32_to_lowp_fp", "c10::convert"] + if any(to_expr in expr for to_expr in to_exprs): + if "half" in expr: + return torch.half + if "bfloat16" in expr: + return torch.bfloat16 + return None + + +def get_lowp_to_fp32_expr(lowp_var, src_dtype, kernel): + if isinstance(kernel, CppVecKernel): + return f"cvt_lowp_fp_to_fp32<{DTYPE_TO_CPP[src_dtype]}>({lowp_var})" + else: + assert isinstance(kernel, CppKernel) + return f"c10::convert({lowp_var})" + + +index_value_name_counter = 1 + + +def argmax_argmin_prefix(reduction_type, src_dtype, tmpvar): + global index_value_name_counter + struct_name = f"IndexValue_{index_value_name_counter}" + index_value_name_counter += 1 + + # A small annoyance, due to it being a little cumbersome to just throw {} into strings + prefix = [ + f"struct {struct_name} {{size_t index; {DTYPE_TO_CPP[src_dtype]} value;}};", + f"{struct_name} {tmpvar}{{0, {reduction_init(reduction_type, src_dtype)}}};", + ] + + if reduction_type in ["argmax", "argmin"]: + compare_op = "greater_or_nan" if reduction_type == "argmax" else "less_or_nan" + prefix.extend( + [ + "#if !defined(__clang_major__) || __clang_major__ > 9", + f"#pragma omp declare reduction({reduction_type} : {struct_name} :\\", + f" omp_out = {compare_op}(omp_in.value, omp_out.value, omp_in.index, omp_out.index) ? 
omp_in : omp_out)\\", + f"\tinitializer(omp_priv = {{0, {reduction_init(reduction_type, src_dtype)}}})", + "#endif", + ] + ) + + return prefix + + +@functools.lru_cache +def stride_at(index: sympy.Expr, var: sympy.Symbol): + replacement = {var: var + 1} + new_index = sympy_subs(index, replacement) # type: ignore[arg-type] + return sympy.simplify(new_index - index) + + +@functools.lru_cache +def simplify_index_in_vec_range(index: sympy.Expr, var: sympy.Expr, vec_length: int): + """ + Simplifies the index expression within the range of a vectorized loop. + Given a vectorized loop variable `var` in the range of a loop with `vec_length`, + this function transforms the `index` into an equivalent form. It handles + simplifications for cases where `var` can be expressed as `vec_length * a + b`, + where `b` ranges from 0 to `vec_length - 1`. The function reduces occurrences + of `FloorDiv` and `ModularIndexing` in the `index` with best-effort optimizations. + + NOTE: + The simplified index expression is intended for analysis purposes only, not + for code generation. It replaces `FloorDiv` and `ModularIndexing` with free variables + which are not dependent on the loop variable `var` in the vectorized range. Check + https://github.com/pytorch/pytorch/pull/117221#discussion_r1449746217 for more details. + + Examples: + 1. If `var` is `x3` and `vec_length` is 16, and `x3 = 16*a + b`, then + `FloorDiv(x3, div)` or `ModularIndexing(x3, div, mod)` becomes a free variable + when `div` is divisible by 16. + 2. `ModularIndexing(x3, 1, mod)` can be simplified to `x3 + c` where `c` is a free + variable when `mod` is divisible by 16. + """ + + div_freevar_id = 0 + mod_freevar_id = 0 + + def visit_indexing_div(divisor): + nonlocal div_freevar_id + result = FloorDiv(var, divisor) + if sympy.gcd(divisor, vec_length) == vec_length: + result = sympy.Symbol(f"{var}_div_c{div_freevar_id}") + div_freevar_id += 1 + return result + + def visit_modular_indexing(divisor, modulus): + nonlocal mod_freevar_id + result = ModularIndexing(var, divisor, modulus) + if sympy.gcd(divisor, vec_length) == vec_length: + result = sympy.Symbol(f"{var}_mod_c{mod_freevar_id}") + mod_freevar_id += 1 + elif divisor == 1 and sympy.gcd(modulus, vec_length) == vec_length: + result = var + sympy.Symbol(f"{var}_mod_c{mod_freevar_id}") + mod_freevar_id += 1 + return result + + original_index = index + + div = sympy.Wild("divisor") + if index.has(FloorDiv): + index = index.replace(FloorDiv(var, div), visit_indexing_div) + + mod = sympy.Wild("modulus") + if index.has(ModularIndexing): + index = index.replace(ModularIndexing(var, div, mod), visit_modular_indexing) + + index = sympy.simplify(index) + if index != original_index: + return simplify_index_in_vec_range(index, var, vec_length) + + return index + + +@functools.lru_cache +def stride_at_vec_range(index: sympy.Expr, var: sympy.Symbol, vec_length: int): + index_vec_simplified = simplify_index_in_vec_range(index, var, vec_length) + return stride_at(index_vec_simplified, var) + + +class CppPrinter(ExprPrinter): + def _print_Integer(self, expr): + return f"{int(expr)}L" + + def _print_Where(self, expr): + c = self.paren(self.doprint(expr.args[0])) + p = self.paren(self.doprint(expr.args[1])) + q = self.paren(self.doprint(expr.args[2])) + return f"{c} ? 
{p} : {q}" + + def _print_ModularIndexing(self, expr): + x, div, mod = expr.args + x = self.paren(self.doprint(x)) + if div != 1: + div = self.paren(self.doprint(div)) + if expr.is_integer: + x = f"c10::div_floor_integer({x}, {div})" + else: + x = f"c10::div_floor_floating(static_cast({x}), static_cast({div}))" + mod = self.paren(self.doprint(mod)) + return f"static_cast<{INDEX_TYPE}>({x}) % static_cast<{INDEX_TYPE}>({mod})" + + def _print_FloorDiv(self, expr): + x, div = expr.args + x = self.paren(self.doprint(x)) + div = self.paren(self.doprint(div)) + if expr.is_integer: + return f"c10::div_floor_integer({x}, {div})" + return f"c10::div_floor_floating(static_cast({x}), static_cast({div}))" + + def _print_floor(self, expr): + assert len(expr.args) == 1 + r = f"std::floor({self._print(expr.args[0])})" + return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r + + def _print_Pow(self, expr): + # Uses float constants to perform FP div + base, exp = expr.args + base = self._print(base) + + if exp == 0.5 or exp == -0.5: + return f"std::sqrt({base})" if exp == 0.5 else f"1.0/std::sqrt({base})" + assert exp.is_integer + exp = int(exp) + if exp > 0: + r = "*".join([self.paren(base)] * exp) + elif exp < 0: + r = "1.0/" + self.paren("*".join([self.paren(base)] * abs(exp))) + else: # exp == 0 + r = "1.0" + + return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r + + def _print_Rational(self, expr): + # Uses float constants to perform FP div + if expr.q == 1: + r = f"{expr.p}" + else: + r = f"{expr.p}.0/{expr.q}.0" + return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r + + def _print_ceiling(self, expr): + assert len(expr.args) == 1 + r = f"std::ceil({self._print(expr.args[0])})" + return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r + + def _print_Min(self, expr): + args = [self._print(a) for a in expr.args] + if len(args) == 2: + return f"std::min({args[0]}, {args[1]})" + else: + # Initializer list overload + il = "{" + ", ".join(args) + "}" + return f"std::min({il})" + + def _print_Max(self, expr): + args = [self._print(a) for a in expr.args] + if len(args) == 2: + return f"std::max({args[0]}, {args[1]})" + else: + # Initializer list overload + il = "{" + ", ".join(args) + "}" + return f"std::max({il})" + + def _print_Abs(self, expr): + assert len(expr.args) == 1 + return f"std::abs({self._print(expr.args[0])})" + + def _print_cos(self, expr): + assert len(expr.args) == 1 + return f"std::cos({self._print(expr.args[0])})" + + def _print_cosh(self, expr): + assert len(expr.args) == 1 + return f"std::cosh({self._print(expr.args[0])})" + + def _print_acos(self, expr): + assert len(expr.args) == 1 + return f"std::acos({self._print(expr.args[0])})" + + def _print_sin(self, expr): + assert len(expr.args) == 1 + return f"std::sin({self._print(expr.args[0])})" + + def _print_sinh(self, expr): + assert len(expr.args) == 1 + return f"std::sinh({self._print(expr.args[0])})" + + def _print_asin(self, expr): + assert len(expr.args) == 1 + return f"std::asin({self._print(expr.args[0])})" + + def _print_tan(self, expr): + assert len(expr.args) == 1 + return f"std::tan({self._print(expr.args[0])})" + + def _print_tanh(self, expr): + assert len(expr.args) == 1 + return f"std::tanh({self._print(expr.args[0])})" + + def _print_atan(self, expr): + assert len(expr.args) == 1 + return f"std::atan({self._print(expr.args[0])})" + + def _print_Round(self, expr): + assert len(expr.args) == 1 + return f"std::lrint({self._print(expr.args[0])})" + + def 
_print_RoundDecimal(self, expr): + assert len(expr.args) == 2 + number, ndigits = expr.args + if number.is_integer: + # ndigits < 0 should have been filtered by the sympy function + assert ndigits < 0 + raise ValueError( + f"For integer inputs, only non-negative ndigits are currently supported, but got {ndigits}." + ) + return f"static_cast(std::nearbyint(1e{ndigits} * {self.paren(self._print(number))}) * 1e{-ndigits})" + + +# A function to print, useful for printing sympy symbols. +cexpr = CppPrinter().doprint + + +def cexpr_index(index): + return f"static_cast<{INDEX_TYPE}>({cexpr(index)})" + + +class RecordOptimizationContext: + def __init__(self, func_name: str = ""): + self.func_name = func_name + self.current_node: Optional[torch.fx.Node] = None + self.opt_ctx: Optional[OptimizationContext] = None + + def __enter__(self): + assert V.interpreter + assert V.interpreter.current_node + + self.current_node = V.interpreter.current_node + assert self.current_node is not None + if OptimizationContext.key in self.current_node.meta: + self.opt_ctx = self.current_node.meta[OptimizationContext.key] + else: + self.opt_ctx = OptimizationContext() + assert self.opt_ctx is not None + self.opt_ctx.ops_name = self.func_name + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + assert self.current_node + assert self.opt_ctx + self.current_node.meta[OptimizationContext.key] = self.opt_ctx + + def get_opt_ctx(self): + return self.opt_ctx + + def get_fx_node(self): + assert self.current_node + return self.current_node + + +def get_opt_ctx(node: torch.fx.Node) -> OptimizationContext: + return node.meta.get(OptimizationContext.key, None) + + +def get_current_node_opt_ctx() -> OptimizationContext: + assert V.interpreter.current_node + return get_opt_ctx(V.interpreter.current_node) + + +class CppVecUnsupportedError(Exception): + pass + + +class CppCSEVariable(CSEVariable): + def __init__(self, name, bounds: ValueRanges[Any]): + super().__init__(name, bounds) + self.is_vec = False + self.dtype: Optional[torch.dtype] = None + self.dependent_itervars: Set[sympy.Symbol] = set() + + def update_on_args(self, name, args, kwargs): + if name == "load": + # args[1] is index + self._set_dependent_itervars(args[1]) + else: + # propagate relevant itervars and is_vec from args + self.dependent_itervars.update( + *[ + arg.dependent_itervars + for arg in args + if isinstance(arg, CppCSEVariable) + ] + ) + if name == "index_expr": + self._set_dependent_itervars(args[0]) + if any(arg.is_vec for arg in args if isinstance(arg, CppCSEVariable)): + self.is_vec = True + # NOTE [dtype of CppCSEVariable] + # Deciding dtype according to the current optimization context is not + # always accurate since the dtypes are initialized during dtype propagation + # at the beginning of the codegen. It is possible that some ops are invoked + # during the codegen of the current op and take different dtypes from the + # current op. + # TODO(jgong5): A more accurate way of deciding the dtype of the variables is to + # propagate the dtypes here inside `update_on_args`. + if ( + hasattr(V.interpreter, "current_node") + and get_current_node_opt_ctx() is not None + ): + self.dtype = get_current_node_opt_ctx().dtype + + def _set_dependent_itervars(self, index: sympy.Expr): + """ + Set the relevant itervars for this variable based on the `index` expression. + This includes the itervars directly used in the `index` as well as relevant itervars + of other cse variables used in the `index`. 
+ """ + for s in index.free_symbols: + if s in V.kernel.itervars: + self.dependent_itervars.add(s) # type: ignore[arg-type] + elif s.name in V.kernel.cse.varname_map: # type: ignore[attr-defined] + self.dependent_itervars.update( + V.kernel.cse.varname_map[s.name].dependent_itervars # type: ignore[attr-defined] + ) + + def depends_on(self, itervar: sympy.Symbol): + return itervar in self.dependent_itervars + + +class CppOverrides(OpOverrides): + """Map element-wise ops to C++""" + + @staticmethod + def add(a, b): + return f"decltype({a})({a} + {b})" + + @staticmethod + def sub(a, b): + return f"decltype({a})({a} - {b})" + + @staticmethod + def mul(a, b): + return f"decltype({a})({a} * {b})" + + @staticmethod + def to_dtype(x, dtype, src_dtype=None): + assert dtype in DTYPE_TO_CPP, f"{dtype} missing from {__name__}.DTYPE_TO_CPP" + return f"c10::convert<{DTYPE_TO_CPP[dtype]}>({x})" + + @staticmethod + def to_dtype_bitcast(x, dtype, src_dtype): + assert dtype in DTYPE_TO_CPP, f"{dtype} missing from {__name__}.DTYPE_TO_CPP" + if src_dtype in (torch.float16, torch.bfloat16): + # c10::bit_cast requires the source and target have the bitwidth. + # Because the input tensor's dtype could be promoted, e.g. from float16 to + # float, we have to cast the tensor to its original source dtype before + # invoking bit_cast. We also need to convert the bit-casted tensor + # back to float to make sure we keep using higher precision values + # for the rest of the computation. + cast_x = f"c10::convert<{DTYPE_TO_CPP[src_dtype]}>({x})" + cast_x = f"c10::bit_cast<{DTYPE_TO_CPP[dtype]}>({cast_x})" + return f"c10::convert<{DTYPE_TO_CPP[torch.float32]}>({cast_x})" + else: + return f"c10::bit_cast<{DTYPE_TO_CPP[dtype]}>({x})" + + @staticmethod + def abs(x): + return f"std::abs({x})" + + @staticmethod + def sin(x): + return f"std::sin({x})" + + @staticmethod + def cos(x): + return f"std::cos({x})" + + @staticmethod + def neg(x): + return f"decltype({x})(-{x})" + + @staticmethod + def exp(x): + # return f"Sleef_expf_u10({x})" + return f"std::exp({x})" + + @staticmethod + def exp2(x): + return f"std::exp2({x})" + + @staticmethod + def expm1(x): + return f"std::expm1({x})" + + @staticmethod + def erf(x): + return f"std::erf({x})" + + @staticmethod + def erfc(x): + return f"std::erfc({x})" + + @staticmethod + def erfinv(x): + return f"calc_erfinv({x})" + + @staticmethod + def sqrt(x): + return f"std::sqrt({x})" + + @staticmethod + def rsqrt(x): + return f"1 / std::sqrt({x})" + + @staticmethod + def log1p(x): + bug = config.cpp.inject_log1p_bug_TESTING_ONLY + if bug == "accuracy": + return f"{x} + decltype({x})(1)" + elif bug is None: + return f"std::log1p({x})" + else: + raise AssertionError( + f"unrecognized config cpp.inject_log1p_bug_TESTING_ONLY = {bug!r}" + ) + + @staticmethod + def tan(x): + return f"std::tan({x})" + + @staticmethod + def tanh(x): + return f"std::tanh({x})" + + @staticmethod + def signbit(x): + return f"std::signbit({x})" + + @staticmethod + def pow(a, b): + return f"std::pow({a}, {b})" + + @staticmethod + def log(x): + return f"std::log({x})" + + @staticmethod + def round(x): + return f"std::nearbyint({x})" + + @staticmethod + def floor(x): + return f"std::floor({x})" + + @staticmethod + def floordiv(a, b): + # a and b are integer type + quot = f"{a} / {b}" + rem = f"{a} % {b}" + return f"(({a} < 0) != ({b} < 0) ? ({rem} != 0 ? 
{quot} - 1 : {quot}) : {quot})" + + @staticmethod + def ceil(x): + return f"std::ceil({x})" + + @staticmethod + def trunc(x): + return f"std::trunc({x})" + + @staticmethod + def truncdiv(a, b): + # a and b are integer type + return f"{a} / {b}" + + @staticmethod + def fmod(a, b): + return f"std::fmod({a}, {b})" + + @staticmethod + def isinf(x): + return f"std::isinf({x})" + + @staticmethod + def isnan(x): + return f"std::isnan({x})" + + @staticmethod + def lgamma(x): + return f"std::lgamma({x})" + + @staticmethod + def acos(x): + return f"std::acos({x})" + + @staticmethod + def acosh(x): + return f"std::acosh({x})" + + @staticmethod + def cosh(x): + return f"std::cosh({x})" + + @staticmethod + def sinh(x): + return f"std::sinh({x})" + + @staticmethod + def asin(x): + return f"std::asin({x})" + + @staticmethod + def asinh(x): + return f"std::asinh({x})" + + @staticmethod + def atan2(x, y): + return f"std::atan2({x}, {y})" + + @staticmethod + def atan(x): + return f"std::atan({x})" + + @staticmethod + def atanh(x): + return f"std::atanh({x})" + + @staticmethod + def copysign(x, y): + return f"std::copysign({x}, {y})" + + @staticmethod + def frexp(x): + cache_keys = f"frexp({x})[0]", f"frexp({x})[1]" + if all(cache_key in V.kernel.cse.cache for cache_key in cache_keys): + return tuple(V.kernel.cse.cache[cache_key] for cache_key in cache_keys) + + code = BracesBuffer() + exponent = V.kernel.cse.newvar() + mantissa = V.kernel.cse.newvar() + code.writeline(f"int32_t {exponent};") + code.writeline(f"auto {mantissa} = std::frexp({x}, &{exponent});") + V.kernel.compute.splice(code) + cse_vars = (mantissa, exponent) + for cache_key, cse_var in zip(cache_keys, cse_vars): + V.kernel.cse.cache[cache_key] = cse_var + return mantissa, exponent + + @staticmethod + def hypot(x, y): + return f"std::hypot({x}, {y})" + + @staticmethod + def log10(x): + return f"std::log10({x})" + + @staticmethod + def nextafter(x, y): + return f"std::nextafter({x}, {y})" + + @staticmethod + def relu(x): + bug = config.cpp.inject_relu_bug_TESTING_ONLY + if bug == "compile_error": + return "compile error!" + elif bug == "runtime_error": + return f"{x}; throw 1" + elif bug == "accuracy": + return f"{x} + decltype({x})(1)" + elif bug is None: + return f"std::max({x}, decltype({x})(0))" + else: + raise AssertionError( + f"unrecognized config cpp.inject_relu_bug_TESTING_ONLY = {bug!r}" + ) + + @staticmethod + def minimum(a, b): + return f"min_propagate_nan({a}, {b})" + + @staticmethod + def maximum(a, b): + return f"max_propagate_nan({a}, {b})" + + @staticmethod + def where(a, b, c): + return f"{a} ? 
{b} : {c}" + + @staticmethod + def mod(a, b): + return f"mod({a}, {b})" + + @staticmethod + def constant(val, dtype): + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + assert opt_ctx and opt_ctx.dtype is not None + dtype = opt_ctx.dtype + if dtype in DTYPE_LOWP_FP: + # Since load promotes all half-precision inputs to float, constants + # must be promoted as well + dtype = torch.float32 + return value_to_cpp(val, DTYPE_TO_CPP[dtype]) + + @staticmethod + def index_expr(expr, dtype): + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + assert opt_ctx and opt_ctx.dtype is not None + dtype = opt_ctx.dtype + return ops.to_dtype(cexpr(V.kernel.rename_indexing(expr)), dtype) + + @staticmethod + def masked(mask, body, other): + code = BracesBuffer() + + # Write masked operation into a lambda + body_var = V.kernel.cse.newvar() + code.writeline(f"auto {body_var} = [&]") + with V.kernel.swap_buffers(code), code.indent(): + result = body() + code.writeline(f"return {result};") + code.writeline(";") + V.kernel.compute.splice(code) + + # Use the lambda's return type as the type of other + other_code = value_to_cpp(other, f"decltype({body_var}())") + return f"{mask} ? {body_var}() : {other_code}" + + @staticmethod + def logical_and(a, b): + return f"{a} && {b}" + + @staticmethod + def logical_not(a): + return f"!{a}" + + @staticmethod + def logical_or(a, b): + return f"{a} || {b}" + + @staticmethod + def logical_xor(a, b): + return f"{a} != {b}" + + @staticmethod + def bitwise_and(a, b): + return f"decltype({a})({a} & {b})" + + @staticmethod + def bitwise_not(a): + return f"decltype({a})(~{a})" + + @staticmethod + def bitwise_or(a, b): + return f"decltype({a})({a} | {b})" + + @staticmethod + def bitwise_xor(a, b): + return f"decltype({a})({a} ^ {b})" + + @staticmethod + def bitwise_left_shift(a, b): + return f"decltype({a})({a} << {b})" + + @staticmethod + def bitwise_right_shift(a, b): + return f"decltype({a})({a} >> {b})" + + @staticmethod + def rand(seed: sympy.Expr, offset: sympy.Expr): + return f"normalized_rand_cpu({seed}, {offset})" + + @staticmethod + def randn(seed: sympy.Expr, offset: sympy.Expr): + return f"randn_cpu({seed}, {offset})" + + @staticmethod + def randint64(seed: sympy.Expr, offset: sympy.Expr, low, high): + return f"randint64_cpu({seed}, {offset}, {low}, {high})" + + @staticmethod + def sigmoid(x): + return f"decltype({x})(1) / (decltype({x})(1) + std::exp(-{x}))" + + @staticmethod + def sign(x): + code = BracesBuffer() + scalar_zero = f"decltype({x})(0)" + scalar_one = f"decltype({x})(1)" + code.writeline("[&]()") + with code.indent(): + code.writeline(f"auto left = {x} > 0 ? {scalar_one} : {scalar_zero};") + code.writeline(f"auto right = {x} < 0 ? {scalar_one} : {scalar_zero};") + code.writeline("return left - right;") + code.writeline("()") + return code + + +CppOverrides._initialize_pointwise_overrides("cpp") + + +class CppVecOverrides(CppOverrides): + """Map element-wise ops to aten vectorization C++""" + + def __new__(cls, *args, **kargs): + self = super().__new__(cls) + + def wrap(func): + # `CppVecKernel` generates both scalar ops and vector ops according to + # whether the inputs are scalars or vectors while all ops in `CppVecOverrides` + # (except for some ops explained below) assume the inputs are vectors. We wrap the ops in + # `CppVecOverrides` to broadcast scalar inputs to vectors if needed or fallback to + # `CppOverrides` when all inputs are scalars. 
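The wrapping described above can be sketched in isolation: at construction time every staticmethod is rebound so plain scalars are promoted to vectors before the op runs. ToyVec and ToyVecOps below are invented stand-ins, not Inductor classes, and the sketch assumes at least one vector argument (the real wrapper instead falls back to CppOverrides when everything is scalar).

class ToyVec:
    def __init__(self, values):
        self.values = list(values)


class ToyVecOps:
    def __new__(cls):
        self = super().__new__(cls)

        def wrap(func):
            def wrapper(*args):
                # Broadcast scalar args to the length of the first vector arg.
                n = next(len(a.values) for a in args if isinstance(a, ToyVec))
                promoted = [
                    a if isinstance(a, ToyVec) else ToyVec([a] * n) for a in args
                ]
                return func(*promoted)

            return wrapper

        for name, member in vars(cls).items():
            if isinstance(member, staticmethod) and not name.startswith("__"):
                setattr(self, name, wrap(member.__func__))
        return self

    @staticmethod
    def add(a, b):
        return ToyVec(x + y for x, y in zip(a.values, b.values))


vec_ops = ToyVecOps()
print(vec_ops.add(ToyVec([1, 2, 3]), 10).values)  # -> [11, 12, 13]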
+ # + # Notes on ops handled separately in their own functions: + # `ops.masked`: + # needs recursive handling of masked body. + # `ops.index_expr`: + # needs to further analyze the dependency of the index expression on + # the tiling itervar. + def wrapper(*args, **kwargs): + scalars = [ + arg + for arg in args + if isinstance(arg, CppCSEVariable) and not arg.is_vec + ] + vectors = [ + arg + for arg in args + if isinstance(arg, CppCSEVariable) and arg.is_vec + ] + new_args = list(args) + if scalars and vectors: + # broadcast scalar args to vector if needed + new_args = [] + vec_dtype = vectors[0].dtype + for arg in args: + if isinstance(arg, CppCSEVariable) and not arg.is_vec: + assert isinstance(V.kernel, CppVecKernel) + # align scalar data type to the vector for binary ops + if len(args) == 2 and arg.dtype != vec_dtype: + arg = ops.to_dtype(arg, vec_dtype) + arg = arg.value if isinstance(arg, OpsValue) else arg + # See NOTE [dtype of CppCSEVariable]: we have to fix arg.dtype since + # the dtype from optimization context could be wrong. + assert isinstance(arg, CppCSEVariable) + arg.dtype = vec_dtype + new_arg = V.kernel.broadcast(arg) + new_args.append(new_arg) + else: + new_args.append(arg) + if vectors: + return func(*new_args, **kwargs) + else: + # fallback to scalar ops + scalar_ops = super(CppVecOverrides, self) + scalar_func = getattr( + scalar_ops, func.__name__, scalar_ops.__getattr__(func.__name__) # type: ignore[attr-defined] + ) + assert scalar_func is not None + return scalar_func(*args, **kwargs) + + return wrapper + + for name, method in vars(CppVecOverrides).items(): + if getattr(method, "__class__", None) == staticmethod and name not in [ + "masked", + "index_expr", + ]: + setattr(self, name, wrap(method.__func__)) + return self + + @staticmethod + def add(a, b): + return f"{a} + {b}" + + @staticmethod + def sub(a, b): + return f"{a} - {b}" + + @staticmethod + def mul(a, b): + return f"{a} * {b}" + + @staticmethod + def truediv(a, b): + return f"{a} / {b}" + + @staticmethod + def abs(x): + return f"{x}.abs()" + + @staticmethod + def sin(x): + return f"{x}.sin()" + + @staticmethod + def cos(x): + return f"{x}.cos()" + + @staticmethod + def exp(x): + return f"{x}.exp()" + + @staticmethod + def exp2(x): + return f"{x}.exp2()" + + @staticmethod + def expm1(x): + # decompose for a better performance + vec_one = f"decltype({x})(1)" + return f"{x}.exp() - {vec_one}" + + @staticmethod + def erf(x): + return f"{x}.erf()" + + @staticmethod + def erfc(x): + return f"{x}.erfc()" + + @staticmethod + def erfinv(x): + return f"{x}.erfinv()" + + @staticmethod + def sqrt(x): + return f"{x}.sqrt()" + + @staticmethod + def eq(x, y): + return f"to_float_mask({x} == {y})" + + @staticmethod + def ne(x, y): + return f"to_float_mask({x} != {y})" + + @staticmethod + def lt(x, y): + return f"to_float_mask({x} < {y})" + + @staticmethod + def gt(x, y): + return f"to_float_mask({x} > {y})" + + @staticmethod + def le(x, y): + return f"to_float_mask({x} <= {y})" + + @staticmethod + def ge(x, y): + return f"to_float_mask({x} >= {y})" + + @staticmethod + def and_(x, y): + return f"{x} & {y}" + + @staticmethod + def rsqrt(x): + return f"{x}.rsqrt()" + + @staticmethod + def pow(a, b): + return f"{a}.pow({b})" + + @staticmethod + def log(x): + return f"{x}.log()" + + @staticmethod + def round(x): + return f"{x}.round()" + + @staticmethod + def floor(x): + return f"{x}.floor()" + + @staticmethod + def ceil(x): + return f"{x}.ceil()" + + @staticmethod + def trunc(x): + return f"{x}.trunc()" + + @staticmethod 
+    def fmod(a, b):
+        return f"{a}.fmod({b})"
+
+    @staticmethod
+    def lgamma(x):
+        return f"{x}.lgamma()"
+
+    @staticmethod
+    def logical_and(a, b):
+        return f"({a} != 0) & ({b} != 0)"
+
+    @staticmethod
+    def logical_not(a):
+        return f"{a} == 0"
+
+    @staticmethod
+    def logical_or(a, b):
+        return f"({a} != 0) | ({b} != 0)"
+
+    @staticmethod
+    def logical_xor(a, b):
+        return f"({a} != 0) ^ ({b} != 0)"
+
+    @staticmethod
+    def tan(a):
+        return f"{a}.tan()"
+
+    @staticmethod
+    def tanh(a):
+        vec_one = f"decltype({a})(1)"
+        vec_two = f"decltype({a})(2)"
+        vec_minus_two = f"decltype({a})(-2)"
+        return f"{vec_two} / ({vec_one} + ({vec_minus_two} * {a}).exp()) - {vec_one}"
+
+    @staticmethod
+    def reciprocal(a):
+        return f"{a}.reciprocal()"
+
+    @staticmethod
+    def atan(x):
+        return f"{x}.atan()"
+
+    @staticmethod
+    def acos(x):
+        return f"{x}.acos()"
+
+    @staticmethod
+    def asin(x):
+        return f"{x}.asin()"
+
+    @staticmethod
+    def cosh(x):
+        return f"{x}.cosh()"
+
+    @staticmethod
+    def sinh(x):
+        return f"{x}.sinh()"
+
+    @staticmethod
+    def log10(x):
+        return f"{x}.log10()"
+
+    @staticmethod
+    def nextafter(x):
+        return f"{x}.nextafter()"
+
+    @staticmethod
+    def copysign(a, b):
+        return f"{a}.copysign({b})"
+
+    @staticmethod
+    def atan2(a, b):
+        return f"{a}.atan2({b})"
+
+    @staticmethod
+    def hypot(a, b):
+        return f"{a}.hypot({b})"
+
+    @staticmethod
+    def atanh(x):
+        # For real x, atanh(x) = 1/2 * log((1+x)/(1-x))
+        vec_one = f"decltype({x})(1)"
+        vec_one_half = f"decltype({x})(0.5)"
+        return f"{vec_one_half} * (({vec_one} + {x})/({vec_one} - {x})).log()"
+
+    @staticmethod
+    def asinh(x):
+        # For real x, asinh(x) = log(x + sqrt(1 + x**2))
+        vec_one = f"decltype({x})(1)"
+        return f"({x} + ({vec_one} + {x}*{x}).sqrt()).log()"
+
+    @staticmethod
+    def acosh(x):
+        return f"{x}.acosh()"
+
+    @staticmethod
+    def relu(x):
+        bug = config.cpp.inject_relu_bug_TESTING_ONLY
+        if bug == "compile_error":
+            return "compile error!"
+ elif bug == "runtime_error": + return f"{x}; throw 1" + elif bug == "accuracy": + return f"{x} + decltype({x})(1)" + elif bug is None: + return f"at::vec::clamp_min({x}, decltype({x})(0))" + else: + raise AssertionError( + f"unrecognized config cpp.inject_relu_bug_TESTING_ONLY = {bug!r}" + ) + + # TODO: this seems to be dead + @staticmethod + def sigmoid(x): + return f"decltype({x})(1)/(decltype({x})(1) + {x}.neg().exp())" + + @staticmethod + def neg(x): + return f"{x}.neg()" + + @staticmethod + def floordiv(a, b): + # a and b are integer type + _t = f"decltype({a})" + quot = f"{a} / {b}" + has_rem = f"({a} % {b} != {_t}(0))" + is_neg = f"(({a} < {_t}(0)) != ({b} < {_t}(0)))" + return f"{_t}::blendv({quot}, {quot} - {_t}(1), {has_rem} & {is_neg})" + + @staticmethod + def truncdiv(a, b): + # a and b are integer type + return f"{a} / {b}" + + @staticmethod + def minimum(a, b): + return f"at::vec::minimum({a}, {b})" + + @staticmethod + def maximum(a, b): + return f"at::vec::maximum({a}, {b})" + + @staticmethod + def square(a): + return f"{a} * {a}" + + @staticmethod + def where(a, b, c): + assert isinstance(b, CppCSEVariable) + if b.dtype != torch.float: + raise CppVecUnsupportedError( + "where with non-float tensor is not supported in vectorized codegen" + ) + return f"decltype({b})::blendv({c}, {b}, {a})" + + @staticmethod + def sign(x): + code = BracesBuffer() + vec_zero = f"decltype({x})(0)" + vec_one = f"decltype({x})(1)" + blendv_l = f"decltype({x})::blendv({vec_zero}, {vec_one}, {vec_zero} < {x})" + blendv_r = f"decltype({x})::blendv({vec_zero}, {vec_one}, {x} < {vec_zero})" + code.writeline("[&]()") + with code.indent(): + code.writeline(f"auto left = {blendv_l};") + code.writeline(f"auto right = {blendv_r};") + code.writeline("return left - right;") + code.writeline("()") + return code + + @staticmethod + def to_dtype(x, dtype, src_dtype=None): + assert dtype in [ + torch.bool, + torch.float, + torch.bfloat16, + torch.float16, + torch.uint8, + torch.int8, + torch.int32, + torch.int64, + ], f"{__name__} does not support {dtype}" + node: torch.fx.Node = V.interpreter.current_node + assert node and isinstance(node, torch.fx.Node) + opt_ctx_x = get_opt_ctx(node.args[1]) + assert opt_ctx_x + if opt_ctx_x.dtype in (torch.float, torch.float32) and dtype == torch.bool: + return f"vec_convert_to_mask({x})" + if opt_ctx_x.dtype == torch.bool and dtype in (torch.float, torch.float32): + return f"mask_convert_to_float({x})" + if opt_ctx_x.dtype == torch.bool and dtype in DTYPE_LOWP_FP: + return f"mask_convert_to_lowp<{DTYPE_TO_CPP[dtype]}>({x})" + if opt_ctx_x.dtype == torch.bool and dtype == torch.int64: + return f"mask_convert_to_int64({x})" + if opt_ctx_x.dtype in (torch.float, torch.float32) and dtype in DTYPE_LOWP_FP: + return f"cvt_fp32_to_lowp_fp<{DTYPE_TO_CPP[dtype]}>({x})" + if opt_ctx_x.dtype in DTYPE_LOWP_FP and dtype in (torch.float, torch.float32): + return f"cvt_lowp_fp_to_fp32<{DTYPE_TO_CPP[opt_ctx_x.dtype]}>({x})" + if opt_ctx_x.dtype in (torch.uint8, torch.int8) and dtype in ( + torch.float, + torch.float32, + ): + # Note: this function only convert inputs number of elements equal to at::vec::Vectorized.size() + return f"at::vec::convert_int8_to_float({x})" + if opt_ctx_x.dtype in (torch.float, torch.float32) and dtype in ( + torch.uint8, + torch.int8, + ): + # if we already handle the saturation previously. + # * Pattern match of quantization op in the loop body. + # * Skip the explicit saturation and clamp inside at::vec::convert_float_to_int8. 
+ return f"at::vec::convert_float_to_int8<{DTYPE_TO_CPP[dtype]}>({x})" + if opt_ctx_x.dtype == torch.int32 and dtype == torch.float: + return f"at::vec::convert_to_fp_of_same_size({x})" + if opt_ctx_x.dtype == torch.float and dtype == torch.int32: + return f"at::vec::convert_to_int_of_same_size({x})" + if opt_ctx_x.dtype == torch.int64 and dtype == torch.float: + return f"cvt_int64_to_fp32({x})" + if opt_ctx_x.dtype == torch.float and dtype == torch.int64: + return f"cvt_fp32_to_int64({x})" + if opt_ctx_x.dtype == torch.int32 and dtype == torch.int64: + return f"cvt_int32_to_int64({x})" + if opt_ctx_x.dtype == torch.int64 and dtype == torch.int32: + return f"cvt_int64_to_int32({x})" + # TODO(jgong5): support conversion for other types + # currently we only allow load/store torch.uint8 and handle conversion there + return f"({x})" + + @staticmethod + def log1p(x): + bug = config.cpp.inject_log1p_bug_TESTING_ONLY + if bug == "accuracy": + return f"{x} + decltype({x})(1)" + elif bug is None: + return f"{x}.log1p()" + else: + raise AssertionError( + f"unrecognized config cpp.inject_log1p_bug_TESTING_ONLY = {bug!r}" + ) + + @staticmethod + def masked(mask, body, other): + assert isinstance(V.kernel, CppVecKernel) + code = BracesBuffer() + var = V.kernel.cse.newvar() + with V.kernel.masked(mask) as new_mask: + code.writeline(f"auto {var} = [&]") + with V.kernel.swap_buffers(code), code.indent(): + result = body() + code.writeline(f"return {result};") + code.writeline(";") + V.kernel.compute.splice(code) + + body_code = f"{var}()" + body_code_vec = ( + body_code + if result.is_vec + else f"{V.kernel._get_vec_type(torch.float)}({body_code})" + ) + other_code = value_to_cpp(other, "float") + other_code_vec = f"{V.kernel._get_vec_type(torch.float)}({other_code})" + assert isinstance(new_mask, CppCSEVariable), new_mask + if new_mask.is_vec or result.is_vec: + if result.dtype != torch.float: + raise CppVecUnsupportedError( + "masked with non-float tensor is not supported in vectorized codegen" + ) + type = f"decltype({body_code_vec})" + float_mask = f"to_float_mask({new_mask})" + code = BracesBuffer() + code.writeline("[&]") + with V.kernel.swap_buffers(code), code.indent(): + code.writeline(f"if (all_zero({float_mask}))") + with code.indent(): + code.writeline(f"return {other_code_vec};") + code.writeline("else") + with code.indent(): + code.writeline( + f"return {type}::blendv({other_code_vec}, {body_code_vec}, {float_mask});" + ) + code.writeline("()") + csevar = V.kernel.cse.generate( + V.kernel.compute, + code, + ) + else: + csevar = V.kernel.cse.generate( + V.kernel.compute, f"{mask} ? {body_code} : {other_code}" + ) + # `result` is explicitly added to the args for correct propagation + # of relevant itervars and vectorization status. 
+ csevar.update_on_args("masked", (mask, body, other, result), {}) + return csevar + + @staticmethod + def index_expr(expr, dtype): + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + assert opt_ctx and opt_ctx.dtype is not None + dtype = opt_ctx.dtype + assert dtype == torch.int32 + assert isinstance(V.kernel, CppVecKernel) + index = V.kernel.rename_indexing(expr) + tiling_var = V.kernel.itervars[V.kernel.tiling_idx] + stride = stride_at_vec_range(index, tiling_var, V.kernel.tiling_factor) + if stride.is_number and not V.kernel.index_indirect_depends_on( + index, tiling_var + ): + if stride == 0: + return CppOverrides.index_expr(expr, dtype) + value = ops.to_dtype(cexpr(index), dtype) + if isinstance(value, OpsValue): + value = value.value + csevar = V.kernel.arange(value, stride) + else: + csevar = V.kernel.load_non_contiguous(None, index, dtype, V.kernel.compute) + csevar.update_on_args("index_expr", (expr, dtype), {}) + return csevar + + +CppVecOverrides._initialize_pointwise_overrides("cppvec") + + +class CppTile2DOverrides(CppVecOverrides): + @staticmethod + def index_expr(expr, dtype): + assert isinstance(V.kernel, CppTile2DKernel) + expr = V.kernel.transform_indexing(expr) + return CppVecOverrides.index_expr(expr, dtype) + + +class CppKernel(Kernel): + overrides = CppOverrides # type: ignore[assignment] + sexpr = cexpr + newvar_prefix = "auto " + suffix = ";" + + def __init__(self, args, num_threads): + super().__init__(args) + self.call_ranges: Optional[Tuple[sympy.Expr, ...]] = None + self.ranges: List[sympy.Expr] = [] + self.itervars: List[sympy.Symbol] = [] + self.reduction_depth = None + self.reduction_prefix = IndentedBuffer() + self.reduction_suffix = IndentedBuffer() + self.reduction_var_map = {} + self.reduction_cse = CSE(self.newvar_prefix, self.suffix, name_prefix="tmp_acc") + self.preloads = IndentedBuffer() + self.poststores = IndentedBuffer() + self.num_threads = num_threads # num_threads the kernel specialized for + self.reduction_omp_dec: Dict[Tuple[str, str], str] = {} + + @contextlib.contextmanager + def masked(self, mask): + """Context manager to add an additional mask to loads and stores.""" + prior = self._load_mask + if prior: + mask = ops.and_(mask, prior) + if isinstance(mask, OpsValue): + mask = mask.value + assert isinstance(mask, CppCSEVariable) + # see NOTE [dtype of CppCSEVariable] + # mask's dtype should be bool + mask.dtype = torch.bool + + self._load_mask = mask + try: + yield mask + finally: + self._load_mask = prior + + def cache_fp32_cse_var_before_lowp_store(self, var_to_store): + """ + https://github.com/pytorch/pytorch/issues/115260 + For FusedSchedulerNode[node1, node2], the node2 loads what node1 stores and the buffer is + in low-precision floating point data type. When the output of node1 also serves as the output of the + kernel, the result of nodes would be different from the case when output of node1 is not the output + of the kernel (where we don't need to insert `to_dtype` for legalization). To address the problem, on + storing the lowp node1 output, we also add the inverse dtype conversion to high precision data type + to the cse cache. + + Example (pseudo code): + node1_output = ... + node1_output_lowp = to_dtype(node1_output, dtype=torch.bfloat16) + store(buf, node1_output_lowp) + node2_input_lowp = load(buf) + node2_input = to_dtype(node2_input_lowp, dtype=torch.float) + + Without cse cache trick: + node1_output = ... 
+ node1_output_lowp = to_dtype(node1_output, dtype=torch.bfloat16) + store(buf, node1_output_lowp) + node2_input_lowp = node_output_lowp # hit store cache + node2_input = to_dtype(node2_input_lowp, dtype=torch.float) + + With cse cache trick: + node1_output = ... + node1_output_lowp = to_dtype(node1_output, dtype=torch.bfloat16) + # also add `to_dtype(node1_input_lowp, dtype=torch.float)` -> `node1_output` to cse cache + store(buf, node1_output_lowp) + node2_input_lowp = node_output_lowp # hit store cache + node2_input = node1_output # hit cse cache + """ + + if var_to_store.dtype not in DTYPE_LOWP_FP: + # only need to cache fp32 cse var while var_to_store is lowp data + return + + def find_fp32_var(var, cache): + fp32_cse_var = None + fp32_cse_var_name = None + lowp_dtype = None + for expr, cse_var in cache.items(): + if cse_var == var: + lowp_dtype = is_to_lowp_dtype(expr) + if lowp_dtype: + m = re.search(r"tmp\d+", expr) + assert m + fp32_cse_var_name = m.group() + if fp32_cse_var_name: + for cse_var in cache.values(): + if cse_var.name == fp32_cse_var_name: + fp32_cse_var = cse_var + break + assert fp32_cse_var is not None + return fp32_cse_var, lowp_dtype + + fp32_var, lowp_dtype = find_fp32_var(var_to_store, self.cse.cache) + if fp32_var: + self.cse.cache[ + get_lowp_to_fp32_expr(var_to_store, lowp_dtype, self) + ] = fp32_var + + def scale_index_with_offset( + self, index: sympy.Expr, scale=1, itervar_idx=-1, offset=0 + ): + var = self.itervars[itervar_idx] + replacement = {var: var * scale + offset} + new_index = sympy_subs(index, replacement) + return new_index + + def index_to_str(self, index: sympy.Expr) -> str: + """ + Convert an index expr to a string that can be used in cpp code. + e.g. a sympy expression "s2" may actually appear as "ks1" in the cpp kernel. + """ + return cexpr(self.rename_indexing(index)) + + def index_indirect_depends_on(self, index: sympy.Expr, itervar: sympy.Symbol): + """ + Check if an index has free symbol CppCSEVariable that depends on `itervar`. + """ + return any( + self.cse.varname_map[s.name].depends_on(itervar) # type: ignore[attr-defined] + for s in index.free_symbols + if s.name in self.cse.varname_map # type: ignore[attr-defined] + and isinstance(self.cse.varname_map[s.name], CppCSEVariable) # type: ignore[attr-defined] + ) + + def index_depends_on(self, index: sympy.Expr, itervar: sympy.Symbol): + return itervar in index.free_symbols or self.index_indirect_depends_on( + index, itervar + ) + + def load(self, name: str, index: sympy.Expr): + var = self.args.input(name) + index = self.rename_indexing(index) + line = f"{var}[{cexpr_index(index)}]" + if V.graph.get_dtype(name) in [torch.float16]: + line = f"static_cast({line})" + csevar = self.cse.generate(self.loads, line) + csevar.update_on_args("load", (name, index), {}) + return csevar + + def store(self, name, index, value, mode=None): + assert "buf" in name + var = self.args.output(name) + self.cache_fp32_cse_var_before_lowp_store(value) + index = self.rename_indexing(index) + if mode is None: + line = f"{var}[{cexpr_index(index)}] = {value};" + elif mode == "atomic_add": + if not config.cpp.dynamic_threads and self.num_threads == 1: + line = f"{var}[{cexpr_index(index)}] += {value};" + else: + dtype = V.graph.get_dtype(name) + # mirroring static_cast(...) 
in load: + value = f"static_cast<{DTYPE_TO_CPP[dtype]}>({value})" + line = f"atomic_add(&{var}[{cexpr_index(index)}], {value});" + else: + raise NotImplementedError(f"store mode={mode}") + self.stores.writeline(DeferredLine(name, line)) + + def reduction(self, dtype, src_dtype, reduction_type, value): + argmax_or_argmin = reduction_type in {"argmax", "argmin"} + + reduction_key = src_dtype, reduction_type, value + if reduction_key in self.reduction_cse.reduction_cache: + return self.reduction_cse.reduction_cache[reduction_key] + + acc = self.reduction_cse.generate( + self.loads, f"reduction {reduction_key}", write=False + ) + self.reduction_var_map[acc] = reduction_type + if argmax_or_argmin: + self.reduction_prefix.writelines( + argmax_argmin_prefix(reduction_type, src_dtype, acc) + ) + compare_op = ( + "greater_or_nan" if reduction_type == "argmax" else "less_or_nan" + ) + assert self.reduction_depth is not None + index = self.itervars[self.reduction_depth] + for i in range(self.reduction_depth + 1, len(self.itervars)): + index = index * self.ranges[i] + self.itervars[i] + self.stores.writelines( + [ + f"if(!({compare_op}({acc}.value, {value}, {acc}.index, {cexpr_index(index)}))) {{", + f" {acc}.index = {cexpr_index(index)}; {acc}.value = {value};", + "}", + ], + ) + else: + acc_type = reduction_acc_type(reduction_type, dtype) + + if (reduction_type, acc_type) not in self.reduction_omp_dec: + if RTYPE_TO_CPP[reduction_type] not in NATIVE_OMP_RTYPES: + # Scalar reduction for other reductions are declared by default + self.reduction_prefix.splice( + f"""\ + #pragma omp declare reduction(\ + {RTYPE_TO_CPP[reduction_type]}:{acc_type}:\ + omp_out = {reduction_combine(reduction_type, "omp_out", "omp_in")}) \ + initializer(omp_priv={{{reduction_init(reduction_type, dtype)}}}) + """ + ) + self.reduction_omp_dec[reduction_type, acc_type] = RTYPE_TO_CPP[ + reduction_type + ] + + self.reduction_prefix.writeline( + f"{acc_type} {acc} = {reduction_init(reduction_type, dtype)};" + ) + self.stores.writeline( + f"{acc} = {reduction_combine(reduction_type, acc, value)};" + ) + + result = reduction_project(reduction_type, acc) + self.reduction_cse.reduction_cache[reduction_key] = result + return result + + def store_reduction(self, name, index, value): + index = self.rename_indexing(index) + var = self.args.output(name) + self.reduction_suffix.writeline( + DeferredLine(name, f"{var}[{cexpr_index(index)}] = {value};") + ) + + def set_ranges(self, lengths, reduction_lengths): + if self.call_ranges: + assert self.call_ranges == tuple(lengths) + tuple( + reduction_lengths + ), f"{self.call_ranges} == {tuple(lengths)} + {tuple(reduction_lengths)}" + assert self.reduction_depth == len(lengths) + else: + self.call_ranges = tuple(lengths) + tuple(reduction_lengths) + self.ranges = [self.rename_indexing(x) for x in self.call_ranges] + self.itervars = [ + sympy_index_symbol(f"x{n}") for n in range(len(self.ranges)) + ] + self.reduction_depth = len(lengths) + return ( + self.itervars[: self.reduction_depth], + self.itervars[self.reduction_depth :], + ) + + def size_hint(self): + return V.graph.sizevars.size_hint( + sympy_product(self.call_ranges), fallback=8192 + ) + + def codegen_loops_impl(self, loop_nest, code, worksharing): + threads = parallel_num_threads() + assert self.call_ranges is not None + par_depth = self.decide_parallel_depth( + self.call_ranges[: loop_nest.max_parallel_depth()], threads + ) + with contextlib.ExitStack() as stack: + if par_depth: + if loop_nest.is_reduction_only(): + # need to close 
the worksharing scope to define reduction vars outside it + worksharing.close() + else: + worksharing.parallel(threads) + loop_nest.mark_parallel(par_depth) + elif threads > 1: + if worksharing.single(): + stack.enter_context(code.indent()) + + def gen_kernel(kernel): + with contextlib.ExitStack() as stack: + assert kernel + if hasattr(kernel, "codegen_inner_loops"): + code.splice(kernel.preloads) + kernel.codegen_inner_loops(code) + stack.enter_context(code.indent()) + code.splice(kernel.loads) + code.splice(kernel.compute) + code.splice(kernel.stores) + if hasattr(kernel, "codegen_inner_loops"): + code.splice(kernel.poststores) + + def get_reduction_code_buffer(loops, is_suffix=True): + for loop in loops: + for kernel in loop.get_kernels(): + if is_suffix: + return kernel.reduction_suffix + else: + return kernel.reduction_prefix + return None + + def gen_loops(loops: List[LoopLevel], in_reduction=False): + with contextlib.ExitStack() as stack_outer: + if loops: + loop = loops[0] + if loop.is_reduction() and not in_reduction: + reduction_prefix = get_reduction_code_buffer( + loops, is_suffix=False + ) + if reduction_prefix: + stack_outer.enter_context(code.indent()) + code.splice(reduction_prefix) + if loop_nest.is_reduction_only() and loop.parallel: + worksharing.parallel(threads) + + for loop in loops: + gen_loop(loop, in_reduction) + + if loops: + loop = loops[0] + if loop_nest.is_reduction_only() and loop.parallel: + worksharing.close() + if loop.is_reduction() and not in_reduction: + code.splice( + get_reduction_code_buffer(loops, is_suffix=True) + ) + + def gen_loop(loop: LoopLevel, in_reduction=False): + with contextlib.ExitStack() as stack: + loop_lines = loop.lines() + if loop_lines is None: + return + code.writelines(loop_lines) + stack.enter_context(code.indent()) + # generate inner loops or loop body + if loop.inner: + gen_loops(loop.inner, loop.is_reduction()) + else: + kernels = loop.get_kernels() + assert len(kernels) == 1 + gen_kernel(kernels[0]) + + stack.enter_context(code.indent()) + if loop_nest.root: + gen_loops(loop_nest.root) + else: + gen_kernel(loop_nest.kernel) + + def codegen_loops(self, code, worksharing): + loop_nest = LoopNestWithSplit.build(self) + self.codegen_loops_impl(loop_nest, code, worksharing) + + @property + def assert_function(self) -> str: + if V.graph.aot_mode: + return "AOTI_TORCH_CHECK" + else: + return "TORCH_CHECK" + + def decide_parallel_depth(self, ranges, threads): + seq = self.size_hint() + par = 1 + depth = 0 + for expr in ranges: + hint = V.graph.sizevars.size_hint(expr, fallback=8192) + if par >= 2 * threads or par == threads: + break + if seq // threads < config.cpp.min_chunk_size: + # not enough work + break + depth += 1 + par *= hint + seq /= hint + # if we assume thread number is dynamic, make sure we + # have at least one parallel scope and let OMP runtime + # to manage the serial vs. parallel. 
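+        # Illustrative trace of the depth-selection loop above (a sketch, assuming
+        # static shapes and the stock config.cpp.min_chunk_size of 4096): with
+        # threads=8 and call_ranges=(1024, 256), size_hint() is 262144. The first
+        # iteration passes both checks (par=1 is below the thread count and
+        # 262144 // 8 >= 4096), so depth becomes 1 and par becomes 1024. The second
+        # iteration breaks immediately because par >= 2 * threads, so only the
+        # outermost loop is marked parallel.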
+ if config.cpp.dynamic_threads and depth == 0 and len(ranges) > 0: + depth = 1 + return depth + + @contextlib.contextmanager + def write_to_suffix(self): + prior = (self.loads, self.compute, self.stores, self.cse) + self.loads = IndentedBuffer() + self.compute = IndentedBuffer() + self.stores = IndentedBuffer() + self.cse = self.cse.clone() + yield + self.reduction_suffix.splice(self.loads) + self.reduction_suffix.splice(self.compute) + self.reduction_suffix.splice(self.stores) + (self.loads, self.compute, self.stores, self.cse) = prior + + def create_cse_var(self, *args, **kwargs): + return CppCSEVariable(*args, **kwargs) + + +class CppVecKernel(CppKernel): + overrides = CppVecOverrides # type: ignore[assignment] + + def __init__( + self, + args, + num_threads, + tiling_factor=0, + tiling_idx=-1, + tiling_dtype=torch.float, + ): + super().__init__(args, num_threads) + self.vec_isa = codecache.pick_vec_isa() + assert self.vec_isa + if tiling_factor == 0: + tiling_factor = self.vec_isa.nelements(dtype=tiling_dtype) + self.tiling_factor = tiling_factor + self.tiling_idx = tiling_idx + + def _get_num_vectors(self, dtype: torch.dtype) -> int: + num_vectors = math.ceil( + self.tiling_factor * dtype.itemsize * 8 / self.vec_isa.bit_width() + ) + assert num_vectors >= 1 + return num_vectors + + def _get_vec_type(self, dtype: torch.dtype) -> str: + num_vectors = self._get_num_vectors(dtype) + if num_vectors == 1: + return f"at::vec::Vectorized<{DTYPE_TO_CPP[dtype]}>" + else: + return f"at::vec::VectorizedN<{DTYPE_TO_CPP[dtype]},{num_vectors}>" + + def _get_vec_load_line( + self, + var: str, + index: sympy.Expr, + dtype: torch.dtype, + load_mask: Optional[CppCSEVariable] = None, + ): + """ + Get a load line str that loads a vector from `var` at `index` of type `dtype`. + If `load_mask` is not None, we do a masked load accordingly. + Notes on the `dtype`: + 1. We always load `self.tiling_factor` number of elements regardless of the `dtype`. + It means we load half of the vector lanes for 16-bit data types and quarter of the + vector lanes for 8-bit data types. + 2. `torch.bool` and `torch.uint8` could mean masks and we load them as float mask vectors. + """ + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + assert opt_ctx is not None + load_mask_str = f"to_float_mask({load_mask})" if load_mask else None + loadbuf = f"{var} + {cexpr_index(index)}" if index != 0 else var + if dtype in (torch.uint8, torch.int8) and opt_ctx.is_load_int8_as_float: + assert self._get_num_vectors(torch.uint8) == 1 + line = ( + f"masked_load({loadbuf}, {load_mask_str})" + if load_mask_str + else f"at::vec::Vectorized<{DTYPE_TO_CPP[dtype]}>::loadu_one_fourth({loadbuf})" + ) + elif opt_ctx.is_load_as_mask: + line = f"flag_to_float_vec({loadbuf})" + elif dtype in DTYPE_LOWP_FP: + line = ( + f"masked_load({loadbuf}, {load_mask_str})" + if load_mask_str + else f"{self._get_vec_type(dtype)}::loadu({loadbuf}, {self.tiling_factor})" + ) + else: + line = ( + f"masked_load({loadbuf}, {load_mask_str})" + if load_mask_str + else f"{self._get_vec_type(dtype)}::loadu({loadbuf})" + ) + return line + + def load_non_contiguous( + self, + var: Optional[str], + index: sympy.Expr, + dtype: torch.dtype, + buffer: Optional[IndentedBuffer] = None, + ) -> CppCSEVariable: + """ + Load a vector in a non-contiguous way. The vector is initialized from an array that is + filled in an inner loop over the tiling factor. + :param var: buffer to load from, i.e. `var[transformed(index)]`. If None, we load the index + as index expression, i.e. 
`transformed(index)`. + :param index: index into the `var` or the index expression by its own if `var` is None. + The `index` could contain indirect indexing or the tiling itervar. When used in + the inner loop, the index is transformed as follows: + 1. the index is linearized along the tiling dim. + 2. the indirect indexing vector variables are transformed into arrays over the tiling dim. + :param dtype: data type of `var` or `index` if `var` is None. + :param buffer: the code buffer to write the generated code to. If None, we write to `self.loads`. + :return: a CppCSEVariable that represents the loaded vector. + """ + if buffer is None: + buffer = self.loads + + def get_result_size(dtype: torch.dtype) -> int: + if dtype.itemsize < 4: + return self.tiling_factor * (4 // dtype.itemsize) + else: + return self.tiling_factor + + def vec_to_array(vec_var: CppCSEVariable) -> CppCSEVariable: + assert vec_var.is_vec + code = BracesBuffer() + code.writeline("[&]") + with self.swap_buffers(code), code.indent(): + vec_dtype = vec_var.dtype + assert vec_dtype is not None + if vec_dtype == torch.bool: + vec_dtype = torch.float + result_size = get_result_size(vec_dtype) + code.writeline( + f"__at_align__ std::array<{DTYPE_TO_CPP[vec_dtype]}, {result_size}> tmpbuf;" + ) + line = f"{vec_var}.store(tmpbuf.data());" + code.writeline(line) + code.writeline("return tmpbuf;") + code.writeline("()") + csevar = self.cse.generate(buffer, code) + assert isinstance(csevar, CppCSEVariable) + return csevar + + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + assert opt_ctx is not None + is_mask = opt_ctx.is_load_as_mask + code = BracesBuffer() + code.writeline("[&]") + with self.swap_buffers(code), code.indent(): + result_type = "float" if is_mask else f"{DTYPE_TO_CPP[dtype]}" + result_size = get_result_size(dtype) + result_declare = ( + f"__at_align__ std::array<{result_type}, {result_size}> tmpbuf;" + ) + code.writeline(result_declare) + itervar_inner = sympy_index_symbol( + f"{self.itervars[self.tiling_idx]}_inner" + ) + replacements = {} + for indirect_var in ( + self.cse.varname_map[s.name] # type: ignore[attr-defined] + for s in index.free_symbols + if s.name.startswith("tmp") # type: ignore[attr-defined] + ): + assert isinstance(indirect_var, CppCSEVariable) + if indirect_var.is_vec: + array_var = vec_to_array(indirect_var) + replacements[indirect_var] = f"{array_var}[{itervar_inner}]" + load_mask = None + if self._load_mask is not None: + assert isinstance(self._load_mask, CppCSEVariable), self._load_mask + if self._load_mask.is_vec: + load_mask = ( + f"vector_lane_mask_check({self._load_mask}, {itervar_inner})" + ) + else: + load_mask = f"{self._load_mask} != 0" + index = sympy_subs(index, replacements) # type: ignore[arg-type] + index = self.scale_index_with_offset( + index, itervar_idx=self.tiling_idx, offset=itervar_inner + ) + if codecache.is_gcc(): + code.writeline(f"#pragma GCC unroll {self.tiling_factor}") + else: + code.writeline(f"#pragma unroll {self.tiling_factor}") + code.writeline( + f"for (long {itervar_inner} = 0; {itervar_inner} < {self.tiling_factor}; {itervar_inner}++)" + ) + with code.indent(), contextlib.ExitStack() as stack: + rhs = ( + f"{var}[{cexpr_index(index)}]" + if var is not None + else f"{cexpr_index(index)}" + ) + if is_mask: + rhs = f"flag_to_float_scalar({rhs})" + if load_mask: + code.writeline(f"if ({load_mask})") + stack.enter_context(code.indent()) + code.writeline(f"tmpbuf[{itervar_inner}] = {rhs};") + load_line = self._get_vec_load_line("tmpbuf.data()", 0, 
dtype) # type: ignore[arg-type] + code.writeline(f"return {load_line};") + code.writeline("()") + csevar = self.cse.generate(buffer, code) + assert isinstance(csevar, CppCSEVariable) + csevar.is_vec = True + return csevar + + def load(self, name: str, index: sympy.Expr): + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + var = self.args.input(name) + index = self.rename_indexing(index) + dtype = V.graph.get_dtype(name) + tiling_var = self.itervars[self.tiling_idx] + stride = stride_at_vec_range(index, tiling_var, self.tiling_factor) + if stride == 0: + # load scalar and lazily broadcast it on demand + return super().load(name, index) + non_contiguous = stride != 1 or self.index_indirect_depends_on( + index, tiling_var + ) + if non_contiguous: + csevar = self.load_non_contiguous(var, index, dtype) + else: + line = self._get_vec_load_line(var, index, dtype, self._load_mask) + csevar = self.cse.generate(self.loads, line) # type: ignore[assignment] + assert isinstance(csevar, CppCSEVariable) + csevar.update_on_args("load", (name, index), {}) + csevar.is_vec = True + return csevar + + def _get_vec_store_line( + self, + value: Union[str, CppCSEVariable], + var: str, + index: sympy.Expr, + dtype: torch.dtype, + ): + """ + Get a store line str that stores `value` into `var` at `index` of `dtype`. + :param value: Vectorized type templaterized on `dtype`. + :param var: buffer to store into. + :index: index into the `var`. + """ + # when value's type is str (e.g., welford reduction), caller should make sure + # it is a vector + assert isinstance(value, str) or ( + isinstance(value, CppCSEVariable) and value.is_vec + ), value + tiling_var = self.itervars[self.tiling_idx] + assert index.has(tiling_var), f"index: {index}, tiling_var: {tiling_var}" + var_expr = f"{var} + {cexpr_index(index)}" + stride = stride_at_vec_range(index, tiling_var, self.tiling_factor) + non_contiguous = stride != 1 or self.index_indirect_depends_on( + index, tiling_var + ) + if non_contiguous: + var_expr = "tmpbuf" + if dtype == torch.float: + line = f"{value}.store({var_expr});" + else: + line = f"{value}.store({var_expr}, {self.tiling_factor});" + if non_contiguous: + inner = sympy_index_symbol(f"{tiling_var}_inner") + new_index = self.scale_index_with_offset( + index, itervar_idx=self.tiling_idx, offset=inner + ) + tmp_bufsize = ( + f"{self.tiling_factor}*sizeof(float)/sizeof({DTYPE_TO_CPP[dtype]})" + ) + line = ( + f"{{ __at_align__ {DTYPE_TO_CPP[dtype]} tmpbuf[{tmp_bufsize}]; {line} " + f"for (long {inner} = 0; {inner} < {self.tiling_factor}; {inner}++) " + f"{var}[{cexpr_index(new_index)}] = tmpbuf[{inner}]; }}" + ) + return line + + def store(self, name, index, value, mode=None): + assert "buf" in name + assert mode is None + assert isinstance(value, CppCSEVariable), value + if not value.is_vec: + # this happens when we store a scalar into a vectorized buffer like "fill" + value = self.broadcast(value) + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + var = self.args.output(name) + self.cache_fp32_cse_var_before_lowp_store(value) + index = self.rename_indexing(index) + self.stores.writeline( + DeferredLine( + name, + self._get_vec_store_line(value, var, index, V.graph.get_dtype(name)), + ) + ) + + def reduction(self, dtype, src_dtype, reduction_type, value): + assert reduction_type in { + "max", + "min", + "sum", + "prod", + "xor_sum", + "welford_reduce", + "welford_combine", + } + assert dtype == src_dtype + assert dtype in [torch.float, torch.int64] + assert isinstance(value, CppCSEVariable), 
value + + if not value.is_vec: + value = self.broadcast(value) + + acc_type = reduction_acc_type(reduction_type, dtype) + acc_type_vec = self.reduction_acc_type_vec(reduction_type, dtype) + + if (reduction_type, acc_type) not in self.reduction_omp_dec: + if RTYPE_TO_CPP[reduction_type] not in NATIVE_OMP_RTYPES: + # Scalar reduction for other reductions are declared by default + self.reduction_prefix.splice( + f"""\ +#pragma omp declare reduction(\ +{RTYPE_TO_CPP[reduction_type]}:{acc_type}:\ +omp_out = {reduction_combine(reduction_type, "omp_out", "omp_in")}) \ +initializer(omp_priv={{{reduction_init(reduction_type, dtype)}}}) + """ + ) + self.reduction_omp_dec[reduction_type, acc_type] = RTYPE_TO_CPP[ + reduction_type + ] + + if (reduction_type, acc_type_vec) not in self.reduction_omp_dec: + self.reduction_prefix.splice( + f"""\ +#pragma omp declare reduction(\ +{RTYPE_TO_CPP[reduction_type]}:{acc_type_vec}:\ +omp_out = {self.reduction_combine_vec(reduction_type, "omp_out", "omp_in")}) \ +initializer(omp_priv={{{self.reduction_init_vec(reduction_type, dtype)}}}) + """ + ) + self.reduction_omp_dec[reduction_type, acc_type_vec] = RTYPE_TO_CPP[ + reduction_type + ] + + reduction_key = src_dtype, reduction_type, value + if reduction_key in self.reduction_cse.reduction_cache: + return self.reduction_cse.reduction_cache[reduction_key] + + acc = self.reduction_cse.generate( + self.loads, f"reduction {reduction_key}", write=False + ) + acc_vec = f"{acc}_vec" + + self.reduction_var_map[acc_vec] = reduction_type + self.reduction_prefix.writeline( + f"{acc_type} {acc} = {reduction_init(reduction_type, dtype)};" + ) + self.reduction_prefix.writeline( + f"{acc_type_vec} {acc_vec} = {self.reduction_init_vec(reduction_type, dtype)};" + ) + self.stores.writeline( + f"{acc_vec} = {self.reduction_combine_vec(reduction_type, acc_vec, value)};" + ) + + tmpvar: Union[str, CSEVariable] + if self.tiling_idx >= self.reduction_depth: + # Horizontal reduction + if is_welford_reduction(reduction_type): + assert ( + self._get_num_vectors(dtype) == 1 + ), "Welford reduction does not support VectorizedN (N>1)" + next_value = f"welford_vec_reduce_all({acc_vec})" + else: + reduce_all_body = ( + "{ return " + + self.reduction_combine_vec(reduction_type, "x", "y") + + "; }" + ) + vec = f"at::vec::Vectorized<{DTYPE_TO_CPP[dtype]}>" + vec_reduce_all_func = f"at::vec::vec_reduce_all<{DTYPE_TO_CPP[dtype]}>" + next_value = f"{vec_reduce_all_func}([]({vec}& x, {vec}& y) {reduce_all_body}, {acc_vec})" + + self.reduction_suffix.writeline( + f"{acc} = {reduction_combine(reduction_type, acc, next_value)};" + ) + tmpvar = acc + else: + tmpvar = acc_vec + + result = reduction_project(reduction_type, tmpvar) + self.reduction_cse.reduction_cache[reduction_key] = result + return result + + def store_reduction(self, name, index, value): + index = self.rename_indexing(index) + var = self.args.output(name) + out_dtype = V.graph.get_dtype(name) + # Only float reductions are vectorized currently + dtype = torch.float + if self.tiling_idx >= self.reduction_depth: + # Horizontal reduction + self.reduction_suffix.writeline( + DeferredLine( + name, + f"{var}[{cexpr_index(index)}] = static_cast<{DTYPE_TO_CPP[out_dtype]}>({value});", + ) + ) + else: + # Vertical reduction + store_lines = [] + if out_dtype != dtype: + if out_dtype in DTYPE_LOWP_FP and dtype == torch.float: + _lowp_fp_tmpvar_vec = f"{DTYPE_TO_CPP[out_dtype]}_{value}" + store_lines = [ + DeferredLine( + name, + f"auto {_lowp_fp_tmpvar_vec} = 
cvt_fp32_to_lowp_fp<{DTYPE_TO_CPP[out_dtype]}>({value});", + ) + ] + value = _lowp_fp_tmpvar_vec + else: + raise AssertionError( + f"Unsupported reduction type from {dtype} to {out_dtype}" + ) + store_lines += [ + DeferredLine( + name, + self._get_vec_store_line(value, var, index, out_dtype), + ) + ] + self.reduction_suffix.writelines(store_lines) + + def broadcast(self, scalar_var: CppCSEVariable) -> CppCSEVariable: + assert not scalar_var.is_vec + if scalar_var.dtype == torch.bool: + vec_var = self.cse.generate( + self.compute, f"to_float_mask({scalar_var.name})" + ) + else: + assert scalar_var.dtype is not None + vec_var = self.cse.generate( + self.compute, + f"{self._get_vec_type(scalar_var.dtype)}({scalar_var.name})", + ) + assert isinstance(vec_var, CppCSEVariable) + vec_var.dtype = scalar_var.dtype + vec_var.dependent_itervars = scalar_var.dependent_itervars + vec_var.is_vec = True + return vec_var + + def arange( + self, index: Union[sympy.Expr, CppCSEVariable], stride: sympy.Symbol + ) -> CppCSEVariable: + if isinstance(index, sympy.Expr): + index = cexpr(index) + else: + assert isinstance(index, CppCSEVariable) + assert not index.is_vec + csevar = self.cse.generate( + self.compute, + f"{self._get_vec_type(torch.int32)}::arange({index}, {stride})", + ) + assert isinstance(csevar, CppCSEVariable) + csevar.dtype = torch.int32 + csevar.is_vec = True + return csevar + + def reduction_init_vec(self, reduction_type, dtype): + scalar_type = DTYPE_TO_COMPUTATION_DTYPE[dtype] + vec_type = self._get_vec_type(scalar_type) + + if is_welford_reduction(reduction_type): + return f"Welford<{vec_type}>()" + + scalar_init = reduction_init(reduction_type, dtype) + return f"{vec_type}({scalar_init})" + + def reduction_acc_type_vec(self, reduction_type, dtype): + assert reduction_type not in {"argmin", "argmax"} + scalar_type = DTYPE_TO_COMPUTATION_DTYPE[dtype] + vec_type = self._get_vec_type(scalar_type) + if is_welford_reduction(reduction_type): + return f"Welford<{vec_type}>" + + return vec_type + + def reduction_combine_vec(self, reduction_type, var, next_value): + if reduction_type == "max": + return f"at::vec::maximum({var}, {next_value})" + elif reduction_type == "min": + return f"at::vec::minimum({var}, {next_value})" + elif reduction_type == "sum": + return f"{var} + {next_value}" + elif reduction_type == "prod": + return f"{var} * {next_value}" + elif reduction_type == "xor_sum": + return f"{var} ^ {next_value}" + elif reduction_type == "welford_reduce": + return f"welford_combine({var}, {next_value})" + elif reduction_type == "welford_combine": + if isinstance(next_value, tuple): + # When reading a value from Inductor IR we have a tuple of variable names + mean, m2, weight = next_value + else: + # When combining intermediate accumulators we have a Welford struct + mean, m2, weight = reduction_project(reduction_type, next_value) + return f"welford_combine({var}, {{{mean}, {m2}, {weight}}})" + else: + raise NotImplementedError() + + +class CppTile2DKernel(CppVecKernel): + """ + A vector kernel that handles the 2d tiles with the tile size defined in `tiling_factor` on + the inner-most loop level and one of the outer loop level (`outer_tiling_idx`). When the data + tile is accessed in a contiguous way from the outer loop axis, a transposition is applied on the + tile to make the access contiguous from the inner-most loop axis. Then, the same vectorization + logic from its parent `CppVecKernel` is leveraged for load/store/compute. 
The transposed tile load + and store are generated into kernel.preloads and kernel.poststores buffers. + + The loop structure looks like below: + for ... + for i_outer ... + for ... + for inner_most ... + // generated by CppTile2DKernel + float tmp0[16*16]; at::vec::transpose_mxn<...>(tmp0, in_ptr0 + ..., ...); // into kernel.preloads + float tmp1[16*16]; // into kernel.preloads + for i_inner ... { // the kernel inner loop + vectorized loads/compute/stores (e.g., load tmp0, store tmp1) // into kernel.loads/compute/stores + } + at::vec::transpose_mxn(out_ptr0 + ..., tmp1, ...) // into kernel.poststores + for inner_most ... (tail) + // generated by CppVecKernel + ... + for i_outer ... (tail) + for ... + for ... + // generated by CppKernel + ... + """ + + overrides = CppTile2DOverrides # type: ignore[assignment] + + def __init__(self, args, num_threads, tiling_factor, tiling_indices, tiling_dtype): + super().__init__( + args, num_threads, tiling_factor, tiling_indices[1], tiling_dtype + ) + self.tiling_indices = tiling_indices + + def inner_itervar(self): + return sympy_index_symbol(f"{self.itervars[self.outer_idx]}_inner") + + def need_vec_transpose(self, index): + outer_var = self.itervars[self.outer_idx] + inner_var = self.itervars[self.tiling_idx] + outer_stride = stride_at_vec_range(index, outer_var, self.tiling_factor) + inner_stride = stride_at_vec_range(index, inner_var, self.tiling_factor) + return ( + self._load_mask is None # TODO: support transposition with mask + and outer_stride == 1 + and index.has(inner_var) + and not inner_stride.has(inner_var) + and not inner_stride.has(outer_var) + ) + + def gen_transposed_tile_load_store(self, name, var, index, is_store): + # transposed tile load/store outside the kernel inner loop + dtype = V.graph.get_dtype(name) + factor = self.tiling_factor + src = f"{var} + {cexpr_index(index)}" + dst = "__place_holder__" + ld_src = f"{cexpr_index(stride_at_vec_range(index, self.itervars[self.tiling_idx], self.tiling_factor))}" + ld_dst = f"{factor}" + if is_store: + src, dst = dst, src + ld_src, ld_dst = ld_dst, ld_src + + need_define = True + load_or_store = f"at::vec::transpose_mxn<{DTYPE_TO_CPP[dtype]},{factor},{factor}>({src}, {ld_src}, {dst}, {ld_dst});" + if is_store: + tile_var = self.cse.newvar() + elif load_or_store not in self.cse.cache: + tile_var = self.cse.generate(self.preloads, load_or_store, write=False) + else: + need_define = False + tile_var = self.cse.cache[load_or_store] + + if need_define: + define_line = f"{DTYPE_TO_CPP[dtype]} {tile_var}[{factor}*{factor}] __attribute__ ((aligned ({factor})));" + self.preloads.writeline(define_line) + + load_or_store = load_or_store.replace("__place_holder__", str(tile_var)) + if is_store: + self.poststores.writeline(DeferredLine(name, load_or_store)) + else: + self.preloads.writeline(load_or_store) + + return tile_var + + def load(self, name: str, index: sympy.Expr): + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + var = self.args.input(name) + index = self.rename_indexing(index) + + inner = self.inner_itervar() + if self.need_vec_transpose(index): + tile_var = self.gen_transposed_tile_load_store( + name, var, index, is_store=False + ) + # vector load inside the kernel inner loop + loadbuf = f"{tile_var} + {cexpr_index(inner * self.tiling_factor)}" + dtype = V.graph.get_dtype(name) + line = self._get_vec_load_line(loadbuf, 0, dtype) # type: ignore[arg-type] + csevar = self.cse.generate(self.loads, line) + csevar.update_on_args("load", (name, index), {}) + assert 
isinstance(csevar, CppCSEVariable) + csevar.is_vec = True + return csevar + else: + new_index = self.transform_indexing(index) + return super().load(name, new_index) + + def store(self, name, index, value, mode=None): + assert "buf" in name + opt_ctx: OptimizationContext = get_current_node_opt_ctx() + var = self.args.output(name) + + inner = self.inner_itervar() + index = self.rename_indexing(index) + assert mode is None + if self.need_vec_transpose(index): + tile_var = self.gen_transposed_tile_load_store( + name, var, index, is_store=True + ) + # vector store inside the kernel inner loop + storebuf = f"{tile_var} + {cexpr_index(inner * self.tiling_factor)}" + if V.graph.get_dtype(name) in DTYPE_LOWP_FP: + line = f"{value}.store({storebuf}, {self.tiling_factor});" + elif V.graph.get_dtype(name) in (torch.uint8, torch.int8): + line = f"{value}.store({storebuf}, {self.tiling_factor});" + else: + line = f"{value}.store({storebuf});" + self.stores.writeline(DeferredLine(name, line)) + else: + new_index = self.transform_indexing(index) + super().store(name, new_index, value, mode) + + def codegen_inner_loops(self, code): + inner = self.inner_itervar() + code.writeline( + f"for (long {inner} = 0; {inner} < {self.tiling_factor}; {inner}++)" + ) + + def set_ranges(self, group, reduction_group): + vars = super().set_ranges(group, reduction_group) + # do vertical reduction as the tail loop + self.outer_idx, self.tiling_idx = ( + self.tiling_indices + if self.tiling_indices[1] < self.reduction_depth + else reversed(self.tiling_indices) + ) + return vars + + def transform_indexing(self, index: sympy.Expr) -> sympy.Expr: + return self.scale_index_with_offset( + index, + itervar_idx=self.outer_idx, + offset=self.inner_itervar(), + ) + + +class CppVecKernelChecker(CppVecKernel): + def __init__(self, args, num_threads, tiling_factor, tiling_idx=-1): + super().__init__(args, num_threads, tiling_factor, tiling_idx) + + # Since this kernel is only for checker but does not generate any + # code, so we need to decrease the kernel count. + metrics.generated_kernel_count -= 1 + + # Used to record the graph wrapper code as the wrapper_code status could be + # changed during graph run. + self._orig_wrapper_code = None + + self.simd_vec = True + + self.fast_vec_list = [] + for k, v in CppVecOverrides.__dict__.items(): + if isinstance(v, staticmethod): + self.fast_vec_list.append(k) + self.exit_stack = contextlib.ExitStack() + + # Cache all the load result + self.load_supported_dtypes: List[torch.dtype] = [ + torch.float, + torch.bfloat16, + torch.float16, + torch.bool, + torch.uint8, + torch.int8, + torch.int32, + torch.int64, + ] + self.store_supported_dtypes: List[torch.dtype] = [ + torch.float, + torch.bfloat16, + torch.float16, + torch.uint8, + torch.int8, + torch.int32, + torch.int64, + ] + # Cache the dtypes of the store operation. 
If the store is mixing dtypes, the
+        # vectorization would not support it as it is hard to determine the vec dtype
+        self.store_dtypes: List[torch.dtype] = []
+        # The dtype is used for vectorization
+        self.vec_dtype: torch.dtype = torch.float32
+
+    def disable_vec(self, msg=None):
+        if schedule_log.isEnabledFor(logging.DEBUG):
+            schedule_log.debug("Disabled vectorization: %s", msg)
+        self.simd_vec = False
+
+    def is_mask(self, name: str, users: Dict[torch.fx.Node, None]):
+        load_type = V.graph.get_dtype(name)
+        if load_type == torch.bool:
+            return all(user.target in ("where", "masked") for user in users.keys())
+        elif load_type in (torch.uint8, torch.int8):
+            """
+            If the loaded value is torch.uint8/int8, it is only supported when it
+            is used as a mask.
+            """
+            if not all(
+                user.target == "to_dtype" and user.args[-1] == torch.bool
+                for user in users.keys()
+            ):
+                return False
+
+            for to_dtype_node in users.keys():
+                assert to_dtype_node.target == "to_dtype"
+                if not all(
+                    user.target in ("where", "masked")
+                    for user in to_dtype_node.users.keys()
+                ):
+                    return False
+            return True
+        else:
+            return False
+
+    def is_load_int8_as_float(self, name: str, users: Dict[torch.fx.Node, None]):
+        """
+        Check that:
+        1. load_type is torch.uint8 or torch.int8
+        2. the load has exactly one user node whose target is to_dtype
+        3. the dtype of that to_dtype is torch.float
+        """
+        load_type = V.graph.get_dtype(name)
+        if load_type not in (torch.uint8, torch.int8):
+            return False
+        if len(users) == 1:
+            user = next(iter(users))
+            if (user.target == "to_dtype") and (user.args[-1] == torch.float):
+                return True
+            return False
+        return False
+
+    def can_store_fp32_as_int8(self, store_var: str, value_node: torch.fx.Node):
+        """
+        Check:
+        1. store_type is torch.uint8/torch.int8
+        2. value_node is of target to_dtype
+        3.
dtype of to_dtype node is torch.uint8/torch.int8 + """ + store_type = V.graph.get_dtype(store_var) + if store_type not in (torch.uint8, torch.int8): + return False + if value_node.target == "to_dtype" and value_node.args[-1] in ( + torch.uint8, + torch.int8, + ): + return True + + return False + + def is_load_integer_scalar_tensor(self, name: str, index: sympy.Expr): + load_dtype = V.graph.get_dtype(name) + buffer = V.graph.get_buffer(name) + return ( + load_dtype in [torch.int32, torch.int64] + and isinstance(buffer, TensorBox) + and isinstance(buffer.data, StorageBox) + and (len(buffer.data.layout.size) == 0) + and (index == 0) + ) + + def load(self, name: str, index: sympy.Expr): + with RecordOptimizationContext(__name__) as node_ctx: + load_dtype = V.graph.get_dtype(name) + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + opt_ctx.dtype = load_dtype + opt_ctx.is_load_as_mask = self.is_mask(name, node_ctx.get_fx_node().users) + opt_ctx.is_load_int8_as_float = self.is_load_int8_as_float( + name, node_ctx.get_fx_node().users + ) + + var = self.cse.newvar() + + if len(self.itervars) == 0: + self.disable_vec("not a loop") + return var + + if load_dtype in (torch.bool, torch.uint8, torch.int8) and not ( + opt_ctx.is_load_as_mask or opt_ctx.is_load_int8_as_float + ): + if not opt_ctx.is_load_as_mask: + self.disable_vec(f"{load_dtype} not loaded as mask") + elif not opt_ctx.is_load_int8_as_float: + self.disable_vec(f"{load_dtype} not loaded as float") + return var + + if ( + (load_dtype not in self.load_supported_dtypes) + and not self.is_load_integer_scalar_tensor(name, index) + and index.has(self.itervars[self.tiling_idx]) + ): + self.disable_vec(f"{load_dtype} not supported by load") + return var + + return var + + def store(self, name, index, value, mode=None): + with RecordOptimizationContext(__name__) as node_ctx: + if len(self.itervars) == 0: + self.disable_vec("not a loop") + return self.simd_vec + + store_dtype = V.graph.get_dtype(name) + + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + opt_ctx.dtype = store_dtype + + store_dtype = torch.float if store_dtype == torch.float32 else store_dtype + self.store_dtypes.append(store_dtype) + if store_dtype not in self.store_supported_dtypes: + self.disable_vec(f"{store_dtype} not supported by store") + return self.simd_vec + + if store_dtype in (torch.uint8, torch.int8): + value_node = node_ctx.get_fx_node().all_input_nodes[-1] + if not self.can_store_fp32_as_int8(name, value_node): + self.disable_vec("not support store float32 as uint8/int8") + return self.simd_vec + + assert "buf" in name + index = self.rename_indexing(index) + + if mode: + self.disable_vec(f"store mode: {mode}") + return self.simd_vec + + if index.is_number: + self.disable_vec(f"constant store index: {index}") + return self.simd_vec + + def reduction(self, dtype, src_dtype, reduction_type, value): + if ( + (dtype == torch.float and src_dtype == torch.float) + or (dtype == torch.int64 and src_dtype == torch.int64) + and reduction_type in VECTORIZABLE_RTYPES + ): + pass + else: + self.disable_vec( + f"reduction: dtype {dtype}, src_dtype {src_dtype}, reduction_type {reduction_type}" + ) + if is_welford_reduction(reduction_type): + return tuple([self.simd_vec] * 3) + return self.simd_vec + + def store_reduction(self, name, index, value): + return self.simd_vec + + def is_supported_cmp(self, node: torch.fx.Node): + def get_node_dtype(node): + if type(node) == torch.fx.Node: + opt_ctx: OptimizationContext = 
get_current_node_opt_ctx() + return opt_ctx.dtype if opt_ctx else None + else: + return None + + def get_cmp_dtypes(node: torch.fx.Node): + return get_node_dtype(node.args[-2]), get_node_dtype(node.args[-1]) + + assert len(node.args) >= 2 + # cmp(x, y): y is a magic value like x >= 1 + if type(node.args[-1]) in [int, float]: + return True + # cmp(x, y): x is a magic value like 1 >= y + if type(node.args[-2]) in [int, float]: + return False + + left_dtype, right_dtype = get_cmp_dtypes(node) + if left_dtype is None or right_dtype is None: + # TODO(Eikan): To record, deduce and propagate the data type of every expression. + return True + else: + return left_dtype == right_dtype + + def __exit__(self, exc_type, exc_val, exc_tb): + assert self._orig_wrapper_code is not None + # Restore the wrapper_code + V.graph.wrapper_code = self._orig_wrapper_code + self.exit_stack.__exit__(exc_type, exc_val, exc_tb) + + def __enter__(self): + # Record the graph wrapper code. The wrapper_code status could be + # changed during graph run. Regarding this checker, we also need to + # run the graph but we don't expect to change any status that would + # impact the code generation. Hence, we record the graph wrapper code + # and replace it with a dummy wrapper_code and then restore to the + # original one as long as the checker is finished. + self._orig_wrapper_code = V.graph.wrapper_code + V.graph.wrapper_code = WrapperCodeGen() + + parent_handler = V.MockHandler() + + class VecCheckerProxy: + bin_cmp_ops = ["eq", "ne", "le", "ge", "lt", "gt"] + + @staticmethod + def _bin_cmp_op(x, y): + current_node: torch.fx.Node = V.interpreter.current_node + if not self.is_supported_cmp(current_node): + self.disable_vec(f"binary comparison op: {current_node}") + return self.simd_vec + + @staticmethod + def __getattr__(name): # type: ignore[misc] + def inner(*args, **kwargs): + if name in VecCheckerProxy.bin_cmp_ops: + return VecCheckerProxy._bin_cmp_op(args, kwargs) + + if name not in self.fast_vec_list: + self.disable_vec(f"op: {name}") + + parent_val = getattr(parent_handler, name)(*args, **kwargs) + return pytree.tree_map(lambda _: self.simd_vec, parent_val) + + return inner + + @staticmethod + def load(name: str, index: sympy.Expr): + return self.load(name, index) + + @staticmethod + def store(name, index, value, mode=None): + return self.store(name, index, value, mode=mode) + + @staticmethod + def reduction(dtype, src_dtype, reduction_type, value): + return self.reduction(dtype, src_dtype, reduction_type, value) + + @staticmethod + def store_reduction(name, index, value): + return self.store_reduction(name, index, value) + + @staticmethod + def constant(val, dtype): + with RecordOptimizationContext(__name__) as node_ctx: + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + # VecKernel override dtype for constant + # Vectorization only support int32/fp32 now + # So if dtype = int64/fp64, we will cast it to int32/fp32 if possible + i32_iinfo = torch.iinfo(torch.int32) + if ( + dtype == torch.int64 + and val <= i32_iinfo.max + and val >= i32_iinfo.min + ): + opt_ctx.dtype = torch.int32 + + f32_iinfo = torch.finfo(torch.float32) + if dtype == torch.double: + if ( + (val <= f32_iinfo.max and val >= f32_iinfo.min) + or (val == torch.inf) + or (val == -torch.inf) + ): + opt_ctx.dtype = torch.float32 + + supported_dtypes = [ + torch.float32, + torch.int32, + torch.int64, + torch.bfloat16, + torch.float16, + torch.bool, + ] + + if opt_ctx.dtype not in supported_dtypes or ( + opt_ctx.dtype == torch.int32 + 
and not all( + user.target in VecCheckerProxy.bin_cmp_ops + for user in node_ctx.current_node.users + ) + ): + self.disable_vec(f"constant dtype: {opt_ctx.dtype}") + return val + + @staticmethod + def index_expr(expr, dtype): + assert len(self.ranges) == len(self.itervars) + if not len(self.ranges) or not all( + not isinstance(range, sympy.Expr) or sympy.simplify(range).is_number + for range in self.ranges + ): + # if the range value is sympy.Expr, we might could not deduce the accurate loop interval. + self.disable_vec(f"index_expr: {expr}, dtype {dtype}") + return self.cse.newvar() + + def can_use_int32(): + free_symbols = list(expr.free_symbols) + sizes = { + k: v + for k, v in zip(self.itervars, self.ranges) + if k in free_symbols + } + # Trivial case: Range empty + if any(v == 0 for v in sizes.values()): + return True + + vars_ranges = {k: ValueRanges(0, v - 1) for k, v in sizes.items()} + if not vars_ranges or len(vars_ranges) != len(free_symbols): + i32_iinfo = torch.iinfo(torch.int32) + return ( + expr.is_number + and expr <= i32_iinfo.max + and expr >= i32_iinfo.min + ) + expr_ranges = bound_sympy(expr, vars_ranges) + if math.isinf(expr_ranges.lower) or math.isinf(expr_ranges.upper): # type: ignore[arg-type] + return False + # If something takes the values 0..7, we will compare in the loop + # x < 8. As such, for the loop not to overflow in the last iteration, we want + # to check that expr_ranges.upper + 1 is representable as well + return range_expressable_in_32_bits( + ValueRanges( + int(expr_ranges.lower), int(expr_ranges.upper) + 1 # type: ignore[arg-type] + ) + ) + + with RecordOptimizationContext(__name__) as node_ctx: + assert len(self.ranges) == len(self.itervars) + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + if ( + dtype == torch.int64 + and can_use_int32() + and all( + user.target in VecCheckerProxy.bin_cmp_ops + for user in node_ctx.current_node.users + ) + ): + opt_ctx.dtype = torch.int32 + else: + opt_ctx.dtype = dtype + self.disable_vec(f"index_expr: {expr}, dtype {dtype}") + + tmp_var = self.cse.newvar() + return tmp_var + + @staticmethod + def indirect_indexing(index_var, size, check=True): + return sympy_index_symbol(str(index_var)) + + @staticmethod + def masked(mask, body, other): + body() + return self.cse.newvar() + + @staticmethod + def to_dtype(x, dtype, src_dtype=None): + with RecordOptimizationContext(__name__) as node_ctx: + opt_ctx: OptimizationContext = node_ctx.get_opt_ctx() + assert opt_ctx + opt_ctx.dtype = dtype + + cur_node = node_ctx.get_fx_node() + input_value: torch.fx.Node = cur_node.all_input_nodes[1] + if dtype == torch.float: + if input_value.target in [ + "load", + ]: + # Support masked_load for BF16/FP16. Because the legalization will + # insert to_dtype to convert the BF16/FP16 input to FP32. 
+ dtype = ( + V.graph.get_dtype(input_value.args[1]) # type: ignore[arg-type] + if input_value.target == "load" + else input_value.args[-1] + ) + if dtype in [ + torch.float16, + torch.bfloat16, + torch.float, + torch.float64, + torch.uint8, + torch.int8, + torch.int32, + torch.int64, + ]: + # Convert from dtype to torch.float + pass + else: + self.disable_vec(f"to_dtype: dtype {dtype}") + elif dtype in DTYPE_LOWP_FP: + if not all(usr.target == "store" for usr in cur_node.users): + self.disable_vec( + "to_dtype: bfloat16/float16 expecting users are all stores" + ) + return x + + store_names = [usr.args[1] for usr in cur_node.users] + if not all( + V.graph.get_dtype(name) in [dtype] for name in store_names + ): + self.disable_vec( + "to_dtype: expecting all stores into bfloat16 or float16" + ) + return x + elif dtype == torch.bool: + pass + elif dtype in (torch.uint8, torch.int8): + # Only allow the 2 cases below: + # Case 1: to_int8 and store, which corresponds to the single quant node + # at the end of the fusion pattern. + is_to_int8_and_store = all( + usr.target in ["store"] for usr in cur_node.users + ) + # Case 2: to_int8 and to_float, which corresponds to a pair of quant/dequant nodes + # in the middle of the fusion pattern. + is_to_int8_and_to_float = all( + ( + usr.target in ["to_dtype"] + and usr.args[2] == torch.float32 + ) + for usr in cur_node.users + ) + if not (is_to_int8_and_store or is_to_int8_and_to_float): + self.disable_vec(f"to_dtype: dtype {dtype}") + elif dtype in [torch.int64, torch.int32]: + pass + else: + self.disable_vec(f"to_dtype: dtype {dtype}") + return x + + self.exit_stack.enter_context(V.set_ops_handler(VecCheckerProxy())) + self.exit_stack.enter_context(V.set_kernel_handler(self)) + return self + + +class CppKernelProxy(CppKernel): + def __init__(self, kernel_group): + super().__init__(kernel_group.args, kernel_group.ws.num_threads) + self.kernel_group = kernel_group + self.loop_nest = None + self.call_ranges = None + self.picked_vec_isa: codecache.VecISA = codecache.pick_vec_isa() + + def data_type_propagation(self, nodes): + for _node in nodes: + assert isinstance(_node, SchedulerNode) + DataTypePropagation.propagate_scheduler_node(_node) + + # Check if all the nodes of a given fx graph can support BF16/FP16 + def is_lowp_fp_scheduler(self, scheduler_node: SchedulerNode): + if not isinstance(scheduler_node._body, ir.LoopBody): + return True + + _lowp_fp_type: Optional[torch.dtype] = None + + # Propagate the dtype to check if all the fx nodes are bf16/fp16 + DataTypePropagation.propagate_scheduler_node(scheduler_node) + + sub_blocks = [scheduler_node._body.root_block] + list( + scheduler_node._body.subblocks.values() + ) + for sub_block in sub_blocks: + for _node in sub_block.graph.nodes: + # TODO(Eikan): Regarding get_index and index_expr, we should conclude the + # data type as well.
+ if _node.op == "placeholder" or _node.target in ( + "get_index", + "index_expr", + ): + continue + + # Fast path if all operations can support bf16/fp16 without converting to fp32 + if _node.target not in [ + "load", + "store", + "abs", + "neg", + "output", + ]: + return False + + if hasattr(_node, "meta") and _node.meta: + assert OptimizationContext.key in _node.meta + opt_ctx: OptimizationContext = _node.meta[OptimizationContext.key] + if not opt_ctx.dtype or opt_ctx.dtype not in DTYPE_LOWP_FP: + return False + if _lowp_fp_type: + assert ( + _lowp_fp_type == opt_ctx.dtype + ), "scheduler node do not support bf16/fp16 mix" + else: + _lowp_fp_type = opt_ctx.dtype + else: + return False + + scheduler_node._lowp_fp_type = _lowp_fp_type # type: ignore[attr-defined] + return True + + def legalize_lowp_fp_dtype(self, nodes): + def add_to_dtype(sub_graph: torch.fx.Graph): + def is_lowp_fp_load(node: torch.fx.Node): + if node.target not in ["load"]: + return False + assert len(node.args) == 3 + load_dtype = V.graph.get_dtype(node.args[1]) # type: ignore[arg-type] + return load_dtype in DTYPE_LOWP_FP + + def is_lowp_fp_store(node: torch.fx.Node): + if node.target != "store": + return False + _, store_var, _, _, _ = node.args + store_dtype = V.graph.get_dtype(store_var) # type: ignore[arg-type] + return store_dtype in DTYPE_LOWP_FP + + sub_graph_nodes = list(sub_graph.nodes) + to_lowp_fp_legalized_nodes = [] + for _node in sub_graph_nodes: + if is_lowp_fp_load(_node): + # No need to promote to float if all users are direct stores + if all(user.target == "store" for user in _node.users): + continue + ops = _node.args[0] + with sub_graph.inserting_after(_node): + to_type_node = sub_graph.call_method( + "to_dtype", args=(ops, _node, torch.float) + ) + to_type_node_args = to_type_node.args + _node.replace_all_uses_with(to_type_node) + to_type_node.args = to_type_node_args + metrics.cpp_to_dtype_count += 1 + elif is_lowp_fp_store(_node): + ops, name, _, value_var, _ = _node.args + # No need to promote to float if it is a user of a load which are all directly stored + if value_var.target == "load" and all( + user.target == "store" for user in value_var.users + ): + continue + dtype = V.graph.get_dtype(name) + with sub_graph.inserting_before(_node): + to_type_node = sub_graph.call_method( + "to_dtype", args=(ops, value_var, dtype) + ) + _node.replace_input_with(value_var, to_type_node) + metrics.cpp_to_dtype_count += 1 + elif _node.target == "reduction": + ( + ops, + dtype, + src_dtype, + reduction_type, + value, + ) = _node.args + if src_dtype in DTYPE_LOWP_FP: + # Since we always convert the load/store value to float if the tensor is bfloat16/float16. + # Therefore, the reduction should never work with bfloat16/float16 value. Hence, we update + # the bfloat16/float16 reduction by + # 1) updating the src_dtype to float + # and 2) updating the dtype to float if it is bfloat16/float16. + assert dtype in [ + torch.float, + torch.bfloat16, + torch.float16, + torch.int64, + ] + _node.args = ( + ops, + torch.float if dtype in DTYPE_LOWP_FP else dtype, + torch.float, + reduction_type, + value, + ) + elif _node.target == "to_dtype" and _node.args[-1] in DTYPE_LOWP_FP: + (ops, x, _) = _node.args + # The legalization always loads the BF16/FP16 tensor as FP32 for computation + # and converts back to BF16/FP16 after the computation. + # Hence, there should be no computation w/ BF16/FP16. + # Therefore, we update the to_dtype by replacing the bf16/fp16 dtype with fp32. 
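+ # For example (editorial note): a to_dtype node whose args are (%ops, %x, torch.bfloat16)
+ # is rewritten in place below to (%ops, %x, torch.float).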
+ # Save the legalized to_dtype node for elimination (the eliminate_to_dtype step): + # 1) Eliminate the redundant to_dtype node if we have a pattern as follows: + # graph(): + # %lowp_fp_legalized = call_method[target=to_dtype](args = (%ops, %input, torch.float)) + # %to_dtype2 = call_method[target=to_dtype](args = (%ops, %lowp_fp_legalized, torch.bfloat16/float16)) + # Regarding the first to_dtype, it is redundant because + # the second to_dtype also converts to torch.bfloat16/torch.float16. + # Hence, we remove the first to_dtype. + to_lowp_fp_legalized_nodes.append(_node) + _node.args = (ops, x, torch.float) + else: + pass + + def eliminate_to_dtype(sub_graph: torch.fx.Graph): + def _eliminate_duplicate_to_node(sub_graph: torch.fx.Graph): + # Eliminate the redundant to_dtype node. Let's consider a pattern as follows: + # graph(): + # %to_dtype1 = call_method[target=to_dtype](args = (%ops, %input, torch.float), kwargs = {}) + # %to_dtype2 = call_method[target=to_dtype](args = (%ops, %to_dtype1, torch.float), kwargs = {}) + # Regarding the first to_dtype, it is redundant because the second to_dtype also converts to + # torch.float. Hence, we remove the first to_dtype. + def _used_by_to(to_node: torch.fx.Node): + return all(usr.target == "to_dtype" for usr in to_node.users) + + all_to_nodes = [ + node for node in sub_graph.nodes if node.target == "to_dtype" + ] + all_to_nodes_and_users = [ + {node: node.users} for node in all_to_nodes if _used_by_to(node) + ] + for node_users in all_to_nodes_and_users: + for node, users in node_users.items(): + if node in sub_graph.nodes and ( + all(usr.args[-1] == node.args[-1] for usr in users) + or ( + node in to_lowp_fp_legalized_nodes + and all( + usr.args[-1] in DTYPE_LOWP_FP for usr in users + ) + ) + ): + val_node = node.all_input_nodes[-1] + node.replace_all_uses_with(val_node) + sub_graph.erase_node(node) + + # In debug mode, the graph of LoopBody attaches a new GraphModule as its + # owning_module for debugging, while release mode does not. The lint will + # check whether the graph has an owning_module to decide if it needs to check + # call_module. LoopBody might contain get_index as a module call, but it + # is just a function, so it cannot pass the lint check in debug mode. + # We bypass the check if the owning_module is None. Eventually, we should call + # get_index via call_function rather than call_module.
+ if sub_graph.owning_module is None: + sub_graph.lint() + + _eliminate_duplicate_to_node(sub_graph) + + eliminate_to_dtype(sub_graph) + + def _legalize_lowp_fp(loop_body: ir.LoopBody): + sub_blocks = [loop_body.root_block] + list(loop_body.subblocks.values()) + for sub_block in sub_blocks: + add_to_dtype(sub_block.graph) + + if all( + isinstance(_node, SchedulerNode) and self.is_lowp_fp_scheduler(_node) + for _node in nodes + ): + # Mark the load node to load bf16/fp16 + for _node in nodes: + sub_blocks = [_node._body.root_block] + list( + _node._body.subblocks.values() + ) + for sub_block in sub_blocks: + for fx_node in sub_block.graph.nodes: + if fx_node.target in ["load", "store"]: + assert fx_node.meta + assert OptimizationContext.key in fx_node.meta + opt_ctx: OptimizationContext = fx_node.meta[ + OptimizationContext.key + ] + assert opt_ctx.dtype in DTYPE_LOWP_FP + + # Bypass the legalization as the kernel can run with bf16/fp16 directly + return + + for _node in nodes: + assert isinstance(_node, SchedulerNode) + assert isinstance(_node._body, ir.LoopBody) + node: SchedulerNode = _node + + def is_memory_copy_scheduler_node(node: SchedulerNode): + op_counts = node.read_writes.op_counts + return ( + len(op_counts) == 2 and "load" in op_counts and "store" in op_counts + ) + + should_legalize = not is_memory_copy_scheduler_node(node) + if should_legalize: + body: ir.LoopBody = node._body + _legalize_lowp_fp(body) + + def codegen_nodes(self, nodes: List[SchedulerNode]): + # Legalize BF16 node by adding to_dtype explicitly + self.legalize_lowp_fp_dtype(nodes) + self.data_type_propagation(nodes) + + assert len(nodes) >= 1 + first_node = nodes[0] + vec_dtype = ( + first_node._lowp_fp_type # type: ignore[attr-defined] + if all( + hasattr(_node, "_lowp_fp_type") + and _node._lowp_fp_type == first_node._lowp_fp_type # type: ignore[attr-defined] + for _node in nodes + ) + else torch.float + ) + + kernel_group = self.kernel_group + _, (group, reduction_group) = max( + nodes, key=lambda x: int(x.is_reduction()) + ).group + + self.set_ranges(group, reduction_group) + + def codegen_kernel(cls, *args): + with kernel_group.new_kernel(cls, *args) as kernel: + # Ugly hack to maintain the metrics kernel count since + # we only count in CppKernelProxy, not those contained in it + metrics.generated_kernel_count -= 1 + + run(kernel) + return kernel + + def run(kernel): + vars, reduction_vars = kernel.set_ranges(group, reduction_group) + in_suffix = False + for node in nodes: + if node.group[1] in [ + (group, reduction_group), + (group + reduction_group, ()), + ]: + assert not in_suffix + node.run(vars, reduction_vars) + else: + in_suffix = True + assert node.group[1] == ( + group, + (), + ), f"unexpected group: {node.group[1]} != {group}, {reduction_group}" + # we can fuse in some extra pointwise into the suffix + with kernel.write_to_suffix(): + node.run(vars, ()) + + scalar_kernel = codegen_kernel(CppKernel) + V.graph.removed_buffers |= scalar_kernel.removed_buffers + V.graph.inplaced_to_remove |= scalar_kernel.inplaced_to_remove + self.loop_nest = LoopNestWithSplit.build(scalar_kernel) + + if not self.picked_vec_isa: + return + + def select_tiling_indices(tiling_factor): + all_index = [] + for node in nodes: + rw = dependencies.extract_read_writes(node._body, *node._sizes) + all_index += [dep.index for dep in itertools.chain(rw.reads, rw.writes)] + contig_vars = set() + contig_vars_list = [] + non_contig_stride_const = set() + non_contig_stride_other = set() + for index in all_index: + for var in 
index.free_symbols: + if not re.search(r"^d\d+$", var.name): + continue + stride = stride_at_vec_range(index, var, tiling_factor) + if stride == 0: + continue + elif stride == 1: + contig_vars.add(int(var.name[1:])) + contig_vars_list.append(int(var.name[1:])) + elif all(s.name.startswith("s") for s in stride.free_symbols): + non_contig_stride_const.add(int(var.name[1:])) + else: + non_contig_stride_other.add(int(var.name[1:])) + contig_only = ( + contig_vars - non_contig_stride_const - non_contig_stride_other + ) + if len(contig_vars) == 0: + # no contiguous vars + return [len(self.itervars) - 1] + if contig_only: + return sorted(contig_only)[-1:] + contig_and_const_stride = ( + contig_vars & non_contig_stride_const + ) - non_contig_stride_other + contig_vars_sorted = sorted(contig_vars) + if ( + len(contig_vars_sorted) == 2 + and contig_vars_sorted[-1] in contig_and_const_stride + and contig_vars_sorted[-1] == len(self.itervars) - 1 + ): + return contig_vars_sorted + return sorted(contig_vars_sorted, key=contig_vars_list.count)[-1:] + + def select_tiling(dtype: torch.dtype = torch.float): + # TODO(jgong5): support alternative tiling factors and data types + tiling_factor = self.picked_vec_isa.nelements(dtype=dtype) + tiling_indices = select_tiling_indices(tiling_factor) + if tiling_indices: + could_vec = True + for tiling_indice in tiling_indices: + with CppVecKernelChecker( + deepcopy(self.kernel_group.args), + parallel_num_threads(), + tiling_factor, + tiling_indice, + ) as vec_checker: + run(vec_checker) + could_vec = could_vec and vec_checker.simd_vec + if not could_vec: + break + if could_vec: + if len(tiling_indices) == 1: + return [tiling_factor], tiling_indices + if len(tiling_indices) == 2: + return [tiling_factor, tiling_factor], tiling_indices + return [], [] + + # Kernels share the same global contexts like V.graph.wrapper_code, V.kernel.args. + # But the generated scalar kernel has updated these global contexts. Hence, the other kernels + # should not do this again to avoid context conflict. By now, we only control the + # config.inplace_buffers. In the future, we could maintain more contexts. + with torch._inductor.config.patch(inplace_buffers=False): + tiling_factors, tiling_indices = select_tiling(vec_dtype) + assert len(tiling_factors) == len(tiling_indices) + try: + if len(tiling_indices) == 1: + vec_kernel = codegen_kernel( + CppVecKernel, tiling_factors[0], tiling_indices[0], vec_dtype + ) + metrics.generated_cpp_vec_kernel_count += 1 + main_loop, tail_loop = self.loop_nest.split_with_tiling( + tiling_indices[0], factor=tiling_factors[0] + ) + main_loop.set_kernel(vec_kernel) + tail_loop.set_kernel(scalar_kernel) + main_loop.simd_vec = True + tail_loop.simd_omp = True + # We chop the loop into two cubes by the nelements - main loop and tail loop. + # Regarding the main loop, it is straightforward that it could be vectorized with + # nelements. But for the tail loop, it still could be vectorized. For example, + # if the nelements is 8(256bits), then the tail loop still could be vectorized + # as 4(128bits). 
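+ # Concrete example (editorial note): with AVX512 and fp32 the tiling factor is 16, so
+ # the main loop advances 16 elements per iteration, while the tail loop below still
+ # carries an 8-lane hint ("#pragma omp simd simdlen(8)") for the leftover iterations.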
+ tail_loop.simd_nelements = tiling_factors[0] // 2 + elif len(tiling_indices) == 2: + assert ( + tiling_indices[1] == len(self.itervars) - 1 + and tiling_factors[0] == tiling_factors[1] + ) + tile2d_kernel = codegen_kernel( + CppTile2DKernel, tiling_factors[0], tiling_indices, vec_dtype + ) + vec_kernel = codegen_kernel( + CppVecKernel, tiling_factors[0], tiling_indices[0], vec_dtype + ) + metrics.generated_cpp_vec_kernel_count += 2 + outer_main_loop, outer_tail_loop = self.loop_nest.split_with_tiling( + tiling_indices[0], factor=tiling_factors[0] + ) + outer_tail_loop.set_kernel(scalar_kernel) + ( + inner_main_loop, + inner_tail_loop, + ) = outer_main_loop.split_with_tiling( + tiling_indices[1] - tiling_indices[0], factor=tiling_factors[0] + ) + inner_main_loop.set_kernel(tile2d_kernel) + inner_tail_loop.set_kernel(vec_kernel) + except CppVecUnsupportedError as e: + if schedule_log.isEnabledFor(logging.DEBUG): + schedule_log.debug("Disabled vectorization: %s", e) + + def codegen_loops(self, code, worksharing): + self.codegen_loops_impl(self.loop_nest, code, worksharing) + + +class ReasonFusedNodes(Enum): + SAME_VARS_REDUCE = "same_vars_reduce" + COMPATIBLE_REDUCTION = "compatible_reduction" + COMPATIBLE_RANGES_NO_REDUCTION = "compatible_ranges_no_reduction" + + +class CppScheduling(BaseScheduling): + # ctypes limits the number of args to 1024, refer to: + # https://github.com/python/cpython/commit/a285af7e626d1b81cf09f8b2bf7656f100bc1237 + # We set a conservative threshold here. + MAX_FUSED_KERNEL_ARGS_NUM = 500 + + def __init__(self, scheduler): + self.scheduler = scheduler + self.get_kernel_group() + self._ready_to_flush = False + + def _set_flush_status(self, status: bool): + self._ready_to_flush = status + + def group_fn(self, sizes): + return tuple(tuple(map(V.graph.sizevars.simplify, s)) for s in sizes) + + def get_kernel_group(self): + from .cpp_wrapper_cpu import CppWrapperCpu + + self.kernel_group: Union[CppWrapperKernelGroup, KernelGroup] + if isinstance(V.graph.wrapper_code, CppWrapperCpu): + self.kernel_group = CppWrapperKernelGroup() + else: + self.kernel_group = KernelGroup() + + def fuse(self, node1, node2): + if node1.is_foreach() or node2.is_foreach(): + return ForeachKernelSchedulerNode.fuse(node1, node2) + else: + if ( + self._why_fuse_nodes(node1, node2) + == ReasonFusedNodes.COMPATIBLE_RANGES_NO_REDUCTION + ): + assert isinstance(node1, (SchedulerNode, FusedSchedulerNode)) + assert isinstance(node2, (SchedulerNode, FusedSchedulerNode)) + + _, (vars1, reduce1) = node1.group + _, (vars2, reduce2) = node2.group + assert reduce1 == () and reduce2 == (), (reduce1, reduce2) + + def get_indexing_ranges_exprs(node): + if isinstance(node, FusedSchedulerNode): + assert len(node.snodes) > 0 + return get_indexing_ranges_exprs(node.snodes[0]) + else: + assert isinstance(node, SchedulerNode) + comp_buffer = node.node + assert isinstance(comp_buffer, ir.ComputedBuffer) + _, body, _ = comp_buffer.get_default_sizes_body() + return body.var_ranges, list(body.indexing_exprs.values()) + + node_to_recomp = node1 if len(vars1) < len(vars2) else node2 + assert isinstance(node_to_recomp, SchedulerNode) + + ref_node = node2 if len(vars1) < len(vars2) else node1 + + extra_indexing_constraints = get_indexing_ranges_exprs(ref_node) + + node_to_recomp.recompute_size_and_body( + extra_indexing_constraints=extra_indexing_constraints + ) + + _, (vars1, _) = node1.group + _, (vars2, _) = node2.group + assert vars1 == vars2, (vars1, vars2) + + return FusedSchedulerNode.fuse(node1, node2) + + def 
_why_fuse_nodes(self, node1, node2) -> Optional[ReasonFusedNodes]: + _, (vars1, reduce1) = node1.group + _, (vars2, reduce2) = node2.group + + if vars1 == vars2 and reduce1 == reduce2: + return ReasonFusedNodes.SAME_VARS_REDUCE + if reduce1 == () and vars1 == vars2 + reduce2: + return ReasonFusedNodes.COMPATIBLE_REDUCTION + if self._can_fuse_nodes_with_compatible_ranges(node1, node2): + return ReasonFusedNodes.COMPATIBLE_RANGES_NO_REDUCTION + # TODO(jansel): allow fusion pointwise (vars1, ()) suffix? + return None + + def _can_fuse_nodes_with_compatible_ranges(self, node1, node2): + # Here we try to fuse SchedulerNode/FusedSchedulerNode with compatible ranges + # e.g. (s0, s1, s2) and (s0 * s1 * s2) + _, (vars1, reduce1) = node1.group + _, (vars2, reduce2) = node2.group + + c1 = reduce1 == () and reduce2 == () + c2 = math.prod(vars1) == math.prod(vars2) + c3 = len(vars1) == 1 or len(vars2) == 1 + if not (c1 and c2 and c3): + return False + + node_to_recomp = node1 if len(vars1) < len(vars2) else node2 + ref_node = node2 if len(vars1) < len(vars2) else node1 + + # We cannot recompute sizes and body for nodes other than SchedulerNode + # TODO: we can extend fusion support with compatible ranges for FusedSchedulerNode + if isinstance(node_to_recomp, FusedSchedulerNode): + return False + + def get_buffer(node): + if isinstance(node, FusedSchedulerNode): + assert len(node.snodes) > 0 + # use the last scheduler node from the list as it has the most + # relevant indexing expressions + return get_buffer(node.snodes[-1]) + else: + assert isinstance(node, SchedulerNode) + return node.node + + ref_node_buffer = get_buffer(ref_node) + if isinstance(ref_node_buffer, ir.TemplateBuffer): + return False + + assert isinstance(ref_node_buffer, ir.ComputedBuffer) + + # It may happen that node1 and node2 have a compatible number of elements + # but different original ranges, for example: + # {d0: s0, d1: s1, d2: s2} vs {d0: s0*s1*s2} + # See https://github.com/pytorch/pytorch/pull/120077/files#r1500427848 for more details + # TODO: we can fix this if it allows us to CSE at least one of the variables + var_ranges1 = ref_node_buffer.get_read_writes().var_ranges + var_ranges2 = node_to_recomp.node.get_read_writes().var_ranges + if var_ranges1 != var_ranges2: + return False + + return True + + def _can_fuse_horizontal_impl(self, node1, node2): + assert isinstance(node1, (FusedSchedulerNode, SchedulerNode)) + assert isinstance(node2, (FusedSchedulerNode, SchedulerNode)) + return self._why_fuse_nodes(node1, node2) is not None + + def can_fuse_horizontal(self, node1, node2): + if ( + len(node1.get_nodes()) + len(node2.get_nodes()) + > config.cpp.max_horizontal_fusion_size + ): + return False + + return self._can_fuse_horizontal_impl(node1, node2) + + def can_fuse_vertical(self, node1, node2): + return self._can_fuse_horizontal_impl(node1, node2) and not node1.is_reduction() + + def codegen_nodes(self, nodes: List[SchedulerNode]): + """ + Turn a set of pre-fused nodes into a C++ kernel.
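+ The heavy lifting is delegated to CppKernelProxy, which emits a scalar kernel and,
+ when the vectorization checker allows it, tiled vector variants that share the same
+ loop nest.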
+ """ + kernel_group = self.kernel_group + + cpp_kernel_proxy = CppKernelProxy(kernel_group) + cpp_kernel_proxy.codegen_nodes(nodes) + + kernel_group.finalize_kernel(cpp_kernel_proxy, nodes) + + args_num = self._get_scheduled_num_args() + if args_num > CppScheduling.MAX_FUSED_KERNEL_ARGS_NUM: + self._set_flush_status(True) + + def _get_scheduled_num_args(self): + return self.kernel_group.get_num_args() + + def ready_to_flush(self): + return self._ready_to_flush + + def codegen_sync(self): + pass + + def flush(self): + self.kernel_group.codegen_define_and_call(V.graph.wrapper_code) + self.get_kernel_group() + self._set_flush_status(False) + + +class KernelGroup: + def __init__(self): + super().__init__() + self.args = KernelArgs() + self.loops_code = BracesBuffer() + self.ws = WorkSharing(self.loops_code) + self.stack = contextlib.ExitStack() + self.stack.enter_context(self.ws) + self.scheduled_nodes = [] + + def new_kernel(self, cls, *args): + return cls(self.args, parallel_num_threads(), *args) + + def finalize_kernel(self, new_kernel, nodes): + self.scheduled_nodes += nodes + code = self.loops_code + ws = self.ws + new_kernel.codegen_loops(code, ws) + + def get_num_args(self): + arg_defs, call_args, arg_types = self.args.cpp_argdefs() + args_num = len(arg_defs) + return args_num + + def codegen_define_and_call(self, wrapper): + self.stack.close() + if not self.scheduled_nodes: + return + + fused_name = ( + get_fused_kernel_name(self.scheduled_nodes, config.cpp.descriptive_names) + if config.cpp.descriptive_names + else "" + ) + kernel_name = "_".join(["cpp", fused_name, wrapper.next_kernel_suffix()]) + arg_defs, call_args, arg_types = self.args.cpp_argdefs() + arg_defs = ",\n".ljust(25).join(arg_defs) + code = BracesBuffer() + # TODO: support kernel profile on other platforms + enable_kernel_profile = ( + config.cpp.enable_kernel_profile and sys.platform == "linux" + ) + if enable_kernel_profile: + code.writelines(["#include "]) + kernel_decl_name = kernel_name if V.graph.cpp_wrapper else "kernel" + code.writeline(codecache.cpp_prefix()) + + code.writeline(f'extern "C" void {kernel_decl_name}({arg_defs})') + with code.indent(): + if enable_kernel_profile: + graph_id = V.graph.graph_id + prefix = "graph_" + str(graph_id) + "_" if graph_id is not None else "" + code.writelines( + [ + f'RECORD_FUNCTION("{prefix + kernel_name}", c10::ArrayRef({{}}));' + ] + ) + for old, new in self.args.aliases(): + code.writeline(f"auto {old} = {new};") + code.splice(self.loops_code) + + codecache_def = IndentedBuffer() + if not V.graph.cpp_wrapper: + codecache_def.writeline(f"async_compile.cpp_pybinding({arg_types!r}, '''") + codecache_def.splice(code) + if not V.graph.cpp_wrapper: + codecache_def.writeline("''')") + + codecache_str = codecache_def.getvalue() + # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does + # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. 
+ codecache_str = codecache_str.replace("#pragma CMT", "//") + wrapper.define_kernel(kernel_name, codecache_str, cuda=False) + # generate the code to call this + wrapper.generate_kernel_call( + kernel_name, call_args, cuda=False, arg_types=arg_types + ) + + +class CppWrapperKernelGroup(KernelGroup): + def __init__(self): + super().__init__() + self.args = CppWrapperKernelArgs() + + +class WorkSharing: + def __init__(self, code): + self.code = code + self.in_parallel = False + self.num_threads = None + self.stack = contextlib.ExitStack() + + def parallel(self, threads): + if self.in_parallel and threads != self.num_threads: + # wrong number of threads + self.close() + if not self.in_parallel: + self.num_threads = threads + self.in_parallel = True + if config.cpp.dynamic_threads: + self.code.writeline("#pragma omp parallel") + else: + self.code.writeline(f"#pragma omp parallel num_threads({threads})") + self.stack.enter_context(self.code.indent()) + + def single(self): + if self.in_parallel: + self.code.writeline("#pragma omp single") + return self.in_parallel + + def close(self): + self.stack.close() + self.in_parallel = False + + def __enter__(self): + self.stack.__enter__() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.stack.__exit__(exc_type, exc_val, exc_tb) + + +@dataclasses.dataclass +class LoopLevel: + var: Optional[sympy.Expr] = None + size: Optional[sympy.Expr] = None + offset: sympy.Expr = sympy.Integer(0) + steps: sympy.Expr = sympy.Integer(1) + parallel: int = 0 + simd_omp: bool = False + simd_vec: bool = False + collapsed: bool = False + reduction_var_map: Optional[Dict[str, str]] = None + parent: Optional["LoopLevel"] = None + # the next inner level of the loop, empty if it is inner-most + # contains >1 LoopLevel if the inner level of loop is split + inner: List["LoopLevel"] = dataclasses.field(default_factory=list) + # kernel assigned to this loop level, only valid when it is a leaf + kernel: Optional[CppKernel] = None + + def __post_init__(self): + # Regarding the C++/OpenMP backend, `codecache.pick_vec_isa()` to check + # vectorization ISA is a time-consuming and one-shot operation. It leads + # to taking a longer time to import `codegen.cpp` package because the + # `LoopLevel` of the package is decorated by `@dataclasses.dataclass` while + # the decorator will invoke `codecache.pick_vec_isa()` to initialize the + # `simd_nelements` of the `LoopLevel`. It might introduce additional compilation + # overhead to the Triton backend. Therefore, we moved the `simd_nelements` to + # `__post_init__` + picked_vec_isa: codecache.VecISA = codecache.pick_vec_isa() + self.simd_nelements: int = picked_vec_isa.nelements() if picked_vec_isa else 0 + + def get_kernels(self) -> List[CppKernel]: + """Get all kernel objects under this loop level""" + if self.kernel: + return [self.kernel] + kernels = [] + for loop in self.inner: + kernels += loop.get_kernels() + return kernels + + def set_kernel(self, kernel: CppKernel): + """ + Set the kernel under this loop level. No split is allowed under + this loop level. 
+ """ + if not self.inner: + self.kernel = kernel + loop: Optional[LoopLevel] = self + assert loop is not None + if loop.is_reduction(): + loop.reduction_var_map = kernel.reduction_var_map.copy() + loop = loop.parent + while loop is not None and loop.is_reduction(): + assert loop.reduction_var_map is not None + loop.reduction_var_map.update(kernel.reduction_var_map) + loop = loop.parent + return + assert len(self.inner) == 1 + self.inner[0].set_kernel(kernel) + + def get_loops_at(self, depth) -> List["LoopLevel"]: + if depth == 0: + return [self] + else: + loops = [] + for loop in self.inner: + loops += loop.get_loops_at(depth - 1) + return loops + + def is_reduction(self): + return bool(self.reduction_var_map) + + def split_with_tiling(self, depth, factor): + def clone_inner(): + inner = [] + if self.inner: + for loop in self.inner: + inner.append(loop.clone()) + return inner + + def do_split_with_tiling(): + sympy_factor = sympy.Integer(factor) + + offset = FloorDiv(self.size, sympy_factor) * sympy_factor + main_loop = LoopLevel(self.var, offset) + main_loop.steps = sympy_factor + main_loop.parallel = self.parallel + main_loop.collapsed = False + main_loop.reduction_var_map = self.reduction_var_map + main_loop.inner = clone_inner() + if main_loop.inner: + for loop in main_loop.inner: + loop.parent = main_loop + + tail_loop = LoopLevel(self.var, self.size) + tail_loop.offset = offset + tail_loop.parallel = self.parallel + tail_loop.collapsed = False + tail_loop.reduction_var_map = self.reduction_var_map + tail_loop.inner = clone_inner() + if tail_loop.inner: + for loop in tail_loop.inner: + loop.parent = tail_loop + + return main_loop, tail_loop + + if depth == 0: + main_loop, tail_loop = do_split_with_tiling() + parent = self.parent + if parent: + parent.inner = [main_loop, tail_loop] + main_loop.parent = parent + tail_loop.parent = parent + return main_loop, tail_loop + else: + assert len(self.inner) == 1 + return self.inner[0].split_with_tiling(depth - 1, factor) + + def clone(self): + loop = copy(self) + loop.inner = [] + if self.inner: + for inner_loop in self.inner: + inner_loop_clone = inner_loop.clone() + inner_loop_clone.parent = loop + loop.inner.append(inner_loop_clone) + loop.kernel = deepcopy(self.kernel) + return loop + + def lines(self): + offset_expr = cexpr_index(self.offset) + size_expr = cexpr_index(self.size) + if config.cpp.no_redundant_loops and offset_expr == size_expr: + return None + if self.reduction_var_map: + reduction = " " + " ".join( + f"reduction({RTYPE_TO_CPP[rtype]}:{var})" + for var, rtype in self.reduction_var_map.items() + ) + else: + reduction = "" + simd = ( + f"simd simdlen({self.simd_nelements}) " + if self.simd_omp and self.simd_nelements > 1 + else "" + ) + if self.parallel: + # TODO(jansel): look into chunk size and other schedules + line1 = f"#pragma omp for{reduction} " + if self.parallel > 1: + line1 += f" collapse({self.parallel})" + if self.simd_omp: + line1 = line1.replace(" for ", f" for {simd}") + elif self.simd_vec: + line1 = "" + elif self.simd_omp: + line1 = f"#pragma omp {simd}{reduction}" + elif not self.reduction_var_map and codecache.is_gcc(): + line1 = "#pragma GCC ivdep" + else: + line1 = "" + offset_str = f"{INDEX_TYPE} {self.var}={offset_expr}" + size_str = f"{self.var}<{size_expr}" + steps_str = f"{self.var}+={cexpr_index(self.steps)}" + line2 = f"for({offset_str}; {size_str}; {steps_str})" + if self.collapsed or not line1: + return [line2] + return [line1, line2] + + +@dataclasses.dataclass +class LoopNestWithSplit: + """ + 
A loop-nest like structure but with some loop level split along + the loop range into the main tiling loop and the tail. It is built + with the `build` method as a loop nest and then split with + `split_with_tiling` at some depth. + + A typical case is for vectorization where we typically split at the inner-most + loop level. A more complicated case is 2D tiling where we split at + both inner-most and outer levels. + """ + + root: Optional[List[LoopLevel]] = None + kernel: Optional[CppKernel] = None + + @staticmethod + def build(kernel: CppKernel): + """Build a LoopNest with the given `kernel` as the leaf""" + itervars = kernel.itervars + ranges = kernel.ranges + reduction_depth = kernel.reduction_depth + assert reduction_depth is not None + + root: List[LoopLevel] = [] + levels: List[LoopLevel] = root + loop: Optional[LoopLevel] = None + for loop_idx, (var, size) in enumerate(zip(itervars, ranges)): + loop = LoopLevel(var, size, parent=loop) + if loop_idx >= reduction_depth: + loop.reduction_var_map = kernel.reduction_var_map.copy() + levels.append(loop) + levels = loop.inner + loop_nest = LoopNestWithSplit(root) + if loop: + loop.kernel = kernel + else: + loop_nest.kernel = kernel + return loop_nest + + def __bool__(self): + return bool(self.root) + + def get_loops_at(self, depth) -> List[LoopLevel]: + """Get all the loop levels at the given `depth` (most outer loop has depth 0)""" + loops: List[LoopLevel] = [] + assert self.root is not None + for loop in self.root: + loops += loop.get_loops_at(depth) + return loops + + @cache_on_self + def max_parallel_depth(self): + """ + Maximal allowed depth for parallelism: + 1) Levels without splitting and + 2) All reduction or non-reduction levels + When the loop is split at the top level, the max depth is 1. + """ + max_depth = 0 + assert self.root is not None + loops = self.root + if len(loops) > 1: + return 1 + is_reduction = loops[0].is_reduction() if loops else False + while len(loops) == 1 and loops[0].is_reduction() == is_reduction: + max_depth += 1 + loops = loops[0].inner + return max_depth + + def is_reduction_only(self): + """ + Whether all the loops are for reduction. Reduction loops + are always the inner most ones. + """ + return ( + self.root is not None and len(self.root) > 0 and self.root[0].is_reduction() + ) + + def mark_parallel(self, par_depth): + assert ( + par_depth <= self.max_parallel_depth() + ), "Parallel depth cannot exceed the maximal allowed parallel depth" + assert self.root is not None + loops = self.root + for loop in loops: + loop.parallel = par_depth + for i in range(1, par_depth): + loops = loops[0].inner + loops[0].collapsed = True + + def split_with_tiling(self, depth, factor): + """ + Split the loop into main and tail loops at given `depth` so that the range + of the main loop has range `floor_div(range, factor) * factor` and + the tail loop handles the remainder. The main loop is tiled + according to the `factor`. 
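+ For example, a loop of size 10 split with factor 4 yields a main loop over [0, 8)
+ stepping by 4 and a tail loop over [8, 10).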
+ """ + loops = self.get_loops_at(depth) + assert len(loops) == 1 + split_loops = loops[0].split_with_tiling(0, factor) + if depth == 0: + self.root = split_loops + return split_loops diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h new file mode 100644 index 0000000000000000000000000000000000000000..d7773672c06c23bcdfbbf669e3fa6d6d976059b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h @@ -0,0 +1,595 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_ZVECTOR) +#define INDUCTOR_USE_VECTOR_TYPES() 1 +#else +#define INDUCTOR_USE_VECTOR_TYPES() 0 +#endif + +#if INDUCTOR_USE_VECTOR_TYPES() +#include +#include +#include +#endif + +typedef at::Half half; +typedef at::BFloat16 bfloat16; + +typedef at::Float8_e4m3fn float8_e4m3fn; +typedef at::Float8_e5m2 float8_e5m2; + +template +struct Welford { + T mean = T(0); + T m2 = T(0); + T weight = T(0); +}; + + +template +struct IsVecType: std::false_type {}; + +#if INDUCTOR_USE_VECTOR_TYPES() +template +struct IsVecType>: std::true_type {}; +#endif + +template +Welford welford_combine(const Welford &a, const Welford &b) { + if constexpr (!IsVecType::value) { + if (a.weight == 0) { + return b; + } + if (b.weight == 0) { + return a; + } + } + auto delta = b.mean - a.mean; + auto new_weight = a.weight + b.weight; + auto wb_over_w = b.weight / new_weight; + if constexpr (IsVecType::value) { + // Guard against division by zero + wb_over_w = T::blendv(wb_over_w, T(0), new_weight == T(0)); + } + auto result = Welford{ + a.mean + delta * wb_over_w, + a.m2 + b.m2 + delta * delta * a.weight * wb_over_w, + new_weight + }; + return result; +} + +template +Welford welford_combine(const Welford &acc, T data) { + // Add a single data point + auto delta = data - acc.mean; + auto new_weight = acc.weight + T(1); + auto new_mean = acc.mean + delta / new_weight; + auto new_delta = data - new_mean; + auto result = Welford{ + new_mean, + acc.m2 + delta * new_delta, + new_weight + }; + return result; +} + +// Refer to https://github.com/pytorch/pytorch/blob/b5b36cf0c4e1958f1ff25120f5d4beeef3288187/ +// aten/src/ATen/native/SharedReduceOps.h#L419-L445 +template +inline bool greater_or_nan(scalar_t a, scalar_t b, int64_t idx_a, int64_t idx_b) { + // If (a == b), then choose the one with lower idx, else max(a, b) + if (at::_isnan(a)) { + if (at::_isnan(b)) { + return idx_a < idx_b; + } + return true; + } + return (a == b) ? idx_a < idx_b : (a > b); +} + +template +inline bool less_or_nan(scalar_t a, scalar_t b, int64_t idx_a, int64_t idx_b) { + // If (a == b), then choose the one with lower idx, else min(a, b) + if (at::_isnan(a)) { + if (at::_isnan(b)) { + return idx_a < idx_b; + } + return true; + } + return (a == b) ? 
idx_a < idx_b : (a < b); +} + +#if INDUCTOR_USE_VECTOR_TYPES() +template +inline at::vec::Vectorized vec_shuffle_down(at::vec::Vectorized x, size_t n) { + using Vec = at::vec::Vectorized; + alignas(alignof(Vec)) scalar_t array[Vec::size()]; + x.store(array); + for (size_t i = 0; i + n < Vec::size(); i += 2 * n) { + array[i] = array[i + n]; + } + return Vec::loadu(array); +} + +#ifdef CPU_CAPABILITY_AVX2 +inline at::vec::Vectorized vec_shuffle_down(at::vec::Vectorized x, size_t n) { + using vec_t = at::vec::Vectorized; +#define SHUFFLE_MASK(z, y, x, w) ((z << 6) | (y << 4) | (x << 2) | w) + switch (n) { + case 1: + return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(1, 1, 3, 3))); + case 2: + return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(2, 2, 2, 2))); + case 4: + return vec_t(_mm256_permute2f128_ps(x, x, SHUFFLE_MASK(1, 1, 1, 1))); + } + TORCH_CHECK(false, "Unhandled vec_shuffle_down value ", n); +} +#endif + +template +Welford welford_vec_reduce_all(Welford> acc) { + using Vec = at::vec::Vectorized; + for (size_t n = 1; n < Vec::size(); n *= 2) { + auto shuffled = Welford{ + vec_shuffle_down(acc.mean, n), + vec_shuffle_down(acc.m2, n), + vec_shuffle_down(acc.weight, n) + }; + acc = welford_combine(acc, shuffled); + } + + Welford result; + alignas(alignof(Vec)) scalar_t array[Vec::size()]; + acc.mean.store(array); + result.mean = array[0]; + + acc.m2.store(array); + result.m2 = array[0]; + + acc.weight.store(array); + result.weight = array[0]; + + return result; +} +#endif + + +template inline typename std::common_type::type mod(T a, U b) { return a % b; } +template <> inline float mod(float a, float b) { return std::fmod(a, b); } +template <> inline double mod(double a, double b) { return std::fmod(a, b); } + +template +inline scalar_t max_propagate_nan(scalar_t a, scalar_t b) { + if (at::_isnan(a)) { + return a; + } + return a > b ? a : b; +} + +template +inline scalar_t min_propagate_nan(scalar_t a, scalar_t b) { + if (at::_isnan(a)) { + return a; + } + return a < b ? 
a : b; +} + +constexpr float uint32_to_uniform_float(uint32_t value) { + // maximum value such that `MAX_INT * scale < 1.0` (with float rounding) + constexpr float scale = 4.6566127342e-10; + return static_cast(value & 0x7FFFFFFF) * scale; +} + +float normalized_rand_cpu(uint32_t seed, uint32_t offset) { + return uint32_to_uniform_float(at::Philox4_32(seed, 0, offset)()); +} + +float randn_cpu(uint32_t seed, uint32_t offset) { + at::Philox4_32 engine(seed, 0, offset); + return engine.randn(10); +} + +int64_t randint64_cpu(uint32_t seed, uint32_t offset, int64_t low, int64_t high) { + auto gen = at::Philox4_32(seed, 0, offset); + uint64_t r0 = gen(); + uint64_t r1 = gen(); + uint64_t result = r0 | (r1 << 32); + return static_cast(result % (high - low)) + low; +} + +template struct AsIntegerType { typedef T type; }; +template <> struct AsIntegerType { typedef uint32_t type; }; +template <> struct AsIntegerType { typedef uint64_t type; }; +template <> struct AsIntegerType { typedef uint16_t type; }; + +template +typename std::enable_if::value, T>::type +inline fetch_value(volatile T *addr) { + return *addr; +} + +template +typename std::enable_if::value, T>::type +inline fetch_value(volatile T *addr) { + return T(addr->x, T::from_bits()); +} + +template +typename std::enable_if::value>::type +atomic_add(volatile T *addr, T offset) { + typedef typename AsIntegerType::type alt_type; + + static_assert(sizeof(std::atomic) == sizeof(T), + "std::atomic issue"); + + alt_type expected; + + alt_type desired; + + std::atomic *atomic_addr = (std::atomic *)addr; + do { + T val = fetch_value(addr); + reinterpret_cast(&expected)[0] = val; + reinterpret_cast(&desired)[0] = val + offset; + } while (!atomic_addr->compare_exchange_weak(expected, desired, + std::memory_order_relaxed)); +} + +// Since C++20 float is supported by fetch_add, but the performance may not +// better than compare_exchange_weak, which can be checked by microbenchmark +// inductor_cpu_atomic.py +template +typename std::enable_if::value>::type +atomic_add(volatile T *addr, T offset) { + static_assert(sizeof(std::atomic) == sizeof(T), + "std::atomic issue"); + std::atomic *atomic_addr = (std::atomic *)addr; + atomic_addr->fetch_add(offset, std::memory_order_relaxed); +} + +// This function is used to convert bool or uint8 to float mask for +// vectorization. The caller needs to make sure the src represents TRUE/FALSE +// correctly. +template +inline float flag_to_float_scalar(T src) { + float ret; + *(uint32_t*)(&ret) = src ? 
0xFFFFFFFF : 0; + return ret; +} + +#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_ZVECTOR) + +inline at::vec::Vectorized masked_load(const float* src, at::vec::Vectorized mask) { +# if defined(CPU_CAPABILITY_AVX512) + at::vec::Vectorized zero_vec(0); + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ); + return _mm512_mask_loadu_ps(zero_vec, mmask, src); +# elif defined(CPU_CAPABILITY_AVX2) + auto all_ones = _mm256_set1_epi32(0xFFFFFFFF); + auto mmask = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones); + return _mm256_maskload_ps(src, mmask); +# elif defined(CPU_CAPABILITY_ZVECTOR) + auto result = at::vec::Vectorized::loadu(src); + return (result & mask); +# else +# error Unsupported vectorization CPU capability +# endif +} + +template +typename std::enable_if::value || std::is_same::value, at::vec::Vectorized>::type +inline masked_load(const T* src, at::vec::Vectorized mask) { +# if defined(CPU_CAPABILITY_AVX512) + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ); + auto zero = _mm256_set1_epi16(0); + auto temp = _mm256_mask_loadu_epi16(zero, mmask, src); + return _mm512_inserti32x8(_mm512_castsi256_si512(temp), zero, 1); +# elif defined(CPU_CAPABILITY_AVX2) + auto all_ones = _mm256_set1_epi32(0xFFFFFFFF); + auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones); + __at_align__ uint32_t mmask[8]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec); + __at_align__ uint16_t result[16]; + for (auto i = 0; i < 8; i++) { + result[i] = mmask[i] == 0xFFFFFFFF ? src[i].x: uint16_t(0); + } + return at::vec::Vectorized::loadu(result); +# elif defined(CPU_CAPABILITY_ZVECTOR) + auto result = at::vec::Vectorized::loadu(src, 8); + uint32_t maskdata[8] = { 0 }; + uint16_t maskdata_dest[16] = { 0 }; + mask.store(maskdata); + for (auto i = 0; i < 8; i++) { + maskdata_dest[i] = (maskdata[i] == 0xFFFFFFFF) ? 0xFFFF: 0; + } + auto maskvector = at::vec::Vectorized::loadu(maskdata_dest); + return (result & maskvector); +# else +# error Unsupported vectorization CPU capability +# endif +} + +template +typename std::enable_if::value || std::is_same::value, at::vec::Vectorized>::type +inline masked_load(const T* src, at::vec::Vectorized mask) { +# if defined(CPU_CAPABILITY_AVX512) + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ); + auto zero = _mm_set1_epi8(0); + auto temp = _mm_mask_loadu_epi8(zero, mmask, src); + return _mm512_inserti64x2(_mm512_set1_epi32(0), temp, 0); +# elif defined(CPU_CAPABILITY_AVX2) + auto all_ones = _mm256_set1_epi32(0xFFFFFFFF); + auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones); + __at_align__ uint32_t mmask[8]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec); + __at_align__ T result[32]; + for (auto i = 0; i < 8; i++) { + result[i] = mmask[i] == 0xFFFFFFFF ? src[i]: T(0); + } + return at::vec::Vectorized::loadu(result); +# elif defined(CPU_CAPABILITY_ZVECTOR) + auto result = at::vec::Vectorized::loadu(src, 8); + uint32_t maskdata[8]; + T maskdata_dest[32] = { 0 }; + mask.store(maskdata); + for (auto i = 0; i < 8; i++) { + maskdata_dest[i] = (maskdata[i] == 0xFFFFFFFF) ? 
0xFF: 0; + } + auto maskvector = at::vec::Vectorized::loadu(maskdata_dest); + return (result & maskvector); +# else +# error Unsupported vectorization CPU capability +# endif +} + +template +inline at::vec::Vectorized flag_to_float_vec(const T* src) { + __at_align__ float dst_tmp[at::vec::Vectorized::size()]; + #pragma unroll + for (int64_t i = 0; i < at::vec::Vectorized::size(); i++) { + dst_tmp[i] = flag_to_float_scalar(src[i]); + } + return at::vec::Vectorized::loadu(dst_tmp); +} + +template +inline at::vec::Vectorized cvt_lowp_fp_to_fp32( + at::vec::Vectorized src) { + at::vec::Vectorized res_vec1(0); + at::vec::Vectorized res_vec2(0); + std::tie(res_vec1, res_vec2) = at::vec::convert_to_float(src); + return res_vec1; +} + +template +inline at::vec::Vectorized cvt_fp32_to_lowp_fp( + at::vec::Vectorized src) { + return at::vec::convert_from_float(src, src); +} + +inline at::vec::Vectorized mask_convert_to_float(at::vec::Vectorized src) { + auto zeros = at::vec::Vectorized(0); + auto ones = at::vec::Vectorized(1); + return at::vec::Vectorized::blendv(zeros, ones, src); +} + +template +inline +typename std::enable_if::value || std::is_same::value, at::vec::Vectorized>::type +mask_convert_to_lowp(at::vec::Vectorized src) { + auto fp_vec = mask_convert_to_float(src); + return cvt_fp32_to_lowp_fp(fp_vec); +} + +template +inline at::vec::Vectorized vec_convert_to_mask(at::vec::Vectorized src) { + assert( + at::vec::Vectorized::size() == at::vec::Vectorized::size()); + at::vec::Vectorized res_vec(0); + __at_align__ float dst_tmp[at::vec::Vectorized::size()]; + __at_align__ SRC src_tmp[at::vec::Vectorized::size()]; + src.store(src_tmp); + +#pragma unroll + for (int i = 0; i < at::vec::Vectorized::size(); i++) { + *(uint32_t*)(dst_tmp + i) = src_tmp[i] ? 0xFFFFFFFF : 0; + } + + return res_vec.loadu(dst_tmp); +} + +template +inline at::vec::Vectorized to_float_mask(at::vec::Vectorized src) { + return vec_convert_to_mask(src); +} + +#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) +template <> +inline at::vec::Vectorized to_float_mask(at::vec::Vectorized src) { +#if defined(CPU_CAPABILITY_AVX2) + return at::vec::Vectorized(_mm256_castsi256_ps(src)); +#else + return at::vec::Vectorized(_mm512_castsi512_ps(src)); +#endif +} +#endif + +template <> +inline at::vec::Vectorized to_float_mask(at::vec::Vectorized src) { + return src; +} + +inline at::vec::Vectorized to_float_mask(int src) { + union { + float fmask; + uint32_t imask; + } mask; + mask.imask = src ? 
0xFFFFFFFF : 0; + return at::vec::Vectorized(mask.fmask); +} + +inline bool all_zero(at::vec::Vectorized src) { +# if defined(CPU_CAPABILITY_AVX512) + auto src_int = _mm512_castps_si512(src); + __mmask16 mask = _mm512_test_epi32_mask(src_int, src_int); + return mask == 0; +# elif defined(CPU_CAPABILITY_AVX2) + return _mm256_testz_ps(src, src); +# else + __at_align__ int mask[at::vec::Vectorized::size()]; + src.store(mask); + for (int i = 0; i < at::vec::Vectorized::size(); i++) { + if (mask[i] != 0) { + return false; + } + } + return true; +# endif +} + +inline bool vector_lane_mask_check(at::vec::Vectorized src, int lane) { +# if defined(CPU_CAPABILITY_AVX512) + return _mm512_movepi32_mask(_mm512_castps_si512(src)) & (1 << lane); +# elif defined(CPU_CAPABILITY_AVX2) + return _mm256_movemask_ps(src) & (1 << lane); +# else + __at_align__ int mask[at::vec::Vectorized::size()]; + src.store(mask); + return mask[lane] != 0; +# endif +} + +inline at::vec::Vectorized cvt_int64_to_fp32(at::vec::VectorizedN src) { +# if defined(CPU_CAPABILITY_AVX512) + auto low = _mm512_cvtepi64_ps(src[0]); + auto high = _mm512_cvtepi64_ps(src[1]); + return _mm512_insertf32x8(_mm512_castps256_ps512(low), high, 1); +# elif defined(CPU_CAPABILITY_AVX2) + auto low_double = at::vec::convert_to_fp_of_same_size(src[0]); + auto low = _mm256_cvtpd_ps(low_double); + auto high_double = at::vec::convert_to_fp_of_same_size(src[1]); + auto high = _mm256_cvtpd_ps(high_double); + return _mm256_insertf128_ps(_mm256_castps128_ps256(low), high, 1); +# else + constexpr int float_vec_size = at::vec::Vectorized::size(); + constexpr int int64_vec_size = at::vec::Vectorized::size(); + __at_align__ float result[float_vec_size]; + __at_align__ int64_t src_buf[int64_vec_size]; + for (int i = 0; i < 2; i++) { + src[i].store(src_buf + i * int64_vec_size); + for (int j = 0; j < int64_vec_size; j++) { + result[i * int64_vec_size + j] = static_cast(src_buf[i * int64_vec_size + j]); + } + } + return at::vec::Vectorized::loadu(result); +# endif +} + +inline at::vec::VectorizedN cvt_fp32_to_int64(at::vec::Vectorized src) { + at::vec::VectorizedN result; +# if defined(CPU_CAPABILITY_AVX512) + result[0] = _mm512_cvt_roundps_epi64(_mm512_castps512_ps256(src), _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); + result[1] = _mm512_cvt_roundps_epi64(_mm512_extractf32x8_ps(src, 1), _MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC); +# elif defined(CPU_CAPABILITY_AVX2) + auto int32_vec = at::vec::convert_to_int_of_same_size(src); + result[0] = _mm256_cvtepi32_epi64(_mm256_castsi256_si128(int32_vec)); + result[1] = _mm256_cvtepi32_epi64(_mm256_extracti128_si256(int32_vec, 1)); +# else + constexpr int float_vec_size = at::vec::Vectorized::size(); + constexpr int int64_vec_size = at::vec::Vectorized::size(); + __at_align__ float src_buf[float_vec_size]; + __at_align__ int64_t result_buf[int64_vec_size]; + src.store(src_buf); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < int64_vec_size; j++) { + result_buf[j] = static_cast(src_buf[i * int64_vec_size + j]); + } + result[i] = at::vec::Vectorized::loadu(result_buf); + } +# endif + return result; +} + +inline at::vec::Vectorized cvt_int64_to_int32(at::vec::VectorizedN src) { +# if defined(CPU_CAPABILITY_AVX512) + auto low = _mm512_cvtepi64_epi32(src[0]); + auto high = _mm512_cvtepi64_epi32(src[1]); + return _mm512_inserti32x8(_mm512_castsi256_si512(low), high, 1); +# elif defined(CPU_CAPABILITY_AVX2) + auto low = _mm256_shuffle_epi32(src[0], _MM_SHUFFLE(2, 0, 2, 0)); + auto high = _mm256_shuffle_epi32(src[1], _MM_SHUFFLE(2, 0, 
2, 0)); + auto low_perm = _mm256_permute4x64_epi64(low, _MM_SHUFFLE(3, 1, 2, 0)); + auto high_perm = _mm256_permute4x64_epi64(high, _MM_SHUFFLE(3, 1, 2, 0)); + return _mm256_blend_epi32(low_perm, high_perm, 0xF0); +# else + constexpr int int32_vec_size = at::vec::Vectorized::size(); + constexpr int int64_vec_size = at::vec::Vectorized::size(); + __at_align__ int32_t result[int32_vec_size]; + __at_align__ int64_t src_buf[int64_vec_size]; + for (int i = 0; i < 2; i++) { + src[i].store(src_buf + i * int64_vec_size); + for (int j = 0; j < int64_vec_size; j++) { + result[i * int64_vec_size + j] = static_cast(src_buf[i * int64_vec_size + j]); + } + } + return at::vec::Vectorized::loadu(result); +# endif +} + +inline at::vec::VectorizedN cvt_int32_to_int64(at::vec::Vectorized src) { + at::vec::VectorizedN result; +# if defined(CPU_CAPABILITY_AVX512) + result[0] = _mm512_cvtepi32_epi64(_mm512_castsi512_si256(src)); + result[1] = _mm512_cvtepi32_epi64(_mm512_extracti32x8_epi32(src, 1)); +# elif defined(CPU_CAPABILITY_AVX2) + result[0] = _mm256_cvtepi32_epi64(_mm256_castsi256_si128(src)); + result[1] = _mm256_cvtepi32_epi64(_mm256_extracti128_si256(src, 1)); +#else + constexpr int int32_vec_size = at::vec::Vectorized::size(); + constexpr int int64_vec_size = at::vec::Vectorized::size(); + __at_align__ int32_t src_buf[int32_vec_size]; + __at_align__ int64_t result_buf[int64_vec_size]; + src.store(src_buf); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < int64_vec_size; j++) { + result_buf[j] = static_cast(src_buf[i * int64_vec_size + j]); + } + result[i] = at::vec::Vectorized::loadu(result_buf); + } +# endif + return result; +} + +inline at::vec::VectorizedN mask_convert_to_int64(at::vec::Vectorized src) { + return cvt_fp32_to_int64(mask_convert_to_float(src)); +} + +inline at::vec::Vectorized to_float_mask(at::vec::VectorizedN src) { + return to_float_mask(cvt_int64_to_int32(src)); +} + +#endif diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_wrapper_cpu.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_wrapper_cpu.py new file mode 100644 index 0000000000000000000000000000000000000000..7990af0c8668b83afe4c9b1762b15617f100d723 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_wrapper_cpu.py @@ -0,0 +1,1851 @@ +import functools +import os +import sys +from itertools import count +from typing import List, Optional, Tuple + +import sympy +from sympy import Expr + +import torch +import torch._ops +from .. 
import config, ir + +from ..codecache import CudaKernelParamCache +from ..utils import cache_on_self, sympy_product +from ..virtualized import V +from .common import IndentedBuffer +from .wrapper import EnterSubgraphLine, ExitSubgraphLine, pexpr, WrapperCodeGen + + +class CppWrapperCpu(WrapperCodeGen): + """ + Generates cpp wrapper for running on CPU and calls cpp kernels + """ + + def __init__(self): + if not hasattr(self, "device"): + self.device = "cpu" + super().__init__() + self.declare = "auto " + self.declare_maybe_reference = "decltype(auto) " + self.ending = ";" + self.open_bracket = "{" + self.closed_bracket = "}" + self.comment = "//" + self.namespace = "at::" + self.none_str = "nullptr" if config.abi_compatible else "at::Tensor()" + self.extern_call_ops = set() + self.size = "sizes()" + self.stride = "strides()" + self.cuda = False + self.supports_intermediate_hooks = False + self.outputs_need_copy = set() + self.kernel_callsite_id = count() + self.int_array_id = count() # for int array local variable declarations + self.declared_int_array_vars = set() + self.tmp_tensor_id = count() # for tmp tensor local variable declarations + self.arg_var_id = count() + self.used_cached_devices = set() + self.used_cached_dtypes = set() + self.cached_output_id = count() + self.scalar_to_tensor_id = count() + + from .cpp import cexpr, CppPrinter + + self.expr_printer = cexpr + + # CppPrinter sometimes calls at::native functions which causes problems in + # the ABI-compatible mode. Currently we are hitting this problem when codegen + # Grid computation expressions, but we my need to fix other size computation + # as well. + class GridExprCppPrinter(CppPrinter): + def _print_FloorDiv(self, expr): + x, div = expr.args + x = self.paren(self.doprint(x)) + div = self.paren(self.doprint(div)) + assert expr.is_integer, "Expect integers in GridExprPrinter" + return f"({x}/{div})" + + self.grid_expr_printer = GridExprCppPrinter().doprint + + def generate_kernel_call( + self, + name, + call_args, + grid=None, + device_index=None, + cuda=True, + triton=True, + arg_types=None, + grid_fn: str = "grid", + triton_meta=None, + ): + """ + Generates kernel call code. + + cuda: Defines whether the backend is GPU. Otherwise the backend is CPU. + + triton: Defines whether the GPU backend uses Triton for codegen. + Otherwise it uses the CUDA language for codegen. + Only valid when cuda == True. + """ + if cuda: + return super().generate_kernel_call( + name, + call_args, + grid, + device_index, + cuda, + triton, + arg_types, + grid_fn, + ) + else: + if config.abi_compatible: + assert arg_types is not None and len(call_args) == len( + arg_types + ), "Mismatch call_args and arg_types in generate_kernel_call" + new_args = [] + for idx, arg in enumerate(call_args): + if "*" in arg_types[idx]: + var_name = f"var_{next(self.arg_var_id)}" + self.writeline( + f"auto* {var_name} = get_data_ptr_wrapper({arg});" + ) + new_args.append(f"({arg_types[idx]})({var_name})") + else: + # arg is a scalar + new_args.append(arg) + self.writeline(self.wrap_kernel_call(name, new_args)) + else: + self.writeline(self.wrap_kernel_call(name, call_args)) + + def write_constant(self, name, hashed): + # include a hash so our code cache gives different constants different files + self.header.writeline(f"// {name} {hashed}") + + def write_header(self): + if V.graph.is_const_graph: + # We do not write header for constant graph, it will be written by main module. 
+ return + + if V.graph.aot_mode: + for header_cpp_file in ("interface.cpp", "implementation.cpp"): + with open( + os.path.join( + os.path.dirname(__file__), "aoti_runtime", header_cpp_file + ) + ) as f: + self.header.splice(f.read()) + else: + self.header.splice( + """ + import torch + from torch._inductor.codecache import CppWrapperCodeCache + + cpp_wrapper_src = ( + ''' + """ + ) + + if config.abi_compatible: + if config.c_shim_version == "1": + self.header.splice("#include ") + else: + self.header.splice( + f"#include " + ) + self.header.splice( + """ + #include + #include + #include + """ + ) + if V.graph.aot_mode: + self.header.splice( + """ + #include + """ + ) + else: + self.header.splice( + """ + #include + #include + #include + #include + #include + #include + #include + #include + + #define reinterpret_tensor torch::inductor::_reinterpret_tensor + #define alloc_from_pool torch::inductor::_alloc_from_pool + """ + ) + + self.header.splice("#include ") + + if not V.graph.aot_mode: + self.header.splice( + """ + #include + + using namespace torch::aot_inductor; + """ + ) + + from .memory_planning import ALIGN_BYTES + + # Round up to the nearest multiple of ALIGN_BYTES + # ALIGN_BYTES must be a power of 2 + self.header.splice( + f""" + [[maybe_unused]] static int64_t align(int64_t nbytes) {{ + return (nbytes + {ALIGN_BYTES} - 1) & -{ALIGN_BYTES}; + }} + """ + ) + + def mark_output_type(self): + # mark output type to unwrap tensor back to python scalar + from ..ir import ShapeAsConstantBuffer + + output_is_tensor = dict() + for idx, x in enumerate(V.graph.graph_outputs): + if isinstance(x, ShapeAsConstantBuffer): + output_is_tensor[idx] = False + else: + output_is_tensor[idx] = True + + self.output_is_tensor = output_is_tensor + + def write_prefix(self): + if V.graph.is_const_graph: + # We do not write prefix for constant graph, it will be written by main module. 
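
The align() helper spliced into the header above rounds a byte count up to a multiple of ALIGN_BYTES with a single mask, which only works because ALIGN_BYTES is a power of two. Below is a small self-contained check of that identity in Python; ALIGN_BYTES = 64 is just an assumed value for the example, not necessarily what memory_planning defines.

ALIGN_BYTES = 64  # assumed power of two for the example

def align(nbytes: int) -> int:
    # (n + A - 1) & -A rounds up, then clears the low log2(A) bits
    return (nbytes + ALIGN_BYTES - 1) & -ALIGN_BYTES

assert align(0) == 0 and align(1) == 64 and align(64) == 64 and align(65) == 128
# agrees with the plain ceil-division formulation
assert all(align(n) == -(-n // ALIGN_BYTES) * ALIGN_BYTES for n in range(1024))
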
+ return + + if V.graph.aot_mode: + self.prefix.writeline("namespace torch {") + self.prefix.writeline("namespace aot_inductor {") + + def write_input_output_info( + self, + info_kind: str, + idx: int, + name: str, + ): + self.prefix.writeline(f"""{info_kind}[{idx}].name = "{name}";""") + + @staticmethod + def get_input_cpp_type(input): + assert config.use_minimal_arrayref_interface + from .cpp import DTYPE_TO_CPP + + if isinstance(input, sympy.Expr): + from ..graph import may_get_constant_buffer_dtype + + dtype = may_get_constant_buffer_dtype(input) + assert dtype is not None, f"Failed to get the dtype of sympy.Expr: {input}" + return DTYPE_TO_CPP[dtype] + return f"ArrayRefTensor<{DTYPE_TO_CPP[input.get_dtype()]}>" + + def write_wrapper_decl(self): + inputs_len = len(V.graph.graph_inputs.keys()) + if V.graph.aot_mode: + if config.use_minimal_arrayref_interface and not V.graph.is_const_graph: + from .cpp import DTYPE_TO_CPP + + input_cpp_types = ", ".join( + f"{CppWrapperCpu.get_input_cpp_type(x)}" + for x in V.graph.graph_inputs.values() + ) + + output_arrayref_types = ", ".join( + f"ArrayRefTensor<{DTYPE_TO_CPP[x.get_dtype()]}>" + for x in V.graph.graph_outputs + ) + + self.prefix.splice( + f""" + using AOTInductorModelInputs = std::tuple<{input_cpp_types}>; + using AOTInductorModelOutputs = std::tuple<{output_arrayref_types}>; + """ + ) + + if V.graph.const_module: + self.header.splice(V.graph.const_module.wrapper_code.header) + self.prefix.splice(V.graph.const_code) + + if V.graph.is_const_graph: + self.prefix.splice( + """ + void AOTInductorModel::_const_run_impl( + std::vector& output_handles, + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor + ) { + """ + ) + else: + if not config.aot_inductor.use_runtime_constant_folding: + # If we do not split the constant graph, we'll just create + # an empty implementation when wrapping the main module. + self.prefix.splice( + """ + void AOTInductorModel::_const_run_impl( + std::vector& output_handles, + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor + ) {} + + """ + ) + + run_impl_proto = """ + void AOTInductorModel::run_impl( + AtenTensorHandle* + input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + AtenTensorHandle* + output_handles, // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor + ) { + """ + if config.use_minimal_arrayref_interface: + self.prefix.splice( + """ + template <> + AOTInductorModelOutputs AOTInductorModel::run_impl_minimal_arrayref_interface< + AOTInductorModelInputs, AOTInductorModelOutputs>( + const AOTInductorModelInputs& inputs, + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor + ) { + """ + ) + self.suffix.splice(run_impl_proto) + self.suffix.splice( + """ + AOTInductorModelInputs inputs; + convert_handles_to_inputs(input_handles, inputs); + auto outputs = run_impl_minimal_arrayref_interface( + inputs, stream, proxy_executor); + // NOTE: outputs is full of ArrayRef to thread_local storage. If in the future we need this + // interface to perform well for a DSO using the minimal arrayref interface, all we need + // to do is provide ThreadLocalCachedTensor for each one! 
+ convert_outputs_to_handles(outputs, output_handles); + } + """ + ) + + self.suffix.splice( + """ + extern "C" AOTIRuntimeError AOTInductorModelRunMinimalArrayrefInterface( + AOTInductorModelHandle model_handle, + const AOTInductorModelInputs& inputs, + AOTInductorModelOutputs& outputs) { + auto model = reinterpret_cast(model_handle); + CONVERT_EXCEPTION_TO_ERROR_CODE({ + outputs = model->run_impl_minimal_arrayref_interface( + inputs, + (torch::aot_inductor::DeviceStreamType)nullptr, + nullptr); + }) + } + """ + ) + else: + self.prefix.splice(run_impl_proto) + else: + self.prefix.splice( + """ + void inductor_entry_impl( + AtenTensorHandle* + input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + AtenTensorHandle* + output_handles // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed) + ) { + """ + ) + with self.prefix.indent(): + # assign inputs and outputs in both cases so the later codegen can be simplified + if not config.use_minimal_arrayref_interface: + if not V.graph.is_const_graph: + if V.graph.aot_mode: + num_args = len(V.graph.graph_inputs) + else: + # Weights are promoted in the JIT mode + num_args = len(V.graph.graph_inputs) + len(V.graph.constants) + self.prefix.splice( + """ + pybind11::gil_scoped_release release; + """ + ) + + if config.abi_compatible: + self.prefix.splice( + f""" + auto inputs = steal_from_raw_handles_to_raii_handles(input_handles, {num_args}); + """ + ) + else: + # This looks dumb, but can avoid creating two versions of code in the AOTInductor runtime. + self.prefix.splice( + f""" + auto inputs = alloc_tensors_by_stealing_from_handles(input_handles, {num_args}); + """ + ) + + if inputs_len != 0: + for idx, input_key in enumerate(V.graph.graph_inputs.keys()): + if config.use_minimal_arrayref_interface: + self.prefix.writeline( + f"auto {input_key} = std::get<{idx}>(inputs);" + ) + continue + # unwrap input tensor back to scalar + if isinstance(V.graph.graph_inputs[input_key], sympy.Expr): + from ..graph import may_get_constant_buffer_dtype + from .cpp import DTYPE_TO_CPP + + dtype = may_get_constant_buffer_dtype( + V.graph.graph_inputs[input_key] + ) + assert ( + dtype is not None + ), "Fails to get the dtype of the sympy.Expr" + cpp_dtype = DTYPE_TO_CPP[dtype] + if config.abi_compatible: + self.prefix.writeline(f"{cpp_dtype} {input_key};") + dtype_str = str(dtype).split(".")[-1] + self.prefix.writeline( + f"aoti_torch_item_{dtype_str}(inputs[{idx}], &{input_key});" + ) + else: + self.prefix.writeline( + f"{cpp_dtype} {input_key} = inputs[{idx}].item<{cpp_dtype}>();" + ) + else: + self.prefix.writeline( + f"auto {input_key} = std::move(inputs[{idx}]);" + ) + + assert all( + isinstance(v, torch.Tensor) for v in list(V.graph.constants.values()) + ), "Expect all constants to be Tensor" + for idx, constants_key in enumerate(V.graph.constants.keys()): + if V.graph.aot_mode: + # Weights are stored in constants_ and owned by RAIIAtenTensorHandle there. + # Don't call std::move here because it will cause constants_ to lose the ownership. 
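
In the ABI-compatible path above, a symbolic (sympy.Expr) graph input is unwrapped into a plain C++ scalar by calling a dtype-suffixed aoti_torch_item_<dtype> shim. The sketch below only shows how that suffix and call string are derived from a torch dtype; the input_key and index values are made up for the example.

import torch

def item_call_for(input_key: str, idx: int, dtype: torch.dtype) -> str:
    # str(torch.int64) == "torch.int64", so the suffix is the part after the dot
    dtype_str = str(dtype).split(".")[-1]
    return f"aoti_torch_item_{dtype_str}(inputs[{idx}], &{input_key});"

print(item_call_for("s0", 0, torch.int64))
# aoti_torch_item_int64(inputs[0], &s0);
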
+ if config.abi_compatible: + self.prefix.writeline( + f"""auto {constants_key} = constants_->at({idx});""" + ) + else: + self.prefix.writeline( + f"auto {constants_key} = *tensor_handle_to_tensor_pointer(" + + f"""constants_->at({idx}));""" + ) + else: + # Append constants as inputs to the graph + constants_idx = inputs_len + idx + self.prefix.writeline( + f"auto {constants_key} = inputs[{constants_idx}];" + ) + + self.codegen_inputs(self.prefix, V.graph.graph_inputs) + + if V.graph.aot_mode: + if not V.graph.is_const_graph: + if config.use_minimal_arrayref_interface: + # TODO: input shape checking for regular tensor interface as well? + self.codegen_input_numel_asserts() + else: + self.prefix.writeline("inputs.clear();") + self.prefix.writeline( + "auto& kernels = static_cast(*this->kernels_.get());" + ) + + def codegen_input_numel_asserts(self): + for name, buf in V.graph.graph_inputs.items(): + if isinstance(buf, sympy.Expr): + continue + + # comparing strides for 0 size tensor is tricky. Ignore them for now. + if sympy_product(buf.get_size()) == 0: + continue + numel = buf.get_numel() + self.prefix.writeline(f"assert_numel({name}, {numel});") + + def codegen_input_size_var_decl(self, code: IndentedBuffer, name): + if config.abi_compatible: + code.writeline(f"int64_t* {name}_size;") + code.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_sizes({name}, &{name}_size));" + ) + else: + super().codegen_input_size_var_decl(code, name) + + def codegen_input_stride_var_decl(self, code: IndentedBuffer, name): + if config.abi_compatible: + code.writeline(f"int64_t* {name}_stride;") + code.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_strides({name}, &{name}_stride));" + ) + else: + super().codegen_input_stride_var_decl(code, name) + + def codegen_model_kernels(self): + self.prefix.writeline("namespace {") + self.prefix.writeline( + "class AOTInductorModelKernels : public AOTInductorModelKernelsBase {" + ) + self.prefix.writeline(" public:") + declare_kernel = set(self.src_to_kernel.values()) + declare_kernel.update( + entry[0] for entry in self.user_defined_kernel_cache.values() + ) + if V.graph.const_module: + declare_kernel.update( + V.graph.const_module.wrapper_code.src_to_kernel.values() + ) + for kernel in declare_kernel: + self.prefix.writeline(f" CUfunction {kernel}{{nullptr}};") + self.prefix.writeline("};") + self.prefix.writeline("} // namespace") + + def codegen_model_constructor(self): + """ + // Generated code example + AOTInductorModel::AOTInductorModel() + : AOTInductorModelBase(4, 1) { + inputs_info_[0].name = "input0"; + inputs_info_[0].dtype = "torch.float16"; + ... + constants_info_[0].name = "L__self___weight"; + constants_info_[0].dtype = at::kFloat; + constants_info_[0].offset = 0; + constants_info_[0].data_size = 8192; + constants_info_[0].shape = {64, 32}; + constants_info_[0].stride = {32, 1}; + ... 
+ outputs_info_[0].name = "output0"; + outputs_info_[0].dtype = "torch.float16"; + } + """ + + num_inputs = len(V.graph.graph_inputs) + num_outputs = len(V.graph.graph_outputs) + num_constants = len(V.graph.constants) + self.prefix.splice( + f""" + AOTInductorModel::AOTInductorModel(std::shared_ptr constants_map, + std::shared_ptr> constants_array, + const std::string& device_str, + std::optional cubin_dir) + : AOTInductorModelBase({num_inputs}, {num_outputs}, {num_constants}, device_str, cubin_dir) {{ + """ + ) + + with self.prefix.indent(): + for idx, (name, inp) in enumerate(V.graph.graph_inputs.items()): + assert not isinstance( + inp, sympy.Expr + ), f"input {name=} cannot be symbolic" + self.write_input_output_info("inputs_info_", idx, name) + + for idx, (name, tensor) in enumerate(V.graph.constants.items()): + assert isinstance(tensor, torch.Tensor) + self.prefix.writeline(f"""constants_info_[{idx}].name = "{name}";""") + self.prefix.writeline( + f"constants_info_[{idx}].dtype = static_cast({self.codegen_dtype(tensor.dtype)});" + ) + self.prefix.writeline( + f"constants_info_[{idx}].offset = {tensor.storage_offset()};" + ) + self.prefix.writeline( + f"constants_info_[{idx}].data_size = {tensor.untyped_storage().nbytes()};" + ) + from_folded = "true" if name in V.graph.folded_constants else "false" + self.prefix.writeline( + f"constants_info_[{idx}].from_folded = {from_folded};" + ) + + size_str = ", ".join([str(s) for s in tensor.size()]) + self.prefix.writeline(f"constants_info_[{idx}].shape = {{{size_str}}};") + + stride_str = ", ".join([str(s) for s in tensor.stride()]) + self.prefix.writeline( + f"constants_info_[{idx}].stride = {{{stride_str}}};" + ) + if name in V.graph.dynamo_flat_name_to_original_fqn: + original_fqn = V.graph.dynamo_flat_name_to_original_fqn.get( + name, name + ) + elif name in V.graph.allocated_constant_name: + original_fqn = V.graph.allocated_constant_name[name] + else: + raise AssertionError("original_fqn must be set for constant") + self.prefix.writeline( + f"""constants_info_[{idx}].original_fqn = "{original_fqn}";""" + ) + self.prefix.writeline("update_constants_map(std::move(constants_map));") + self.prefix.writeline("update_constants_array(std::move(constants_array));") + + def escape_string(x): + return ( + x.replace("\\", "\\\\") + .replace('"', '\\"') + .replace("\n", "\\n") + .replace("\t", "\\t") + ) + + self.prefix.writeline( + f'in_spec_ = "{escape_string(config.aot_inductor.serialized_in_spec)}";' + ) + self.prefix.writeline( + f'out_spec_ = "{escape_string(config.aot_inductor.serialized_out_spec)}";' + ) + + for idx, output in enumerate(V.graph.graph_outputs): + assert not isinstance( + output, sympy.Expr + ), f"output {name=} cannot be symbolic" + name = f"output{idx}" + self.write_input_output_info("outputs_info_", idx, name) + + self.prefix.writeline( + "this->kernels_ = std::make_unique();" + ) + + self.prefix.writeline("}") + + def codegen_const_run_driver(self): + """ + // Generated code example + std::unordered_map AOTInductorModel::const_run_impl( + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor, + bool initialization + ) { + std::unordered_map folded_constants_map; + std::vector output_handles; + // build up output_handles over here. 
+ _const_run_impl(output_handles, stream, proxy_executor); + // build up folded_constants_map + return folded_constants_map; + } + """ + + self.prefix.splice( + """ + std::unordered_map AOTInductorModel::const_run_impl( + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor, + bool initialization + ) { + """ + ) + if not config.aot_inductor.use_runtime_constant_folding: + self.prefix.splice( + """ + if (!initialization) { + std::cerr << "[WARNING] Calling constant_folding in model, but compiled with config: " + << "aot_inductor.use_runtime_constant_folding=False\\n"; + } + return {}; + } + """ + ) + return + + with self.prefix.indent(): + # This is a mapping to the index of constant folding graph's output + const_index_mapping: List[Optional[Tuple[int, str]]] = [None] * len( + V.graph.const_output_index + ) + for idx, (name, _) in enumerate(V.graph.constants.items()): + if name in V.graph.const_output_index: + const_index_mapping[V.graph.const_output_index[name]] = (idx, name) # type: ignore[call-overload] + assert ( + None not in const_index_mapping + ), "Not all constant gets mapped for constant folding graph." + + self.prefix.writeline( + f""" + std::unordered_map folded_constants_map; + folded_constants_map.reserve({len(const_index_mapping)}); + std::vector output_handles({len(const_index_mapping)}); + """ + ) + + self.prefix.splice( + """ + // The below assignment of output_handles to constants is not used directly. + // It's only used to memo the correspondence of handle and constants. + """ + ) + + for output_idx, (const_idx, _) in enumerate(const_index_mapping): # type: ignore[misc] + self.prefix.writeline( + f"output_handles[{output_idx}] = constants_->at({const_idx});" + ) + + self.prefix.writeline( + "_const_run_impl(output_handles, stream, proxy_executor);" + ) + + for output_idx, (_, const_name) in enumerate(const_index_mapping): # type: ignore[misc] + self.prefix.writeline( + f'folded_constants_map["{const_name}"] = output_handles[{output_idx}];' + ) + self.prefix.writeline("return folded_constants_map;") + + self.prefix.writeline("}") + + def generate(self, is_inference): + if V.graph.aot_mode and not V.graph.is_const_graph: + self.codegen_model_kernels() + self.codegen_model_constructor() + self.codegen_const_run_driver() + self.write_wrapper_decl() + return super().generate(is_inference) + + def finalize_prefix(self): + cached_dtypes_buffer = IndentedBuffer() + if config.abi_compatible: + for dtype in self.used_cached_dtypes: + cached_dtypes_buffer.writeline(f"CACHE_TORCH_DTYPE({dtype});") + for device in self.used_cached_devices: + cached_dtypes_buffer.writeline(f"CACHE_TORCH_DEVICE({device});") + cached_dtypes_buffer.splice(self.prefix) + self.prefix = cached_dtypes_buffer + + def define_kernel( + self, name: str, kernel: str, metadata: Optional[str] = None, cuda=False + ): + self.header.splice(f"\n{kernel}\n") + + def codegen_scalar_to_tensor(self, output: str): + name = f"scalar_to_tensor_{next(self.scalar_to_tensor_id)}" + self.wrapper_call.writeline( + f"RAIIAtenTensorHandle {name} = scalar_to_tensor_handle({output});" + ) + return name + + @cache_on_self + def get_output_refs(self): + return [ + f"torch::tensor({x.codegen_reference(self.wrapper_call)})" + if isinstance(x, ir.ShapeAsConstantBuffer) and not config.abi_compatible + else x.codegen_reference(self.wrapper_call) + for x in V.graph.graph_outputs + ] + + def generate_return(self, output_refs): + cst_names = V.graph.constants.keys() + arr_iface = ( + not V.graph.is_const_graph and 
config.use_minimal_arrayref_interface + ) # For brevity. + + def use_thread_local_cached_output_tensor(idx, output): + cached_output_name = f"cached_output_{next(self.cached_output_id)}" + cache_type = "Array" if arr_iface else "Tensor" + self.wrapper_call.writeline( + f"thread_local ThreadLocalCachedOutput{cache_type}> " + f"{cached_output_name}({output});" + ) + if arr_iface: + self.wrapper_call.writeline( + f"{cached_output_name}.copy_data_from({output});" + ) + output_entry = f"std::get<{idx}>(output_arrayref_tensors)" + element_type = f"std::decay_t" + self.wrapper_call.writeline( + f"{output_entry} = {cached_output_name}.arrayref_tensor<{element_type}>();" + ) + else: + self.wrapper_call.writeline( + f"{cached_output_name}.copy_data_from({output});" + ) + self.wrapper_call.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_new_uninitialized_tensor(&output_handles[{idx}]));" + ) + self.wrapper_call.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_assign_tensors({cached_output_name}.tensor(), " + f"output_handles[{idx}]));" + ) + + if arr_iface: + self.wrapper_call.writeline( + "AOTInductorModelOutputs output_arrayref_tensors;" + ) + for idx, output in enumerate(output_refs): + if config.abi_compatible: + output_buffer = V.graph.graph_outputs[idx] + if isinstance(output_buffer, ir.ShapeAsConstantBuffer): + # Need to wrap scalar into tensor as the main function returns a vector of tensors + output_tensor = self.codegen_scalar_to_tensor(output) + self.wrapper_call.writeline( + f"output_handles[{idx}] = {output_tensor}.release();" + ) + continue + + output_is_tensor_handle_expr = ( + f"std::is_same_v," + "RAIIAtenTensorHandle> || " + f"std::is_same_v," + "AtenTensorHandle> || " + f"std::is_same_v," + "ConstantHandle>" + ) + self.wrapper_call.writeline( + f"if constexpr ({output_is_tensor_handle_expr}) {{" + ) + with self.wrapper_call.indent(): + if arr_iface: + cached_output_name = ( + f"cached_output_{next(self.cached_output_id)}" + ) + output_value_type = f"std::decay_t(output_arrayref_tensors).data()[0])>" + self.wrapper_call.writeline( + f"thread_local RAIIAtenTensorHandle {cached_output_name};" + ) + if output in cst_names: + # NOTE(return_constant): In some rare cases where we return + # a constant, we have to return a copy of this constant, + # because (1) constants are not owned by the Model instance + # (2) constants remain the same cross inference runs, + # assuming they are not updated at runtime Basically, we + # cannot release or transfer the ownership of any original + # constant to the user. + self.wrapper_call.writeline( + f"AtenTensorHandle {cached_output_name}_tmp;" + ) + self.wrapper_call.writeline( + f"aoti_torch_clone({output}, &{cached_output_name}_tmp);" + ) + self.wrapper_call.writeline( + f"{cached_output_name} = {cached_output_name}_tmp;" + ) + else: + self.wrapper_call.writeline( + f"{cached_output_name} = {output}.release();" + ) + self.wrapper_call.writeline( + f"convert_handle_to_arrayref_tensor({cached_output_name}, " + f"std::get<{idx}>(output_arrayref_tensors));" + ) + else: + if output in cst_names: + # See NOTE(return_constant) above. 
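
As the NOTE(return_constant) comment above explains, an output that aliases a model constant cannot hand its ownership to the caller, so it is cloned into a fresh handle, while ordinary outputs simply release their RAII handle into output_handles. A minimal sketch of that decision, with invented buffer names and a plain list in place of wrapper_call:

def emit_return(idx, output, constant_names, emit):
    if output in constant_names:
        # constants stay owned by the model and must survive across runs
        emit(f"aoti_torch_clone({output}, &output_handles[{idx}]);")
    else:
        emit(f"output_handles[{idx}] = {output}.release();")

lines = []
emit_return(0, "L__self___weight", {"L__self___weight"}, lines.append)
emit_return(1, "buf3", {"L__self___weight"}, lines.append)
print("\n".join(lines))
# aoti_torch_clone(L__self___weight, &output_handles[0]);
# output_handles[1] = buf3.release();
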
+ self.wrapper_call.writeline( + f"aoti_torch_clone({output}, &output_handles[{idx}]);" + ) + else: + self.wrapper_call.writeline( + f"output_handles[{idx}] = {output}.release();" + ) + self.wrapper_call.writeline("} else {") + with self.wrapper_call.indent(): + use_thread_local_cached_output_tensor(idx, output) + self.wrapper_call.writeline("}") + + else: + assert ( + not arr_iface + ), "minimal ArrayRef interface is only supported in ABI-compatible mode" + if output in cst_names: + output_expr = f"{output}.clone()" + # See NOTE(return_constant) above. + else: + output_expr = output + self.wrapper_call.writeline( + f"output_handles[{idx}] = reinterpret_cast(" + + f"new at::Tensor({output_expr}));" + ) + if arr_iface: + self.wrapper_call.writeline("return output_arrayref_tensors;") + + def generate_before_suffix(self, result): + if not V.graph.is_const_graph: + if V.graph.aot_mode: + result.writeline("} // AOTInductorModel::run_impl") + else: + result.writeline("} // inductor_entry_impl") + + def generate_end(self, result): + if V.graph.aot_mode: + if V.graph.is_const_graph: + result.writeline("} // AOTInductorModel::_const_run_impl") + else: + result.writeline("} // namespace aot_inductor") + result.writeline("} // namespace torch") + return + + result.writeline("'''\n)") + result.splice( + f""" + inductor_entry = CppWrapperCodeCache.load_pybinding( + ["std::vector"], cpp_wrapper_src, {self.cuda}, {len(V.graph.graph_outputs)}) + """ + ) + + # unwrap output tensor back to python scalar + if all(x for x in self.output_is_tensor.values()): + # If no ShapeAsConstantBuffer in the output, directly return the output as tensors + return_str = "return f(args_tensor)" + else: + outputs = [ + f"outputs[{i}]" if self.output_is_tensor[i] else f"outputs[{i}].item()" + for i in range(len(V.graph.graph_outputs)) + ] + outputs_str = f"[{', '.join(outputs)}]" + return_str = f""" + outputs = f(args_tensor) + return {outputs_str} + """ + + args_str = "args_tensor = [arg if isinstance(arg, torch.Tensor) else torch.tensor(arg) for arg in args]" + if V.graph.constants: + # Append constants to the input args for cpp wrapper. + # Python wrapper directly gets the value inside the wrapper call + # as a global variable passed when calling exec(code, mod.__dict__, mod.__dict__). + # For cpp wrapper, we need to pass this python value to the inductor_entry_impl function explicitly. + assert all( + isinstance(v, torch.Tensor) for v in list(V.graph.constants.values()) + ), "Expect all constants to be Tensor" + constants_str = f"[{', '.join(V.graph.constants.keys())}]" + args_str += f""" + constants_tensor = {constants_str} + args_tensor.extend(constants_tensor) + """ + + # Wrap the func to support setting result._boxed_call = True + result.splice( + f""" + def _wrap_func(f): + def g(args): + {args_str} + {return_str} + return g + call = _wrap_func(inductor_entry) + """ + ) + + def generate_c_shim_extern_kernel_call(self, kernel, args): + # In the abi_compatible mode, we call fallback aten ops through a C shim layer + self.allow_stack_allocation = False + kernel_tokens = kernel.split("::") + kernel_suffix = kernel_tokens[-1] + if kernel_suffix == "call": + kernel_suffix = kernel_tokens[-2] + if config.c_shim_version == "1": + shim_fn = f"aoti_torch_{kernel_suffix}" + else: + shim_fn = f"aoti_torch_{self.device}_{kernel_suffix}" + + # HACK: val_to_arg_str jams multiple arguments together using a comma. 
If that + # ever breaks, it needs to be reworked to be able to return multiple arguments, + # and the split-on-comma code here needs to be removed. + wrapped_args = [] + for x in args: + pieces = x.split(", ") + for piece in pieces: + # We only really *need* convert_arrayref_tensor_to_tensor for + # ArrayRefTensors. The code flowing into here uses `0` for nullptr, + # which convert_arrayref_tensor_to_tensor would blindly coerce to int, + # so just avoid wrapping integers. + if not piece.isdigit(): + piece = f"convert_arrayref_tensor_to_tensor({piece})" + wrapped_args.append(piece) + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK({shim_fn}({', '.join(wrapped_args)}));" + ) + + def generate_c_shim_extern_kernel_alloc(self, extern_kernel, args): + # registered output buffer name + name = extern_kernel.name + output_handle_name = f"{name}_handle" + self.writeline(f"AtenTensorHandle {output_handle_name};") + output_arg = f"&{output_handle_name}" + self.generate_c_shim_extern_kernel_call( + extern_kernel.get_kernel_name(), args + [output_arg] + ) + self.writeline(f"RAIIAtenTensorHandle {name}({output_handle_name});") + + def generate_extern_kernel_alloc(self, extern_kernel, args): + if config.abi_compatible: + self.generate_c_shim_extern_kernel_alloc(extern_kernel, args) + else: + super().generate_extern_kernel_alloc(extern_kernel, args) + + def generate_c_shim_fallback_kernel(self, fallback_kernel, args): + output_args = [] + output_raii_handles = [] + output_name_base = fallback_kernel.get_name() + for idx, output in enumerate(fallback_kernel.outputs): + if isinstance(output, ir.MultiOutput): + name = f"{output.get_name()}" + output_handle_name = f"{name}_handle" + if output.indices: + assert ( + output.indices[0][1] == idx + ), f"expected {output.indices[0][1]=} == {idx=} for {output_name_base=}" + self.writeline(f"AtenTensorHandle {output_handle_name};") + output_args.append(f"&{output_handle_name}") + output_raii_handles.append( + f"RAIIAtenTensorHandle {name}({output_handle_name});" + ) + elif isinstance(output, int): + output_name = f"{output_name_base}_{idx}" + self.writeline(f"int64_t {output_name} = {output};") + output_args.append(f"&{output_name}") + elif output is None: + output_args.append("nullptr") + else: + raise NotImplementedError("unsupported type of {output=}") + args = args + output_args + assert ( + fallback_kernel.abi_compatible_kernel is not None + ), f"abi_compatible_kernel is None for {fallback_kernel.python_kernel_name=}" + self.generate_c_shim_extern_kernel_call( + fallback_kernel.abi_compatible_kernel, args + ) + for raii_handle in output_raii_handles: + self.writeline(raii_handle) + + def generate_fallback_kernel(self, fallback_kernel, args): + if config.abi_compatible: + self.generate_c_shim_fallback_kernel(fallback_kernel, args) + else: + super().generate_fallback_kernel(fallback_kernel, args) + + def generate_extern_kernel_out(self, output_view, codegen_reference, args, kernel): + if output_view: + output_as_strided = f"{output_view.codegen_reference()}" + output_name = f"{output_view.get_name()}_as_strided" + self.writeline(f"auto {output_name} = {output_as_strided};") + + args.insert(0, output_name) + else: + args.insert(0, f"{codegen_reference}") + + if config.abi_compatible: + self.generate_c_shim_extern_kernel_call(kernel, args) + else: + self.writeline(self.wrap_kernel_call(kernel, args)) + + def generate_user_defined_triton_kernel( + self, kernel_name, grid, configs, args, triton_meta + ): + assert len(grid) != 0 + if len(grid) == 1: + grid_decision = 
grid[0] + else: + meta = CudaKernelParamCache.get(kernel_name) + assert meta is not None + grid_decision = None + for i, c in enumerate(configs): + if all(arg == meta["meta"][key] for key, arg in c.kwargs.items()): + grid_decision = grid[i] + break + assert grid_decision is not None + + self.generate_kernel_call( + kernel_name, + args, + grid=grid_decision, + device_index=V.graph.scheduler.current_device.index, + cuda=True, + triton=True, + triton_meta=triton_meta, + ) + + def generate_scatter_fallback( + self, output, inputs, kernel, python_kernel_name, src_is_tensor, reduce, kwargs + ): + # TODO: support other overload for cpp wrapper and remove the below assertions + if config.abi_compatible: + # call the ABI shim function instead of the ATen one + kernel = kernel.replace("at::", "aoti_torch_") + line = f"{kernel}({output}, {','.join(map(str, inputs))}" + if python_kernel_name == "aten.scatter_": + if src_is_tensor: + if reduce: + line += f", {V.graph.wrapper_code.val_to_arg_str(reduce)}" + else: + assert ( + reduce is None + ), "Expect reduce to be None for aten.scatter_ with scalar src" + else: + line += f", {','.join(kwargs)}" + line += f"){self.ending}" + self.writeline(line) + + def generate_index_put_fallback(self, kernel, x, indices, values, accumulate): + if V.graph.aot_mode and V.graph.cpp_wrapper and config.abi_compatible: + # See the comment in codegen_reinterpret_view about why having something like + # RAIIAtenTensorHandle(tmp_tensor_handle_2) in a tmp array can cause the correponding + # tensor prematurely deallocated, thus this std::vector().data() trick here. + indices_str = ( + f"std::vector{{{', '.join(indices)}}}.data()" + ) + args = [x, indices_str, str(len(indices)), values, accumulate] + else: + indices_str = ( + f"{self.open_bracket}{', '.join(indices)}{self.closed_bracket}" + ) + args = [x, indices_str, values, accumulate] + + args.insert(0, x) # set x as the output tensor, this fallback mutates x. + self.writeline(self.wrap_kernel_call(kernel, args)) + + def add_benchmark_harness(self, output): + if V.graph.aot_mode: + return + super().add_benchmark_harness(output) + + def codegen_sizevar(self, x: Expr) -> str: + return self.expr_printer(V.graph.sizevars.simplify(x)) + + def codegen_tuple_access(self, basename: str, name: str, index: str) -> str: + if config.abi_compatible: + # in the abi_compatible mode, outputs are returned via arguments + return name + else: + return f"std::get<{index}>({basename})" + + def codegen_shape_tuple(self, shape: Tuple[Expr, ...]) -> str: + parts = list(map(self.codegen_sizevar, shape)) + if len(parts) == 0: + return "{}" + if len(parts) == 1: + return f"{{{parts[0]}, }}" + return f"{{{', '.join(parts)}}}" + + def codegen_dynamic_scalar(self, node): + from .cpp import DTYPE_TO_ATEN, DTYPE_TO_CPP + + (data,) = (t.codegen_reference() for t in node.inputs) + if config.abi_compatible: + dtype = node.inputs[0].get_dtype() + dtype_str = str(dtype).split(".")[-1] + self.writeline(f"{DTYPE_TO_CPP[dtype]} {node.sym};") + self.writeline(f"aoti_torch_item_{dtype_str}({data}, &{node.sym});") + # record in unbacked_symbol_decls so we won't generate a declaration of the symbol again + self.unbacked_symbol_decls.add(str(node.sym)) + else: + if node.is_bool: + self.writeline(f"bool {node.sym} = {data}.item() ? 
1 : 0;") + else: + convert_type = DTYPE_TO_ATEN[node.inputs[0].get_dtype()].replace( + "at::k", "to" + ) + self.writeline(f"auto {node.sym} = {data}.item().{convert_type}();") + + def can_stack_allocate_buffer(self, buffer): + return ( + self.allow_stack_allocation + and buffer.get_device().type == "cpu" + and self.can_prove_buffer_has_static_shape(buffer) + and ir.is_contiguous_strides_for_shape( + buffer.get_stride(), buffer.get_size() + ) + ) + + def make_buffer_free(self, buffer): + return ( + "" + if isinstance(buffer.get_layout(), ir.MultiOutputLayout) + or (V.graph.aot_mode and buffer.get_name() in self.stack_allocated_buffers) + or ( + config.use_minimal_arrayref_interface + and V.graph.aot_mode + and buffer.get_name() in V.graph.graph_inputs + ) + else f"{buffer.get_name()}.reset();" + ) + + def make_free_by_names(self, names_to_del: List[str]): + return " ".join(f"{name}.reset();" for name in names_to_del) + + def codegen_exact_buffer_reuse(self, old_name: str, new_name: str, del_line: str): + if config.abi_compatible: + return f"auto {new_name} = std::move({old_name}); // reuse" + else: + return super().codegen_exact_buffer_reuse(old_name, new_name, del_line) + + def generate_profiler_mark_wrapper_call(self, stack): + self.wrapper_call.writeline( + 'RECORD_FUNCTION("inductor_wrapper_call", c10::ArrayRef());' + ) + + def write_triton_header_once(self): + pass + + def generate_start_graph(self): + pass + + def generate_end_graph(self): + pass + + def generate_inf_and_nan_checker(self, nodes): + for buf in nodes.get_names(): + # TODO: Add buf name directly into check_inf_and_nan. + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_check_inf_and_nan({buf}));" + ) + + def codegen_device(self, device): + if config.abi_compatible: + self.used_cached_devices.add(device.type) + return f"cached_torch_device_type_{device.type},{device.index if device.index else 0}" + else: + from .cpp import DEVICE_TO_ATEN + + return ( + f"c10::Device({DEVICE_TO_ATEN[device.type]}, {device.index})" + if device.index is not None + else f"{DEVICE_TO_ATEN[device.type]}" + ) + + def codegen_dtype(self, dtype): + if config.abi_compatible: + dtype_str = str(dtype).split(".")[-1] + self.used_cached_dtypes.add(dtype_str) + return f"cached_torch_dtype_{dtype_str}" + else: + from .cpp import DTYPE_TO_ATEN + + return DTYPE_TO_ATEN[dtype] + + @functools.lru_cache(None) + def codegen_int_array_var( + self, + int_array: str, + writer=None, + known_statically=False, + graph=None, # for per-graph caching + ): + # Because the memory planning is done in two passes (see the implementation + # of self.generate), the writeline behavior is different in the two passes. 
+ # As a result, the emitted int array declarations may appear in a later + # position of the generated code, so the second pass codegen should not + # reuse int array declarations generated in the first pass + if writer is None: + # The first pass codegen uses `self` as the writer + writer = self + + var = f"int_array_{next(self.int_array_id)}" + if var not in self.declared_int_array_vars: + self.declared_int_array_vars.add(var) + if known_statically: + writer.writeline(f"static constexpr int64_t {var}[] = {int_array};") + else: + writer.writeline(f"int64_t {var}[] = {int_array};") + return var + + def make_buffer_allocation(self, buffer): + return self.make_allocation( + buffer.get_name(), + buffer.get_device(), + buffer.get_dtype(), + buffer.get_size(), + buffer.get_stride(), + buffer if self.can_stack_allocate_buffer(buffer) else None, + ) + + def make_allocation( + self, name, device, dtype, shape, stride, buffer_if_can_stack_allocate=None + ): + orig_stride = stride + device_str = self.codegen_device(device) + dtype_code = self.codegen_dtype(dtype) + size = self.codegen_shape_tuple(shape) + stride = self.codegen_shape_tuple(orig_stride) + if config.abi_compatible: + size_array_var = self.codegen_int_array_var( + size, + self.wrapper_call, + known_statically=self.is_statically_known_list_of_ints(shape), + graph=self.get_codegened_graph(), + ) + stride_array_var = self.codegen_int_array_var( + stride, + self.wrapper_call, + known_statically=self.is_statically_known_list_of_ints(orig_stride), + graph=self.get_codegened_graph(), + ) + device_type, device_id = device_str.split(",") + device_idx = "this->device_idx_" if V.graph.aot_mode else device_id + if buffer_if_can_stack_allocate is not None: + from .cpp import DTYPE_TO_CPP + + self.stack_allocated_buffers[name] = buffer_if_can_stack_allocate + cpp_type = DTYPE_TO_CPP[dtype] + numel = buffer_if_can_stack_allocate.get_numel() + # Note: we don't zero storage because empty_strided doesn't zero either. 
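
When a CPU buffer has a statically known, contiguous shape, the branch above emits a plain C array sized to the buffer's numel and wraps it in an ArrayRefTensor instead of calling aoti_torch_empty_strided. The sketch below reproduces only the declaration string; the DTYPE_TO_CPP mapping here is a tiny stand-in used for the example.

from math import prod

DTYPE_TO_CPP = {"float32": "float", "int64": "int64_t"}  # illustrative subset

def stack_alloc_decl(name: str, dtype: str, shape: list) -> str:
    numel = prod(shape)
    # storage is deliberately left uninitialized, like empty_strided
    return f"{DTYPE_TO_CPP[dtype]} {name}_storage[{numel}];"

print(stack_alloc_decl("buf0", "float32", [8, 16]))
# float buf0_storage[128];
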
+ self.wrapper_call.writeline(f"{cpp_type} {name}_storage[{numel}];") + args = [ + f"{name}_storage", + size_array_var, + stride_array_var, + device_type, + device_idx, + ] + return f"ArrayRefTensor<{cpp_type}> {name}({', '.join(args)});" + + args = [ + str(len(shape)), + size_array_var, + stride_array_var, + dtype_code, + device_type, + device_idx, + f"&{name}_handle", + ] + + self.wrapper_call.writeline(f"AtenTensorHandle {name}_handle;") + self.wrapper_call.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_empty_strided({', '.join(args)}));" + ) + + return f"RAIIAtenTensorHandle {name}({name}_handle);" + + if V.graph.aot_mode and device_str.startswith("c10::Device("): + tensor_device = f"{device_str.split(',')[0]}, this->device_idx_)" + else: + tensor_device = device_str + + if device.type == "cpu": + return f"at::Tensor {name} = at::detail::empty_strided_cpu({size}, {stride}, {dtype_code});" + if device.type == "cuda": + return ( + f"at::Tensor {name} = at::detail::empty_strided_cuda(" + f"{size}, {stride}, {dtype_code}, c10::DeviceType::CUDA);" + ) + return ( + f"{self.declare}{name} = {self.namespace}empty_strided(" + f"{size}, {stride}, at::TensorOptions({tensor_device}).dtype({dtype_code})){self.ending}" + ) + + def codegen_alloc_from_pool(self, name, offset, dtype, shape, stride) -> str: + if config.abi_compatible: + size = self.codegen_shape_tuple(shape) + stride = self.codegen_shape_tuple(stride) + tmp_name = f"tmp_tensor_handle_{next(self.tmp_tensor_id)}" + args = [ + name, + pexpr(offset), # bytes not numel + self.codegen_dtype(dtype), + str(len(shape)), + self.codegen_int_array_var( + size, self.wrapper_call, graph=self.get_codegened_graph() + ), + self.codegen_int_array_var( + stride, self.wrapper_call, graph=self.get_codegened_graph() + ), + f"&{tmp_name}", + ] + self.wrapper_call.writeline(f"AtenTensorHandle {tmp_name};") + self.wrapper_call.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch__alloc_from_pool({', '.join(args)}));" + ) + return f"RAIIAtenTensorHandle({tmp_name})" + + return "alloc_from_pool({})".format( + ", ".join( + [ + name, + pexpr(offset), # bytes not numel + self.codegen_dtype(dtype), + self.codegen_shape_tuple(shape), + self.codegen_shape_tuple(stride), + ] + ) + ) + + def codegen_reinterpret_view( + self, data, size_list, stride_list, offset, writer + ) -> str: + dim = str(len(size_list)) + size = self.codegen_shape_tuple(size_list) + stride = self.codegen_shape_tuple(stride_list) + offset = self.codegen_sizevar(offset) + + if config.abi_compatible: + tmp_name = f"tmp_tensor_handle_{next(self.tmp_tensor_id)}" + # Because the memory planning is done in two passes (see the implementation + # of self.generate), the writeline behavior is different in the two passes. 
+ if writer is None: + writer = self + + args = [ + f"{data.get_name()}", + dim, + self.codegen_int_array_var( + size, + writer, + known_statically=self.is_statically_known_list_of_ints(size_list), + graph=self.get_codegened_graph(), + ), + self.codegen_int_array_var( + stride, + writer, + known_statically=self.is_statically_known_list_of_ints(stride_list), + graph=self.get_codegened_graph(), + ), + offset, + ] + + def gen_reinterpret_call(writer, args): + writer.writeline( + f"auto {tmp_name} = reinterpret_tensor_wrapper({', '.join(args)});" + ) + + if ( + self.can_stack_allocate_buffer(data) + and self.is_statically_known_list_of_ints(size_list) + and self.is_statically_known_list_of_ints(stride_list) + and ir.is_contiguous_strides_for_shape(stride_list, size_list) + ): + gen_reinterpret_call(writer, args) + return tmp_name + + gen_reinterpret_call(writer, args) + + # NB, the return handle here represents a temporary tensor, which will be automatically + # released. + # Here's a sample usage in the cpp wrapper code: + # ``` + # aoti_torch_addmm_out( + # buf1, + # arg1_1, + # RAIIAtenTensorHandle(tmp_tensor_handle_0), + # buf0, + # 1L, + # 1L)); + # ``` + # RAIIAtenTensorHandle(tmp_tensor_handle_0) will be released after the call to addmm_out. + # This could be problematic when it's used in a different pattern, for example: + # ```` + # AtenTensorHandle tensor_args[] = {RAIIAtenTensorHandle(tmp_tensor_handle_2), buf5, buf6}; + # aoti_torch_proxy_executor_call_function(..., tensor_args); + # ```` + # RAIIAtenTensorHandle(tmp_tensor_handle_2) will be invalid when it's used in the latter + # kernel call. + # + # This is solved by updating the proxy_executor invocation to + # ``` + # aoti_torch_proxy_executor_call_function(..., + # std::vector{ + # RAIIAtenTensorHandle(tmp_tensor_handle_2), buf5, buf6 + # }.data() + # ); + # ``` + return f"wrap_with_raii_handle_if_needed({tmp_name})" + else: + args = [data.get_name(), size, stride, offset] + return f"reinterpret_tensor({', '.join(args)})" + + def codegen_device_copy(self, src, dst): + if config.abi_compatible: + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_tensor_copy_(expensive_copy_to_tensor_if_needed({src}), {dst}));" + ) + else: + self.writeline(f"{dst}.copy_({src});") + + def codegen_multi_output(self, name, value): + # in the abi_compatible mode, outputs are retrieved by passing + # output pointers, so we skip its codegen here. + if not config.abi_compatible: + super().codegen_multi_output(name, value) + + def codegen_subgraph_prefix(self, subgraph, outer_inputs, outer_outputs): + for inner_input, outer_input in zip(subgraph.graph.graph_inputs, outer_inputs): + if config.abi_compatible: + # in ABI-compatible mode, we copy the underlying at::Tensor of the conditional + # input (outer_input) into another at::Tensor to be used as a subgraph input + # (inner_input) in the nested scope. we can't std::move here, as the codegened + # outer input may be an expression / rvalue (e.g., reinterpret_view(x)), so we + # can't necessarily std::move it back to the origin (x). 
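
The comment above explains why a conditional's subgraph input is copied into a fresh handle rather than std::move'd in ABI-compatible mode. The helper below is only an illustration of the two emitted shapes, returning the generated lines instead of writing them; the buffer names are invented.

def bind_subgraph_input(inner: str, outer: str, abi_compatible: bool) -> list:
    if abi_compatible:
        return [
            f"AtenTensorHandle {inner}_handle;",
            f"AOTI_TORCH_ERROR_CODE_CHECK("
            f"aoti_torch_assign_tensors_out({outer}, &{inner}_handle));",
            f"RAIIAtenTensorHandle {inner}({inner}_handle);",
        ]
    # non-ABI mode can simply alias the outer tensor
    return [f"auto {inner} = {outer};"]

print("\n".join(bind_subgraph_input("arg0_1", "buf4", True)))
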
+ self.writeline(f"AtenTensorHandle {inner_input}_handle;") + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_assign_tensors_out({outer_input}, &{inner_input}_handle));" + ) + self.writeline( + f"RAIIAtenTensorHandle {inner_input}({inner_input}_handle);" + ) + else: + self.writeline( + f"{self.declare}{inner_input} = {outer_input}{self.ending}" + ) + + def codegen_subgraph_suffix(self, subgraph, outer_inputs, outer_outputs): + for inner_output, outer_output in zip( + subgraph.graph.graph_outputs, outer_outputs + ): + src = inner_output.codegen_reference() + if config.abi_compatible: + # in ABI-compatible mode, we need to std::move subgraph output (inner_output) + # to the conditional output (outer_output), as RAIIAtenTensorHandle's copy + # constructor is deleted. + src = f"std::move({src})" + self.writeline(f"{outer_output} = {src}{self.ending}") + + def codegen_conditional(self, conditional): + name = conditional.get_name() + outer_inputs = [f"{buf.codegen_reference()}" for buf in conditional.operands] + if config.abi_compatible: + outer_outputs = [] + for out in conditional.outputs: + # in ABI-compatible mode, ir.MultiOutput is not codegened, + # hence pre-declare output variables directly and separately + self.writeline(f"RAIIAtenTensorHandle {out.get_name()};") + outer_outputs.append(out.get_name()) + predicate = f"{conditional.predicate.get_name()}_scalar" + self.writeline(f"bool {predicate};") + # in ABI-compatible mode, we need to use the ABI shim function + # to extract a C++ bool from the unrelying scalar bool Tensor + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_item_bool({conditional.predicate.codegen_reference()}, &{predicate}));" + ) + else: + # in non-ABI-compatible mode, we can codegen the conditional outputs + # as array of at::Tensor instances, as the ir.MultiOutput is codegened + outer_outputs = [f"{name}[{i}]" for i in range(len(conditional.outputs))] + self.writeline(f"at::Tensor {name}[{len(conditional.outputs)}];") + predicate = f"{conditional.predicate.codegen_reference()}.item()" + + self.writeline(f"if ({predicate}) {{") + self.writeline(EnterSubgraphLine(self, conditional.true_subgraph.graph)) + self.codegen_subgraph(conditional.true_subgraph, outer_inputs, outer_outputs) + self.writeline(ExitSubgraphLine(self)) + self.writeline("} else {") + self.writeline(EnterSubgraphLine(self, conditional.false_subgraph.graph)) + self.codegen_subgraph(conditional.false_subgraph, outer_inputs, outer_outputs) + self.writeline(ExitSubgraphLine(self)) + self.writeline("}") + + def generate_extern_kernel_args_decl_if_needed( + self, op_overload, raw_args, output_args + ): + arg_types = [x.real_type for x in op_overload._schema.arguments] + return_types = [x.type for x in op_overload._schema.returns] + + new_tensor_args = [] + new_int_args = [] + + def fill_args(arg, arg_type): + static_arg_types = ( + torch.FloatType, + torch.BoolType, + torch.StringType, + torch.Type, + torch.DeviceObjType, + ) + inductor_tensor_buffers = ( + ir.Buffer, + ir.ReinterpretView, + ) + + if isinstance(arg_type, torch.TensorType): + assert isinstance(arg, inductor_tensor_buffers), f"got {type(arg)}" + new_tensor_args.append(f"{arg.codegen_reference()}") + elif isinstance(arg_type, torch.IntType): + # int + new_int_args.append(str(arg)) + elif isinstance(arg_type, torch.SymIntType): + # SymInt + expr = arg.node.expr if isinstance(arg, torch.SymInt) else arg + new_int_args.append(self.expr_printer(expr)) + elif isinstance(arg_type, torch.NumberType): + # Scalar of type int + 
assert isinstance(arg, (int, float, bool)) + # Only treat int Scalar as dynamic + if isinstance(arg, int): + new_int_args.append(str(arg)) + elif isinstance(arg_type, torch.ListType): + assert isinstance(arg, (list, tuple)) + + # List[Tensor] + if isinstance(arg_type.getElementType(), torch.TensorType): + new_tensor_args.extend([f"{a.codegen_reference()}" for a in arg]) + # List[Optional[Tensor]] + elif isinstance( + arg_type.getElementType(), torch.OptionalType + ) and isinstance( + arg_type.getElementType().getElementType(), torch.TensorType + ): + new_tensor_args.extend( + [f"{a.codegen_reference()}" for a in arg if a is not None] + ) + # List[int] + elif isinstance(arg_type.getElementType(), torch.IntType): + new_int_args.extend([str(a) for a in arg]) + # List[SymInt] + elif isinstance(arg_type.getElementType(), torch.SymIntType): + expressions = [ + a.node.expr if isinstance(a, torch.SymInt) else a for a in arg + ] + new_int_args.extend( + [self.expr_printer(expr) for expr in expressions] + ) + # List[Scalar] + elif isinstance(arg_type.getElementType(), torch.NumberType): + # Only treat int Scalar as dynamic + is_int_type = [isinstance(a, int) for a in arg] + if any(is_int_type): + assert all( + is_int_type + ), "AOTInductor only supports int scalars of the same type" + new_int_args.extend([str(a) for a in arg]) + else: + assert isinstance( + arg_type.getElementType(), static_arg_types # type: ignore[arg-type] + ), f"Fall through arguments must be one of static_arg_types, got {type(arg_type)}" + else: + assert isinstance( + arg_type, static_arg_types # type: ignore[arg-type] + ), f"Fall through arguments must be one of static_arg_types, got {type(arg_type)}" + + for arg, arg_type in zip(raw_args, arg_types): + if arg is not None: + if isinstance(arg_type, torch.OptionalType): + fill_args(arg, arg_type.getElementType()) + else: + fill_args(arg, arg_type) + + def fill_output_arg(arg, return_type): + if isinstance(return_type, torch.TensorType): + self.writeline(f"AtenTensorHandle {arg}_handle; // output buffer") + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_new_uninitialized_tensor(&{arg}_handle));" + ) + self.writeline(f"RAIIAtenTensorHandle {arg}({arg}_handle);") + new_tensor_args.append(f"{arg}") + elif isinstance(return_type, torch.SymIntType): + raise NotImplementedError("NYI support for return type: SymInt") + elif isinstance(return_type, torch.ListType) and isinstance( + return_type.getElementType(), torch.SymIntType + ): + raise NotImplementedError("NYI support for return type: List[SymInt]") + else: + raise AssertionError(f"Unsupported return type found: {return_type}") + + # TODO: Only support tensor(s) returns for now, SymInt is not implemented yet + for return_type in return_types: + if isinstance(return_type, (torch.TensorType)): + pass + elif isinstance(return_type, torch.OptionalType): + assert isinstance(return_type.getElementType(), torch.TensorType) + elif isinstance(return_type, torch.ListType): + assert isinstance(return_type.getElementType(), torch.TensorType) + else: + raise NotImplementedError( + f"return type {return_type} is not yet supported." 
+ ) + + for output_arg in output_args: + assert output_arg is not None, "Optional return types are not yet supported" + if isinstance(output_arg, (list, tuple)): + for out in output_arg: + fill_output_arg(out, torch.TensorType.get()) + else: + fill_output_arg(output_arg, torch.TensorType.get()) + + return new_tensor_args, new_int_args + + def generate_extern_kernel_alloc_and_find_schema_if_needed( + self, + name, + kernel, + codegen_args, + cpp_op_schema, + cpp_kernel_key, + cpp_kernel_overload_name="", + op_overload=None, + raw_args=None, + outputs=None, + ): + if config.is_fbcode(): + assert op_overload is not None + assert raw_args is not None + assert outputs is not None + + return self.generate_extern_kernel_alloc_and_find_schema_if_needed_fbcode( + name, + cpp_kernel_key, + op_overload, + raw_args, + outputs, + ) + else: + return self.generate_extern_kernel_alloc_and_find_schema_if_needed_oss( + name, + kernel, + codegen_args, + cpp_op_schema, + cpp_kernel_key, + cpp_kernel_overload_name, + ) + + def generate_extern_kernel_alloc_and_find_schema_if_needed_oss( + self, + name, + kernel, + codegen_args, + cpp_op_schema, + cpp_kernel_key, + cpp_kernel_overload_name="", + ): + if cpp_kernel_key not in self.extern_call_ops: + self.writeline( + f"static auto op_{cpp_kernel_key} = c10::Dispatcher::singleton()" + ) + self.writeline( + f'\t.findSchemaOrThrow("{kernel}", "{cpp_kernel_overload_name}")' + ) + self.writeline(f"\t.typed<{cpp_op_schema}>();") + self.extern_call_ops.add(cpp_kernel_key) + + self.writeline( + f"auto {name} = op_{cpp_kernel_key}.call({', '.join(codegen_args)});" + ) + + def generate_extern_kernel_alloc_and_find_schema_if_needed_fbcode( + self, + name, + cpp_kernel_key, + op_overload, + raw_args, # contains both args and flatten kwargs + outputs, + ): + def extract_output_name(out): + assert out is not None, "None, i.e. 
optional output is not supported" + if isinstance(out, ir.MultiOutput): + return out.get_name() + elif isinstance(out, (list, tuple)): + return type(out)(extract_output_name(o) for o in out) + else: + raise AssertionError(f"Unexpected output: {type(out)}") + + # output_args has the same pytree structure as outputs + output_args = extract_output_name(outputs) + if isinstance(output_args, str): + output_args = [output_args] + + ( + tensor_call_args, + int_call_args, + ) = self.generate_extern_kernel_args_decl_if_needed( + op_overload, raw_args, output_args + ) + + tensor_call_args_str = ", ".join(tensor_call_args) + int_call_args_str = ", ".join(int_call_args) + + extern_kernel_node_index = len(V.graph.extern_kernel_nodes) - 1 + + self.writeline( + f"aoti_torch_proxy_executor_call_function(proxy_executor, " + f"{extern_kernel_node_index}, " + f"{len(int_call_args)}, " + f"std::vector{{{int_call_args_str}}}.data(), " + f"{len(tensor_call_args)}, " + f"std::vector{{{tensor_call_args_str}}}.data());" + ) + + self.extern_call_ops.add(cpp_kernel_key) + + def generate_reset_kernel_saved_flags(self): + pass + + def generate_save_uncompiled_kernels(self): + pass + + def val_to_cpp_arg_str(self, type_, val, is_legacy_abi) -> str: + if ( + config.abi_compatible + and not is_legacy_abi + and isinstance(type_, torch.OptionalType) + ): + if val is None: + return "0" # nullptr is not available in C + if not isinstance(type_.getElementType(), torch.TensorType): + var_name = f"var_{next(self.arg_var_id)}" + self.writeline(f"auto {var_name} = {self.val_to_arg_str(val)};") + return f"&{var_name}" + elif config.c_shim_version == "2": + # Similar to other data type, use pointer to denote optional tensor arg in v2 C shim + base_handle = self.val_to_arg_str(val) + if "wrap_with_raii_handle_if_needed" in base_handle: + # wrap_with_raii_handle_if_needed creates a temp RAIIAtenTensorHandle, so we need to + # explicitly store it. Otherwise, it will be destroyed before the fallback kernel call. + tmp_var_name = f"var_{next(self.arg_var_id)}" + self.writeline( + f"RAIIAtenTensorHandle {tmp_var_name} = {base_handle};" + ) + base_handle = tmp_var_name + var_name = f"var_{next(self.arg_var_id)}" + self.writeline(f"AtenTensorHandle {var_name} = {base_handle}.get();") + return f"&{var_name}" + + return self.val_to_arg_str(val) + + def val_to_arg_str(self, val) -> str: + if val is None: + # When None is passed as an argument, it represents an optional that does not contain a value. + if config.abi_compatible: + return "0" # nullptr is not available in C + return "c10::nullopt" + elif isinstance(val, bool): + if config.abi_compatible: + return "1" if val else "0" + else: + return "true" if val else "false" + elif isinstance(val, int): + # uint64_t is long on Linux, but long long on MacOS + return f"{val}LL" if sys.platform == "darwin" else f"{val}L" + elif isinstance(val, str): + return f'"{val}"' + elif isinstance( + val, (ir.Buffer, ir.ReinterpretView, ir.StorageBox, ir.TensorBox) + ): + return val.codegen_reference() + elif isinstance(val, torch.device): + return self.codegen_device(val) + elif isinstance(val, torch.dtype): + return self.codegen_dtype(val) + elif isinstance(val, float) and val in [float("inf"), float("-inf")]: + if val == float("inf"): + return "std::numeric_limits::infinity()" + else: + return "-std::numeric_limits::infinity()" + elif isinstance(val, (list, tuple)): + # FIXME handle embedded optional types? 
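
val_to_arg_str above turns Python scalars into C++ literal text: None becomes a null in ABI mode, bools become 0/1, ints get a platform-dependent long suffix, and infinities map to std::numeric_limits. The standalone sketch below covers only those scalar cases; the element type chosen for the infinity literal is an assumption, and the tensor/IR branches are omitted.

import sys

def scalar_to_cpp(val, abi_compatible: bool) -> str:
    if val is None:
        return "0" if abi_compatible else "c10::nullopt"
    if isinstance(val, bool):  # must come before the int check
        return ("1" if val else "0") if abi_compatible else ("true" if val else "false")
    if isinstance(val, int):
        # int64_t literal suffix differs: long on Linux, long long on macOS
        return f"{val}LL" if sys.platform == "darwin" else f"{val}L"
    if isinstance(val, float) and val in (float("inf"), float("-inf")):
        sign = "" if val > 0 else "-"
        return f"{sign}std::numeric_limits<float>::infinity()"  # assumed element type
    return repr(val)

print([scalar_to_cpp(v, True) for v in (None, True, 3, float("-inf"))])
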
+ result = f"{{{', '.join(self.val_to_arg_str(x) for x in val)}}}" + if config.abi_compatible: + static = self.is_statically_known_list_of_ints(val) + # Need to pass the array length because we can't use std::vector + int_var_array = self.codegen_int_array_var( + result, + known_statically=static, + graph=self.get_codegened_graph(), + ) + return f"{int_var_array}, {len(val)}" + else: + return result + else: + return repr(val) diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_wrapper_cuda.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_wrapper_cuda.py new file mode 100644 index 0000000000000000000000000000000000000000..caa3549b1f9d2062ff0e97e7a4ee9b3857451110 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_wrapper_cuda.py @@ -0,0 +1,328 @@ +import functools +import os +from itertools import chain, count +from typing import Any, List, Optional, TYPE_CHECKING + +import sympy + +from torch._inductor.codecache import get_cpp_wrapper_cubin_path_name + +from .. import config +from ..codecache import CudaKernelParamCache +from ..triton_heuristics import grid as default_grid +from ..virtualized import V +from .cpp_wrapper_cpu import CppWrapperCpu +from .wrapper import SymbolicCallArg + +if TYPE_CHECKING: + from ..graph import GraphLowering + + +def is_int(s: str) -> bool: + # Cpp code gen adds L at the end of ints + # Lets remove it for checking whether we have an int or not + if s and s[-1] == "L": + s = s[:-1] + try: + int(s) + except ValueError: + return False + except TypeError: + return False + return True + + +def is_float(s: str) -> bool: + try: + float(s) + except ValueError: + return False + return True + + +class CppWrapperCuda(CppWrapperCpu): + """ + Generates cpp wrapper for running on GPU and calls CUDA kernels + """ + + def __init__(self): + self.device = "cuda" + super().__init__() + self.grid_id = count() + self.cuda = True + + def write_header(self): + if V.graph.is_const_graph: + # We do not write header for constant graph, it will be written by main module. 
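
The is_int helper near the top of cpp_wrapper_cuda.py above has to account for the trailing "L" that the C++ codegen appends to integer literals. A tiny standalone version of the same check, for illustration:

def is_int(s: str) -> bool:
    if s and s[-1] == "L":       # strip the C++ long-literal suffix first
        s = s[:-1]
    try:
        int(s)
    except (ValueError, TypeError):
        return False
    return True

assert is_int("128L") and is_int("7")
assert not is_int("buf0") and not is_int("1.5")
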
+ return + + super().write_header() + + self.header.splice("#include ") + if config.abi_compatible: + self.header.splice( + "#include " + ) + else: + self.header.splice( + """ + #include + #include + #include + """ + ) + + self.header.splice( + """ + #define CUDA_DRIVER_CHECK(EXPR) \\ + do { \\ + CUresult code = EXPR; \\ + const char *msg; \\ + cuGetErrorString(code, &msg); \\ + if (code != CUDA_SUCCESS) { \\ + throw std::runtime_error( \\ + std::string("CUDA driver error: ") + \\ + std::string(msg)); \\ + } \\ + } while (0); + + namespace { + + struct Grid { + Grid(uint32_t x, uint32_t y, uint32_t z) + : grid_x(x), grid_y(y), grid_z(z) {} + uint32_t grid_x; + uint32_t grid_y; + uint32_t grid_z; + + bool is_non_zero() { + return grid_x > 0 && grid_y > 0 && grid_z > 0; + } + }; + + } // anonymous namespace + + static inline CUfunction loadKernel( + std::string filePath, + const std::string &funcName, + uint32_t sharedMemBytes, + const std::optional &cubinDir = std::nullopt) { + if (cubinDir) { + std::filesystem::path p1{*cubinDir}; + std::filesystem::path p2{filePath}; + filePath = (p1 / p2.filename()).string(); + } + + CUmodule mod; + CUfunction func; + CUDA_DRIVER_CHECK(cuModuleLoad(&mod, filePath.c_str())); + CUDA_DRIVER_CHECK(cuModuleGetFunction(&func, mod, funcName.c_str())); + if (sharedMemBytes > 0) { + CUDA_DRIVER_CHECK(cuFuncSetAttribute( + func, + CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, + sharedMemBytes + )) + } + return func; + } + + static inline void launchKernel( + CUfunction func, + uint32_t gridX, + uint32_t gridY, + uint32_t gridZ, + uint32_t numWarps, + uint32_t sharedMemBytes, + void* args[], + cudaStream_t stream) { + CUDA_DRIVER_CHECK(cuLaunchKernel( + func, gridX, gridY, gridZ, 32*numWarps, 1, 1, sharedMemBytes, stream, args, nullptr + )); + } + """ + ) + + def write_get_raw_stream(self, index, graph=None): + name = f"stream{index}" + self.writeline(f"cudaStream_t {name};") + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_current_cuda_stream({index}, (void**)&{name}));" + ) + return name + + def define_kernel( + self, name: str, kernel: str, metadata: Optional[str] = None, cuda=True + ): + if not cuda: + return super().define_kernel(name, kernel, metadata, cuda) + + def generate(self, is_inference): + self.prefix.writeline("\n") + if not V.graph.aot_mode: + for kernel in chain( + self.src_to_kernel.values(), + [entry[0] for entry in self.user_defined_kernel_cache.values()], + ): + self.prefix.writeline(f"static CUfunction {kernel} = nullptr;") + self.prefix.writeline("\n") + return super().generate(is_inference) + + @functools.lru_cache(None) + def generate_load_kernel_once( + self, + name: str, + mangled_name: str, + cubin_path: str, + shared_mem: int, + graph: "GraphLowering", # for per-graph caching + ): + if V.graph.aot_mode: + self.writeline(f"if (kernels.{name} == nullptr) {{") + self.writeline( + f""" kernels.{name} = loadKernel("{cubin_path}", "{mangled_name}", {shared_mem}, this->cubin_dir_);""" + ) + self.writeline("}") + else: + self.writeline(f"if ({name} == nullptr) {{") + self.writeline( + f""" {name} = loadKernel("{cubin_path}", "{mangled_name}", {shared_mem});""" + ) + self.writeline("}") + + def generate_args_decl(self, call_args): + dynamic_symbols = V.graph.sizevars.free_symbols() + # TODO: only works for constant now, need type info + new_args = [] + for arg in call_args: + var_name = f"var_{next(self.arg_var_id)}" + if isinstance(arg, (sympy.Integer, sympy.Symbol, SymbolicCallArg)): + self.writeline(f"auto {var_name} = 
{arg};") + elif isinstance(arg, sympy.Expr): + self.writeline(f"auto {var_name} = {self.expr_printer(arg)};") + elif is_int(arg): + self.writeline(f"int {var_name} = {arg};") + elif is_float(arg): + self.writeline(f"float {var_name} = {arg};") + elif any(str(arg) == s.name for s in dynamic_symbols): + self.writeline(f"auto {var_name} = {arg};") + elif arg == "nullptr": + self.writeline(f"auto {var_name} = nullptr;") + elif arg == "c10::nullopt": + self.writeline(f"auto {var_name} = c10::nullopt;") + else: + if config.abi_compatible: + self.writeline(f"CUdeviceptr {var_name};") + self.writeline( + f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr({arg}, reinterpret_cast(&{var_name})));" + ) + else: + self.writeline( + f"CUdeviceptr {var_name} = reinterpret_cast({arg}.data_ptr());" + ) + new_args.append(f"&{var_name}") + + return ", ".join(new_args) + + def generate_default_grid(self, name: str, grid: List[Any], cuda: bool = True): + """ + Generate grid configs for launching a CUDA kernel using the grid + function from triton_heuristics. + """ + if not cuda: + return grid + assert isinstance(grid, list), f"expected {grid=} to be a list" + grid = [e.inner_expr if isinstance(e, SymbolicCallArg) else e for e in grid] + grid_fn = default_grid(*grid) + params = CudaKernelParamCache.get(name) + assert ( + params is not None + ), f"cuda kernel parameters for {name} should already exist at this moment, only found {CudaKernelParamCache.get_keys()}" + block_cfg = { + "XBLOCK": params["x_block"], + "YBLOCK": params["y_block"], + "ZBLOCK": params["z_block"], + } + return grid_fn(block_cfg) + + def generate_kernel_call( + self, + name, + call_args, + grid=None, + device_index=None, + cuda=True, + triton=True, + arg_types=None, + grid_fn: str = "grid", + triton_meta=None, + ): + if not cuda: + # Even in CppWrapperCuda, we may see cpp kernels + return super().generate_kernel_call( + name, call_args, grid, device_index, cuda, triton, arg_types + ) + + params = CudaKernelParamCache.get(name) + assert ( + params is not None + ), f"cuda kernel parameters for {name} should already exist at this moment" + mangled_name = params.get("mangled_name", None) + assert mangled_name is not None, "missing mangled_name" + cubin_path = params.get(get_cpp_wrapper_cubin_path_name(), None) + assert cubin_path is not None and os.path.exists( + cubin_path + ), f"cubin file should already exist at this moment: {cubin_path}" + shared_mem = params.get("shared_mem", 0) + + self.generate_load_kernel_once( + name, mangled_name, cubin_path, shared_mem, V.graph + ) + + # args with value 1 are added into equal_to_1 and constants + # in triton_meta (in the Python codegen) which makes them + # inlined in the PTX and compiled CUBIN + if ( + triton_meta is not None + and "configs" in triton_meta + and triton_meta["configs"] + ): + equal_to_1 = triton_meta["configs"][0].equal_to_1 + call_args = [arg for i, arg in enumerate(call_args) if i not in equal_to_1] + + call_args = self.generate_args_decl(call_args) + kernel_args_var = f"kernel_args_var_{next(self.kernel_callsite_id)}" + self.writeline(f"void* {kernel_args_var}[] = {{{call_args}}};") + stream = ( + "stream" + if V.graph.aot_mode + else self.write_get_raw_stream(device_index, V.graph) + ) + grid_name = f"{name}_grid_{next(self.grid_id)}" + assert isinstance( + grid, (list, tuple) + ), f"expected grid to be a list or tuple but got: {grid=}" + + grid = [V.graph.sizevars.simplify(item) for item in grid] + grid_uses_symbolic_shapes = any(item.free_symbols for item in grid) + grid_args 
= [self.grid_expr_printer(item) for item in grid] + grid_args_str = ", ".join(grid_args) + self.writeline(f"Grid {grid_name} = Grid({grid_args_str});") + + if grid_uses_symbolic_shapes: + self.writeline(f"if ({grid_name}.is_non_zero()) {{") + kernel_var_name = f"kernels.{name}" if V.graph.aot_mode else name + self.writeline( + "launchKernel({}, {}, {}, {}, {}, {}, {}, {});".format( + kernel_var_name, + f"{grid_name}.grid_x", + f"{grid_name}.grid_y", + f"{grid_name}.grid_z", + params["num_warps"], + params["shared_mem"], + kernel_args_var, + stream, + ) + ) + if grid_uses_symbolic_shapes: + self.writeline("}") diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__init__.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01b5386b659cb1e75ee7e7424fea322758ba2476 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_cpp_scheduling.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_cpp_scheduling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a2ed8bf8b7d8d16523a8aa45163bee9251fbcb5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_cpp_scheduling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_env.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64287e696123f7e020fbd235ce1ebb435916295d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_env.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_kernel.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_kernel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09012b515ad381d3c9d73ebe0f1f5b648f3c61a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_kernel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_template.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_template.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13435deb4497468dc082ab5dd27a382a8922cc8d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_template.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cutlass_epilogue_gen.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cutlass_epilogue_gen.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..11225216057ec5cebd265abfa70973f189efa8a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cutlass_epilogue_gen.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cutlass_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cutlass_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d604551c1861fae776a8abf6b20f81286c0699c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cutlass_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/device_op_overrides.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/device_op_overrides.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23a37ee53b3fba0eaebcd7b08b38ee17561dfa47 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/device_op_overrides.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/gemm_template.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/gemm_template.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d4ce0686c0461feb779b789913d241297c81ef6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/gemm_template.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py new file mode 100644 index 0000000000000000000000000000000000000000..2e9dc2d8a7e149e1d9023a4aa45f0b6ba166e2d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py @@ -0,0 +1,212 @@ +import logging +from typing import cast, List + +from ...._dynamo.utils import counters + +from ... import config, ir +from ...codecache import code_hash, get_path +from ...ir import ComputedBuffer, CUDATemplateBuffer, Pointwise +from ...scheduler import ( + BaseSchedulerNode, + BaseScheduling, + FusedSchedulerNode, + Scheduler, + SchedulerNode, +) +from ...utils import get_fused_kernel_name, get_kernel_metadata, sympy_product +from ...virtualized import V +from ..common import IndentedBuffer + +from .cutlass_epilogue_gen import CUTLASSEVTOpNotImplementedError + +log = logging.getLogger(__name__) + + +class CUDACPPScheduling(BaseScheduling): + """ + Partial Scheduling implementation for CUDA C++ Kernels. + This class is intended to be used in combination with TritonScheduling, + and delegated to by CUDACombinedScheduling. + + It handles fusion decisions and CUDA C++ specific template code generation. 
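    A rough sketch of how a combined scheduler is expected to consult this class
    when making a fusion decision (illustrative only; `cuda_cpp`, `node1` and
    `node2` are hypothetical variables, while the methods are the ones defined
    below):

        if cuda_cpp.is_cuda_cpp_template(node1) or cuda_cpp.is_cuda_cpp_fused_template(node1):
            # node2 is a candidate pointwise epilogue SchedulerNode
            fusable = cuda_cpp.can_fuse_vertical(node1, node2)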
+ """ + + def __init__(self, scheduler: Scheduler): + super().__init__() + self.scheduler = scheduler + + def group_fn(self, sizes): + return tuple(V.graph.sizevars.simplify(sympy_product(s)) for s in sizes) + + def is_cuda_cpp_template(self, node: BaseSchedulerNode) -> bool: + return isinstance(node, SchedulerNode) and isinstance( + node.node, CUDATemplateBuffer + ) + + def is_cuda_cpp_fused_template(self, node: BaseSchedulerNode) -> bool: + return isinstance(node, FusedSchedulerNode) and self.is_cuda_cpp_template( + node.get_template_node() + ) + + def _can_fuse_epilogue_impl( + self, + cuda_template_buffer: CUDATemplateBuffer, + epilogue_nodes: List[ir.IRNode], + additional_node: ir.IRNode, + ) -> bool: + """ + Check if the given node can be fused with the epilogue. At the moment, Kernels + support fusion with Pointwise operations, wrapped in (named) ComputedBuffer nodes. + + Args: + cuda_template_buffer : A CUDATemplateBuffer object representing the CUDA template and it's result buffer + epilogue_nodes : List[ir.Buffer]: The list of already fused epilogue nodes. + additional_node: The ir.Buffer node to be checked if it can be fused with the epilogue. + Returns: + - bool: True if the given node can be fused with the epilogue, False otherwise. + + """ + if not isinstance(cuda_template_buffer, CUDATemplateBuffer): + return False + if not cuda_template_buffer.template.can_fuse_epilogue: + # The used GEMM op does not support fusing epilogues + return False + if not isinstance(additional_node, ComputedBuffer): + return False + if not isinstance(additional_node.data, Pointwise): + return False + # We can fuse a Pointwise op that depends on the last fused epilogue node + # if any. If there is no epilogue node yet, it needs to depend on the template + # node + node_name = additional_node.get_computed_buffer_name() + if node_name is None: + return False + + if len(epilogue_nodes) == 0: + if cuda_template_buffer.name not in additional_node.get_read_names(): + return False + else: + last_epilogue_node = epilogue_nodes[-1] + assert isinstance(last_epilogue_node, ir.ComputedBuffer) # for mypy + last_epilogue_name = ( + last_epilogue_node.name + if last_epilogue_node.name is not None + else last_epilogue_node.data.name # type: ignore[attr-defined] + ) + if last_epilogue_name not in additional_node.get_read_names(): + return False + if additional_node.layout != cuda_template_buffer.layout: + return False + try: + from torch._inductor.codegen.cuda.cutlass_epilogue_gen import ( + CutlassEVTEpilogueArgumentFormatter, + CutlassEVTEpilogueTypeFormatter, + ) + + CutlassEVTEpilogueTypeFormatter.ir_to_evt_string( + cast(str, cuda_template_buffer.name), "anything", [additional_node] + ) + CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string( + cast(str, cuda_template_buffer.name), [additional_node] + ) + except CUTLASSEVTOpNotImplementedError as e: + not_implemented_op = str(e) + if not_implemented_op.startswith("_op_"): + not_implemented_op = not_implemented_op[4:] + log.warning( + f"Cannot fuse epilogue node {additional_node} into {cuda_template_buffer.name}, likely due to unsupported operation: {not_implemented_op}" # noqa: G004, B950 + ) + return False + else: + # Likely due to unsupported dtype. + log.warning( + f"Cannot fuse epilogue node {additional_node} into {cuda_template_buffer.name}. 
Reason: {not_implemented_op}" # noqa: G004, B950 + ) + return False + return True + + @staticmethod + def _unwrap_epilogue_nodes(fused_node: FusedSchedulerNode) -> List[ir.IRNode]: + nodes = fused_node.get_nodes() + template_node = fused_node.get_template_node() + nodes.remove(template_node) + return [n.node for n in nodes] + + def can_fuse_vertical( + self, node1: BaseSchedulerNode, node2: BaseSchedulerNode + ) -> bool: + if self.is_cuda_cpp_template(node1) and isinstance(node2, SchedulerNode): + return self._can_fuse_epilogue_impl( + cast(CUDATemplateBuffer, node1.node), [], node2.node + ) + elif self.is_cuda_cpp_fused_template(node1) and isinstance( + node2, SchedulerNode + ): + fnode1 = cast(FusedSchedulerNode, node1) + return self._can_fuse_epilogue_impl( + fnode1.get_template_node().node, + self._unwrap_epilogue_nodes(fnode1), + node2.node, + ) + return False + + def define_kernel(self, src_code: str, node_schedule) -> str: + wrapper = V.graph.wrapper_code + if src_code in wrapper.src_to_kernel: + kernel_name = wrapper.src_to_kernel[src_code] + else: + fused_name = ( + get_fused_kernel_name(node_schedule, config.triton.descriptive_names) + if config.triton.descriptive_names + else "" + ) + kernel_name = "_".join(["cuda", fused_name, wrapper.next_kernel_suffix()]) + # use the original src_code as the key + wrapper.src_to_kernel[src_code] = kernel_name + src_code = src_code.replace("KERNEL_NAME", kernel_name) + + _, _, kernel_path = get_path(code_hash(src_code), "py") + + compile_wrapper = IndentedBuffer() + compile_wrapper.writeline("async_compile.cuda(r'''") + compile_wrapper.splice(src_code, strip=True) + compile_wrapper.writeline("''', 'so')") + + metadata_comment = f"# kernel path: {kernel_path}" + origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) + metadata_comment += "\n" + origins + "\n" + detailed_origins + wrapper.define_kernel( + kernel_name, compile_wrapper.getvalue(), metadata_comment + ) + return kernel_name + + def codegen_template( + self, template_node: BaseSchedulerNode, epilogue_nodes: List[SchedulerNode] + ): + """ + Codegen a CUDA template, possibly with fused epilogues + """ + counters["inductor"]["cuda_epilogue_fusion_counter"] += len(epilogue_nodes) + assert self.is_cuda_cpp_template( + template_node + ), "Template node passed to CUDAScheduler.codegen_template must be a SchedulerNode that wraps a CUDATemplateBuffer" + template_node = cast(SchedulerNode, template_node) + _, (numel, rnumel) = template_node.group + assert rnumel == 1 + ctb: CUDATemplateBuffer = cast(CUDATemplateBuffer, template_node.node) + epilogue_ir_nodes: List[ir.Buffer] = [n.node for n in epilogue_nodes] + assert all( + isinstance(n, ir.ComputedBuffer) for n in epilogue_ir_nodes + ), "Epilogue nodes must all be instances of ir.ComputedBuffer" + kernel, render = ctb.make_kernel_render(ctb, epilogue_nodes=epilogue_ir_nodes) + with kernel: + for node in [template_node, *epilogue_nodes]: + node.mark_run() + src_code = render() + + with V.set_kernel_handler(kernel): + node_schedule = [template_node, *epilogue_nodes] + kernel_name = self.define_kernel(src_code, node_schedule) + kernel.call_kernel(kernel_name, ctb, epilogue_ir_nodes) + V.graph.removed_buffers |= kernel.removed_buffers + self.scheduler.free_buffers() diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_env.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_env.py new file mode 100644 index 
0000000000000000000000000000000000000000..6171921173e9717853eafa9497292a7dbdaaf903 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_env.py @@ -0,0 +1,45 @@ +import functools +import logging +from typing import Optional + +import torch + +from ... import config + +log = logging.getLogger(__name__) + + +def get_cuda_arch() -> Optional[str]: + try: + cuda_arch = config.cuda.arch + if cuda_arch is None: + # Get Compute Capability of the first Visible device + major, minor = torch.cuda.get_device_capability(0) + return str(major * 10 + minor) + return str(cuda_arch) + except Exception as e: + log.error("Error getting cuda arch: %s", e) + return None + + +def get_cuda_version() -> Optional[str]: + try: + cuda_version = config.cuda.version + if cuda_version is None: + cuda_version = torch.version.cuda + return cuda_version + except Exception as e: + log.error("Error getting cuda version: %s", e) + return None + + +@functools.lru_cache(None) +def nvcc_exist(nvcc_path: str = "nvcc") -> bool: + if nvcc_path is None: + return False + import subprocess + + res = subprocess.call( + ["which", nvcc_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL + ) + return res == 0 diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_kernel.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_kernel.py new file mode 100644 index 0000000000000000000000000000000000000000..b9980ef3d1f25e0dd760a0c811416c9b0c7d64a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_kernel.py @@ -0,0 +1,374 @@ +import logging +from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union + +from ... import ir +from ...autotune_process import CUDABenchmarkRequest +from ...ir import Buffer, CUDATemplateBuffer, IRNode, Layout, TensorBox +from ...select_algorithm import ChoiceCaller +from ...utils import sympy_product +from ...virtualized import V + +from ..common import IndentedBuffer, Kernel, OpOverrides, PrimitiveInfoType +from ..cpp import CppPrinter, DTYPE_TO_CPP + +if TYPE_CHECKING: + from torch._inductor.codegen.cuda.cuda_template import CUDATemplate + +log = logging.getLogger(__name__) + +cexpr = CppPrinter().doprint + + +def _normalize_idx(index: int, total_length: int) -> int: + return index if index >= 0 else index + total_length + + +class CUDAKernel(Kernel): + """ + Baseclass for CUDA / Cutlass based Kernels + """ + + overrides = OpOverrides # type: ignore[assignment] + + +class CUDATemplateKernel(CUDAKernel): + """ + Template kernels defined by CUDA / Cutlass in C++. + """ + + _EXTRA_CPP_ARGS = "size_t* workspace_size, uint8_t* workspace, cudaStream_t stream" + + def __init__(self, kernel_name): + """ + Initializes a new instance of the CUDATemplateKernel class. + + Args: + kernel_name (str): The name of the kernel. + """ + super().__init__() + self.kernel_name = kernel_name + # Mapping from arg name to IRNode. + self.named_nodes: Dict[str, IRNode] = {} + + def arg_name(self, node: IRNode) -> Optional[str]: + """ + Returns arg name of a given input or output node. + """ + if node is None: + return None + return {**self.args.input_buffers, **self.args.output_buffers}.get( + node.get_name(), None + ) + + def check_not_null(self, node: IRNode) -> str: + """ + Generates code to check that a node is not null. 
+ """ + + if node is None: + return "" + + size_str = self.size(node, 0, -1) + name_str = self.arg_name(node) + if name_str is None: + return "" + + res = IndentedBuffer(initial_indent=2) + res.tabwidth = 1 + res.splice( + f""" + {{ + if (!{name_str}) {{ + int64_t {name_str}_size = {size_str}; + if ({name_str}_size > 0) {{ + throw std::runtime_error("input {name_str} is null but size is not 0!"); + }} + }} + }} + """ + ) + return res.getvalue() + + def def_kernel( + self, + inputs: List[IRNode], + outputs: List[IRNode], + names_str: str = "", + input_reorder: Optional[List[int]] = None, + ) -> str: + """ + Hook called from template code to generate function definition and + needed args. + + Args: + inputs: List of input IRNodes + outputs: List of output IRNodes + names_str: Comma separated list of input + output argument names. + input_reorder: The actual order of input nodes. + e.g. The template might have input argument defined as [X, W, Bias], + and the actual input passed into this template could be [Bias, X, W]. + In this case, the `input_reorder` would be [2, 0, 1]. + """ + + names = [x.strip() for x in names_str.strip().split(",")] + if len(inputs) + len(outputs) != len(names): + raise RuntimeError( + f"{len(inputs) + len(outputs)=} != {len(names)=}, {inputs=}, {outputs=}, {names=}" + ) + + if input_reorder is not None: + assert len(inputs) == len(input_reorder) + else: + input_reorder = list(range(len(inputs))) + + for idx in input_reorder: + name = names[idx] + node = inputs[idx] + if node is not None: + self.named_nodes[name] = node + self.args.input_buffers[node.get_name()] = name + + for name, node in zip(names[len(inputs) : len(inputs) + len(outputs)], outputs): + if node is not None: + self.named_nodes[name] = node + self.args.output_buffers[node.get_name()] = name + + arg_defs, *_ = self.args.cpp_argdefs() + return f"PT_EXPORT int {self.kernel_name}({', '.join(arg_defs)}, {self._EXTRA_CPP_ARGS})" + + def call_kernel( + self, name: str, node: "CUDATemplateBuffer", epilogue_nodes: List[ir.Buffer] # type: ignore[name-defined] + ) -> None: + """ + Generates code to call the kernel through V.graph.wrapper_code. + used from within torch._inductor.wrapper.WrapperCodeGen + + name: Name of kernel function. + node: The CUDATemplateBuffer node which contains information about the kernel, it's fused epilogue nodes + as well as all required inputs and outputs. + """ + wrapper = V.graph.wrapper_code + _, call_args, _ = self.args.python_argdefs() + # dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar + for i in range(len(call_args)): + if V.graph.is_unspec_arg(call_args[i]): + call_args[i] = call_args[i] + ".item()" + else: + call_args[i] = f"c_void_p({call_args[i]}.data_ptr())" + + # workspace_size ptr is NULL to mark this call is not intended for retrieving workspace_size. + # workspace_size should have already been retrieved prior to this call. + call_args.append("None") + + if node.get_workspace_size() > 0: + call_args.append(f"c_void_p({node.get_name()}_workspace.data_ptr())") + else: + call_args.append("None") + + wrapper.generate_kernel_call( + name, + call_args, + device_index=V.graph.scheduler.current_device.index, + cuda=True, + triton=False, + ) + + def dtype(self, node: IRNode) -> Optional[str]: + """ + Generates code which represents dtype of a given node. + """ + + if node is None: + return "void" + return DTYPE_TO_CPP.get(node.get_layout().dtype) + + def offset(self, node: IRNode) -> str: + """ + Generates code which represents offset of a given node. 
+ """ + + if node is None: + return "0" + return str(node.get_layout().offset) + + def ptr(self, node: IRNode) -> str: + """ + Generates code which represents pointer of a given node. + """ + + if node is None: + return "nullptr" + arg_name = self.arg_name(node) + if arg_name is None: + return "nullptr" + offset = self.offset(node) + return arg_name if offset == "0" else f"{arg_name} + {offset}" + + def size( + self, + node: IRNode, + start_index: int, + end_index: Optional[int] = None, + default_value: int = 0, + ) -> str: + """ + Hook called from template code to get the size of an arg. + Generates code which represents size of a given node in [start_index, end_index). + If node is None, returns default_value. + + TODO: Will add needed args to pass it in if it is dynamic. + """ + + if node is None: + return str(default_value) + + start_index = _normalize_idx(start_index, len(node.get_size())) + if end_index is None: + end_index = start_index + end_index = _normalize_idx(end_index, len(node.get_size())) + + sizes = node.get_size()[start_index : end_index + 1] + if len(sizes) == 0: + return str(default_value) + + val = sympy_product(sizes) + return cexpr(self.rename_indexing(val)) + + def stride(self, node: IRNode, index: int, default_value: int = 0) -> str: + """ + Hook called from template code to get the stride of an arg. + Generates code which represents stride of a given node at index. + If node is None, returns default_value. + + TODO: Will add needed args to pass it in if it is dynamic. + """ + + if node is None: + return str(default_value) + + index = _normalize_idx(index, len(node.get_size())) + if index < 0: + return str(default_value) + + stride = node.get_stride()[index] + return cexpr(self.rename_indexing(stride)) + + def row_or_column_stride(self, node: IRNode, default_value: int = 0) -> str: + """ + Hook called from template code to get the row or column stride of an arg. + This is required by some CUTLASS 2.X APIs. + If the node is in row_major, it returns stride[-2]. + If the node is in column_major, it returns stride[-1]. + + TODO: Will add needed args to pass it in if it is dynamic. + """ + + if node is None or len(node.get_stride()) < 2: + return str(default_value) + + stride0 = node.get_stride()[-1] + stride1 = node.get_stride()[-2] + if stride0 == 1: + return cexpr(self.rename_indexing(stride1)) + elif stride1 == 1: + return cexpr(self.rename_indexing(stride0)) + else: + raise RuntimeError( + f"At least 1 stride should be 1. Strides: {node.get_stride()=}" + ) + + +class CUDATemplateCaller(ChoiceCaller): + """ + CUDATemplateCaller + + This class represents a caller for CUDA template kernels. It is a subclass of ChoiceCaller. + Attributes: + name (str): The name of the caller. + category (str): The category of the caller. + bmreq (CUDABenchmarkRequest): The benchmark request for the caller. + template_buffer (CUDATemplateBuffer): The template buffer for the caller. 
+ """ + + def __init__( + self, + name: str, + category: str, + input_nodes: List[Buffer], + layout: Layout, + make_kernel_render: Callable[[CUDATemplateBuffer, Optional[List[IRNode]]], str], + bmreq: CUDABenchmarkRequest, + template: "CUDATemplate", # type: ignore[name-defined] + info_kwargs: Optional[Dict[str, Union[PrimitiveInfoType, List[PrimitiveInfoType]]]], # type: ignore[type-arg] + ): + super().__init__(name, input_nodes, layout) + self.category = category + self.make_kernel_render = make_kernel_render + self.bmreq = bmreq + self.template = template + self.info_kwargs = info_kwargs + + def precompile(self) -> None: + assert self.bmreq is not None + self.bmreq.precompile() + + def benchmark(self, *args, out) -> float: + assert self.bmreq is not None + return self.bmreq.benchmark( + *args, output_tensor=out + ) # @TODO: Hack for ensuring that Cutlass Kernel is preferred + + def __str__(self): + return f"CUDATemplateCaller(source_file={self.bmreq.source_file})" + + def call_name(self) -> str: + return f"cuda_template_kernels.{self.name}" + + def hash_key(self) -> str: + return "-".join( + [ + self.category, + self.bmreq.hash_key, + ] + ) + + def info_dict(self) -> Dict[str, Union[PrimitiveInfoType, List[PrimitiveInfoType]]]: + """Information returned here is logged to the autotune log file when that is enabled.""" + if self.info_kwargs is not None and "op" in self.info_kwargs: + op: Any = self.info_kwargs["op"] + epilogue_node_names: List[str] = [ + getattr(en, "name", "no_name") + for en in self.info_kwargs.get("epilogue_nodes", []) # type: ignore[union-attr] + ] + epilogue_node_strs: List[str] = [ + str(en) for en in self.info_kwargs.get("epilogue_nodes", []) # type: ignore[union-attr] + ] + return { + "backend": "CUDA", + "op_type": type(op).__name__, + "op_conf_name": str(op.configuration_name()), + "op_arch": str(op.arch), + "tile_shape": str(op.tile_description.tile_shape), + "epilogue_schedule": str(op.epilogue_schedule), + "kernel_schedule": str(op.kernel_schedule), + "element_accumulator": str(op.accumulator_type()), + "op_name": str(op.procedural_name()), + "epilogue_node_names": epilogue_node_names, # type: ignore[dict-item] + "epilogue_node_strs": epilogue_node_strs, # type: ignore[dict-item] + "instruction_shape": str( + op.tile_description.math_instruction.instruction_shape + ), + } + else: + return {"backend": "CUDA", "op_type": "unknown"} + + def output_node(self) -> TensorBox: + return TensorBox.create( + CUDATemplateBuffer( + layout=self.layout, + inputs=self.input_nodes, + make_kernel_render=self.make_kernel_render, + workspace_size=self.bmreq.workspace_size, + template=self.template, + ) + ) diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_template.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_template.py new file mode 100644 index 0000000000000000000000000000000000000000..258e54266477c6b3cab695cb4214f7c71f5315d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_template.py @@ -0,0 +1,242 @@ +import functools +import itertools +import logging +from typing import List, Optional +from unittest.mock import patch + +import sympy + +import torch +from ...autotune_process import CUDABenchmarkRequest, TensorMeta +from ...ir import Buffer, CUDATemplateBuffer, IRNode, Layout + +from ...utils import IndentedBuffer, unique +from ...virtualized import V +from ..common import KernelTemplate +from .cuda_kernel import CUDATemplateCaller, CUDATemplateKernel + +log = 
logging.getLogger(__name__) + + +class CUDATemplate(KernelTemplate): + index_counter = itertools.count() + + def __init__( + self, + name: str, + input_nodes: List[Buffer], + layout: Layout, + input_reorder: Optional[List[int]] = None, + ): + """ + + Baseclass for CUDA C++ Templates, derived from KernelTemplate. Not to be instantiated directly. + + Args: + name (str): The name of the CUDATemplate object. + input_nodes (List[IRNode]): A list of input IRNodes. + layout (Layout): The layout of the output buffer / tensor. + input_reorder (Optional[List[int]]): An optional list that specifies the order of the input nodes. + + """ + super().__init__(name) + self.input_nodes = input_nodes + self.output_node: Buffer = Buffer("buf_out", layout) + self.input_reorder = input_reorder + self.layout = layout + + def generate( # type: ignore[override] + self, + **kwargs, + ) -> CUDATemplateCaller: + """ + Generates the CUDA template caller object for the given GEMM template and operation. This CUDATemplateCaller + may be used to call and benchmark the generated CUDA kernel in a standalone manner to enable Autotuning. + + Args: + kwargs: Additional keyword arguments. + + Returns: + A CUDATemplateCaller object representing the generated CUDA template caller. + """ + kernel_name = f"cuda_{self.name}" + with patch.object( + V.graph, "get_dtype", self._fake_get_dtype(self.output_node) + ), CUDATemplateKernel( + kernel_name=kernel_name, + ) as kernel: + code = self.render(kernel=kernel, **kwargs) + _, call_args, _ = kernel.args.python_argdefs() + log.debug("Generated Code:\n%s", code) + log.debug( + "Args: cpp_argdefs: %s, python_argdefs: %s", + kernel.args.cpp_argdefs(), + kernel.args.python_argdefs(), + ) + + input_reorder = ( + self.input_reorder + if self.input_reorder is not None + else list(range(len(self.input_nodes))) + ) + expected_args = list( + unique(self.input_nodes[idx].get_name() for idx in input_reorder) + ) + expected_args.extend([self.output_node.get_name()]) + assert list(call_args)[: len(expected_args)] == expected_args, ( + call_args, + expected_args, + ) + extra_args = V.graph.sizevars.size_hints( + map(sympy.expand, call_args[len(expected_args) :]) + ) + + kernel_hash_name = f"cuda_{self.name}_{next(self.index_counter)}" + + # create the BenchmarkRequest + bmreq = CUDABenchmarkRequest( + kernel_name=kernel_name, + input_tensor_meta=TensorMeta.from_irnodes(self.input_nodes), + output_tensor_meta=TensorMeta.from_irnodes(self.output_node), + extra_args=extra_args, + source_code=code, + ) + + def make_kernel_render( + template_node: CUDATemplateBuffer, + epilogue_nodes: Optional[List[IRNode]] = None, + ): + kernel = CUDATemplateKernel( + kernel_name="KERNEL_NAME", + ) + render = functools.partial( + self.render, + kernel=kernel, + template_buffer_node=template_node, + epilogue_nodes=epilogue_nodes, + **kwargs, # includes "op" argument in case of CUTLASSGemmTemplate + ) + return kernel, render + + return CUDATemplateCaller( + kernel_hash_name, + self.name, + self.input_nodes, + self.output_node.get_layout(), + make_kernel_render, + bmreq, + self, + kwargs, + ) + + def header(self) -> IndentedBuffer: + res = IndentedBuffer() + res.splice( + """ + #include + #include + #include + #include + #include + """ + ) + return res + + def globals(self) -> IndentedBuffer: + res = IndentedBuffer() + res.splice( + """ + // We compile all models with -fvisibility=hidden. Any symbols that need to be + // exposed in the final shared library must be declared with PT_EXPORT to make + // them visible. 
+ #ifdef __GNUC__ // Applies to any compiler with GNU extensions (clang and g++) + #define PT_EXPORT __attribute__((__visibility__("default"))) + #else + #ifdef _WIN32 + #define PT_EXPORT __declspec(dllexport) + #else + #define PT_EXPORT + #endif + #endif + using bfloat16 = nv_bfloat16; + """ + ) + return res + + def render(self, **kwargs) -> str: + raise NotImplementedError + + +class CUTLASSTemplate(CUDATemplate): + """ + CUTLASSTemplate is a class that provides a template for generating CUTLASS Templates. Used as a baseclass for the + CUTLASSGemmTemplate, providing functionality that might also be relevant for non-GEMM CUTLASS Kernels. + """ + + def header(self) -> IndentedBuffer: + res = super().header() + res.splice( + """ + #include "cute/tensor.hpp" + #include "cutlass/cutlass.h" + #include "cutlass/numeric_types.h" + #include "cutlass/tensor_ref.h" + #include "cutlass/util/host_tensor.h" + #include "cutlass/util/reference/host/tensor_fill.h" + #include "cutlass/util/reference/device/tensor_fill.h" + #include "cutlass/util/device_memory.h" + """ + ) + return res + + def globals(self) -> IndentedBuffer: + res = super().globals() + res.splice( + """ + using namespace cute; + #define CUTLASS_CHECK(status) \\ + { \\ + cutlass::Status error = status; \\ + if (error != cutlass::Status::kSuccess) { \\ + auto msg = std::string("[") + __FILE__ + "] Got cutlass error: " + \\ + cutlassGetStatusString(error) + " at: " + std::to_string(__LINE__); \\ + throw std::runtime_error(msg); \\ + } \\ + } + + // Used as pass-through functor in EVT just for type casting / rounding + template + struct identity_op { + CUTLASS_HOST_DEVICE + T operator()(T val) const { return val; } + }; + + """ + ) + return res + + def cute_int(self, int_str: str, var_name: str) -> str: + res = "" + if int_str in {"1", "1L"}: + res = "cute::Int<1>{}" + else: + res = int_str + + return f"{res} /* {var_name} */" + + _DTYPE_TO_CUTLASS = { + torch.float32: "float", + torch.float64: "double", + torch.float16: "cutlass::half_t", + torch.int32: "int", + torch.int8: "int8_t", + torch.uint8: "uint8_t", + torch.bool: "bool", + torch.bfloat16: "cutlass::bfloat16_t", + } + + def cutlass_type_cast(self, node: IRNode, ptr: str) -> str: + if node is None: + return ptr + else: + return f"({self._DTYPE_TO_CUTLASS.get(node.get_dtype())}*)({ptr})" diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..1e4828aab466ab519dbfcd018932f102da6e3208 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py @@ -0,0 +1,360 @@ +from typing import Dict, List +from unittest.mock import patch + +import sympy + +import torch._inductor.virtualized as virtualized +from torch._inductor.ir import ComputedBuffer, FlexibleLayout, IRNode, Pointwise +from torch._inductor.utils import IndentedBuffer, sympy_str + + +# Used as a magic string to indicate an unsupported sympy expression +# became part of generated C++ code. +_MAGIC_SYMPY_ERROR_STRING = "[!sympy: unsupported expr!]" + + +def _arg_str(a): + if isinstance(a, sympy.Expr): + # If this return value containting the _MAGIC_SYMPY_ERROR_STRING + # is used as part of the final generated C++ code, + # a CUTLASSEVTOpNotImplementedError is raised to indicate that + # the op could not be converted to a valid EVT expression. 
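    # Illustrative behaviour (not taken from the upstream source):
    #   _arg_str(sympy.Symbol("s0"))  ->  "[!sympy: unsupported expr!]('s0')"
    #   _arg_str("accum")             ->  "accum"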
+ return f"{_MAGIC_SYMPY_ERROR_STRING}('{sympy_str(a)}')" + return str(a) + + +class CUTLASSEVTOpNotImplementedError(NotImplementedError): + pass + + +class CutlassEVTEpilogueTypeFormatter: + """ + Codegen class, which provides an entry point to generate + Cutlass "Epilogue Visitor Tree" (EVT) functor declarations. + + See https://github.com/NVIDIA/cutlass/tree/main/examples/49_hopper_gemm_with_collective_builder + for more about EVTs and how they are declared and used to generate. + + Notes: + * Used by CUTLASSGemmTemplate. + * This class should not be instantiated by users, it is intended to be used + by calling CutlassEVTEpilogueTypeFormatter.ir_to_evt_string(...) + which instantiates this class as an ops handler for virtualized.V.ops.[op-name] + * Extend this with more _op_ nodes to add support for new pointwise operations. + + + """ + + def __init__(self, accumulator_node_name, evt_type_name): + """ + + Initialize an instance of CutlassEVTEpilogueTypeFormatter. + + Parameters: + - accumulator_node_name (str): The name of the output Buffer for the GEMM operation in the original (unfused) + IR graph. + - evt_type_name (str): The output name of the EVT type we are generating. + + """ + self.accumulator_node_name = accumulator_node_name + self.output = IndentedBuffer(0) + self.var_counter = 0 + self.evt_type_name = evt_type_name + self.aliases = dict() + + @staticmethod + def ir_to_evt_string( + template_output_node_name: str, + evt_type_name: str, + epilogue_nodes: List[IRNode], + ): + """ + Formats IR nodes into a string representation compatible with Cutlass EVT format. + + Args: + template_output_node_name (str): The name of the template output node. + evt_type_name (str): The name of the EVT type. + epilogue_nodes (List[IRNode]): A list of IR nodes representing the epilogue nodes. As of now, these must be + ComputedBuffer nodes wrapping Pointwise nodes. + + Returns: + A string representation of the IR nodes formatted according to the Cutlass EVT format. + """ + formatter = CutlassEVTEpilogueTypeFormatter( + template_output_node_name, evt_type_name + ) + + with virtualized.V.set_ops_handler(formatter), patch.object( + FlexibleLayout, "allow_indexing", True + ): + for node in epilogue_nodes: + if isinstance(node, ComputedBuffer): + pnode = node.data + else: + raise RuntimeError( + "Epilogue nodes must be Pointwise nodes, wrapped in a named ComputedBuffer" + ) + assert isinstance(pnode, Pointwise) + index = pnode._index(pnode.ranges) + result = pnode.inner_fn(index) + # each epilogue node results in a single "using" statement and may refer to the previous steps by name + formatter.aliases[node.name] = result + res = formatter.getvalue(result) # type: ignore[possibly-undefined] + if _MAGIC_SYMPY_ERROR_STRING in res: + raise CUTLASSEVTOpNotImplementedError( + "sympy / indexing expressions not yet supported in EVT fusion" + ) + else: + return res + + def __getattr__(self, name): + """ + Resolve V.ops. calls, after this instance has been installed as V.ops handler. 
+ """ + + def inner(*args, **kwargs): + fargs = [_arg_str(a) for a in args] + fkwargs = {key: _arg_str(a) for key, a in kwargs.items()} + fn = getattr(self, f"_op_{name}") + line = fn(*fargs, **fkwargs) + self.var_counter += 1 + varname = f"EVT_expr_{self.var_counter}" + # replace line with a new variable name + self.output.writeline(f"using {varname} = {line};") + return varname + + if name.startswith("_"): + raise CUTLASSEVTOpNotImplementedError(name) + if hasattr(self, f"_op_{name}"): + return inner + else: + raise CUTLASSEVTOpNotImplementedError(name) + + def _op_load(self, name, index_expr): + # Load an input to an operation. Might be the output of the matmul, the result + # of a previous epilogue node, a constant or (TODO) an auxiliary input. + if name == self.accumulator_node_name: + return f"cutlass::epilogue::fusion::Sm90AccFetch /* :={name} (matmul output in accumulator) */" + elif name in self.aliases: + return self.aliases[name] + else: + # return f"cutlass::epilogue::fusion::Sm90SrcFetch /* :={name} */" + raise CUTLASSEVTOpNotImplementedError( + f"Operand {name} not found. Auxiliary inputs not supported yet." + ) + + def _op_constant(self, value, dtype): + # Load a constant + if str(dtype) in ("torch.float16", "torch.float32"): + return f"cutlass::epilogue::fusion::Sm90ScalarBroadcast /* value={value}, dtype={dtype} */" + else: + raise CUTLASSEVTOpNotImplementedError( + f"Unsupported dtype for constant: {dtype}" + ) + + def _cutlass_binary_functional_op(self, op, a, b): + # Perform a named operation on two inputs + # see https://github.com/NVIDIA/cutlass/blob/6407bcdf0a24097b7b016ee105937693c62f9923/include/cutlass/functional.h for ops + return f"cutlass::epilogue::fusion::Sm90EVT,{a},{b}>" # noqa: B950 + + def _convert_to_output_dtype(self, a): + # Convert the final output to the dtype of the output buffer + return f"cutlass::epilogue::fusion::Sm90EVT,{a}>" # noqa: B950 + + def _op_to_dtype(self, a, *args, **kwargs): + # no-op in our case, since we convert to the output dtype at the end and convert everything to the accumulator + # dtype. + # Is is asserted ( and ascertained during can_fuse decision ) that the dtype remains compatible + # throughout the fusion chain. + return a # noqa: B950 + + def _op_mul(self, a, b): + return self._cutlass_binary_functional_op("multiplies", a, b) + + def _op_div(self, a, b): + return self._cutlass_binary_functional_op("divides", a, b) + + def _op_truediv(self, a, b): + return self._cutlass_binary_functional_op("divides", a, b) + + def _op_ge(self, a, b): + return self._cutlass_binary_functional_op("greater_equal", a, b) + + def _op_add(self, a, b): + return self._cutlass_binary_functional_op("plus", a, b) + + def _op_sub(self, a, b): + return self._cutlass_binary_functional_op("minus", a, b) + + def _op_minimum(self, a, b): + return self._cutlass_binary_functional_op("minimum", a, b) + + def _op_maximum(self, a, b): + return self._cutlass_binary_functional_op("maximum", a, b) + + def _op_relu(self, a): + const_zero = self._op_constant(0.0, "torch.float32") + return f"cutlass::epilogue::fusion::Sm90EVT,{a}, {const_zero}>" # noqa: B950 + + def reduction(self, dtype, src_dtype, reduction_type, value): + raise CUTLASSEVTOpNotImplementedError() + + # Add more ops here... 
+ def getvalue(self, result) -> str: + # Return final result + dtype_converted_expr = self._convert_to_output_dtype( + f"EVT_expr_{self.var_counter}" + ) + self.output.writeline(f"using {self.evt_type_name} = {dtype_converted_expr};") + return self.output.getvalue() + + +class CutlassEVTEpilogueArgumentFormatter: + """ + Codegen class, which provides an entry point to generate + Cutlass "Epilogue Visitor Tree" (EVT) Argument initializers + + See https://github.com/NVIDIA/cutlass/tree/main/examples/49_hopper_gemm_with_collective_builder + for more about EVTs and how they are declared and used to generate. + + Notes: + * Used by CUTLASSGemmTemplate. + * This class should not be instantiated by users, it is intended to be used + by calling CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string(...) + which instantiates this class as an ops handler for virtualized.V.ops.[op-name] + * Extend this with more _op_ nodes to add support for new pointwise operations. + + + """ + + def __init__(self, accumulator_node_name: str): + """ + + Initializes a CutlassEVTEpilogueArgumentFormatter object. Do not instantiate directly. + Use the CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string static method. + + Args: + accumulator_node_name (str): The name of the accumulator node which should contain + the Matmul result before fusion according to the IR graph. + """ + self.accumulator_node_name: str = accumulator_node_name # + self.output: IndentedBuffer = IndentedBuffer(0) # The output buffer for codegen + self.var_counter: int = ( + 0 # used to generate variable names, incremented for each new variable + ) + self.aliases: Dict[str, str] = dict() # Aliases for subexpression functors + + @staticmethod + def ir_to_evt_argument_string( + template_output_node_name: str, + epilogue_nodes: List[IRNode], + ) -> str: + formatter = CutlassEVTEpilogueArgumentFormatter( + template_output_node_name, + ) + + with virtualized.V.set_ops_handler(formatter), patch.object( + FlexibleLayout, "allow_indexing", True + ): + for node in epilogue_nodes: + assert isinstance(node, ComputedBuffer) + pnode = node.data + assert isinstance(pnode, Pointwise) + index = pnode._index(pnode.ranges) + result = pnode.inner_fn(index) + # each epilogue node results in a single "using" statement and may refer to the previous steps by name + if node.name is not None: + formatter.aliases[node.name] = result + + res: str = formatter.getvalue(result) # type: ignore[possibly-undefined] + if _MAGIC_SYMPY_ERROR_STRING in res: + raise CUTLASSEVTOpNotImplementedError( + "sympy / indexing expressions not yet supported in EVT fusion" + ) + else: + return res + + def __getattr__(self, name): + def inner(*args, **kwargs): + fargs = [_arg_str(a) for a in args] + fkwargs = {key: _arg_str(a) for key, a in kwargs.items()} + fn = getattr(self, f"_op_{name}") + line = fn(*fargs, **fkwargs) + return line + + if name.startswith("_"): + raise CUTLASSEVTOpNotImplementedError(name) + + if hasattr(self, f"_op_{name}"): + return inner + else: + raise CUTLASSEVTOpNotImplementedError(name) + + def _op_load(self, name, index_expr): + if name == self.accumulator_node_name: + return "{}" + elif name in self.aliases: + return self.aliases[name] + else: + raise CUTLASSEVTOpNotImplementedError( + f"Operand {name} not found. Auxiliary inputs not supported yet." 
+ ) + + def _op_constant(self, value, dtype): + if str(dtype) in ("torch.float16", "torch.float32"): + return "{ static_cast(" + str(value) + ") }" + else: + raise CUTLASSEVTOpNotImplementedError( + f"Unsupported dtype for constant: {dtype}" + ) + + def _cutlass_binary_functional_op(self, op, a, b): + return f"{{ /*{op}: */ {a}, {b} }}" + + def _op_mul(self, a, b): + return self._cutlass_binary_functional_op("multiplies", a, b) + + def _op_div(self, a, b): + return self._cutlass_binary_functional_op("divides", a, b) + + def _op_truediv(self, a, b): + return self._cutlass_binary_functional_op("divides", a, b) + + def _op_ge(self, a, b): + return self._cutlass_binary_functional_op("greater_equal", a, b) + + def _op_add(self, a, b): + return self._cutlass_binary_functional_op("plus", a, b) + + def _op_sub(self, a, b): + return self._cutlass_binary_functional_op("minus", a, b) + + def _op_minimum(self, a, b): + return self._cutlass_binary_functional_op("minimum", a, b) + + def _op_maximum(self, a, b): + return self._cutlass_binary_functional_op("maximum", a, b) + + def _op_relu(self, a): + const_zero = self._op_constant(0.0, "torch.float32") + return "{" + str(a) + ", " + const_zero + "}" + + def _op_to_dtype(self, a, dtype, src_dtype=None): + # Is is asserted ( and ascertained during can_fuse decision ) that the dtype remains compatible + # throughout the fusion chain. + assert dtype in ( + "torch.float32", + "torch.float16", + ), f"Unsupported dtype: {dtype}" + assert src_dtype in ( + None, + "torch.float32", + "torch.float16", + ), f"Unsupported source dtype: {src_dtype}" + return a + + def reduction(self, dtype, src_dtype, reduction_type, value): + raise CUTLASSEVTOpNotImplementedError() + + def getvalue(self, result) -> str: + return "{" + str(result) + "}" diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__init__.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ece44d3406a21eac845d76e3654f974069db23c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/gemm_operation_extensions.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/gemm_operation_extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1eb41045724734ca2edafd217dab19c0e01b26da Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/gemm_operation_extensions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/gemm_operation_extensions.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/gemm_operation_extensions.py new file mode 100644 index 
0000000000000000000000000000000000000000..2a386a114e86f9556f0302151b42ae8c37b7e806 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/gemm_operation_extensions.py @@ -0,0 +1,186 @@ +from ..cutlass_utils import try_import_cutlass + +if try_import_cutlass(): + import enum + + from cutlass_library.library import * # noqa: F401, F403 + from cutlass_library.gemm_operation import * # noqa: F401, F403 + + # copied / modified from original at + # https://github.com/NVIDIA/cutlass/blob/8783c41851cd3582490e04e69e0cd756a8c1db7f/tools/library/scripts/gemm_operation.py#L658 + # to support EVT similar to + # https://github.com/NVIDIA/cutlass/blob/8783c41851cd3582490e04e69e0cd756a8c1db7f/examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu#L315C69-L315C69 # noqa: B950 + class EmitGemmUniversal3xInstanceWithEVT: + """Responsible for emitting a CUTLASS 3.x template definition""" + + def __init__(self, operation_suffix=""): + self.operation_suffix = operation_suffix + self.includes = [ + "cutlass/cutlass.h", + "cutlass/gemm/gemm.h", + "cutlass/numeric_types.h", + "cutlass/gemm/kernel/gemm_universal.hpp", + "cutlass/gemm/collective/collective_builder.hpp", + "cutlass/epilogue/collective/collective_builder.hpp", + ] + self.builtin_epilogue_functor_template = """ + ${epilogue_functor}< + ${element_c}, + ${epilogue_vector_length}, + ${element_accumulator}, + ${element_epilogue} + > + """ + self.gemm_template = """ + using EpilogueScheduleType = ${epilogue_schedule}; + static_assert(cute::is_same_v || + cute::is_same_v, + "Epilogue visitor trees are currently only supported by the TMA warp-specialized epilogue"); + static constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest; + using ElementAcc = ${element_accumulator}; + using ElementD = ${element_d}; + ${epilogue_functor}; + using ${operation_name}_epilogue = + typename cutlass::epilogue::collective::CollectiveBuilder< + ${arch}, ${opcode_class}, + cute::Shape, + cute::Shape, + cutlass::epilogue::collective::EpilogueTileAuto, + ${element_accumulator}, ${element_epilogue}, + ${element_c}, ${layout_c}, ${align_c}, + ${element_d}, ${layout_d}, ${align_d}, + EpilogueScheduleType, + ${operation_name}_epilogue_functor + >::CollectiveOp; + + using ${operation_name}_mainloop = + typename cutlass::gemm::collective::CollectiveBuilder< + ${arch}, ${opcode_class}, + ${element_a}, ${layout_a}, ${align_a}, + ${element_b}, ${layout_b}, ${align_b}, + ${element_accumulator}, + cute::Shape, + cute::Shape, + ${stages}, + ${kernel_schedule} + >::CollectiveOp; + + // Gemm operator ${operation_name} + using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal< + cute::Shape, + ${operation_name}_mainloop, + ${operation_name}_epilogue, + ${tile_scheduler}>; + + // Define named type + struct ${operation_name} : + public ${operation_name}_base { }; + + """ + + # + def instance_template(self): + return """ + ${compile_guard_start} + using GemmKernel = cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>; + manifest.append( + new ${gemm_kind}("${operation_name}")); + ${compile_guard_end} + """ + + # + def emit(self, operation): + tile_shape = operation.tile_description.tile_shape + warp_count = operation.tile_description.warp_count + # stage count set to zero indicates builder automatic stage selection + if operation.tile_description.stages > 0: + stage_count_string = f"cutlass::gemm::collective::StageCount<{str(operation.tile_description.stages)}>" + else: + stage_count_string 
= f"cutlass::gemm::collective::StageCountAutoCarveout" # noqa: B950 + warp_shape = [tile_shape[idx] // warp_count[idx] for idx in range(3)] + + ( + instance_layout_A, + instance_layout_B, + instance_layout_C, + instance_layout_D, + ) = ( + operation.A.layout, + operation.B.layout, + operation.C.layout, + operation.D.layout, + ) + + # 3.0 profiler integration only supports trivial epilogues for now + epilogue_vector_length = 1 + + # Support built-in epilogue functors or user-defined functions + if isinstance(operation.epilogue_functor, enum.Enum): + values = { + "epilogue_vector_length": str(epilogue_vector_length), + "element_epilogue": str(DataTypeTag[operation.element_epilogue]), # type: ignore[name-defined] + "epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor], # type: ignore[name-defined] + } + epilogue_functor = SubstituteTemplate( # type: ignore[name-defined] + self.builtin_epilogue_functor_template, values + ) + + elif callable(operation.epilogue_functor): + epilogue_functor = operation.epilogue_functor( + operation.procedural_name() + "_epilogue_functor" + ) + else: + epilogue_functor = str(operation.epilogue_functor) + # + + values = { + "operation_name": operation.procedural_name(), + "operation_suffix": self.operation_suffix, + "element_a": DataTypeTag[operation.A.element], # type: ignore[name-defined] + "layout_a": LayoutTag[instance_layout_A], # type: ignore[name-defined] + "element_b": DataTypeTag[operation.B.element], # type: ignore[name-defined] + "layout_b": LayoutTag[instance_layout_B], # type: ignore[name-defined] + "element_c": DataTypeTag[operation.C.element], # type: ignore[name-defined] + "layout_c": LayoutTag[instance_layout_C], # type: ignore[name-defined] + "element_d": DataTypeTag[operation.D.element], # type: ignore[name-defined] + "layout_d": LayoutTag[instance_layout_D], # type: ignore[name-defined] + "element_accumulator": DataTypeTag[operation.accumulator_type()], # type: ignore[name-defined] + "opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], # type: ignore[name-defined] # noqa: B950 + "arch": "cutlass::arch::Sm%d" % operation.arch, + "tile_shape_m": str(operation.tile_description.tile_shape[0]), + "tile_shape_n": str(operation.tile_description.tile_shape[1]), + "tile_shape_k": str(operation.tile_description.tile_shape[2]), + "cluster_m": str(operation.tile_description.cluster_shape[0]), + "cluster_n": str(operation.tile_description.cluster_shape[1]), + "cluster_k": str(operation.tile_description.cluster_shape[2]), + "warp_shape_m": str(warp_shape[0]), + "warp_shape_n": str(warp_shape[1]), + "warp_shape_k": str(warp_shape[2]), + "instruction_shape_m": str( + operation.tile_description.math_instruction.instruction_shape[0] + ), + "instruction_shape_n": str( + operation.tile_description.math_instruction.instruction_shape[1] + ), + "instruction_shape_k": str( + operation.tile_description.math_instruction.instruction_shape[2] + ), + "kernel_schedule": str(KernelScheduleTag[operation.kernel_schedule]), # type: ignore[name-defined] + "epilogue_schedule": str(EpilogueScheduleTag[operation.epilogue_schedule]), # type: ignore[name-defined] + "epilogue_functor": epilogue_functor, + "stages": stage_count_string, + "align_a": str(operation.A.alignment), + "align_b": str(operation.B.alignment), + "align_c": str(operation.C.alignment), + "align_d": str(operation.C.alignment), + "transform_a": ComplexTransformTag[operation.A.complex_transform], # type: ignore[name-defined] + "transform_b": 
ComplexTransformTag[operation.B.complex_transform], # type: ignore[name-defined] + "math_operation": MathOperationTag[ # type: ignore[name-defined] + operation.tile_description.math_instruction.math_operation + ], + "epilogue_vector_length": str(epilogue_vector_length), + "element_epilogue": str(DataTypeTag[operation.element_epilogue]), # type: ignore[name-defined] + "tile_scheduler": str(TileSchedulerTag[operation.tile_scheduler]), # type: ignore[name-defined] + } + + return SubstituteTemplate(self.gemm_template, values) # type: ignore[name-defined] diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_utils.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..203eaef35d3ac024c9c9cfc316a88d24b13107d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_utils.py @@ -0,0 +1,258 @@ +import functools +import logging +import os +import sys +from dataclasses import dataclass +from typing import Any, List, Optional + +import sympy + +import torch + +from ...codecache import cache_dir +from ...config import cuda as inductor_cuda_config +from ...ir import Layout +from .cuda_env import get_cuda_arch, get_cuda_version + +log = logging.getLogger(__name__) + + +def _rename_cutlass_import(content: str, cutlass_modules: List[str]) -> str: + for cutlass_module in cutlass_modules: + content = content.replace( + f"from {cutlass_module} import ", + f"from cutlass_library.{cutlass_module} import ", + ) + return content + + +def _gen_cutlass_file( + file_name: str, cutlass_modules: List[str], src_dir: str, dst_dir: str +) -> None: + orig_full_path = os.path.abspath(os.path.join(src_dir, file_name)) + text = "" + with open(orig_full_path) as f: + text = f.read() + text = _rename_cutlass_import(text, cutlass_modules) + dst_full_path = os.path.abspath( + os.path.join( + dst_dir, + file_name, + ) + ) + with open(dst_full_path, "w") as f: + f.write(text) + + +@functools.lru_cache(None) +def try_import_cutlass() -> bool: + # Copy CUTLASS python scripts to a temp dir and add the temp dir to Python search path. + # This is a temporary hack to avoid CUTLASS module naming conflicts. + # TODO(ipiszy): remove this hack when CUTLASS solves Python scripts packaging structure issues. + + cutlass_py_full_path = os.path.abspath( + os.path.join(inductor_cuda_config.cutlass_dir, "python/cutlass_library") + ) + tmp_cutlass_py_full_path = os.path.abspath( + os.path.join(cache_dir(), "torch_cutlass_library") + ) + dst_link = os.path.join(tmp_cutlass_py_full_path, "cutlass_library") + + if os.path.isdir(cutlass_py_full_path): + if tmp_cutlass_py_full_path not in sys.path: + if os.path.exists(dst_link): + assert os.path.islink( + dst_link + ), f"{dst_link} is not a symlink. Try to remove {dst_link} manually and try again." 
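The emitter above fills its C++ template by handing a `values` dict to `SubstituteTemplate`, which comes in through the `cutlass_library` star-imports and replaces `${name}` placeholders in the template string. As a minimal, standard-library-only sketch of that substitution step (an illustrative stand-in, not the cutlass_library implementation):

import re

def substitute_template(template: str, values: dict) -> str:
    # Replace ${key} placeholders; unknown keys are left untouched so a
    # later pass could still fill them.
    def repl(match):
        key = match.group(1)
        return str(values.get(key, match.group(0)))
    return re.sub(r"\$\{(\w+)\}", repl, template)

# Example with the same kind of placeholder used in the GEMM template above.
print(substitute_template("using Arch = cutlass::arch::Sm${arch};", {"arch": "90"}))
# -> using Arch = cutlass::arch::Sm90;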
+ assert os.path.realpath(os.readlink(dst_link)) == os.path.realpath( + cutlass_py_full_path + ), f"Symlink at {dst_link} does not point to {cutlass_py_full_path}" + else: + os.makedirs(tmp_cutlass_py_full_path, exist_ok=True) + os.symlink(cutlass_py_full_path, dst_link) + sys.path.append(tmp_cutlass_py_full_path) + try: + import cutlass_library.generator # noqa: F401 + import cutlass_library.library # noqa: F401 + import cutlass_library.manifest # noqa: F401 + + return True + + except ImportError as e: + log.debug( + "Failed to import CUTLASS packages: %s, ignoring the CUTLASS backend.", + str(e), + ) + else: + log.debug( + "Failed to import CUTLASS packages: CUTLASS repo does not exist: %s", + cutlass_py_full_path, + ) + return False + + +def _normalize_cuda_arch(arch: str) -> str: + if int(arch) >= 90: + return "90" + elif int(arch) >= 80: + return "80" + elif int(arch) >= 75: + return "75" + elif int(arch) >= 70: + return "70" + else: + raise NotImplementedError(f"Unsupported cuda arch: {arch}") + + +@dataclass +class CUTLASSArgs: + """ + CUTLASS args used to initialize a CUTLASS Manifest. + """ + + architectures: Optional[str] = None + cuda_version: Optional[str] = None + + operations = "all" + build_dir = "" + curr_build_dir = "" + generator_target = "" + kernels = "all" + ignore_kernels = "" + # TODO: these three look dead? + kernel_filter_file: None = None + selected_kernel_list: None = None + interface_dir: None = None + filter_by_cc = True + disable_full_archs_compilation = False + + def __post_init__(self): + if self.architectures is None or self.cuda_version is None: + raise RuntimeError( + f"{self.architectures=} or {self.cuda_version=} is None!" + ) + self.architectures = _normalize_cuda_arch(self.architectures) + + +@functools.lru_cache(None) +def _gen_ops_cached(arch, version) -> List[Any]: + # Note: Cache needs to be specific for cuda architecture and version + + # Import cutlass python scripts. + assert try_import_cutlass() + import cutlass_library.generator as cutlass_generator + import cutlass_library.manifest as cutlass_manifest + + if arch is None or version is None: + log.error( + "Cannot detect cuda arch %s or cuda version %s. " + "Will discard all cutlass ops. " + "Please consider setting _inductor.cuda.arch and _inductor.cuda.version configs.", + arch, + version, + ) + return list() + arch = _normalize_cuda_arch(arch) + args = CUTLASSArgs(architectures=arch, cuda_version=version) + manifest = cutlass_manifest.Manifest(args) + + if arch == "90": + cutlass_generator.GenerateSM90(manifest, args.cuda_version) + cutlass_generator.GenerateSM80(manifest, args.cuda_version) + else: + try: + func = getattr(cutlass_generator, "GenerateSM" + arch) + func(manifest, args.cuda_version) + except AttributeError as e: + raise NotImplementedError( + "Arch " + arch + " is not supported by current cutlass lib." + ) from e + return manifest.operations + + +def gen_ops() -> List[Any]: + """ + Generates all supported CUTLASS operations. + """ + arch = get_cuda_arch() + version = get_cuda_version() + return _gen_ops_cached(arch, version) + + +def dtype_match( + torch_dtype: Optional[torch.dtype], + cutlass_dtype: "cutlass_library.library.DataType", # type: ignore[name-defined] # noqa: F821 +) -> bool: + # Import cutlass python scripts. 
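`_normalize_cuda_arch` above buckets a raw compute capability into the nearest generation target that the CUTLASS generator scripts expose (`GenerateSM90`, `GenerateSM80`, and so on). A small self-contained sketch of that bucketing, with hypothetical inputs chosen only for illustration:

def normalize_cuda_arch(arch: str) -> str:
    # Map a compute-capability string to the closest supported generator
    # target, matching the if/elif ladder in _normalize_cuda_arch above.
    cc = int(arch)
    for target in ("90", "80", "75", "70"):
        if cc >= int(target):
            return target
    raise NotImplementedError(f"Unsupported cuda arch: {arch}")

assert normalize_cuda_arch("89") == "80"  # e.g. sm_89 falls back to the SM80 generator
assert normalize_cuda_arch("90") == "90"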
+ assert try_import_cutlass() + import cutlass_library + + if torch_dtype == torch.float: + return ( + cutlass_dtype == cutlass_library.library.DataType.f32 + or cutlass_dtype == cutlass_library.library.DataType.tf32 + ) + elif torch_dtype == torch.half: + return cutlass_dtype == cutlass_library.library.DataType.f16 + elif torch_dtype == torch.bfloat16: + return cutlass_dtype == cutlass_library.library.DataType.bf16 + else: + return False + + +def get_accumulator_dtype( + input_torch_dtypes: List[torch.dtype], +) -> Optional[torch.dtype]: + """ + Given a list of input torch dtypes, returns the inferred accumulator torch dtype. + """ + + if len(input_torch_dtypes) == 0: + return None + torch_dtype = input_torch_dtypes[0] + for dtype in input_torch_dtypes[1:]: + if torch_dtype != dtype: + raise RuntimeError(f"Unmatched input dtypes: {torch_dtype=}, {dtype=}") + if torch_dtype == torch.half: + if torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction: + return torch_dtype + else: + return torch.float + if torch_dtype in {torch.bfloat16, torch.float}: + return torch.float + raise NotImplementedError(f"Unsupported data type: {input_torch_dtypes=}") + + +def get_alignments(torch_dtype: torch.dtype) -> List[int]: + """ + Returns all possible valid CUTLASS alignments in terms of the number of elements for a given dtype. + CUTLASS gemm / conv SM80 APIs support 16 bytes max alignment, and 2 bytes min alignment. + """ + + if torch_dtype in (torch.half, torch.bfloat16): + return [8, 4, 2, 1] + elif torch_dtype == torch.float: + return [4, 2, 1] + else: + raise NotImplementedError(f"unsupported {torch_dtype=} for alignments") + + +def get_max_alignment(inductor_layout: Layout) -> int: + """ + Returns the max alignment (in terms of number of elements) for a given Inductor Layout. 
+ """ + + dtype = inductor_layout.dtype + size = inductor_layout.size + offset = inductor_layout.offset + + def is_static_int(number): + return isinstance(number, (int, sympy.Integer)) + + if is_static_int(size[-1]) and is_static_int(offset): + alignments = get_alignments(dtype) + for alignment in alignments: + if int(size[-1]) % alignment == 0 and int(offset) % alignment == 0: + return alignment + + return 1 diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/device_op_overrides.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/device_op_overrides.py new file mode 100644 index 0000000000000000000000000000000000000000..93a8c08b6a0f25a05f4c194abcef91205f4fc748 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/device_op_overrides.py @@ -0,0 +1,18 @@ +from ..common import DeviceOpOverrides, register_device_op_overrides + + +class CUDADeviceOpOverrides(DeviceOpOverrides): + def import_get_raw_stream_as(self, name): + return f"from torch._C import _cuda_getCurrentRawStream as {name}" + + def set_device(self, device_idx): + return f"torch.cuda.set_device({device_idx})" + + def synchronize(self): + return "torch.cuda.synchronize()" + + def device_guard(self, device_idx): + return f"torch.cuda._DeviceGuard({device_idx})" + + +register_device_op_overrides("cuda", CUDADeviceOpOverrides()) diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/gemm_template.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/gemm_template.py new file mode 100644 index 0000000000000000000000000000000000000000..9e76b5291c59e9698ad3c9fdaa2480abd0332f21 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/gemm_template.py @@ -0,0 +1,706 @@ +import copy +import logging +import re +from typing import cast, Dict, List, Optional, Tuple + +from ...config import cuda as inductor_cuda_config +from ...ir import Buffer, CUDATemplateBuffer, FixedLayout, IRNode, Layout +from ..common import IndentedBuffer + +from . import cutlass_utils +from .cuda_kernel import CUDATemplateKernel +from .cuda_template import CUTLASSTemplate +from .cutlass_epilogue_gen import ( + CutlassEVTEpilogueArgumentFormatter, + CutlassEVTEpilogueTypeFormatter, +) + +log = logging.getLogger(__name__) + +GEMM_TEMPLATE = r""" +{{template.header().getvalue()}} +{{template.globals().getvalue()}} +{{instance_definition}} +// When workspace_size is not a nullptr, populates requested workspace_size and returns. +// Otherwise, computes the Gemm kernel using the given workspace ptr. 
+extern "C" { +{{kernel.def_kernel(inputs=[X, W, Bias], outputs=[Y], names_str="X, W, Bias, Y", input_reorder=input_reorder)}} { + try { + {{kernel.check_not_null(X)}} + {{kernel.check_not_null(W)}} + {{kernel.check_not_null(Bias)}} + {{kernel.check_not_null(Y)}} + int64_t B = {{kernel.size(Y, 0, -3, default_value=1)}}; + int64_t M = {{kernel.size(X, -2)}}; + int64_t K = {{kernel.size(X, -1)}}; + int64_t N = {{kernel.size(W, -1)}}; + using ElementComputeEpilogue = {{instance_type}}::ElementAccumulator; + using coord_t = cutlass::gemm::GemmCoord::Index; + {{instance_type}}::Arguments arguments; + {{template.render_gemm_arguments(argument_template, epilogue_template, should_swap_xw, + X, W, Bias, Y, alpha, beta, kernel, epilogue_args)}} + {{instance_type}} gemm_op; + if (workspace_size) { + *workspace_size = gemm_op.get_workspace_size(arguments); + return 0; + } + { + auto status = gemm_op.can_implement(arguments); + CUTLASS_CHECK(status); + } + { + auto status = gemm_op.initialize(arguments, workspace, stream); + CUTLASS_CHECK(status); + } + { + auto status = gemm_op(stream); + CUTLASS_CHECK(status); + } + } + catch (std::exception& e) { + std::cerr << "Runtime error: " << e.what() << std::endl; + return -1; + } + catch (...) { + return -1; + } + return 0; +} +} +""" + + +GEMM_ARGS_CUTLASS_2X = r""" + int64_t batch_stride_x = {{kernel.stride(X, -3)}}; + int64_t row_stride_x = {{kernel.row_or_column_stride(X)}}; + int64_t batch_stride_w = {{kernel.stride(W, -3)}}; + int64_t row_stride_w = {{kernel.row_or_column_stride(W)}}; + int64_t batch_stride_bias = {{kernel.stride(Bias, -3)}}; + int64_t row_stride_bias = {{kernel.row_or_column_stride(Bias)}}; + int64_t batch_stride_y = {{kernel.stride(Y, -3)}}; + int64_t row_stride_y = {{kernel.row_or_column_stride(Y)}}; + // Initialize GemmUniversalInstance arguments. + arguments = { + {{template.gemm_mode()}}, // GemmUniversalMode mode + { + static_cast(M), + static_cast(N), + static_cast(K) + }, // GemmCoord problem_size + {{split_k if split_k > 1 else 'B'}}, // int batch_count + {ElementComputeEpilogue({{alpha}}), ElementComputeEpilogue({{beta}})}, // typename EpilogueOutputOp::Params epilogue + {{template.cutlass_type_cast(X, kernel.ptr(X))}}, // void const * ptr_A + {{template.cutlass_type_cast(W, kernel.ptr(W))}}, // void const * ptr_B + {{template.cutlass_type_cast(Bias, kernel.ptr(Bias))}}, // void const * ptr_C + {{template.cutlass_type_cast(Y, kernel.ptr(Y))}}, // void * ptr_D + batch_stride_x, // int64_t batch_stride_A + batch_stride_w, // int64_t batch_stride_B + batch_stride_bias, // int64_t batch_stride_C + batch_stride_y, // int64_t batch_stride_D + row_stride_x, // typename LayoutA::Stride::LongIndex lda + row_stride_w, // typename LayoutB::Stride::LongIndex ldb + row_stride_bias, // typename LayoutC::Stride::LongIndex ldc + row_stride_y, // typename LayoutC::Stride::LongIndex ldd + }; +""" + + +GEMM_ARGS_CUTLASS_3X = r""" + // Initialize GemmUniversal3xInstance arguments. 
+ arguments = { + {{template.gemm_mode()}}, // GemmUniversalMode mode + { + static_cast({{M}}), + static_cast({{N}}), + static_cast(K), + static_cast(B) + }, // ProblemShape problem_shape + { + {{template.cutlass_type_cast(X, kernel.ptr(X))}}, // ElementA const* ptr_A + { + {{template.cute_int(kernel.stride(X, -2), "stride_x0")}}, + {{template.cute_int(kernel.stride(X, -1), "stride_x1")}}, + {{template.cute_int(kernel.stride(X, -3), "batch_stride_x")}} + }, // StrideA dA + {{template.cutlass_type_cast(W, kernel.ptr(W))}}, // ElementB const* ptr_B + { + {{template.cute_int(kernel.stride(W, -1), "stride_w1")}}, + {{template.cute_int(kernel.stride(W, -2), "stride_w0")}}, + {{template.cute_int(kernel.stride(W, -3), "batch_stride_w")}} + }, // StrideB dB + }, // MainloopArguments mainloop + {{epilogue_arguments}} + }; +""" + +GEMM_ARGS_CUTLASS_3X_EPILOGUE = r""" + // see https://tinyurl.com/4rk89z48 + { + {{epilogue_args}}, // thread, typename FusionCallbacks::Arguments ( EVT ) or ThreadEpilogueOp::Params (non-EVT ) + {{template.cutlass_type_cast(Bias, kernel.ptr(Bias))}}, // ElementC const* ptr_C + { + {{template.cute_int(kernel.stride(Bias, -2, 1), "stride_bias0")}}, + {{template.cute_int(kernel.stride(Bias, -1, 1), "stride_bias1")}}, + {{template.cute_int(kernel.stride(Bias, -3), "batch_stride_bias")}} + }, // StrideC dC + {{template.cutlass_type_cast(Y, kernel.ptr(Y))}}, // ElementD const* ptr_D + { + {{template.cute_int(kernel.stride(Y, -2), "stride_y0")}}, + {{template.cute_int(kernel.stride(Y, -1), "stride_y1")}}, + {{template.cute_int(kernel.stride(Y, -3), "batch_stride_y")}} + }, // StrideD dD + }, // EpilogueArguments epilogue +""" + + +class CUTLASSGemmTemplate(CUTLASSTemplate): + """ + CUTLASS GEMM template, which is used to generate CUTLASS GEMM kernels + including those which allow flexible fusions with epilogues. + """ + + def __init__( + self, + input_nodes: List[Buffer], + layout: Layout, + alpha: float, + beta: float, + input_reorder: Optional[List[int]] = None, + can_fuse_epilogue: Optional[bool] = None, + ): + """ + Args: + input_nodes: input nodes of the kernel + layout: layout of the output node + alpha: alpha value of the GEMM operation + beta: beta value of the GEMM operation + input_reorder: reorder of the input nodes + can_fuse_epilogue: If set to True, will only list and use operators capable of flexible epilogue fusions. + If False, it will not use those. If None, both may be listed, but it will not allow fusions. 
+ Defaults to None + """ + super().__init__("cutlass_gemm", input_nodes, layout, input_reorder) + self.alpha = alpha + self.beta = beta + self.can_fuse_epilogue = can_fuse_epilogue + + @staticmethod + def add_cutlass_gemm_choices( + choices, + layout, + input_nodes, + alpha=1, + beta=0, + input_reorder=None, + fuseable=True, + non_fuseable=True, + ): + if non_fuseable: + if fuseable: + # list both fuseable and non-fuseable ops, and treat them all as non-fuseable + can_fuse_epilogue = False + else: + can_fuse_epilogue = None + + cutlass_template = CUTLASSGemmTemplate( + input_nodes, + layout, + alpha=alpha, + beta=beta, + input_reorder=input_reorder, + can_fuse_epilogue=can_fuse_epilogue, + ) + ops = cutlass_template.gen_ops() + for op in ops: + cutlass_template.maybe_append_choice( + choices, + op=op, + ) + else: + ops = [] + if fuseable: + cutlass_template_evt = CUTLASSGemmTemplate( + input_nodes, + layout, + alpha=alpha, + beta=beta, + input_reorder=input_reorder, + can_fuse_epilogue=True, + ) + # This will list only ops capable of EVT fusion + ops_evt = cutlass_template_evt.gen_ops() + for op in ops_evt: + cutlass_template_evt.maybe_append_choice( + choices, + op=op, + ) + else: + ops_evt = [] + log.debug( + "Added %d cutlass gemm configs and %d fuseable gemm configs.", + len(ops), + len(ops_evt), + ) + + def header(self) -> IndentedBuffer: + res = super().header() + res.splice( + """ + #include "cutlass/gemm/gemm.h" + #include "cutlass/gemm/device/gemm_universal.h" + #include "cutlass/gemm/device/gemm_universal_adapter.h" + #include "cutlass/gemm/kernel/gemm_universal.hpp" + #include "cutlass/gemm/collective/collective_builder.hpp" + #include "cutlass/epilogue/collective/collective_builder.hpp" + #include "cutlass/epilogue/collective/default_epilogue.hpp" + #include "cutlass/epilogue/thread/linear_combination.h" + #include "cutlass/gemm/dispatch_policy.hpp" + #include "cutlass/gemm/kernel/tile_scheduler.hpp" + #include "cutlass/util/distribution.h" + #include "cutlass/util/packed_stride.hpp" + #include "cutlass/util/tensor_view_io.h" + """ + ) + return res + + @staticmethod + def cutlass_layout(torch_layout) -> "Optional[cutlass_lib.LayoutType]": # type: ignore[name-defined] # noqa: F821 + assert cutlass_utils.try_import_cutlass() + import cutlass_library.library as cutlass_lib + + if torch_layout.stride[-1] == 1: + return cutlass_lib.LayoutType.RowMajor + elif torch_layout.stride[-2] == 1: + return cutlass_lib.LayoutType.ColumnMajor + else: + return None + + @staticmethod + def flip_cutlass_layout( + cutlass_layout: "cutlass_lib.LayoutType", # type: ignore[name-defined] # noqa: F821 + ) -> "cutlass_lib.LayoutType": # type: ignore[name-defined] # noqa: F821 + assert cutlass_utils.try_import_cutlass() + import cutlass_library.library as cutlass_lib + + if cutlass_layout == cutlass_lib.LayoutType.RowMajor: + return cutlass_lib.LayoutType.ColumnMajor + else: + return cutlass_lib.LayoutType.RowMajor + + @staticmethod + def layout_match(torch_layout, cutlass_layout) -> bool: + return CUTLASSGemmTemplate.cutlass_layout(torch_layout) == cutlass_layout + + @staticmethod + def set_alignment(torch_layout, op_element) -> bool: + alignment = cutlass_utils.get_max_alignment(torch_layout) + if alignment < op_element.alignment: + return False + else: + op_element.alignment = alignment + return True + + @staticmethod + def has_tma_epilogue(op) -> bool: + assert cutlass_utils.try_import_cutlass() + import cutlass_library.library as cutlass_lib + + result = False + if op.gemm_kind == 
cutlass_lib.GemmKind.Universal3x: + epilogue_schedule_str = str(op.epilogue_schedule).split(".")[-1] + result = epilogue_schedule_str.lower().startswith("tma") + return result + + @staticmethod + def supports_evt(op: "cutlass_library.gemm_op.GemmOperation") -> bool: # type: ignore[name-defined] # noqa: F821 + """ + returns True if the op is capable of flexible epilogue fusions + using epilogue visitor trees. + + See https://github.com/NVIDIA/cutlass/blob/e01b9b5029b7caca5a43c29f7d2714d7cf1dcae8/examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu#L283-L285 # noqa: B950 + """ + assert cutlass_utils.try_import_cutlass() + import cutlass_library.library as cutlass_lib + + if op.gemm_kind != cutlass_lib.GemmKind.Universal3x: + return False + if op.epilogue_schedule not in ( + cutlass_lib.EpilogueScheduleType.TmaWarpSpecialized, + cutlass_lib.EpilogueScheduleType.TmaWarpSpecializedCooperative, + ): + return False + + return True + + def render_evt_epilogue_declaration( + self, + template_output_node_name: str, + evt_type_name: str, + epilogue_nodes: List[IRNode], + ) -> str: + """Generates the epilogue for the EVT epilogue fusion""" + return CutlassEVTEpilogueTypeFormatter.ir_to_evt_string( + template_output_node_name, evt_type_name, epilogue_nodes + ) + + def define_gemm_instance( + self, + op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] # noqa: F821 + output_buffer_name: str, + epilogue_nodes: Optional[List[IRNode]] = None, + ) -> Tuple[str, str]: + assert cutlass_utils.try_import_cutlass() + import cutlass_library.gemm_operation as cutlass_gemm_op + import cutlass_library.library as cutlass_lib + + from torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions import ( + EmitGemmUniversal3xInstanceWithEVT, + ) + + if op.gemm_kind == cutlass_lib.GemmKind.Universal3x: + if epilogue_nodes is not None and len(epilogue_nodes) > 0: + emitter = EmitGemmUniversal3xInstanceWithEVT() + op.epilogue_functor = lambda epilogue_functor_type_name: self.render_evt_epilogue_declaration( + output_buffer_name, epilogue_functor_type_name, epilogue_nodes + ) + else: + emitter = cutlass_gemm_op.EmitGemmUniversal3xInstance() + op_def = emitter.emit(op) + pattern = re.compile(r"\s*struct\s(.*?)\s:") + decl = [line for line in op_def.split("\n") if "struct " in line][-1] + else: + if epilogue_nodes is not None and len(epilogue_nodes) > 0: + raise RuntimeError( + "EVT epilogue fusion is not supported for Cutlass 2.x ops." + ) + emitter = cutlass_gemm_op.EmitGemmInstance() + op_def = emitter.emit(op) + op_def = op_def.replace( + "cutlass::gemm::device::Gemm", "cutlass::gemm::device::GemmUniversal" + ) + op_def = op_def.replace("false,", "") + pattern = re.compile(r"\s*using\s(.*?)\s=") + decl = op_def.split("\n")[2] + match = pattern.match(decl) + if match is None: + raise RuntimeError("Invalid Gemm config: \n" + op_def) + op_type = match.groups()[0] + if op.gemm_kind == cutlass_lib.GemmKind.Universal3x: + op_def += f"\n using {op_type}_device_type = cutlass::gemm::device::GemmUniversalAdapter<{op_type}>;\n" + op_type = f"{op_type}_device_type" + return op_def, op_type + + @staticmethod + def should_swap_XW( + bias: IRNode, + beta: float, + ) -> bool: + return True + + # TODO(ipiszy): Check whether it's necessary to swap X/W. 
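`cutlass_layout` / `layout_match` above classify a tensor purely from its strides: a trailing stride of 1 means row-major, a stride of 1 on the second-to-last dimension means column-major, and anything else is rejected. A stride-only sketch of that test (hypothetical stride tuples, standard library only):

def classify_layout(stride):
    # Same stride test as cutlass_layout(): RowMajor if the innermost stride
    # is 1, ColumnMajor if the second-innermost stride is 1, else unsupported.
    if stride[-1] == 1:
        return "RowMajor"
    if stride[-2] == 1:
        return "ColumnMajor"
    return None

assert classify_layout((16, 1)) == "RowMajor"    # contiguous 2D tensor
assert classify_layout((1, 8)) == "ColumnMajor"  # transposed view
assert classify_layout((32, 2)) is None          # strided slice: no CUTLASS layout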
+ # strides = bias.get_stride() + # if strides[-1] != 1: + # return True + # for stride in strides[:-1]: + # if stride != 0: + # return True + # return False + + @staticmethod + def swap_XW( + op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] # noqa: F821 + ) -> "cutlass_library.gemm_op.GemmOperation": # type: ignore[name-defined] # noqa: F821 + # Swap X and W in GemmOperation. + new_op = copy.deepcopy(op) + new_op.A.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.A.layout) + new_op.B.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.B.layout) + new_op.A, new_op.B = new_op.B, new_op.A + new_op.C.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.C.layout) + new_op.D.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.D.layout) + return new_op + + def filter_op( + self, + op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] # noqa: F821 + ) -> "cutlass_library.gemm_op.GemmOperation": # type: ignore[name-defined] # noqa: F821 + assert cutlass_utils.try_import_cutlass() + import cutlass_library.library as cutlass_lib + + # Skip simt kernels + if ( + op.tile_description.math_instruction.opcode_class + == cutlass_lib.OpcodeClass.Simt + ): + return None + + # Only keep GemmUniversal kernels + if op.gemm_kind not in { + cutlass_lib.GemmKind.Universal, + cutlass_lib.GemmKind.Universal3x, + }: + return None + # Filter ops by dtypes. + X = self.input_nodes[0] + W = self.input_nodes[1] + accumulator_torch_dtype = cutlass_utils.get_accumulator_dtype( + [X.get_dtype(), W.get_dtype()], + ) + if not ( + cutlass_utils.dtype_match(X.get_dtype(), op.A.element) + and cutlass_utils.dtype_match(W.get_dtype(), op.B.element) + and cutlass_utils.dtype_match( + self.output_node.get_layout().dtype, op.C.element + ) + and cutlass_utils.dtype_match( + accumulator_torch_dtype, op.accumulator_type() + ) + ): + return None + + # Filter ops by input layouts. + if not ( + self.layout_match(X.get_layout(), op.A.layout) + and self.layout_match(W.get_layout(), op.B.layout) + ): + return None + + # Update op. + op = copy.deepcopy(op) + + # Set output layout. + op.D.layout = CUTLASSGemmTemplate.cutlass_layout(self.output_node.get_layout()) + + # Filter ops by alignments and set alignments. + if not ( + self.set_alignment(X.get_layout(), op.A) + and self.set_alignment(W.get_layout(), op.B) + and self.set_alignment(self.output_node.get_layout(), op.D) + ): + return None + + # Set epilogue. + # TODO: update epilogue functor according to epilogues. + op.element_epilogue = op.accumulator_type() + + # Set bias layout and alignment. 
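`swap_XW` above relies on the identity (X @ W)^T = W^T @ X^T: swapping the A/B operands and flipping every RowMajor/ColumnMajor layout is the stride-level equivalent of transposing each operand, which is what lets the TMA epilogue see the bias in column-major form. A quick numerical check of that identity (numpy is assumed here purely for illustration):

import numpy as np

X = np.random.rand(4, 8)
W = np.random.rand(8, 3)

# Computing W^T @ X^T and transposing back matches X @ W, so swapping the
# operands while flipping their layouts leaves the GEMM result unchanged.
assert np.allclose((W.T @ X.T).T, X @ W)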
+ if len(self.input_nodes) >= 3 and self.input_nodes[2] is not None: + Bias = self.input_nodes[2] + bias_layout = CUTLASSGemmTemplate.cutlass_layout(Bias.get_layout()) + if op.gemm_kind != cutlass_lib.GemmKind.Universal3x: + if bias_layout != op.D.layout: + # For cutlass2, bias and output layout must match + return None + else: + op.C.layout = bias_layout + if not self.set_alignment(Bias.get_layout(), op.C): + return None + else: + if op.gemm_kind == cutlass_lib.GemmKind.Universal3x: + op.C.element = cutlass_lib.DataType.void + else: + op.C.layout = op.D.layout + supports_evt: bool = self.supports_evt(op) + if (self.can_fuse_epilogue is not None) and ( + self.can_fuse_epilogue != supports_evt + ): + return None + if inductor_cuda_config.cutlass_only_evt_capable_ops and not supports_evt: + return None + return op + + def gen_ops(self) -> "List[cutlass_gemm_op.GemmOperation]": # type: ignore[name-defined] # noqa: F821 + assert cutlass_utils.try_import_cutlass() + import cutlass_library.gemm_operation as cutlass_gemm_op + import cutlass_library.library as cutlass_lib + + ops = cutlass_utils.gen_ops()[cutlass_lib.OperationKind.Gemm] + res: Dict[str, cutlass_gemm_op.GemmOperation] = dict() + num_3x_ops = 0 + num_2x_ops = 0 + for op_dict in ops.values(): + for op_list in op_dict.values(): + for op in op_list: + assert isinstance(op, cutlass_gemm_op.GemmOperation) + filter_res = self.filter_op(op) + if ( + filter_res is not None + and res.get(filter_res.configuration_name(), None) is None + ): + res[filter_res.configuration_name()] = filter_res + for op in res.values(): + if op.gemm_kind == cutlass_lib.GemmKind.Universal3x: + num_3x_ops += 1 + else: + num_2x_ops += 1 + log.debug( + "Got cutlass configs: total number of ops: %d, " + "total number of 3x ops: %d, total number of 2x ops: %d", + len(res), + num_3x_ops, + num_2x_ops, + ) + return list(res.values())[: inductor_cuda_config.cutlass_max_profiling_configs] + + def gemm_mode(self) -> str: + sizes = self.output_node.get_size() + if len(sizes) > 2: + return "cutlass::gemm::GemmUniversalMode::kBatched" + else: + return "cutlass::gemm::GemmUniversalMode::kGemm" + + def render_gemm_arguments( + self, + argument_template: str, + epilogue_template: str, + should_swap_xw: bool, + X: IRNode, + W: IRNode, + Bias: IRNode, + Y: IRNode, + alpha: float, + beta: float, + kernel: CUDATemplateKernel, + epilogue_args, + ) -> str: + options = dict( + alpha=self.alpha, + beta=self.beta, + X=X, + W=W, + Y=Y, + Bias=Bias, + template=self, + kernel=kernel, + M="M", + N="N", + epilogue_args=epilogue_args, + ) + + if epilogue_template is not None: + if should_swap_xw: + # Swap + def clone_with_transposed_stride(node: IRNode) -> IRNode: + old_layout = node.get_layout() + new_stride = list(old_layout.stride) + new_stride[-2], new_stride[-1] = new_stride[-1], new_stride[-2] + new_layout = FixedLayout( + old_layout.device, + old_layout.dtype, + list(old_layout.size), + new_stride, + old_layout.offset, + ) + return Buffer(node.get_name(), new_layout) + + new_X = clone_with_transposed_stride(X) + new_W = clone_with_transposed_stride(W) + new_Bias = clone_with_transposed_stride(Bias) + new_Y = clone_with_transposed_stride(Y) + options["X"], options["W"], options["Bias"], options["Y"] = ( + new_W, + new_X, + new_Bias, + new_Y, + ) + options["M"], options["N"] = "N", "M" + + epilogue_arguments = self._template_from_string(epilogue_template).render( + **options + ) + arguments = self._template_from_string(argument_template).render( + epilogue_arguments=epilogue_arguments, 
**options + ) + else: + arguments = self._template_from_string(GEMM_ARGS_CUTLASS_2X).render( + split_k=1, **options + ) + return arguments + + def render( # type: ignore[override] + self, + kernel: CUDATemplateKernel, + op: "cutlass_gemm_op.GemmOperation" = None, # type: ignore[name-defined] # noqa: F821 + template_buffer_node: Optional[CUDATemplateBuffer] = None, + epilogue_nodes: Optional[List[IRNode]] = None, + **kwargs, + ) -> str: + if epilogue_nodes is not None and len(epilogue_nodes) > 0: + assert self.can_fuse_epilogue and CUTLASSGemmTemplate.supports_evt( + op + ), "op does not support EVT epilogue fusion" + assert ( + template_buffer_node is not None + ), "Template node is required for epilogue fusion" + assert isinstance( + template_buffer_node, CUDATemplateBuffer + ), f"Template node has to be a CUDATemplateBuffer, is type {type(template_buffer_node)}" + assert ( + template_buffer_node.name is not None + ), "Output node has to be a Buffer with a name" + # This is the name of the output of the Matmul, before epilogues are applied. + # it is not necessarily materialized in global memory if we have an epilogue + + template_output_node_name = ( + template_buffer_node.name if template_buffer_node is not None else None + ) + + assert cutlass_utils.try_import_cutlass() + import cutlass_library.gemm_operation as cutlass_gemm_op + import cutlass_library.library as cutlass_lib + + assert isinstance( + op, cutlass_gemm_op.GemmOperation + ), "op argument is required and has to be an instance of GemmOperation" + if template_buffer_node is not None: + self.output_node = template_buffer_node + if epilogue_nodes is not None and len(epilogue_nodes) > 0: + self.output_node = cast(Buffer, epilogue_nodes[-1]) + + assert len(self.input_nodes) >= 2 and self.output_node is not None + X, W = self.input_nodes[0], self.input_nodes[1] + Y = self.output_node + Bias = None if len(self.input_nodes) == 2 else self.input_nodes[2] + + epilogue_template: Optional[str] = None + should_swap_xw: bool = False + epilogue_args = f"{{ElementComputeEpilogue({self.alpha}), ElementComputeEpilogue({self.beta})}}" + if op.gemm_kind == cutlass_lib.GemmKind.Universal3x: + if Bias is not None and self.has_tma_epilogue(op): + if self.should_swap_XW(Bias, self.beta): + # TMA epilogue requires bias vector in column major to get best perf. + op = self.swap_XW(op) + should_swap_xw = True + if epilogue_nodes is not None and len(epilogue_nodes) > 0: + epilogue_args = ( + CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string( + cast(str, template_output_node_name), epilogue_nodes + ) + ) + epilogue_template = GEMM_ARGS_CUTLASS_3X_EPILOGUE + argument_template = GEMM_ARGS_CUTLASS_3X + else: + # TODO: Support split_k. 
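When no EVT fusion is active, `render` above falls back to the plain `{alpha, beta}` epilogue arguments, which correspond to the usual linear-combination epilogue D = alpha * (X @ W) + beta * Bias (the template header pulls in cutlass/epilogue/thread/linear_combination.h for exactly this case). A small reference computation of that contract (numpy assumed, shapes chosen only for illustration):

import numpy as np

alpha, beta = 2.0, 0.5
X = np.random.rand(4, 8).astype(np.float32)
W = np.random.rand(8, 3).astype(np.float32)
Bias = np.random.rand(4, 3).astype(np.float32)

# Reference semantics of the {alpha, beta} epilogue arguments rendered above.
D = alpha * (X @ W) + beta * Bias
assert D.shape == (4, 3)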
+ argument_template = GEMM_ARGS_CUTLASS_2X + + instance_definition, instance_type = self.define_gemm_instance( + op, cast(str, template_output_node_name), epilogue_nodes + ) + options = dict( + alpha=self.alpha, + beta=self.beta, + X=X, + W=W, + Y=Y, + Bias=Bias, + epilogue_template=epilogue_template, + argument_template=argument_template, + should_swap_xw=should_swap_xw, + template=self, + kernel=kernel, + instance_definition=instance_definition, + instance_type=instance_type, + input_reorder=self.input_reorder, + epilogue_args=epilogue_args, + ) + res = self._template_from_string(GEMM_TEMPLATE).render(**options) + return res diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda_combined_scheduling.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda_combined_scheduling.py new file mode 100644 index 0000000000000000000000000000000000000000..f82e01daad63c818b10f30cf0876c669359989c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/cuda_combined_scheduling.py @@ -0,0 +1,75 @@ +from typing import List + +from ..scheduler import BaseSchedulerNode, BaseScheduling, Scheduler, SchedulerNode +from .cuda.cuda_cpp_scheduling import CUDACPPScheduling + +from .triton import TritonScheduling + + +class CUDACombinedScheduling(BaseScheduling): + """ + Scheduler for CUDA Kernels, which delegates calls as appropriate + to the CUDA-C++ and Triton Schedulers, which both work for CUDA devices + and use a unified-wrapper for codegen. + + If Scheduling code needs to be specialized for the case of mixed Triton / CUDA C++ code, + this would also be the place to do it. + """ + + def __init__(self, scheduler: Scheduler): + super().__init__() + self._scheduler = scheduler + self._triton_scheduling = TritonScheduling(scheduler) + self._cuda_cpp_scheduling = CUDACPPScheduling(scheduler) + + def choose_node_backend(self, node: BaseSchedulerNode) -> BaseScheduling: + if self._cuda_cpp_scheduling.is_cuda_cpp_template( + node + ) or self._cuda_cpp_scheduling.is_cuda_cpp_fused_template(node): + return self._cuda_cpp_scheduling + return self._triton_scheduling + + def can_fuse_vertical(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode): + if self._cuda_cpp_scheduling.can_fuse_vertical(node1, node2): + return True + return self._triton_scheduling.can_fuse_vertical(node1, node2) + + def can_fuse_horizontal(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode): + for node in (node1, node2): + if self._cuda_cpp_scheduling.is_cuda_cpp_template( + node + ) or self._cuda_cpp_scheduling.is_cuda_cpp_fused_template(node): + return self._cuda_cpp_scheduling.can_fuse_horizontal( + node1, node2 + ) # always False at the moment + return self._triton_scheduling.can_fuse_horizontal(node1, node2) + + def group_fn(self, sizes): + return self._triton_scheduling.group_fn(sizes) + + def codegen_template( + self, template_node: SchedulerNode, epilogue_nodes: List[SchedulerNode] + ): + if self._cuda_cpp_scheduling.is_cuda_cpp_template(template_node): + return self._cuda_cpp_scheduling.codegen_template( + template_node, epilogue_nodes + ) + else: + return self._triton_scheduling.codegen_template( + template_node, epilogue_nodes + ) + + def codegen_nodes(self, nodes: List[SchedulerNode]): + return self._triton_scheduling.codegen_nodes(nodes) + + def codegen_sync(self): + return self._triton_scheduling.codegen_sync() + + def flush(self): + return self._triton_scheduling.flush() + + def codegen_foreach(self, *args, **kwargs): + return 
self._triton_scheduling.codegen_foreach(*args, **kwargs) + + def benchmark_fused_nodes(self, nodes): + return self._triton_scheduling.benchmark_fused_nodes(nodes) diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/memory_planning.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/memory_planning.py new file mode 100644 index 0000000000000000000000000000000000000000..d94e4723dba3828cff886148eacc5707def8ab5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/memory_planning.py @@ -0,0 +1,799 @@ +from __future__ import annotations + +import collections +import dataclasses +import itertools +import pprint +from typing import Any, Dict, Iterable, List, Optional, Protocol + +import sympy + +import torch +from .. import config, ir +from ..utils import cache_on_self, CachedMethod, IndentedBuffer +from ..virtualized import V + +from .wrapper import ( + AllocateLine, + FreeIfNotReusedLine, + MemoryPlanningLine, + NullLine, + ReuseLine, +) + + +ALIGN_BYTES = 64 +assert (ALIGN_BYTES & (ALIGN_BYTES - 1)) == 0 and ALIGN_BYTES >= 8, "must be power of 2" + + +def _align(nbytes): + """Round up to the nearest multiple of ALIGN_BYTES""" + return (nbytes + ALIGN_BYTES - 1) & -ALIGN_BYTES + + +def _is_aligned(v: sympy.Expr): + """v can be statically proven to be a multiple of ALIGN_BYTES""" + if isinstance(v, (sympy.Add, sympy.Max)): + return all(map(_is_aligned, v.args)) + return isinstance(v, align) or sympy.gcd(v, ALIGN_BYTES) == ALIGN_BYTES + + +class align(sympy.Function): + """Symbolically round up to the nearest multiple of ALIGN_BYTES""" + + nargs = (1,) + is_integer = True + + @classmethod + def eval(cls, value): + if isinstance(value, (int, sympy.Integer)): + return _align(int(value)) + if _is_aligned(value): + return value + + +@dataclasses.dataclass +class LiveRange: + """ + A range where a given tensor is live. Begin and end are both counters + representing points in the program of grouped memory operations. + Begin is inclusive, end is exclusive. + + Invariant: begin <= end + """ + + begin: float # int | ±inf + end: float # int | ±inf + + def contains(self, other: LiveRange): + """Is other entirely within self""" + return self.begin <= other.begin and other.end <= self.end + + def join(self, other: LiveRange): + """Combine two ranges using a union operation""" + return LiveRange(min(self.begin, other.begin), max(self.end, other.end)) + + def __len__(self): + return self.end - self.begin + + +class LiveRanges: + """ + A collection of LiveRange regions, allowing for non-contiguous + live regions. 
+ + Invariant: LiveRanges.ranges is in sorted order and non-overlapping + """ + + def __init__(self, ranges: Iterable[LiveRange]): + ranges = [*sorted(ranges, key=lambda x: x.begin)] + self.ranges = ranges[:1] + for r in ranges[1:]: + assert self.ranges[-1].begin <= r.begin + if self.ranges[-1].end >= r.begin: + self.ranges[-1] = LiveRange.join(self.ranges[-1], r) + else: + self.ranges.append(r) + + def overlaps(self, other: LiveRanges): + """Check if any pair of ranges in self and other overlap""" + left = collections.deque(self.ranges) + right = collections.deque(other.ranges) + while left and right: + if left[0].begin > right[0].begin: + left, right = right, left + assert left[0].begin <= right[0].begin + if left[0].end > right[0].begin: + return True + left.popleft() + return False + + @property + def begin(self): + return self.ranges[0].begin + + @property + def end(self): + return self.ranges[-1].end + + def __repr__(self): + return f"{self.__class__.__name__}([{', '.join(map(repr, self.ranges))}])" + + +class AllocationTreeNode: + """ + Abstract base class for nodes in allocation pool. + """ + + def allocate(self, block: Allocation, is_last: bool) -> bool: + """ + Try to assign block to a memory location in this bool. Return True if + an assignment was made. + """ + return False + + def get_live_ranges(self) -> LiveRanges: + """Aggregate LiveRanges for all objects below this in tree""" + raise NotImplementedError() + + def get_size_hint(self) -> int: + """Number of bytes used for example inputs""" + raise NotImplementedError() + + def get_symbolic_size(self) -> sympy.Expr: + """Number of bytes needed at runtime""" + raise NotImplementedError() + + def finalize(self, pool, offset) -> AllocationTreeNode: + """Called after all allocations have been made""" + return self + + def is_empty(self): + return False + + +@dataclasses.dataclass +class Allocation(AllocationTreeNode): + """ + Represents memory allocated to a given node in the allocation pool. + """ + + node: ir.Buffer + live_range: LiveRange + size_hint: int + symbolic_size: sympy.Expr + allocated: bool = False + pool: Optional[AllocationPool] = None + offset: Optional[sympy.Expr] = None + + @property + def device(self): + return self.node.get_device() + + def get_live_ranges(self): + return LiveRanges([self.live_range]) + + def get_size_hint(self): + return self.size_hint + + def get_symbolic_size(self): + return self.symbolic_size + + def mark_allocated(self): + assert not self.allocated + self.allocated = True + + def finalize(self, pool, offset): + assert self.pool is None and self.offset is None + self.pool = pool + self.offset = offset + return self + + def codegen_alloc_from_pool(self, wrapper): + assert self.pool + node = self.node + shape = tuple(node.get_size()) + stride = tuple(node.get_stride()) + return wrapper.codegen_alloc_from_pool( + self.pool.name, self.offset, node.get_dtype(), shape, stride + ) + + def __repr__(self): + return ( + f"{self.__class__.__name__}(" + f"node={self.node.get_name()}, " + f"live_range={self.live_range}, " + f"size_hint={self.size_hint}, " + f"symbolic_size={self.symbolic_size}, " + f"pool={self.pool.name if self.pool else None}, " + f"offset={self.offset})" + ) + + +@dataclasses.dataclass +class Empty(AllocationTreeNode): + """ + Placeholder to represent empty space in the allocation pool. + Only exists to get the size_hint correct in parent nodes. 
+ """ + + size_hint: int + + def get_live_ranges(self): + return LiveRanges([]) + + def get_size_hint(self): + return self.size_hint + + def get_symbolic_size(self): + return 0 + + def is_empty(self): + return True + + +class MemorySplitProtocol(Protocol): + get_live_ranges: CachedMethod[[], LiveRanges] + get_size_hint: CachedMethod[[], int] + get_symbolic_size: CachedMethod[[], sympy.Expr] + + def _allocate(self, block: Allocation, is_last: bool) -> bool: + ... + + +class ClearCacheOnAllocateMixin(MemorySplitProtocol): + """ + Helper to assist in caching get_live_ranges, get_size_hint, and + get_symbolic_size. + """ + + def allocate(self, block: Allocation, is_last: bool): + is_allocated = self._allocate(block, is_last) + if is_allocated: + self.clear_cache() + return is_allocated + + def clear_cache(self): + self.get_live_ranges.clear_cache(self) + self.get_size_hint.clear_cache(self) + self.get_symbolic_size.clear_cache(self) + + +@dataclasses.dataclass +class TemporalSplit(ClearCacheOnAllocateMixin, AllocationTreeNode): + """ + Contains a list of allocations not overlapping in LiveRanges. + + Invariant: no pair (a,b) in self.allocations will have: + a.get_live_ranges().overlaps(b.get_live_ranges()) + """ + + allocations: List[AllocationTreeNode] + + def _allocate(self, block: Allocation, is_last: bool): + slot_size = self.get_size_hint() + block_size = block.get_size_hint() + if not is_last and block_size > slot_size: + return False # doesn't fit + + block_live = block.get_live_ranges() + overlapping = [ + s for s in self.allocations if s.get_live_ranges().overlaps(block_live) + ] + if len(overlapping) > 1: + # TODO(jansel): we could try harder here by merging overlapping in space + return False + elif len(overlapping) == 1: + return overlapping[0].allocate(block, is_last) + else: + block.mark_allocated() + + if len(self.allocations) == 1 and isinstance(self.allocations[-1], Empty): + self.allocations.pop() + + if slot_size == block_size: + # perfect fit + self.allocations.append(block) + elif slot_size > block_size: + self.allocations.append( + SpatialSplit.create(block, slot_size - block_size) + ) + else: # grow this allocation + assert is_last + self.allocations = [ + *( + SpatialSplit.create(a, block_size - slot_size) + for a in self.allocations + ), + block, + ] + return True + + @cache_on_self + def get_live_ranges(self) -> LiveRanges: + return LiveRanges( + itertools.chain.from_iterable( + x.get_live_ranges().ranges for x in self.allocations + ) + ) + + @cache_on_self + def get_size_hint(self) -> int: + if not self.allocations: + return 0 + return max(x.get_size_hint() for x in self.allocations) + + @cache_on_self + def get_symbolic_size(self) -> sympy.Expr: + if not self.allocations: + return 0 # type: ignore[return-value] + return sympy.Max(*[x.get_symbolic_size() for x in self.allocations]) + + def is_empty(self): + return len(self.allocations) == 1 and self.allocations[0].is_empty() + + def finalize(self, pool, offset): + self.allocations = [block.finalize(pool, offset) for block in self.allocations] + self.clear_cache() + if len(self.allocations) == 1: + return self.allocations[0] + return self + + +@dataclasses.dataclass +class SpatialSplit(ClearCacheOnAllocateMixin, AllocationTreeNode): + """ + Contains two allocations, left and right, that do not overlap in space. + Right will be allocated immediately after left in memory. 
+ """ + + left: TemporalSplit + right: TemporalSplit + + @staticmethod + def create(left, extra_space): + assert isinstance(left, AllocationTreeNode) + assert isinstance(extra_space, int) and extra_space >= 1 + return SpatialSplit(TemporalSplit([left]), TemporalSplit([Empty(extra_space)])) + + def _allocate(self, block: Allocation, is_last: bool): + return self.left.allocate(block, False) or self.right.allocate(block, is_last) + + @cache_on_self + def get_live_ranges(self): + return LiveRanges( + itertools.chain( + self.left.get_live_ranges().ranges, self.right.get_live_ranges().ranges + ) + ) + + @cache_on_self + def get_size_hint(self) -> int: + return _align(self.left.get_size_hint()) + self.right.get_size_hint() + + @cache_on_self + def get_symbolic_size(self) -> sympy.Expr: + return align(self.left.get_symbolic_size()) + self.right.get_symbolic_size() + + def finalize(self, pool, offset): + self.left = self.left.finalize(pool, offset) + self.right = self.right.finalize( + pool, offset + align(self.left.get_symbolic_size()) + ) + self.clear_cache() + if self.right.is_empty(): + return self.left + return self + + +@dataclasses.dataclass +class AllocationPool: + """ + Represents a pool of allocations that will be generated by a single + call to torch.empty. + """ + + device: torch.device + root: TemporalSplit + can_expand: bool = True + restrict_live_range: Optional[LiveRange] = None + name: Optional[str] = None + names_to_del: List[str] = dataclasses.field(default_factory=list) + creation_cache: Dict[str, str] = dataclasses.field(default_factory=dict) + + def allocate(self, block: Allocation, is_last: bool): + if self.restrict_live_range and not self.restrict_live_range.contains( + block.live_range + ): + return False + + is_last = self.can_expand and is_last + if self.root.allocate(block, is_last): + return True + + if is_last: + return self.allocate_at_end(block) + + return False + + def allocate_at_end(self, block): + block.mark_allocated() + self.root = TemporalSplit([SpatialSplit(self.root, TemporalSplit([block]))]) + return True + + def finalize(self, name): + assert not self.name + self.name = name + self.names_to_del.append(name) + self.root.finalize(self, 0) + + def codegen_create(self, wrapper, code: IndentedBuffer): + assert self.name + nbytes = self.root.get_symbolic_size() + for block in self.root.allocations: + if isinstance(block, Allocation) and nbytes == block.get_symbolic_size(): + # optimization: fuse first allocation and pool creation + node = block.node + code.writeline( + wrapper.make_allocation( + self.name, + device=self.device, + dtype=node.get_dtype(), + shape=tuple(node.get_size()), + stride=tuple(node.get_stride()), + ) + ) + self.creation_cache[block.codegen_alloc_from_pool(wrapper)] = self.name + return + else: + code.writeline( + wrapper.make_allocation( + self.name, + device=self.device, + dtype=torch.uint8, + shape=(nbytes,), + stride=(1,), + ) + ) + + def codegen_destroy(self, wrapper, code: IndentedBuffer): + code.writeline(wrapper.make_free_by_names(self.names_to_del)) + + def __eq__(self, other): + return self is other + + def __hash__(self): + return id(self) + + +@dataclasses.dataclass +class AllocationPools: + """ + Collection of many AllocationPool objects grouped by device. 
+ """ + + device_to_pools: Dict[torch.device, List[AllocationPool]] = dataclasses.field( + default_factory=dict + ) + + def get_pools(self, block): + if block.device not in self.device_to_pools: + self.device_to_pools[block.device] = [] + return self.device_to_pools[block.device] + + def allocate(self, block: Allocation): + pools = self.get_pools(block) + + for pool in pools: + if pool.allocate(block, is_last=pool is pools[-1]): + return + + # everything is full, make a new pool + pools.append( + AllocationPool( + block.device, + TemporalSplit([block]), + can_expand=config.memory_pool != "none", + ) + ) + block.mark_allocated() + + def allocate_output(self, block: Allocation): + """Outputs get different pools so memory gets freed properly""" + pools = self.get_pools(block) + if pools and config.memory_pool in ("outputs", "combined"): + pools[-1].allocate_at_end(block) + else: + # create a new pool + block.mark_allocated() + pools.append( + AllocationPool( + block.device, + TemporalSplit([block]), + can_expand=config.memory_pool == "combined", + ) + ) + + def finalize(self): + """Called at the end of allocation process""" + for i, pool in enumerate( + itertools.chain.from_iterable(self.device_to_pools.values()) + ): + pool.finalize(f"pool{i}") + + def pprint(self): + for pool in itertools.chain.from_iterable(self.device_to_pools.values()): + print() + print(pool.name) + print(pool.root.get_live_ranges()) + pprint.pprint(pool.root) + + +class BufferGroup: + """ + Due to inplace reuse an allocated buffer can have many names. + This tracks these collections of buffers sharing underlying memory. + """ + + def __init__(self, node: ir.Buffer): + self.node = node + self.names = [node.get_name()] + self.is_output = False + self.allocation: Optional[Allocation] = None + self.live_range = LiveRange(float("inf"), -float("inf")) + + def update_usage(self, timestep: int): + """Expand self.live_range to include timestep""" + self.live_range = LiveRange( + min(timestep, self.live_range.begin), + max(timestep, self.live_range.end), + ) + + def sym_nbytes(self): + return self.node.get_layout().storage_size() * self.node.get_dtype().itemsize + + def make_allocation(self): + assert not self.allocation, "multiple allocations" + assert isinstance(self.live_range.begin, int), "live ranges not computed" + nbytes = self.sym_nbytes() + # For now, fallback value will be used if we encounter an unbacked SymInt. The longer-term plan is to have + # size_hint() use better heuristics for unbackeds, at which point the fallback value will be ignored. 
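Throughout this planner, byte sizes and offsets are rounded up with the `_align` / `align` helpers defined near the top of the file, using the power-of-two trick `(n + A - 1) & -A`. A tiny self-contained check of that rounding:

ALIGN_BYTES = 64  # same constant as above; must be a power of two

def align_up(nbytes: int) -> int:
    # -ALIGN_BYTES in two's complement is a mask clearing the low log2(A)
    # bits, so this rounds nbytes up to the next multiple of ALIGN_BYTES.
    return (nbytes + ALIGN_BYTES - 1) & -ALIGN_BYTES

assert align_up(1) == 64
assert align_up(64) == 64
assert align_up(65) == 128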
+ size_hint = V.graph.sizevars.size_hint(nbytes, fallback=64) + self.allocation = Allocation( + self.node, + self.live_range, + size_hint=size_hint, + symbolic_size=nbytes, + ) + + def __repr__(self): + return ( + f"{self.__class__.__name__}({self.names!r}, is_output={self.is_output}, " + f"live_range={self.live_range}" + ) + + +@dataclasses.dataclass +class PoolMemoryPlanningLine(MemoryPlanningLine): + """Abstract base class for {Alloc,Dealloc}FromPoolLine""" + + group: BufferGroup + timestep: Optional[int] = None + + @property + def node(self): + return self.group.node + + +@dataclasses.dataclass +class AllocFromPoolLine(PoolMemoryPlanningLine): + """Similar to AllocationLine, but takes memory from a pool""" + + is_first_pool_usage: bool = False + + def codegen(self, code: IndentedBuffer): + allocation = self.group.allocation + assert allocation and allocation.pool + pool = allocation.pool + name = self.node.get_name() + + if self.is_first_pool_usage: + pool.codegen_create(self.wrapper, code) + + pool.names_to_del.extend(self.group.names) + alloc_from_pool = allocation.codegen_alloc_from_pool(self.wrapper) + if alloc_from_pool in pool.creation_cache: + code.writeline( + self.wrapper.make_tensor_alias( + name, pool.creation_cache[alloc_from_pool], "alloc" + ) + ) + else: + pool.creation_cache[alloc_from_pool] = name + code.writeline( + f"{self.wrapper.declare}{name} = {alloc_from_pool}{self.wrapper.ending}" + ) + + +@dataclasses.dataclass +class DeallocFromPoolLine(PoolMemoryPlanningLine): + """Similar to FreeIfNotReusedLine, but takes memory from a pool""" + + is_last_pool_usage: bool = False + + def codegen(self, code: IndentedBuffer): + if self.is_last_pool_usage: + assert self.group.allocation and self.group.allocation.pool + self.group.allocation.pool.codegen_destroy(self.wrapper, code) + + +@dataclasses.dataclass +class MemoryPlanner: + """ + Coordination object to run memory planning passes during wrapper + codegen. + """ + + wrapper: Any + pools: AllocationPools = dataclasses.field(default_factory=AllocationPools) + buffer_groups: Optional[List[BufferGroup]] = None + + def plan(self, lines: List[Any]) -> List[Any]: + """Call all the memory planning passes in sequence""" + lines = [*lines] + self.drop_removed_buffers(lines) + self.convert_to_pool_lines(lines) + self.compute_live_ranges(lines) + self.allocate_groups() + self.mark_first_last_usage(lines) + return lines + + def drop_removed_buffers(self, lines): + """ + Replace any memory planning lines in V.graph.removed_buffers with NullLine + """ + # drop any removed buffers + for i, line in enumerate(lines): + if isinstance(line, (AllocateLine, FreeIfNotReusedLine, ReuseLine)): + if line.node.get_name() in V.graph.removed_buffers: + lines[i] = NullLine(self.wrapper) + + def compute_buffer_groups(self, lines): + """ + Populates self.buffer_groups with BufferGroup objects that join + allocations with common storage (due to inplace reuse) into a + single object. 
+ """ + name_to_group = {} + for line in lines: + if isinstance(line, AllocateLine): + name = line.node.get_name() + assert name not in name_to_group + name_to_group[name] = BufferGroup(line.node) + elif isinstance(line, ReuseLine): + old_name = line.node.get_name() + new_name = line.reused_as.get_name() + assert new_name not in name_to_group + # TODO(jansel): we should support reusing buffers created via ExternKernelAlloc + if old_name in name_to_group: + name_to_group[old_name].names.append(new_name) + name_to_group[new_name] = name_to_group[old_name] + + outputs = set(V.graph.get_output_names()) + unique_groups = [*{id(g): g for g in name_to_group.values()}.values()] + for group in unique_groups: + group.is_output = any(x in outputs for x in group.names) + + assert self.buffer_groups is None + self.buffer_groups = unique_groups + return name_to_group + + def convert_to_pool_lines(self, lines): + """ + Convert AllocateLine/FreeIfNotReusedLine/ReuseLine into their + pool-based counterparts. + """ + name_to_group = self.compute_buffer_groups(lines) + for i, line in enumerate(lines): + if isinstance(line, AllocateLine): + if line.node.get_name() in name_to_group: + lines[i] = AllocFromPoolLine( + self.wrapper, name_to_group[line.node.get_name()] + ) + elif isinstance(line, FreeIfNotReusedLine): + assert not line.is_reused + if line.node.get_name() in name_to_group: + lines[i] = DeallocFromPoolLine( + self.wrapper, name_to_group[line.node.get_name()] + ) + elif isinstance(line, ReuseLine): + if line.node.get_name() in name_to_group: + line.delete_old = False + + def compute_live_ranges(self, lines): + """Populate every BufferGroup.live_ranges field based on first/last usage""" + timestep = 0 + worklist = collections.deque(lines) + while worklist: + if isinstance(worklist[0], MemoryPlanningLine): + timestep += 1 + while worklist and isinstance(worklist[0], MemoryPlanningLine): + line = worklist.popleft() + if isinstance(line, PoolMemoryPlanningLine): + line.group.update_usage(timestep) + line.timestep = timestep + else: + worklist.popleft() + + timestep += 1 + assert self.buffer_groups is not None + for group in self.buffer_groups: + if group.is_output: + group.update_usage(timestep) + + def allocate_groups(self): + """ + Assign every allocation to a specific location in a specific AllocationPool. + """ + assert config.memory_pool in ("none", "intermediates", "outputs", "combined") + assert self.buffer_groups is not None + + for group in self.buffer_groups: + group.make_allocation() + + outputs: List[Allocation] = [] + intermediates: List[Allocation] = [] + for group in self.buffer_groups: + assert group.allocation + if group.is_output and config.memory_pool != "combined": + outputs.append(group.allocation) + else: + intermediates.append(group.allocation) + + for block in sorted( + outputs, + key=lambda x: ( + x.size_hint, + -len(x.live_range), + ), + ): + self.pools.allocate_output(block) + + for block in sorted( + intermediates, + key=lambda x: ( + -x.size_hint, + -len(x.live_range), + ), + ): + self.pools.allocate(block) + + self.pools.finalize() + + def mark_first_last_usage(self, lines): + """ + Populate the AllocFromPoolLine.is_first_pool_usage and + DeallocFromPoolLine.is_last_pool_usage fields so that pools + are created/destroyed. 
+ """ + seen = set() + for line in lines: + if isinstance(line, AllocFromPoolLine): + assert line.group.allocation + pool = line.group.allocation.pool + assert pool is not None + if pool not in seen: + line.is_first_pool_usage = True + seen.add(pool) + + seen = set() + for line in reversed(lines): + if isinstance(line, DeallocFromPoolLine): + assert line.group.allocation + pool = line.group.allocation.pool + assert pool is not None + if pool not in seen: + line.is_last_pool_usage = ( + pool.root.get_live_ranges().end <= line.timestep + ) + seen.add(pool) diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/multi_kernel.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/multi_kernel.py new file mode 100644 index 0000000000000000000000000000000000000000..e03ca8eca95b50e02eaed7c5db2ebd77b5edb440 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/multi_kernel.py @@ -0,0 +1,413 @@ +import logging +import os +from typing import Any, List + +from torch._inductor.metrics import get_metric_table, is_metric_table_enabled + +from .. import config +from ..codecache import PyCodeCache, TritonFuture +from ..utils import cache_on_self, do_bench +from ..virtualized import V +from .common import TensorArg + +log = logging.getLogger(__name__) + + +def get_kernel_argdefs(kernel): + arg_defs, _, _ = kernel.args.python_argdefs() + return arg_defs + + +def _get_all_args(args_list): + all_args = max(args_list, key=len)[:] + for args in args_list: + assert set(args).issubset(set(all_args)), f"{args} v.s. {all_args}" + + return all_args + + +def get_all_kernel_argdefs(kernels): + """ + The logic here must match with `get_all_call_args`. + """ + argdefs_list = [get_kernel_argdefs(kernel) for kernel in kernels] + + return _get_all_args(argdefs_list) + + +def get_all_call_args(call_args_list): + """ + Passed in the call_args for each subkernel and return the call_args for the + combined multi-kernel. + + Note an algorithm as follows does not always work: + ``` + all_call_args: Dict[ + Any, None + ] = {} # use a dict rather than set to maintain insertion order + for call_args in call_args_list: + all_call_args.update({arg: None for arg in call_args}) + + all_call_args = list(all_call_args.keys()) + ``` + It will fail if any kernel has the same argument passed in multiple times. + Check test_pass_same_arg_multi_times in test_multi_kernel.py + + Instead, we pick the longest call args and assert that otehr call args are + a subset of it. + """ + return _get_all_args(call_args_list) + + +def get_numel_argdefs(kernel): + numel_argdefs = [] + for tree in kernel.range_trees: + if tree.prefix != "r" or kernel.inside_reduction: + numel_argdefs.append(f"{tree.prefix}numel") + + return numel_argdefs + + +class MultiKernelState: + """ + Maintain state of multi-kernel compilation so we don't define duplicated + multi-kernel for the same set of sub-kernels. + + V.graph.wrapper_code has a reference to MultiKernelState instance. + """ + + def __init__(self): + self.subkernel_to_kernel_name = {} + + def define_kernel(self, kernels): + """ + Previously we name the multi kernel as "multi_kernel_{kernel_names[0]}". + This has some minor issue. + + E.g. 
for persistent reduction https://gist.github.com/shunting314/39e7c00ff8bb2055942ed5a3255d61ca , + there are 2 flavors of non-persistent reduction: + https://gist.github.com/shunting314/056d43d35907e87efb883970b35c17d4 + and + https://gist.github.com/shunting314/02ee753b65c513c54e695626afe682bd + + The only different is cache eviction policy. + + We should name the multi-kernel differently in these 2 cases. + """ + kernel_names = tuple(k.kernel_name for k in kernels) + if kernel_names in self.subkernel_to_kernel_name: + return self.subkernel_to_kernel_name[kernel_names] + + # name the multi kernel based on the first kernel + multi_kernel_name = f"multi_kernel_{len(self.subkernel_to_kernel_name)}" + self.subkernel_to_kernel_name[kernel_names] = multi_kernel_name + + if V.graph.cpp_wrapper: + # we should not generate any python code for multi-kernel during + # the second pass of cpp-wrapper. + return multi_kernel_name + + wrapper = V.graph.wrapper_code + + kernel_call_def_code = "\n".join( + [ + f""" + def call{idx}(need_clone_args=False): + args = [{', '.join(get_kernel_argdefs(kernels[idx]))}] + if need_clone_args: + args, _ = multi_kernel_call.kernels[{idx}].clone_args(*args) + multi_kernel_call.kernels[{idx}].run(*args, {', '.join(get_numel_argdefs(kernels[idx]))}, grid=grid, stream=stream) + """.format( + idx + ).strip( + "\n" + ) + for idx in range(len(kernels)) + ] + ) + + # add subkernel src code hashes to the multi-kernel source code so changing a + # subkernel implementation will result in a differnt py file for + # multi-kernel. This makes cache implementation straightforward since + # we can decide cache file name based on multi-kernel py file name + # directly. + # + # Without the hash added for subkernels, the cache file may be shared by + # different subkernels which is incorrect. + subkernel_hashes = "\n".join( + f"# subkernel{i} code hash: {kernel.code_hash}" + for i, kernel in enumerate(kernels) + ) + + src_code = f""" +{subkernel_hashes} +def run(multi_kernel_call, {', '.join(get_all_kernel_argdefs(kernels))}, {', '.join(get_numel_argdefs(kernels[0]))}, grid, stream): +{kernel_call_def_code} + multi_kernel_call.run_with_argless_kernels([call0, call1]) + """ # noqa: B950 line too long + wrapper.header.splice( + f""" + {multi_kernel_name} = async_compile.multi_kernel({multi_kernel_name!r}, [ + {", ".join(kernel_names)}, + ], + ''' + """ + ) + wrapper.header.splice(src_code) + wrapper.header.splice( + """ + ''' + ) + """ + ) + + return multi_kernel_name + + +class MultiKernel: + """ + This class maintains the compile time state for multi kernels. + + Assume we do codegen for a MultiKernel encapsulating kernel1 and kernel2. + The generated definition for the multi-kernel will looks like: + ``` + multi_kernel_kernel1 = MultiKernelCall([kernel1, kernel2], multi_kernel_definition_code) + ``` + + Here is an concrete example: https://gist.github.com/shunting314/d9f3fb6bc6cee3dbae005825ca196d39 + """ + + def __init__(self, kernels): + assert len(kernels) >= 2 + + self.kernels = kernels + self.kernel_name = V.graph.wrapper_code.multi_kernel_state.define_kernel( + kernels + ) + + # need this since some code in inductor check if the kernel object has an args + # attribute to decide if it's a non-null kernel. + self.args = object() + + def call_kernel(self, kernel_name): + """ + Collect the union of arguments from all subkernels as the arguments + for the multi-kernel. 
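# Worked example of the argument-union rule described above (it mirrors
# _get_all_args / get_all_call_args defined earlier in this file): the longest
# call-arg list wins, every other sub-kernel's args must be a subset of it,
# and duplicated arguments are preserved.
def union_call_args(call_args_list):
    all_args = max(call_args_list, key=len)[:]
    for args in call_args_list:
        assert set(args).issubset(set(all_args)), f"{args} vs {all_args}"
    return all_args


print(union_call_args([
    ["in_ptr0", "out_ptr0"],
    ["in_ptr0", "out_ptr0", "out_ptr0"],   # same argument passed twice
]))
# -> ['in_ptr0', 'out_ptr0', 'out_ptr0']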
+ """ + assert kernel_name == self.kernel_name + call_args_list = [kernel.get_call_args() for kernel in self.kernels] + + all_call_args = get_all_call_args(call_args_list) + grid: List[Any] = [] + + if V.graph.cpp_wrapper: + # for the second pass of cpp-wrapper codegen, we should call + # the fast kernel directly + picked_kernel = MultiKernelCall.lookup_choice(kernel_name) + kernel_name = self.kernels[picked_kernel].kernel_name + final_call_args = call_args_list[picked_kernel] + else: + final_call_args = all_call_args + + # numels for all subkernels should be the same. Use kernels[0] here + self.kernels[0].add_numel_to_call_args_and_grid( + kernel_name, final_call_args, grid + ) + + grid = V.graph.wrapper_code.generate_default_grid(kernel_name, grid) + + V.graph.wrapper_code.generate_kernel_call( + kernel_name, + final_call_args, + grid, + V.graph.scheduler.current_device.index, + ) + + def codegen_nan_check(self): + wrapper = V.graph.wrapper_code + seen = set() + for k in self.kernels: + _, call_args, arg_types = k.args.python_argdefs() + for arg, arg_type in zip(call_args, arg_types): + if arg in seen: + continue + seen.add(arg) + if isinstance(arg_type, TensorArg): + line = f"assert not {arg}.isnan().any().item()" + wrapper.writeline(line) + line = f"assert not {arg}.isinf().any().item()" + wrapper.writeline(line) + + @property + def removed_buffers(self): + return set.intersection(*[k.removed_buffers for k in self.kernels]) + + @property + def inplaced_to_remove(self): + return set.intersection(*[k.inplaced_to_remove for k in self.kernels]) + + @property + @cache_on_self + def inplace_update_buffers(self): + """ + Make sure all kernels have the same inplace update mappings. + """ + for k in self.kernels[1:]: + assert k.inplace_update_buffers == self.kernels[0].inplace_update_buffers + return self.kernels[0].inplace_update_buffers + + def warn_mix_layout(self, kernel_name: str): + pass + + +class MultiKernelCall: + """ + This class is called at run time to actually run the kernel + """ + + def __init__(self, multi_kernel_name, kernels, src_code): + assert len(kernels) >= 2 + self._kernels = kernels + self.multi_kernel_name = multi_kernel_name + + self._run = PyCodeCache.load(src_code).run + self.disable_cache = os.environ.get( + "TORCHINDUCTOR_DISABLE_MULTI_KERNEL_CACHE" + ) == "1" or is_metric_table_enabled("persistent_red_perf") + + self.picked_kernel = None + if config.triton.multi_kernel > 1: + # manually force a subkernel to ease perf testing + picked_by_config = config.triton.multi_kernel - 2 + assert picked_by_config < len(self._kernels) + self.picked_kernel = picked_by_config + elif not self.disable_cache: + self.load_cache() + + self._recorded = False + + def cache_file_path(self): + py_file_path = self._run.__globals__["__file__"] + return os.path.splitext(py_file_path)[0] + ".picked_kernel" + + def load_cache(self): + assert self.picked_kernel is None + path = self.cache_file_path() + if os.path.exists(path): + with open(path) as fd: + self.picked_kernel = int(fd.read()) + assert self.picked_kernel >= 0 and self.picked_kernel < len( + self._kernels + ) + log.debug( + "Load picked kernel %d from cache file %s", self.picked_kernel, path + ) + + def store_cache(self): + assert self.picked_kernel is not None + path = self.cache_file_path() + with open(path, "w") as fd: + fd.write(str(self.picked_kernel)) + log.debug("Store picked kernel %d to cache file %s", self.picked_kernel, path) + + @property + def kernels(self): + """ + Read results from future. 
+ + This should be called after parallel compilation is done. + In case you call this before compilation is done, + it may slow down the parallel compilation. + """ + for i, kernel in enumerate(self._kernels): + if isinstance(kernel, TritonFuture): + self._kernels[i] = kernel.result() + + return self._kernels + + def run(self, *args, **kwargs): + self._run(self, *args, **kwargs) + + @staticmethod + def benchmark_sub_kernels(kernel_calls): + """ + Benchmark all the sub kernels and return the execution time + (in milliseconds) for each of time. + + Unit test may mock this method to force a specific kernel to + be picked. + """ + return [ + do_bench(lambda: kernel_call(True), rep=40, fast_flush=True) + for kernel_call in kernel_calls + ] + + # record_choice and lookup_choice are helper functions for cpp-wrapper + # codegen. The first pass use record_choice to keep the choice and + # the second pass do lookup by calling lookup_choice. + # + # An alternative that reused the multi-kernel cache does not work well + # since during codegen of the second pass, it's very hard to know the + # path for the cache file. Also reading the cache file need do some IO + # which can be slower. + @staticmethod + def record_choice(multi_kernel_name, choice): + """ + Record the multi-kernel choice for cpp-wrapper first pass codegen + for the second pass. + + We should do nothing if this function is not called during codegen. + """ + from torch._inductor.graph import GraphLowering + + if not isinstance(V.graph, GraphLowering): + return + + if not V.graph.record_multi_kernel_choice: + return + + V.graph.multi_kernel_to_choice[multi_kernel_name] = choice + + @staticmethod + def lookup_choice(multi_kernel_name): + # this should always been done during cpp-wrapper codegen + assert V.graph.record_multi_kernel_choice + # there should be no miss + return V.graph.multi_kernel_to_choice[multi_kernel_name] + + def run_with_argless_kernels(self, kernel_calls): + if self.picked_kernel is None: + timings = self.benchmark_sub_kernels(kernel_calls) + self.picked_kernel = timings.index(min(timings)) + k0 = self.kernels[0] + log.debug( + "pick %dth sub-kernel in %s. Size hints %s. Reduction hint %s. 
Timings %s", + self.picked_kernel, + [k.inductor_meta.get("kernel_name") for k in self.kernels], + k0.size_hints, + k0.inductor_meta.get("reduction_hint"), + timings, + ) + + def get_kernel_path(k): + return k.fn.fn.__code__.co_filename + + get_metric_table("persistent_red_perf").add_row( + lambda: { + "kernel1_name": get_kernel_path(self.kernels[0]), + "kernel2_name": get_kernel_path(self.kernels[1]), + "kernel1_latency": timings[0], + "kernel2_latency": timings[1], + "size_hints": k0.size_hints, + "reduction_hint": k0.inductor_meta.get("reduction_hint"), + "speedup": timings[1] / timings[0], + } + ) + + if not self.disable_cache: + self.store_cache() + + if not self._recorded: + self._recorded = True + self.record_choice(self.multi_kernel_name, self.picked_kernel) + kernel_calls[self.picked_kernel]() diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py new file mode 100644 index 0000000000000000000000000000000000000000..e0306cf2d389f35cfca0d949b339ed629162ab2e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py @@ -0,0 +1,3931 @@ +from __future__ import annotations + +import collections +import contextlib +import dataclasses +import functools +import itertools +import logging +import math +import operator +import os +import textwrap +from functools import lru_cache +from typing import ( + Any, + Callable, + cast, + Counter, + DefaultDict, + Dict, + Iterable, + List, + Optional, + Set, + Tuple, + Union, +) + +import sympy + +import torch +import torch._logging + +from torch._inductor.metrics import is_metric_table_enabled, log_kernel_metadata +from torch._prims_common import is_integer_dtype +from torch.utils._sympy.functions import FloorDiv, ModularIndexing +from torch.utils._sympy.value_ranges import ValueRanges +from torch.utils._triton import has_triton_package + +from ..._dynamo.utils import counters +from .. import config, ir, scheduler +from ..codecache import code_hash, get_path, PyCodeCache +from ..dependencies import Dep, MemoryDep, StarDep, WeakDep +from ..ir import IRNode, ReductionHint, TritonTemplateBuffer +from ..optimize_indexing import indexing_dtype_strength_reduction +from ..scheduler import BaseSchedulerNode, BaseScheduling, WhyNoFuse +from ..triton_heuristics import AutotuneHint +from ..utils import ( + cache_on_self, + do_bench, + get_dtype_size, + get_fused_kernel_name, + get_kernel_metadata, + get_max_y_grid, + green_text, + is_welford_reduction, + next_power_of_2, + Placeholder, + sympy_dot, + sympy_index_symbol, + sympy_product, + sympy_subs, + unique, + yellow_text, +) +from ..virtualized import _ops as ops, OpsHandler, ReductionType, StoreMode, V +from ..wrapper_benchmark import get_kernel_category_by_source_code +from .common import ( + CSE, + CSEVariable, + DeferredLine, + free_symbol_startswith, + IndentedBuffer, + index_prevent_reordering, + Kernel, + OpOverrides, + PythonPrinter, + SizeArg, + TensorArg, +) +from .multi_kernel import MultiKernel +from .triton_utils import config_of, signature_of, signature_to_meta + +log = logging.getLogger(__name__) +perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints") +schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") +fusion_log = torch._logging.getArtifactLogger(__name__, "fusion") + + +@lru_cache(None) +def gen_attr_descriptor_import(): + """ + import AttrsDescriptor if the triton version is new enough to have this + class defined. 
+ """ + if not has_triton_package(): + return "" + + import triton.compiler.compiler + + if hasattr(triton.compiler.compiler, "AttrsDescriptor"): + return "from triton.compiler.compiler import AttrsDescriptor" + else: + return "" + + +@lru_cache(None) +def gen_common_triton_imports(): + imports = IndentedBuffer() + imports.splice( + """ + import triton + import triton.language as tl + """ + ) + if attr_desc := gen_attr_descriptor_import(): + imports.writeline(attr_desc) + + imports.splice( + """ + from torch._inductor import triton_helpers, triton_heuristics + from torch._inductor.ir import ReductionHint, TileHint + from torch._inductor.triton_helpers import libdevice, math as tl_math + from torch._inductor.triton_heuristics import AutotuneHint + from torch._inductor.utils import instance_descriptor + """ + ) + return imports.getvalue() + + +@dataclasses.dataclass +class IndexingOptions: + index_str: str + mask_vars: Set[sympy.Symbol] + mask_str: str + expand_str: Optional[str] + _has_rindex: bool + + def has_mask(self): + return bool(self.mask_vars) + + def has_rindex(self): + return self._has_rindex + + def has_tmpmask(self): + return "tmp" in self.mask_str + + def has_rmask(self): + return "rmask" in self.mask_str + + +@dataclasses.dataclass +class BlockPtrOptions: + constant_offset: sympy.Expr + shape: List[sympy.Expr] + strides: List[sympy.Expr] + block_shape: List[str] + order: List[int] + offsets: List[str] + mask_vars: Set[sympy.Symbol] + reshape_suffix: List[str] + + @staticmethod + def create( + strides: List[sympy.Expr], + constant_offset: sympy.Expr, + range_trees: List[IterationRangesEntry], + mask_vars: Set[sympy.Symbol], + ) -> BlockPtrOptions: + """Helper to create a BlockPtrOptions instance""" + block_shape = [f"{t.prefix.upper()}BLOCK" for t in range_trees] + reshape_suffix = [*block_shape] + + broadcasting_dim = [s == 0 for s in strides] + for i, is_broadcasting in enumerate(broadcasting_dim): + if is_broadcasting: + # drop any stride==0 dimensions for performance + reshape_suffix[i] = "1" + + if V.kernel.no_x_dim: + assert range_trees[0].prefix == "x" + reshape_suffix.pop(0) + + if ( + not V.kernel.inside_reduction + and len(strides) == len(V.kernel.numels) - 1 + and V.kernel.numels[-1] != 1 + ): + # Need to expand rank by 1 to match rank when self.inside_reduction=True + reshape_suffix.append("1") + + def filter(it): + """Removes any broadcasting dims from a given sequence""" + assert len(it) == len(broadcasting_dim) + return [ + item + for item, is_broadcasting in zip(it, broadcasting_dim) + if not is_broadcasting + ] + + return BlockPtrOptions( + constant_offset=V.graph.sizevars.lookup_precomputed_size(constant_offset), + shape=[ + V.graph.sizevars.lookup_precomputed_size(t.numel) + for t in filter(range_trees) + ], + strides=[*map(V.graph.sizevars.lookup_precomputed_size, filter(strides))], + block_shape=filter(block_shape), + order=V.graph.sizevars.guarded_order(filter(strides)), + offsets=filter([f"{t.prefix}offset" for t in range_trees]), + mask_vars=mask_vars, + reshape_suffix=reshape_suffix, + ) + + def format(self, name: str, roffset=True) -> str: + """ + Codegen a call to tl.make_block_ptr() + + Args: + name: variable name for pointer + roffset: should roffset be included in offsets=..., for use with tl.advance() + + Returns: + "tl.make_block_ptr(...)" + """ + f = V.kernel.index_to_str + offsets = [*self.offsets] + if not roffset: + offsets[offsets.index("roffset")] = "0" + args = [ + f"{name} + ({f(self.constant_offset)})" + if self.constant_offset != 0 + 
else name, + f"shape={f(self.shape)}", + f"strides={f(self.strides)}", + f"block_shape={f(self.block_shape)}", + f"order={f(self.order)}", + f"offsets={f(offsets)}", + ] + return f"tl.make_block_ptr({', '.join(args)})" + + @cache_on_self + def boundary_check(self) -> List[int]: + """List of indices to pass to tl.load(boundary_check=...)""" + check = [] + for i in range(len(self.shape)): + if ( + self.block_shape[i] != "1" + and not V.graph.sizevars.statically_known_equals(self.strides[i], 0) # type: ignore[arg-type] + and not V.graph.sizevars.statically_known_multiple_of( + self.shape[i], + config.triton.max_block[self.block_shape[i][0]], # type: ignore[arg-type] + ) + and not (V.kernel.no_x_dim and self.block_shape[i] == "XBLOCK") + ): + check.append(i) + return check + + def advance_roffset(self): + """Codegen string to pass to tl.advance(name, ...)""" + advance = ["0"] * len(self.shape) + advance[self.offsets.index("roffset")] = "RBLOCK" + return V.kernel.index_to_str(advance) + + def has_rindex(self): + return "RBLOCK" in self.block_shape + + def has_rmask(self): + return self.has_rindex() + + def has_tmpmask(self): + return False # block_ptr can't do indirect indexing + + def has_mask(self): + return bool(self.boundary_check()) + + +def triton_reshape(value: str, old_shape: List[str], new_shape: List[str]): + """Workaround https://github.com/openai/triton/issues/2836""" + assert isinstance(old_shape, list) and isinstance(new_shape, list) + if old_shape == new_shape: + return value + if [s for s in new_shape if s != "1"] != old_shape: + return f"tl.reshape({value}, [{', '.join(new_shape)}])" + # rewrite to [:, None] syntax, which is less buggy + idx = 0 + expand = [] + for size in new_shape: + if idx < len(old_shape) and size == old_shape[idx]: + expand.append(":") + idx += 1 + else: + assert size == "1" + expand.append("None") + assert idx == len(old_shape) + return f"{value}[{', '.join(expand)}]" + + +class TritonPrinter(PythonPrinter): + def _print_floor(self, expr): + assert len(expr.args) == 1 + return ( + f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})" + ) + + def _print_ceiling(self, expr): + assert len(expr.args) == 1 + return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})" + + def _helper_sqrt(self, expr): + return f"libdevice.sqrt({self._print(expr)}.to(tl.float32))" + + def _print_Where(self, expr): + c = self.doprint(expr.args[0]) + p = self.doprint(expr.args[1]) + q = self.doprint(expr.args[2]) + return f"tl.where({c}, {p}, {q})" + + def _print_Min(self, expr): + nargs = len(expr.args) + if len(expr.args) == 1: + return self._print(expr.args[0]) + + mid = len(expr.args) // 2 + a = self._print(sympy.Min(*expr.args[:mid])) + b = self._print(sympy.Min(*expr.args[mid:])) + return f"tl.minimum({a}, {b})" + + def _print_Max(self, expr): + nargs = len(expr.args) + if len(expr.args) == 1: + return self._print(expr.args[0]) + + mid = len(expr.args) // 2 + a = self._print(sympy.Max(*expr.args[:mid])) + b = self._print(sympy.Max(*expr.args[mid:])) + + return f"tl.maximum({a}, {b})" + + def _print_Abs(self, expr): + assert len(expr.args) == 1 + return f"tl_math.abs({self._print(expr.args[0])})" + + def _print_cos(self, expr): + assert len(expr.args) == 1 + return f"libdevice.cos(({self._print(expr.args[0])}).to(tl.float32))" + + def _print_cosh(self, expr): + assert len(expr.args) == 1 + return f"libdevice.cosh(({self._print(expr.args[0])}).to(tl.float32))" + + def _print_acos(self, expr): + assert len(expr.args) == 1 + return 
f"libdevice.acos(({self._print(expr.args[0])}).to(tl.float32))" + + def _print_sin(self, expr): + assert len(expr.args) == 1 + return f"libdevice.sin(({self._print(expr.args[0])}).to(tl.float32))" + + def _print_sinh(self, expr): + assert len(expr.args) == 1 + return f"libdevice.sinh(({self._print(expr.args[0])}).to(tl.float32))" + + def _print_asin(self, expr): + assert len(expr.args) == 1 + return f"libdevice.asin(({self._print(expr.args[0])}).to(tl.float32))" + + def _print_tan(self, expr): + assert len(expr.args) == 1 + return f"libdevice.tan(({self._print(expr.args[0])}).to(tl.float32))" + + def _print_tanh(self, expr): + assert len(expr.args) == 1 + return f"libdevice.tanh(({self._print(expr.args[0])}).to(tl.float32))" + + def _print_atan(self, expr): + assert len(expr.args) == 1 + return f"libdevice.atan(({self._print(expr.args[0])}).to(tl.float32))" + + def _print_FloorDiv(self, expr): + if expr.is_integer: + return super()._print_FloorDiv(expr) + + x, div = expr.args + x = self.paren(self.doprint(x)) + div = self.paren(self.doprint(div)) + return f"libdevice.floor({x} / {div}).to({V.kernel.index_dtype})" + + def _print_Round(self, expr): + assert len(expr.args) == 1 + return ( + f"libdevice.llrint({self._print(expr.args[0])}).to({V.kernel.index_dtype})" + ) + + def _print_RoundDecimal(self, expr): + assert len(expr.args) == 2 + number, ndigits = expr.args + if number.is_integer: + # ndigits < 0 should have been filtered by the sympy function + assert ndigits < 0 + raise ValueError( + f"For integer inputs, only non-negative ndigits are currently supported, but got {ndigits}." + ) + return f"libdevice.nearbyint(1e{ndigits} * {self.paren(self._print(number))}) * 1e{-ndigits}" + + +texpr = TritonPrinter().doprint +pexpr = PythonPrinter().doprint + + +def triton_compute_type(dtype): + triton_type_name = str(dtype).split(".")[-1] + if triton_type_name == "bool": + triton_type_name = "int1" + elif triton_type_name in ("float16", "bfloat16"): + # float16 math is done in float32 inside the kernel + triton_type_name = "float32" + elif triton_type_name == "float8_e4m3fn": + triton_type_name = "float8e4nv" + elif triton_type_name == "float8_e5m2": + triton_type_name = "float8e5" + elif triton_type_name == "float8_e4m3fnuz": + triton_type_name = "float8e4b8" + elif triton_type_name == "float8_e5m2": + triton_type_name = "float8e5b16" + return f"tl.{triton_type_name}" + + +def triton_store_type(dtype): + triton_type_name = str(dtype).split(".")[-1] + if triton_type_name == "bool": + triton_type_name = "int8" + elif triton_type_name == "float8_e4m3fn": + triton_type_name = "float8e4nv" + elif triton_type_name == "float8_e5m2": + triton_type_name = "float8e5" + return f"tl.{triton_type_name}" + + +def triton_acc_type(dtype): + if is_integer_dtype(dtype) and dtype.is_signed: + nbits = 64 if dtype == torch.int64 else 32 + return f"tl.int{nbits}" + return triton_compute_type(dtype) + + +def triton_constant(value): + if value == float("inf"): + return 'float("inf")' + elif value == float("-inf"): + return 'float("-inf")' + elif math.isnan(value): + return 'float("nan")' + return repr(value) + + +class TritonCSEVariable(CSEVariable): + def __init__(self, name, bounds: ValueRanges[Any]): + super().__init__(name, bounds) + # We'll use this to track which masks the variable needs when used for indirect indexing + self.mask_vars: Set[str] = set() + + def update_on_args(self, name, args, kwargs): + # When making a variable that is going to be used in indirect indexing + # if a where clause is used it 
should mean that the result is always a + # valid index, so you shouldn't include any of the dependent variables + # in the resulting load mask + if name == "where": + return + for arg in args: + if isinstance(arg, TritonCSEVariable): + self.mask_vars.update(arg.mask_vars) + elif isinstance(arg, sympy.Symbol) and arg.name[0] in "xyr": + # most of the time index vars don't need masks associated with them + # however, when index vars are used to compute indices for indirect reads + # those reads should subsequently be masked, + self.mask_vars.update({f"{arg.name[0]}mask"}) + + def __repr__(self): + return f"TritonCSEVariable(name={self.name})" + + +class TritonOverrides(OpOverrides): + """Map element-wise ops to Triton""" + + @staticmethod + def to_dtype(x, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None): + def _get_min_elements_per_thread( + src_dtype: torch.dtype, dst_dtype: torch.dtype + ) -> int: + if src_dtype == dst_dtype: + # No data type conversion is needed. No requirements on min_elem_per_thread. + return 0 + + # fp8 data type conversions has min_elem_per_thread requirements. + # Refer to Triton implementations here: + # https://github.com/openai/triton/blob/10f59d8ce04052521c1bc0cb3a3f8b98918fc7e3/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp#L10. + fp8_dtypes = { + torch.float8_e4m3fn, + torch.float8_e5m2, + } + # Triton doesn't support type conversions between fp8_e4m3 and fp8_e5m2. + assert not ( + src_dtype in fp8_dtypes + and dst_dtype in fp8_dtypes + and src_dtype != dst_dtype + ), "Conversions between float8_e5m2 and float8_e4m3fn is not supported!" + if src_dtype == torch.float8_e5m2 or dst_dtype == torch.float8_e5m2: + return 4 + if src_dtype == torch.float8_e4m3fn or dst_dtype == torch.float8_e4m3fn: + return 2 + # No requirements on min_elem_per_thread. + return 0 + + if src_dtype is not None: + # Both dtype and src_dtype are set. This is used by torch to(dtype=dtype). + # It takes the maximum min_elem_per_thread if there are multiple fp8 conversions + # in the same kernel. + V.kernel.min_elem_per_thread = max( + _get_min_elements_per_thread(src_dtype, dtype), + V.kernel.min_elem_per_thread, + ) + + if dtype == torch.bool: + return f"({x} != 0)" + elif dtype == torch.uint8: + # to work around llvm uint conversion semantics + # that produces 0's for negative values + return f"{x}.to(tl.int8).to(tl.uint8)" + return f"{x}.to({triton_compute_type(dtype)})" + + @staticmethod + def to_dtype_bitcast(x, dtype: torch.dtype, src_dtype: torch.dtype): + triton_dtype = triton_compute_type(dtype) + # We may promote float16 or bfloat16 to float32 and cause the + # bitwidth of dtype to be different from the input tensor (i.e. float32). + # In such as case, we will have to convert the input tensor to + # its src_type, perform bitcast, and then convert the bit-casted + # tensor back to float to ensure we use values with the right precision. 
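# Eager-mode analogue of the bitcast rule described above and implemented in
# to_dtype_bitcast() just below: fp16/bf16 values promoted to fp32 inside the
# kernel must be narrowed back to the source dtype before the bit
# reinterpretation. torch.Tensor.view(dtype) plays the role of
# `.to(..., bitcast=True)` here.
import torch

x_fp32 = torch.tensor([1.5, -2.0])                 # kernel math runs in fp32
bits = x_fp32.to(torch.float16).view(torch.int16)  # narrow, then bitcast (2 bytes each)
roundtrip = bits.view(torch.float16).to(torch.float32)
assert torch.equal(roundtrip, x_fp32)              # 1.5 and -2.0 are exact in fp16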
+ if src_dtype in (torch.float16, torch.bfloat16): + triton_src_dtype = str(src_dtype).split(".")[-1] + cast_x = f"{x}.to(tl.{triton_src_dtype})" + cast_x = f"{cast_x}.to({triton_dtype}, bitcast=True)" + return f"{cast_x}.to(tl.float32)" + else: + return f"{x}.to({triton_dtype}, bitcast=True)" + + @staticmethod + def _shaped_constant(value, dtype, shape): + type_ = torch._prims_common.dtype_to_type(dtype) + triton_val = triton_constant(type_(value)) + triton_type = triton_compute_type(dtype) + + if triton_type == "tl.float32": + # Float constants are always f32 in triton + return triton_val + + # NOTE: We use a tensor here in order to get the expected type. + # Otherwise, e.g. float64 constants would be trunctated to float32. + return f"tl.full({shape}, {triton_val}, {triton_type})" + + @classmethod + def constant(cls, value, dtype): + return cls._shaped_constant(value, dtype, shape=[]) + + @staticmethod + def abs(x): + return f"tl_math.abs({x})" + + @staticmethod + def libdevice_abs(x): + return f"libdevice.abs({x})" + + @staticmethod + def exp(x): + return f"tl_math.exp({x})" + + @staticmethod + def libdevice_exp(x): + return f"libdevice.exp({x})" + + @staticmethod + def exp2(x): + return f"libdevice.exp2({x})" + + @staticmethod + def expm1(x): + return f"libdevice.expm1({x})" + + @staticmethod + def sqrt(x): + return f"libdevice.sqrt({x})" + + @staticmethod + def libdevice_sqrt(x): + return f"libdevice.sqrt({x})" + + @staticmethod + def relu(x): + bug = config.triton.inject_relu_bug_TESTING_ONLY + if bug == "compile_error": + return "compile error!" + elif bug == "runtime_error": + # NB: this only triggers runtime error as long as input + # is not all zero + return f'triton_helpers.device_assert_then({x} == 0, "injected assert fail", {x})' + elif bug == "accuracy": + return f"{x} + 1" + elif bug is None: + return ops.maximum("0", x) + else: + raise AssertionError( + f"unrecognized config triton.inject_relu_bug_TESTING_ONLY = {bug!r}" + ) + + @staticmethod + def minimum(a, b): + return f"triton_helpers.minimum({a}, {b})" + + @staticmethod + def maximum(a, b): + return f"triton_helpers.maximum({a}, {b})" + + @staticmethod + def where(a, b, c): + return f"tl.where({a}, {b}, {c})" + + @staticmethod + def cos(x): + return f"tl_math.cos({x})" + + @staticmethod + def libdevice_cos(x): + return f"libdevice.cos({x})" + + @staticmethod + def sin(x): + return f"tl_math.sin({x})" + + @staticmethod + def libdevice_sin(x): + return f"libdevice.sin({x})" + + @classmethod + def index_expr(cls, expr, dtype): + raise NotImplementedError("ops.index_expr not implemented outside a kernel") + + @staticmethod + def masked(mask, body, other): + raise NotImplementedError("ops.masked not implemented outside a kernel") + + @staticmethod + def lgamma(x): + return f"libdevice.lgamma({x})" + + @staticmethod + def erf(x): + return f"libdevice.erf({x})" + + @staticmethod + def cosh(x): + return f"libdevice.cosh({x})" + + @staticmethod + def sinh(x): + return f"libdevice.sinh({x})" + + @staticmethod + def acos(x): + return f"libdevice.acos({x})" + + @staticmethod + def acosh(x): + return f"libdevice.acosh({x})" + + @staticmethod + def asin(x): + return f"libdevice.asin({x})" + + @staticmethod + def asinh(x): + return f"libdevice.asinh({x})" + + @staticmethod + def atan2(x, y): + return f"libdevice.atan2({x}, {y})" + + @staticmethod + def atan(x): + return f"libdevice.atan({x})" + + @staticmethod + def atanh(x): + return f"libdevice.atanh({x})" + + @staticmethod + def copysign(x, y): + return f"libdevice.copysign({x}, 
{y})" + + @staticmethod + def erfc(x): + return f"libdevice.erfc({x})" + + @staticmethod + def erfinv(x): + return f"libdevice.erfinv({x})" + + @staticmethod + def hypot(x, y): + return f"libdevice.hypot({x}, {y})" + + @staticmethod + def log10(x): + return f"libdevice.log10({x})" + + @staticmethod + def nextafter(x, y): + return f"libdevice.nextafter({x}, {y})" + + @staticmethod + def logical_and(a, b): + return f"{a} & {b}" + + @staticmethod + def logical_not(a): + return f"{a} == 0" + + @staticmethod + def logical_or(a, b): + return f"{a} | {b}" + + @staticmethod + def logical_xor(a, b): + return f"({a} ^ {b})" + + @staticmethod + def bitwise_and(a, b): + return f"{a} & {b}" + + @staticmethod + def bitwise_not(a): + return f"~{a}" + + @staticmethod + def bitwise_or(a, b): + return f"{a} | {b}" + + @staticmethod + def bitwise_xor(a, b): + return f"{a} ^ {b}" + + @staticmethod + def bitwise_left_shift(a, b): + return f"{a} << {b}" + + @staticmethod + def bitwise_right_shift(a, b): + return f"{a} >> {b}" + + @staticmethod + def rand(seed, offset): + offset = f"({offset}).to(tl.uint32)" + return f"tl.rand({seed}, {offset})" + + @staticmethod + def randn(seed, offset): + offset = f"({offset}).to(tl.uint32)" + return f"tl.randn({seed}, {offset})" + + @staticmethod + def randint64(seed, offset, low, high): + offset = f"({offset}).to(tl.uint32)" + return f"triton_helpers.randint64({seed}, {offset}, {low}, {high})" + + @staticmethod + def load_seed(name, offset): + raise NotImplementedError("ops.load_seed not implemented outside a kernel") + + @staticmethod + def rsqrt(x): + return f"libdevice.rsqrt({x})" + + @staticmethod + def log1p(x): + return f"libdevice.log1p({x})" + + @staticmethod + def tan(x): + return f"libdevice.tan({x})" + + @staticmethod + def tanh(x): + return f"libdevice.tanh({x})" + + @staticmethod + def sigmoid(x): + return f"tl.sigmoid({x})" + + @staticmethod + def libdevice_sigmoid(x): + return f"1/(1 + libdevice.exp(-({x})))" + + @staticmethod + def signbit(x): + # XX: This is wrong for the value -0.0 in floating point + return f"libdevice.signbit({x}) if ({x}).dtype is tl.float32 else {x} < 0" + + @staticmethod + def fmod(a, b): + return f"libdevice.fmod({a}, {b})" + + @staticmethod + def pow(a, b): + return f"libdevice.pow({a}, {b})" + + @staticmethod + def log(x): + return f"tl_math.log({x})" + + @staticmethod + def libdevice_log(x): + return f"libdevice.log({x})" + + @staticmethod + def isinf(x): + return f"libdevice.isinf({x}).to(tl.int1)" + + @staticmethod + def isnan(x): + return f"libdevice.isnan({x}).to(tl.int1)" + + @staticmethod + def round(x): + return f"libdevice.nearbyint({x})" + + @staticmethod + def floor(x): + return f"libdevice.floor({x})" + + @staticmethod + def floordiv(a, b): + # See the comment in lowering.div_mode. a and b are integer type. + # Similar to div_floor_kernel_cuda in pytorch core. + # Notice that // in triton behaves as truncdiv instead of floordiv + quot = f"{a} // {b}" + rem = f"{a} % {b}" + return f"tl.where(({a} < 0) != ({b} < 0), tl.where({rem} != 0, {quot} - 1, {quot}), {quot})" + + @staticmethod + def sign(x): + def to_int(s): + return f"{s}.to(tl.int8)" + + left = to_int(ops.lt("0", x)) + right = to_int(ops.lt(x, "0")) + sub = ops.sub(left, right) + return f"{sub}.to({x}.dtype)" + + @staticmethod + def trunc(x): + return f"libdevice.trunc({x})" + + @staticmethod + def truncdiv(a, b): + # See the comment in lowering.div_mode. a and b are integer type. 
+ # Notice that // in triton behaves as truncdiv instead of floordiv + return f"{a} // {b}" + + @staticmethod + def ceil(x): + return f"libdevice.ceil({x})" + + +TritonOverrides._initialize_pointwise_overrides("triton") + + +# Use mypy to check protocol implemented correctly +def _typecheck_TritonOverrides(h: TritonOverrides) -> OpsHandler[str]: + return h + + +class TritonKernelOverrides(TritonOverrides): + """Map element-wise ops to Triton within a TritonKernel + + Unlike TritonOverrides, these assume the code is going to be inserted into + the body of the main triton kernel and so it may use indexing and mask + variables which are assumed to already be defined in the current scope. + """ + + @classmethod + def constant(cls, value, dtype): + # NOTE: Cannot use shape=[] as it's not supported by triton-rocm + # We could use shape=[1] instead but starting with the correct + # ndim avoids extra `tt.expand_dim` ops appearing in the triton IR. + ndim = V.kernel.triton_tensor_ndim() + shape = [1] * ndim + return cls._shaped_constant(value, dtype, shape=shape) + + @classmethod + def index_expr(cls, expr, dtype): + indexing = V.kernel.indexing(expr, block_ptr=False) + assert isinstance(indexing, IndexingOptions) + # This is called from CSEProxy.__getattr__, so we'll set the bounds there + var = V.kernel.cse.generate(V.kernel.compute, indexing.index_str) + + if dtype not in {torch.int32, torch.int64}: + var = V.kernel.cse.generate(V.kernel.compute, cls.to_dtype(var, dtype)) + var.mask_vars = indexing.mask_vars + return var + + @staticmethod + def masked(mask, body, other): + with V.kernel.mask_loads(mask) as new_mask: + result = body() + + # Take dtype from result to prevent accidental promotion + other = V.kernel.cse.generate( + V.kernel.compute, + f"tl.full({result}.shape, {triton_constant(other)}, {result}.dtype)", + ) + return ops.where(new_mask, result, other) + + @staticmethod + def load_seed(name, offset): + var = V.kernel.args.input(name) + return ( + f"tl.load({var} + {V.kernel.args.seed_offset('load_seed_offset', offset)})" + ) + + @staticmethod + def frexp(x): + cache_key = f"frexp({x})" + if cache_key in V.kernel.cse.cache: + return V.kernel.cse.cache[cache_key] + + mantissa = V.kernel.cse.newvar() + exponent = V.kernel.cse.newvar() + V.kernel.compute.writeline( + f"{mantissa}, {exponent} = triton_helpers.frexp({x})" + ) + V.kernel.cse.cache[cache_key] = (mantissa, exponent) + return (mantissa, exponent) + + +# Use mypy to check protocol implemented correctly +def _typecheck_TritonKernelOverrides(h: TritonKernelOverrides) -> OpsHandler[str]: + return h + + +@dataclasses.dataclass +class IterationRanges: + """ + Each range tree represents multiple sets of iteration indexing + in a single tiled dimension in the output kernel. + + If you have two loops ranges one (4, 3, 2) and another (4, 6), + then the range tree will be: + 4 (i0) + 3 (i1) 6 (i3) + 2 (i2) + Where i0 is shared between both loops, but then the split into + different indexing vars. All loop ranges must iterate over + the same number of elements. 
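# Concrete version of the (4, 3, 2) / (4, 6) example in the docstring above:
# both loop nests cover the same 24 elements, and each original index is
# recovered from the flat index with a (divisor, length) pair, mirroring
# ModularIndexing(index, divisor, length) and FloorDiv(index, divisor).
def modular_indexing(index: int, divisor: int, length: int) -> int:
    return (index // divisor) % length


flat = 17                              # a point in the 24-element iteration space
i2 = modular_indexing(flat, 1, 2)      # innermost of (4, 3, 2)
i1 = modular_indexing(flat, 2, 3)
i0 = flat // 6                         # divisor 3*2 exhausts the range -> FloorDiv
i3 = modular_indexing(flat, 1, 6)      # innermost of (4, 6); i0 is shared by both nests
assert flat == (i0 * 3 + i1) * 2 + i2 == i0 * 6 + i3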
+ """ + + def __init__( + self, + name: str, + var_list: List[sympy.Symbol], + var_ranges: Dict[sympy.Symbol, sympy.Expr], + numel: sympy.Expr, + prefix: str, + *, + kernel: TritonKernel, + divisor=sympy.Integer(1), + length=sympy.Integer(1), + root: IterationRangesRoot, + ): + super().__init__() + self.name = name + self.var_list = var_list + self.var_ranges = var_ranges + self.numel = numel + self.prefix = prefix + self.divisor = divisor + self.length = length + self.kernel = kernel + self.root = root + + def symbol(self): + return sympy_index_symbol(self.name) + + +class IterationRangesRoot(IterationRanges): + def __init__( + self, + name: str, + numel: sympy.Expr, + prefix: str, + index: int, + kernel: TritonKernel, + pid_cache=None, + *, + is_loop: bool, + tensor_dim: Optional[int], + grid_dim: Optional[int], + ): + if pid_cache is None: + pid_cache = {} + super().__init__( + name=name, + var_list=[], + var_ranges={}, + numel=numel, + prefix=prefix, + kernel=kernel, + root=self, + ) + self.index = index + # Store all the nodes in one flat list + self.nodes: Dict[sympy.Expr, IterationRangesEntry] = {} + # This is for re-ordering program ID in triton mm template + # pid_cache["tl.program_id(0)"] = pid_m + self.pid_cache: Dict[str, str] = pid_cache + + # True if the dimension is implemented as a single program looping over + # the full dimension (currently only used for non-persistent reduction) + assert not is_loop or (prefix == "r" and grid_dim is None) + self.is_loop = is_loop + # Index of corresponding dimension on triton tensors + self.tensor_dim = tensor_dim + # Index of corresponding dimension in the triton grid + self.grid_dim = grid_dim + + def __repr__(self): + return f"IterationRangesRoot({self.name!r}, {self.numel}, ...)" + + def cache_clear(self): + for node in self.nodes.values(): + node.cache_clear() + + def lookup(self, divisor, length): + """ + Lookup a given RangeTreeEntry, creating it if needed + """ + if V.graph.sizevars.statically_known_equals(divisor * length, self.numel): + expr = FloorDiv(sympy_index_symbol(f"{self.prefix}index"), divisor) + else: + expr = ModularIndexing( + sympy_index_symbol(f"{self.prefix}index"), divisor, length + ) + + if expr not in self.nodes: + node = IterationRangesEntry( + f"{self.prefix}{next(V.kernel.iter_vars_count)}", + divisor, + length, + expr, + self, + ) + V.kernel.range_tree_nodes[node.symbol()] = node + self.var_list.append(node.symbol()) + self.var_ranges[node.symbol()] = length + self.nodes[expr] = node + return self.nodes[expr] + + def construct_entries(self, lengths: List[sympy.Expr]): + divisor = sympy.Integer(1) + itervars = [] + for length in reversed(lengths): + itervars.append(self.lookup(divisor, length)) + divisor = divisor * length + return list(reversed(itervars)) + + def construct(self, lengths: List[sympy.Expr]): + return [e.symbol() for e in self.construct_entries(lengths)] + + def vars_and_sizes(self, index: sympy.Expr): + """Figure out vars from this tree used in index""" + nodes = [V.kernel.range_tree_nodes.get(s) for s in index.free_symbols] + nodes = [n for n in nodes if n and n.prefix == self.prefix] + nodes.sort(key=lambda x: V.graph.sizevars.size_hint(x.divisor)) + divisor = sympy.Integer(1) + index_vars = [] + sizes = [] + + def add(node): + nonlocal divisor + index_vars.append(node.symbol()) + sizes.append(node.length) + divisor = divisor * node.length + + for node in nodes: + if not V.graph.sizevars.statically_known_equals(node.divisor, divisor): + # fill in unused index var + add(self.lookup(divisor, 
FloorDiv(node.divisor, divisor))) + divisor = node.divisor + add(node) + if not V.graph.sizevars.statically_known_equals(self.numel, divisor): + # fill in unused index var + add(self.lookup(divisor, FloorDiv(self.numel, divisor))) + + return list(reversed(index_vars)), list(reversed(sizes)) + + def ranges_code(self): + assert self.tensor_dim is not None + size = self.kernel.indexing_size_str(self.tensor_dim) + index_dtype = self.kernel.index_dtype + convert = f".to({index_dtype})" if index_dtype != "tl.int32" else "" + return f"tl.arange(0, {self.prefix.upper()}BLOCK){size}{convert}" + + def scalar_code(self, value): + index_dtype = self.kernel.index_dtype + ndim = self.kernel.triton_tensor_ndim() + size = [1] * ndim + return f"tl.full({size}, {value}, {index_dtype})" + + def get_pid(self): + assert self.grid_dim is not None + key = f"tl.program_id({self.grid_dim})" + # y_grid has a limit, so express it in terms of y and z in case of overflow. + # z grid is only exercised when max_tiles == 3 (off by default). + if ( + self.grid_dim == 1 + and config.triton.max_tiles <= 2 + and not (isinstance(self.numel, int) and self.numel <= get_max_y_grid()) + ): + key = f"{key} * (tl.program_id({self.grid_dim + 1}) + 1)" + pid = self.pid_cache.get(key, key) + if self.kernel.index_dtype != "tl.int32": + return f"{pid}.to({self.kernel.index_dtype})" + return pid + + def codegen_header(self, code): + x = self.prefix + if self.is_loop: + code.writeline(f"{self.name} = {x}offset + {x}base") + elif self.grid_dim is None: + # no need to "{x}offset = " + code.writeline(f"{self.name} = {self.ranges_code()}") + code.writeline(f"{x}offset = 0") + else: + if self.tensor_dim is not None: + line = f"{x}offset + {self.ranges_code()}" + else: + line = self.scalar_code(f"{x}offset") + code.writelines( + [ + f"{x}offset = {self.get_pid()} * {x.upper()}BLOCK", + f"{self.name} = {line}", + ] + ) + code.writeline(f"{x}mask = {self.name} < {x}numel") + + +class IterationRangesEntry(IterationRanges): + def __init__( + self, + name: str, + divisor: sympy.Expr, + length: sympy.Expr, + expr: sympy.Expr, + parent: IterationRanges, + ): + super().__init__( + name=name, + numel=parent.numel / length, + var_list=parent.var_list, + var_ranges=parent.var_ranges, + prefix=parent.prefix, + divisor=divisor, + length=length, + kernel=parent.kernel, + root=parent.root, + ) + self.parent = parent + self.codegen = functools.lru_cache(None)(self._codegen) + self.expr = expr + + def __repr__(self): + return f"IterationRangesEntry({self.name}, {self.divisor}, {self.length}, {self.expr}, {self.var_ranges})" + + def set_name(self, name): + self.codegen = lambda: name # type: ignore[assignment] + self.codegen.cache_clear = lambda: None # type: ignore[method-assign] + self.name = name + + def cache_clear(self): + self.codegen.cache_clear() + + def writeline(self, line): + if self.root.is_loop: + V.kernel.indexing_code.writeline(line) + else: + # lift non-reduction stores outside loop + V.kernel.body.writeline(line) + + def _codegen(self): + self.writeline(f"{self.name} = " + texpr(V.kernel.rename_indexing(self.expr))) + return self.name + + def precomputed_args(self): + # for dynamic shapes, find parts of indexing expressions that have to be precomputed + precomputed_args: List[sympy.Expr] = [] + if isinstance(self.expr, sympy.Symbol): + return precomputed_args + assert isinstance(self.expr, (FloorDiv, ModularIndexing)), type(self.expr) + for arg in self.expr.args[1:]: + if not isinstance(arg, (sympy.Integer, sympy.Symbol)): + symbols = 
arg.free_symbols + if len(symbols) > 0 and all(s.name.startswith("s") for s in symbols): + precomputed_args.append(arg) + return precomputed_args + + def __hash__(self): + return hash(self.name) + + def __eq__(self, other): + return self.name == other.name + + +class HelperFunctions: + """An ordered set of helper functions.""" + + _templates_seen: Dict[str, str] # Template code to function name + finalized_helpers: List[str] + + def __init__(self): + self._templates_seen = {} + self.finalized_helpers = [] + + def add(self, template_code: str) -> str: + """This accepts a function definition with the function name + left as a format specifier e.g. + + @triton.jit + def {name}(arg0, arg1): + return arg0 + arg1 + + We add the templated code to the function set and return the name + assigned to that function. + + """ + existing_name = self._templates_seen.get(template_code) + if existing_name is not None: + # Don't duplicate existing helpers + return existing_name + + name = f"_triton_helper_fn{len(self.finalized_helpers)}" + self._templates_seen[template_code] = name + self.finalized_helpers.append(template_code.format(name=name)) + return name + + def __iter__(self): + return iter(self.finalized_helpers) + + def __getitem__(self, idx): + return self.finalized_helpers[idx] + + +class TritonKernel(Kernel): + overrides = TritonKernelOverrides # type: ignore[assignment] + sexpr = pexpr + + helper_functions: HelperFunctions + + def __init__( + self, + *groups, + index_dtype: str, + mutations: Optional[Set[str]] = None, + pid_cache=None, + reduction_hint=ReductionHint.DEFAULT, + min_elem_per_thread=0, + disable_persistent_reduction=False, + ): + if pid_cache is None: + pid_cache = {} + super().__init__() + self.numels = [V.graph.sizevars.simplify(s) for s in groups] + self.mutations: Set[str] = mutations if mutations is not None else set() + self.range_trees: List[IterationRangesRoot] = [] + self.range_tree_nodes: Dict[sympy.Symbol, IterationRangesEntry] = {} + self.iter_vars_count = itertools.count() + self.inside_reduction = self.numels[-1] != 1 + self.body = IndentedBuffer() + self.indexing_code = IndentedBuffer() + self.suffix: IndentedBuffer = IndentedBuffer() # type: ignore[assignment] + self.outside_loop_vars: Set[Any] = set() + self.reduction_hint = reduction_hint + self.index_dtype: str = index_dtype + self.min_elem_per_thread = min_elem_per_thread + self.last_usage: Set[str] = set() + self.block_ptr_id = itertools.count() + # buffer accesses in the kernel + self.buf_accesses: DefaultDict[str, List[Dep]] = collections.defaultdict(list) + + self.persistent_reduction: bool = ( + not disable_persistent_reduction + ) and self.should_use_persistent_reduction() + self.no_x_dim = ( + self.reduction_hint == ReductionHint.INNER + and self.persistent_reduction + and len(self.numels) == 2 + and self.numels[-1] >= 256 + ) + self.initialize_range_tree(pid_cache) + + self.helper_functions = HelperFunctions() + + # A set of autotuning hints to pass as part of triton_meta + self.autotune_hints: Set[AutotuneHint] = set() + + # define this in a closure to make cache local to object + @functools.lru_cache(None) + def simplify_indexing(index: sympy.Expr): + index = V.graph.sizevars.simplify_with_ranges(index, self.var_ranges()) + for tree in self.range_trees: + index = self.combine_contiguous_dims(index, tree) + return index + + self.simplify_indexing = simplify_indexing + self.code_hash = None + self.triton_meta: Optional[Dict[str, object]] = None + + def need_numel_args(self): + r""" + Indicate whether we 
need provide numel as arguments for the generated + kernel calls in the benchmark. + + Should be true for pointwise/reduction kernels but false for triton + matmul kernels. + """ + return True + + def should_use_persistent_reduction(self) -> bool: + """ + Heuristic to set self.persistent_reduction and add guards + if needed. + """ + if not (self.inside_reduction and config.triton.persistent_reductions): + return False + threshold = { + ReductionHint.INNER: 1024, + }.get(self.reduction_hint, 64) + + # If multi_kernel is enabled, we do more aggressive persistent reduction. + # This may result in some persisent reductions slower than the + # corresponding non-persistent reductions. MultiKernel will do benchmarking + # to pick the faster one. + if config.triton.multi_kernel: + threshold *= 16 + last_numel = self.numels[-1] + if not isinstance(last_numel, (int, sympy.Integer)): + # Not static + return False + hint = V.graph.sizevars.size_hint(last_numel) + if hint > threshold: + return False + # will need to recompile if we cross a larger power of 2 boundary + V.graph.sizevars.guard_leq(self.numels[-1], next_power_of_2(hint)) # type: ignore[arg-type] + return True + + def set_last_usage(self, nodes): + if not self.inside_reduction or self.persistent_reduction: + return + self.last_usage = set( + itertools.chain.from_iterable( + n.last_usage for n in nodes if n is not EnableReduction + ) + ) + + def initialize_range_tree(self, pid_cache): + no_r_dim = not self.inside_reduction or self.numels[-1] == 1 + + prefixes = "zyxr" + active_prefixes = prefixes[-len(self.numels) :] + + grid_dims = "xyz" + if self.no_x_dim: + tensor_dims = "r" + elif no_r_dim: + tensor_dims = "xyz" + else: + tensor_dims = "xyzr" + + tensor_dims = "".join(p for p in tensor_dims if p in active_prefixes) + + for i, prefix in enumerate(active_prefixes): + is_reduction = prefix == "r" + tensor_dim = tensor_dims.find(prefix) if prefix in tensor_dims else None + grid_dim = None if is_reduction else grid_dims.find(prefix) + index = i if grid_dim is None else grid_dim + self.range_trees.append( + IterationRangesRoot( + f"{prefix}index", + self.numels[i], + prefix, + index, + self, + pid_cache=pid_cache, + is_loop=is_reduction and not self.persistent_reduction, + tensor_dim=tensor_dim, + grid_dim=grid_dim, + ) + ) + for tree in self.range_trees: + # reduction indexing goes inside a loop + if not tree.is_loop: + tree.codegen_header(self.body) + if self.inside_reduction and self.range_trees[-1].is_loop: + # workaround for this issue: + # https://gist.github.com/jansel/6527126f781559095c5531f98a4235a7 + self.body.writeline(f"rbase = {self.range_trees[-1].ranges_code()}") + + def disable_reduction(self): + should_flush = self.range_trees[-1].is_loop + + @contextlib.contextmanager + def ctx(): + if self.numels[-1] == 1: + assert not self.inside_reduction + yield + return + if should_flush: + # calling codegen_body() will flush all the pending buffers + # and write out a reduction loop + self.codegen_body() + self.inside_reduction = False + try: + yield + if should_flush: + # flush out any code before opening the next loop + self.codegen_body() + finally: + self.inside_reduction = True + + return ctx() + + def set_ranges(self, *lengths): + assert len(lengths) == len(self.range_trees) + return [ + ranges.construct(length) + for length, ranges in zip(lengths, self.range_trees) + ] + + @staticmethod + def _split_iteration_ranges( + groups: Iterable[sympy.Expr], lengths: List[List[sympy.Expr]] + ): + sv = V.graph.sizevars + new_ranges: 
List[List[sympy.Expr]] = [[] for _ in groups] + remaining = [sv.simplify(g) for g in groups] + var_count = itertools.count() + + def add_range(i, expr): + expr = sv.simplify(expr) + if not sv.statically_known_multiple_of(remaining[i], expr): + raise CantSplit() + # guard on the last item out + remaining[i] = FloorDiv(remaining[i], expr) + new_ranges[i].append(expr) + return next(var_count) + + def make_combined(size, idx1, idx2): + def getter(flat_vars): + return size * flat_vars[idx1] + flat_vars[idx2] + + return getter + + return_getters_groups = [] + current_group = 0 + for length_group in lengths: + return_getters = [] + for size in length_group: + if sv.statically_known_equals(size, 1): # type: ignore[arg-type] + return_getters.append(lambda _: sympy.Integer(0)) + continue + + while ( + current_group < len(remaining) + and sv.size_hint(remaining[current_group]) == 1 + ): + # scroll to next group with remaining elements + current_group += 1 + + if sv.size_hint(size) > sv.size_hint(remaining[current_group]): + # need to break size in two + if not sv.statically_known_multiple_of( + size, remaining[current_group] + ): + raise CantSplit() + size1 = remaining[current_group] + size2 = FloorDiv(size, remaining[current_group]) + return_getters.append( + make_combined( + size2, + add_range(current_group, size1), + add_range(current_group + 1, size2), + ) + ) + else: + return_getters.append( + operator.itemgetter(add_range(current_group, size)) + ) + return_getters_groups.append(return_getters) + + assert all( + V.graph.sizevars.size_hint(s) == 1 for s in remaining + ), f"failed to set ranges {remaining} {lengths}" + + return new_ranges, return_getters_groups + + @classmethod + def is_compatible( + cls, groups: Iterable[sympy.Expr], lengths: List[List[sympy.Expr]] + ): + try: + cls._split_iteration_ranges(groups, lengths) + return True + except CantSplit: + return False + + def split_and_set_ranges(self, lengths: List[List[sympy.Expr]]): + """ + We may want to fuse `for i0 in s0*s1` into a tiled kernel with groups (s0, s1). + + To do this we need to split up the iteration space of i0 into something like: + for i1 in s0: + for i2 in s1: + i0 = i1*s1 + i2 + .... + + This function matches and resplits lengths to the groups of + this kernel to enable tiled + non-tiled fusions. + """ + groups = [rt.numel for rt in self.range_trees] + if not self.inside_reduction: + groups[-1] = sympy.Integer(1) + + if len(lengths) == len(self.range_trees) and all( + V.graph.sizevars.simplify(sympy_product(x) - g) == 0 + for x, g in zip(lengths, groups) + ): + return self.set_ranges(*lengths) + + new_ranges, return_getters_groups = self._split_iteration_ranges( + groups, lengths + ) + itervars = list(itertools.chain.from_iterable(self.set_ranges(*new_ranges))) + return [[fn(itervars) for fn in fns] for fns in return_getters_groups] + + def is_indirect_indexing(self, index: sympy.Expr): + # tmpX means indirect indexing + return free_symbol_startswith(index, "tmp") + + def is_broadcasted(self, index: sympy.Expr): + # Note. This may not be correct when there is indirect indexing + if self.is_indirect_indexing(index): + return False + + index_numels = [1] * len(self.numels) + for symbol in index.free_symbols: + if symbol not in self.range_tree_nodes: + # Non-iterated variables, e.g. 
strides + continue + entry = self.range_tree_nodes[symbol] # type: ignore[index] + assert isinstance(entry.parent, IterationRangesRoot) + index_numels[entry.parent.index] *= entry.length + + # If the index variables only iterate over a subset of the kernel + # numels, then it must be broadcasted. + simplify = V.graph.sizevars.simplify + return any( + simplify(idx_range) != simplify(iter_range) # type: ignore[arg-type] + for idx_range, iter_range in zip(index_numels, self.numels) + ) + + def combine_contiguous_dims(self, index: sympy.Expr, tree: IterationRangesRoot): + """ + More aggressive simplification to merge contiguous dims + """ + if isinstance(index, (sympy.Integer, sympy.Symbol)): + return index + index_vars, sizes = tree.vars_and_sizes(index) + if len(sizes) <= 1: + return index + new_sizes, reindex, prune = V.graph.sizevars._simplify_loops( + index_vars, sizes, index_prevent_reordering([index], index_vars, sizes) + ) + if new_sizes == sizes: + return index + new_index_vars = tree.construct(new_sizes) + new_index = sympy_subs(index, dict(zip(index_vars, reindex(new_index_vars)))) + return new_index + + def index_to_str(self, index: sympy.Expr) -> str: + """ + Convert an index expr to a string that can be used in triton code. + e.g. a sympy expression "s2" may actually appear as "ks1" in the triton kernel. + + Index expressions often need to be passed in as arguments to the triton kernel. + Rename_indexing and codegen_indexing keep track of the needed indices and add + new parameters to the function signature. + """ + if isinstance(index, list): + return f"[{', '.join(map(self.index_to_str, index))}]" + return texpr(self.rename_indexing(self.codegen_indexing(index))) + + def indexing( + self, + index: sympy.Expr, + *, + copy_shape=None, + dense_indexing=False, + override_mask=None, + block_ptr=False, + ) -> Union[IndexingOptions, BlockPtrOptions]: + """ + Compute the index and mask to pass to tl.load() or tl.store() + """ + index = self.simplify_indexing(index) + index = sympy_subs(index, V.graph.sizevars.precomputed_replacements) + # if simple replacements didn't get rid of floor/ceil, try full subs + if len(index.atoms(sympy.floor)) or len(index.atoms(sympy.ceiling)): + index = index.subs(V.graph.sizevars.precomputed_replacements) + # last resort, if no range vars are in the expr, hoist it + # TODO instead of trying to blindly find complicated exprs, we should hoist the + # inputs/outputs sizes and strides, but at the time indexing is generated + # kernel inputs and outputs are not set yet, we'd need a deeper refactor + # to do it this way + + if len(index.atoms(sympy.ceiling)): + for a in index.atoms(sympy.ceiling): + # for nested exprs, atoms yields top level first (?) 
+ # so if everything goes fine, lower level replacements will come up empty + symbols = a.free_symbols + if len(symbols) > 0 and all( + s.name.startswith("s") or s.name.startswith("ps") for s in symbols + ): + replacements = {a: V.graph.sizevars.lookup_precomputed_size(a)} + index = sympy_subs(index, replacements) + + index = self.simplify_indexing(index) + index_vars = index.free_symbols + has_rindex = False + + mask_vars: Set[str] = set() + for var in index_vars: + assert isinstance(var, sympy.Symbol) + has_rindex = has_rindex or var.name.startswith("r") + if override_mask: + pass + elif var.name.startswith("tmp"): + # indirect indexing + cse_var = self.cse.varname_map[var.name] + mask_vars.update(cse_var.mask_vars) + elif var.name.startswith(("s", "ps", "i", "u")): + pass + else: + # var is one of xN, yN or rN + assert var.name[0] in "xyr", var.name + mask_vars.add(f"{var.name[0]}mask") + + need_dense = ( + config.triton.dense_indexing + or dense_indexing + or self._load_mask is not None + ) and index != 0 + + have_dense = True + have_loop_vars = False + dense_mask_vars = set() + + for tree in self.active_range_trees(): + if index_vars.intersection(tree.var_list): + have_loop_vars = True + else: + have_dense = False + dense_mask_vars.add(f"{tree.prefix}mask") + + if ( + block_ptr + and config.triton.use_block_ptr + and not override_mask + and not self._load_mask + and len(mask_vars - dense_mask_vars) == 0 + and not self.is_indirect_indexing(index) + and have_loop_vars + # workaround https://github.com/openai/triton/issues/2821 + and self.index_dtype == "tl.int32" + ): + index_relative_to_xyr_index = sympy_subs( + index, {v: t.expr for v, t in self.range_tree_nodes.items()} + ) + range_trees = self.active_range_trees(reorder=True) + symbols = [t.symbol() for t in range_trees] + strides = [sympy.Wild(f"stride_{s}", exclude=symbols) for s in symbols] + offset = sympy.Wild("_offset", exclude=symbols) + m = index_relative_to_xyr_index.match(sympy_dot(symbols, strides) + offset) + # TODO(jansel): it is sometimes possible to do higher dimensional block_ptrs with + # a tl.reshape the correct block. We will miss these cases today. 
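# Standalone illustration of the stride/offset pattern match performed above:
# the index, rewritten in terms of the per-tree symbols, is matched against
# dot(symbols, strides) + offset using sympy Wild symbols (sympy_dot from
# ..utils is just this dot product). The symbol names and the example index
# below are invented.
import sympy

x0, r0 = sympy.symbols("x0 r0", integer=True)
index = 64 * x0 + r0 + 128

strides = [sympy.Wild(f"stride_{name}", exclude=[x0, r0]) for name in ("x0", "r0")]
offset = sympy.Wild("offset", exclude=[x0, r0])

match = index.match(x0 * strides[0] + r0 * strides[1] + offset)
# expected mapping: stride_x0 -> 64, stride_r0 -> 1, offset -> 128
assert match is not None and match[offset] == 128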
+ if m: + self.filter_masks(mask_vars) + return BlockPtrOptions.create( + [m[s] for s in strides], + m[offset], + range_trees, + mask_vars, # type: ignore[arg-type] + ) + + expand_str = None + index_str = self.index_to_str(index) + if isinstance(index, sympy.Integer): + expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() + index_str = f"tl.full({expand_str}, {index_str}, tl.int32)" + return IndexingOptions(index_str, set(), "None", expand_str, has_rindex) + + if need_dense and not have_dense: + expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() + index_str = f"tl.broadcast_to({index_str}, {expand_str})" + mask_vars = dense_mask_vars + elif not have_loop_vars and copy_shape: + index_str = f"tl.broadcast_to({index_str}, {copy_shape}.shape)" + mask_vars = dense_mask_vars + + if override_mask: + mask_vars = {override_mask} + + if self._load_mask: + mask_vars.add(self._load_mask) + + self.filter_masks(mask_vars) + + mask_str = " & ".join(sorted(map(str, mask_vars))) if mask_vars else "None" + return IndexingOptions(index_str, mask_vars, mask_str, expand_str, has_rindex) # type: ignore[arg-type] + + def active_range_trees(self, reorder=False): + trees = [ + t for t in self.range_trees if t.prefix != "r" or self.inside_reduction + ] + if reorder and len(trees) > 1: + count = sum(t.prefix in "xyz" for t in trees) + assert "".join(t.prefix for t in trees[:count]) == "zyx"[-count:], [ + t.prefix for t in trees[:count] + ] + trees[:count] = reversed(trees[:count]) + return trees + + def filter_masks(self, mask_vars): + for tree in self.range_trees: + # Masks are superfluous if we only have one element + if V.graph.sizevars.statically_known_equals(tree.numel, 1): # type: ignore[arg-type] + mask_vars.discard(f"{tree.prefix}mask") + continue + # Masks are superfluous if numel is a multiple of BLOCK + # (We use the fact that BLOCK is required by triton to be a power of 2) + if tree.prefix.upper() not in config.triton.max_block: + continue + max_block = config.triton.max_block[tree.prefix.upper()] + # Optional optimization: if block divides numel exactly, we will + # never need to do a masked load to handle stragglers at the end. + # It's faster to avoid masking at all. But it is sound to always + # mask. 
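# Editor's note: illustrative sketch, not part of this diff. It spells out the
# filter_masks() rule described in the comments above (and applied just below) with
# concrete integers: a bounds mask such as "xmask" can be dropped when the iteration
# size is 1, or when it is a multiple of the largest block size the autotuner may pick
# (BLOCK is always a power of 2, so every smaller candidate also divides the numel).
def mask_needed(numel: int, max_block: int) -> bool:
    if numel == 1:
        return False
    return numel % max_block != 0

print(mask_needed(1, 1024))     # False: single element, no mask
print(mask_needed(8192, 1024))  # False: every power-of-2 BLOCK <= 1024 divides 8192
print(mask_needed(1000, 1024))  # True: stragglers at the end need masking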
+ if V.graph.sizevars.statically_known_multiple_of(tree.numel, max_block): # type: ignore[arg-type] + mask_vars.discard(f"{tree.prefix}mask") + + def var_ranges(self): + return dict( + itertools.chain.from_iterable( + tree.var_ranges.items() for tree in self.range_trees + ) + ) + + def codegen_indexing(self, expr: sympy.Expr): + expr = V.graph.sizevars.simplify_with_ranges(expr, self.var_ranges()) + for sym in sorted(expr.free_symbols, key=str): + if sym in self.range_tree_nodes: + # if indexing expression is complicated, we precompute it on the host side + # and send the result as a kernel argument + replacements = {} + for ps in self.range_tree_nodes[sym].precomputed_args(): # type: ignore[index] + replacements[ps] = V.graph.sizevars.lookup_precomputed_size(ps) + if len(replacements) > 0: + self.range_tree_nodes[sym].expr = sympy_subs( # type: ignore[index] + self.range_tree_nodes[sym].expr, replacements # type: ignore[index] + ) + self.range_tree_nodes[sym].codegen() # type: ignore[index] + return expr + + @contextlib.contextmanager + def mask_loads(self, mask): + """Context manager to add an additional mask to tl.load/store""" + prior = self._load_mask + if prior: + mask = self.cse.generate(self.compute, f"{mask} & {prior}") + + self._load_mask = mask + try: + # TODO(jansel): do we need a reshape here? + yield mask + finally: + self._load_mask = prior + + def generate_assert(self, check): + return torch.version.hip is None and super().generate_assert(check) + + def load_mask(self, var): + mask = "" + mask_vars = set(var.mask_vars) + if self._load_mask: + mask_vars.add(self._load_mask) + + if mask_vars: + mask = ( + f"{next(iter(mask_vars))}" + if len(mask_vars) == 1 + else f"({' & '.join(str(v) for v in mask_vars)})" + ) + return mask + + @property + def assert_function(self) -> str: + return "tl.device_assert" + + def get_strides_of_load(self, index: sympy.Expr): + """ + This gets the stride of the index for each of the tiling variables + (technically, it does it at index 0) + + For example, if + xindex = x0 + 512*x1 + 1024*r0 + x0 = (xindex//512) + x1 = (xindex % 512) + r0 = rindex // 1024 + + this function would return + {xindex: 512, rindex: 1024} + """ + index_to_tile_indexes = {k: v.expr for k, v in self.range_tree_nodes.items()} + index_in_tile_vars = sympy_subs(index, index_to_tile_indexes) # type: ignore[arg-type] + strides = {} + for range_tree in self.range_trees: + s = sympy_index_symbol(range_tree.name) + strides[s] = sympy_subs(index_in_tile_vars, {s: 1}) - sympy_subs( + index_in_tile_vars, {s: 0} + ) + return strides + + def codegen_block_ptr( + self, name: str, var: str, indexing: BlockPtrOptions, other="" + ) -> Tuple[str, Optional[DeferredLine], str]: + advance_block_ptr = None + check = indexing.boundary_check() + if not check: + # workaround https://github.com/openai/triton/issues/2813 + other = "" + elif other: + assert other == ", other=0.0" + other = f", boundary_check={check!r}, padding_option='zero'" + else: + other = f", boundary_check={check!r}" + if ( + self.inside_reduction + and self.range_trees[-1].is_loop + and indexing.has_rindex() + ): + block_ptr = f"block_ptr{next(self.block_ptr_id)}" + self.body.writeline( + DeferredLine( + name, f"{block_ptr} = {indexing.format(var, roffset=False)}" + ) + ) + advance_block_ptr = DeferredLine( + name, + f"{block_ptr} = tl.advance({block_ptr}, {indexing.advance_roffset()})", + ) + else: + block_ptr = indexing.format(var) + return block_ptr, advance_block_ptr, other + + def codegen_block_ptr_store_line(self, name, 
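# Editor's note: illustrative sketch, not part of this diff. get_strides_of_load() above
# extracts the stride of an index with respect to each tiling variable by evaluating the
# expression at var=1 and var=0 and taking the difference; for an affine index this is
# exactly the coefficient of that variable.
import sympy

xindex, rindex = sympy.symbols("xindex rindex", integer=True)
index = 512 * xindex + 1024 * rindex + 3

strides = {s: index.subs({s: 1}) - index.subs({s: 0}) for s in (xindex, rindex)}
print(strides)  # {xindex: 512, rindex: 1024}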
indexing, block_ptr, value, other=""): + # broadcasting is not implicit for block_ptrs + value = ( + f"tl.broadcast_to({value}, {self.index_to_str(indexing.reshape_suffix)})" + ) + # drop any extra size=1 dimensions + value = triton_reshape(value, indexing.reshape_suffix, indexing.block_shape) + # workaround https://github.com/openai/triton/issues/2814 + value = f"{value}.to({triton_store_type(V.graph.get_dtype(name))})" + return f"tl.store({block_ptr}, {value}{other})" + + def load(self, name: str, index: sympy.Expr): + var = self.args.input(name) + indirect_indexing = self.is_indirect_indexing(index) + original_index = index + indexing = self.indexing(index, block_ptr=True) + has_rindex = indexing.has_rindex() + has_tmpmask = indexing.has_tmpmask() + + # Keep the variable in cache if were going to reuse it. Equiv., if any of the following hold + # 1) We are doing broadcasting + # 2) It is a non-coalesced load. The intuition is that if it's + # non-coalesced, we will likely load each element multiple times in + # practice. + # 3) It will be used later and it won't be CSE'd. Equiv., if all the following hold + # 3.1) We are in a reduction loop + # 3.2) Its not its last use + # 3.3) This load will not be lifted to the body + # + is_coalesced = any( + i == 1 for i in self.get_strides_of_load(original_index).values() + ) + if self.is_broadcasted(original_index): + ep = ", eviction_policy='evict_last'" + elif not is_coalesced: + ep = ", eviction_policy='evict_last'" + elif self.inside_reduction and self.range_trees[-1].is_loop: + if name in self.args.inplace_buffers: + names = set(self.args.inplace_buffers[name].other_names) + else: + names = {name} + last_use = len(names & self.last_usage) > 0 + evict_last = not last_use and (has_rindex or indirect_indexing) + if evict_last: + ep = ", eviction_policy='evict_last'" + else: + ep = ", eviction_policy='evict_first'" + else: + ep = "" + # "other" below is a workaround for https://github.com/openai/triton/issues/737 + # for bool, even though it's likely subject to the same bug, setting `other` leads + # to LLVM errors so we are skipping it for now + if ( + (has_tmpmask or has_rindex) + and V.graph.get_dtype(name) != torch.bool + and indexing.has_mask() + ): + other = ", other=0.0" + else: + other = "" + + advance_block_ptr = None + append_broadcast = None + if V.graph.is_unspec_arg(name): + line = var + else: + if isinstance(indexing, BlockPtrOptions): + block_ptr, advance_block_ptr, other = self.codegen_block_ptr( + name, var, indexing, other + ) + line = f"tl.load({block_ptr}{other}{ep})" + # add needed size=1 dimensions + line = triton_reshape( + line, indexing.block_shape, indexing.reshape_suffix + ) + elif isinstance(original_index, sympy.Integer): + line = f"tl.load({var} + ({original_index}))" + append_broadcast = indexing.expand_str + else: + line = f"tl.load({var} + ({indexing.index_str}), {indexing.mask_str}{ep}{other})" + + dtype = V.graph.get_dtype(name) + if dtype in (torch.float16, torch.bfloat16): + line += ".to(tl.float32)" + if dtype == torch.bool and torch.version.hip is None: + # Workaround for https://github.com/openai/triton/issues/2151 + # tl.load returns int8 when loading from pointer to int1 + # NOTE: Currently causes hangs on bool UTs for ROCm + line += ".to(tl.int1)" + + if has_tmpmask: + # Masked loads must come after the mask is computed + load_buffer = self.compute + elif ( + self.inside_reduction + and self.range_trees[-1].is_loop + and not indirect_indexing + and not has_rindex + ): + # can lift a common load outside 
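# Editor's note: illustrative sketch, not part of this diff. It condenses the
# eviction-policy decision in load() above into a standalone function; the flag names
# are made up, only the decision order mirrors the original.
def eviction_policy(is_broadcasted, is_coalesced, in_reduction_loop,
                    is_last_use, has_rindex_or_indirect):
    if is_broadcasted:
        return "evict_last"      # value is re-read across program instances
    if not is_coalesced:
        return "evict_last"      # scattered accesses: each element likely loaded again
    if in_reduction_loop:
        if not is_last_use and has_rindex_or_indirect:
            return "evict_last"  # needed again in a later reduction-loop iteration
        return "evict_first"     # streaming data through the reduction loop
    return None                  # no hint: default cache behaviour

print(eviction_policy(False, True, True, False, True))    # evict_last
print(eviction_policy(False, True, False, False, False))  # None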
of reduction loop + # One exception is when this is an indirect_load. + load_buffer = self.body + else: + load_buffer = self.loads + + result_var = self.cse.generate(load_buffer, line) + assert isinstance(result_var, TritonCSEVariable) + result_var.mask_vars = indexing.mask_vars # type: ignore[assignment] + + if append_broadcast: + line = f"tl.broadcast_to({result_var}, {append_broadcast})" + result_var = self.cse.generate(load_buffer, line) + + if advance_block_ptr: + load_buffer.writeline(advance_block_ptr) + + if not self.inside_reduction or (not indexing.has_rmask() and not has_rindex): + self.outside_loop_vars.add(result_var) + + return result_var + + def store( + self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None + ) -> None: + var = self.args.output(name) + original_index = index + indexing = self.indexing(index, dense_indexing=True, block_ptr=mode is None) + + # Guard against write-after-read corruption in triton. + # See # https://github.com/openai/triton/issues/1615 + # This triton bug means that a load which is broadcasted over multiple + # warps may see the result of a store that happens later in the triton + # program. The workaround is to add a barrier before storing, which + # enforces that all warps have already read the data. + is_inplace = name in self.args.inplace_buffers + is_broadcasted = self.is_broadcasted(original_index) + if is_inplace and is_broadcasted: + self.stores.writeline(DeferredLine(name, "tl.debug_barrier()")) + + advance_block_ptr = None + if isinstance(indexing, BlockPtrOptions): + block_ptr, advance_block_ptr, other = self.codegen_block_ptr( + name, var, indexing + ) + # block_ptr stores don't do implicit casting + line = self.codegen_block_ptr_store_line( + name, indexing, block_ptr, value, other + ) + elif mode is None: + line = f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})" + elif mode == "atomic_add": + line = f"tl.atomic_add({var} + ({indexing.index_str}), {value}, {indexing.mask_str})" + else: + raise NotImplementedError(f"store mode={mode}") + self.stores.writeline(DeferredLine(name, line)) + if advance_block_ptr: + self.stores.writeline(advance_block_ptr) + + if not self.inside_reduction: + self.outside_loop_vars.add(value) + + def bucketize( + self, + values: CSEVariable, + offsets_name: str, + offsets_size: sympy.Expr, + indexing_dtype: torch.dtype, + right: bool, + ) -> CSEVariable: + """ + See [Note: Inductor bucketize op] + """ + + # Triton performance for bucketize_binary_search is much better when the number + # of threads equals the number of elements. + # If we're trying to use a bucketize kernel, we should make sure that an + # autotuning config with num_elements_per_warp=32 exists. 
+ self.autotune_hints.add(AutotuneHint.ELEMENTS_PER_WARP_32) + + offsets_ptr = self.args.input(offsets_name) + block_size = self.dense_size_str() + offsets_size_str = self.index_to_str(offsets_size) + + if indexing_dtype == torch.int32: + triton_dtype = "tl.int32" + elif indexing_dtype == torch.int64: + triton_dtype = "tl.int64" + else: + raise NotImplementedError( + "Bucketize only supports indexing with int32 and int64" + ) + + result = self.cse.generate( + self.compute, + f"triton_helpers.bucketize_binary_search({values}, {offsets_ptr}, {triton_dtype}, {right}, {offsets_size_str}, {block_size})", # noqa: B950 line too long + ) + + return result + + def reduction_resize(self, value): + ndims = self.triton_tensor_ndim() + if ndims == 1: + return f"triton_helpers.promote_to_tensor({value})" + + sizes = [":"] * ndims + sizes[-1] = "None" + return f"{value}[{', '.join(sizes)}]" + + @staticmethod + def _map_tuple_or_scalar(fn, value): + if isinstance(value, tuple): + return tuple(map(fn, value)) + return fn(value) + + def reduction( + self, + dtype: torch.dtype, + src_dtype: torch.dtype, + reduction_type: ReductionType, + value: Union[CSEVariable, Tuple[CSEVariable, ...]], + ) -> Union[CSEVariable, Tuple[CSEVariable, ...]]: + assert self.inside_reduction + masks = {f"{tree.prefix}mask" for tree in self.range_trees} + self.filter_masks(masks) + masks = sorted(masks) + if self._load_mask: + masks.append(self._load_mask) + reduction_range_prefix = self.range_trees[-1].prefix + + # Say we have + # tmp0 = ops.constant(1, torch.int64) + # tmp1 = ops.reduction(torch.int64, torch.int64, "sum", tmp0) + # tmp0 in the triton code is either a scalar, or single-element tensor + # so if we emit tl.sum directly, it will only give 1 instead of RBLOCK * 1 + # To avoid this, we broadcast to the expected shape first. 
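# Editor's note: illustrative sketch, not part of this diff, and not a restatement of the
# helper's exact boundary convention. It just shows the operation the generated
# triton_helpers.bucketize_binary_search call performs conceptually: mapping each value to
# a bucket index by binary search over sorted boundaries, as the stdlib bisect module does,
# with `right` choosing which side of an exact match to take.
import bisect

def bucketize_sketch(values, offsets, right):
    find = bisect.bisect_right if right else bisect.bisect_left
    return [find(offsets, v) for v in values]

offsets = [1, 3, 5]
print(bucketize_sketch([0, 1, 4, 7], offsets, right=False))  # [0, 0, 2, 3]
print(bucketize_sketch([0, 1, 4, 7], offsets, right=True))   # [0, 1, 2, 3]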
+ dense_size_str = self.dense_size_str() + value = self._map_tuple_or_scalar( + lambda v: self.cse.generate( + self.compute, f"tl.broadcast_to({v}, {dense_size_str})" + ), + value, + ) + + dim: int + root_op: str + + def final_reduction(value): + use_helper = reduction_type in {"any", "max", "min", "prod"} + module = "triton_helpers" if use_helper else "tl" + if reduction_type in {"max", "min"}: + return self.reduction_resize( + f"{module}.{reduction_type}2({value}, {dim})" + ) + return self.reduction_resize(f"{module}.{reduction_type}({value}, {dim})") + + def final_argreduce(buffer, result_var, value, index): + buffer.splice( + f"""\ + _, {result_var}_tmp = triton_helpers.{root_op}_with_index({value}, {index}, {dim}) + {result_var} = {self.reduction_resize(f'{result_var}_tmp')} + """ + ) + + cache_key = (src_dtype, reduction_type, value) + if cache_key in self.cse.reduction_cache: + return self.cse.reduction_cache[cache_key] + + dim = self.triton_tensor_ndim() - 1 + acc_type = triton_acc_type(src_dtype) + result_var: Any = self.cse.newvar() + result_var.mask_vars = {var for var in masks if var[0] != "r"} + cond = " & ".join(masks) + + def where_cond(tval, fval): + if not cond: + return tval + return TritonKernelOverrides.where(cond, tval, fval) + + if self.persistent_reduction: + default = ir.Reduction.default_value(reduction_type, src_dtype) + default = self._map_tuple_or_scalar(triton_constant, default) + + def _mask_value(value, default): + return self.cse.generate(self.compute, where_cond(value, default)) + + if isinstance(value, tuple): + masked_value = [_mask_value(v, d) for v, d in zip(value, default)] + else: + masked_value = _mask_value(value, default) + + if reduction_type in {"argmax", "argmin"}: + accumulator_index = str( + self.cse.generate( + self.compute, + f"tl.broadcast_to({reduction_range_prefix}index, {masked_value}.shape)", + ) + ) + root_op = {"argmax": "max", "argmin": "min"}[reduction_type] + final_argreduce( + self.compute, result_var, masked_value, accumulator_index + ) + elif reduction_type == "welford_reduce": + # For persistent reductions, don't bother with + # welford's algorithm since it uses more registers, and + # taking two reductions doesn't increase memory usage. 
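# Editor's note: illustrative sketch, not part of this diff. For persistent reductions the
# welford_reduce branch that follows skips Welford's algorithm and computes the same
# (mean, m2, weight) triple with two plain sums; this pure-Python version does the same.
def two_pass_welford(xs):
    n = len(xs)
    mean = sum(xs) / n
    m2 = sum((x - mean) ** 2 for x in xs)  # sum of squared deviations from the mean
    return mean, m2, n

print(two_pass_welford([1.0, 2.0, 3.0, 4.0]))  # (2.5, 5.0, 4)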
+ sum_ = ops.reduction(dtype, dtype, "sum", value) + self.inside_reduction = False + rnumel = ops.index_expr(self.numels[-1], dtype) + mean = ops.truediv(sum_, rnumel) + + self.inside_reduction = True + dx = ops.sub(value, mean) + dx2 = ops.mul(dx, dx) + m2 = ops.reduction(dtype, dtype, "sum", dx2) + result_var = (mean, m2, rnumel) + elif reduction_type == "welford_combine": + mean, m2, weight = masked_value + welford = f"triton_helpers.welford({mean}, {m2}, {weight}, {dim})" + mean, m2, weight = (self.cse.newvar() for _ in range(3)) + self.compute.writeline(f"{mean}, {m2}, {weight} = {welford}") + + result_var = tuple( + self.cse.generate(self.compute, self.reduction_resize(var_name)) + for var_name in (mean, m2, weight) + ) + else: + result_var = self.cse.generate( + self.compute, final_reduction(masked_value) + ) + else: + accumulator = f"_{result_var}" + default = ir.Reduction.default_accumulator(reduction_type, src_dtype) + default = self._map_tuple_or_scalar(triton_constant, default) + if not isinstance(default, tuple): + self.body.writeline( + f"{accumulator} = tl.full({self.dense_size_str()}, {default}, {acc_type})" + ) + + if reduction_type in {"argmax", "argmin"}: + accumulator_index = f"_{result_var}_index" + long_max = torch.iinfo(torch.int64).max + self.body.writeline( + f"{accumulator_index} = tl.full({self.dense_size_str()}, {long_max}, tl.int64)" + ) + root_op = {"argmax": "max", "argmin": "min"}[reduction_type] + + self.compute.splice( + f"""\ + {accumulator}_next, {accumulator_index}_next = triton_helpers.{root_op}imum_with_index( + {accumulator}, {accumulator_index}, {value}, {reduction_range_prefix}index + ) + {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} + {accumulator_index} = {where_cond(f'{accumulator_index}_next', accumulator_index)} + """ + ) + final_argreduce(self.suffix, result_var, accumulator, accumulator_index) + elif is_welford_reduction(reduction_type): + accumulator = f"{result_var}_mean" + accumulator_m2 = f"{result_var}_m2" + accumulator_weight = f"{result_var}_weight" + self.body.writeline( + f"{accumulator} = tl.zeros({self.dense_size_str()}, {acc_type})" + ) + self.body.writeline( + f"{accumulator_m2} = tl.zeros({self.dense_size_str()}, {acc_type})" + ) + self.body.writeline( + f"{accumulator_weight} = tl.zeros({self.dense_size_str()}, {acc_type})" + ) + + if reduction_type == "welford_combine": + mean, m2, weight = value + self.compute.splice( + f"""\ + {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_combine( + {accumulator}, {accumulator_m2}, {accumulator_weight}, + {mean}, {m2}, {weight} + ) + """ + ) + else: + assert reduction_type == "welford_reduce" + self.compute.splice( + f"""\ + {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_reduce( + {value}, {accumulator}, {accumulator_m2}, {accumulator_weight}, roffset == 0 + ) + """ + ) + + self.compute.splice( + f"""\ + {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} + {accumulator_m2} = {where_cond(f'{accumulator_m2}_next', accumulator_m2)} + {accumulator_weight} = {where_cond(f'{accumulator_weight}_next', accumulator_weight)} + """ + ) + + result_mean = result_var + result_m2 = self.cse.newvar() + result_weight = self.cse.newvar() + self.suffix.splice( + f"""\ + {result_mean}_tmp, {result_m2}_tmp, {result_weight}_tmp = triton_helpers.welford( + {accumulator}, {accumulator_m2}, {accumulator_weight}, {dim} + ) + {result_mean} = {self.reduction_resize(f'{result_mean}_tmp')} + 
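# Editor's note: illustrative sketch, not part of this diff. It shows the standard
# parallel-variance combine step (Chan et al.) that the triton_helpers.welford_combine
# call above is based on: two partial (mean, m2, weight) accumulators merged into one.
def welford_combine(a, b):
    mean_a, m2_a, n_a = a
    mean_b, m2_b, n_b = b
    n = n_a + n_b
    delta = mean_b - mean_a
    mean = mean_a + delta * n_b / n
    m2 = m2_a + m2_b + delta * delta * n_a * n_b / n
    return mean, m2, n

left = (2.5, 5.0, 4)   # stats of [1, 2, 3, 4]
right = (6.5, 5.0, 4)  # stats of [5, 6, 7, 8]
print(welford_combine(left, right))  # (4.5, 42.0, 8): stats of [1..8]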
{result_m2} = {self.reduction_resize(f'{result_m2}_tmp')} + {result_weight} = {self.reduction_resize(f'{result_weight}_tmp')} + """ + ) + result_var = result_mean, result_m2, result_weight + else: + combine_fn = ir.get_reduction_combine_fn(reduction_type, src_dtype) + updated = combine_fn(accumulator, value) + self.compute.writeline( + f"{accumulator} = {where_cond(updated, accumulator)}" + ) + + if src_dtype == torch.bool: + # This is only really used for aten.any. It changes the + # final reduction of a non-persistent reduction from + # tmp5 = triton_helpers.max(_tmp5, 1)[:, None] + # to + # tmp5 = triton_helpers.max(_tmp5.to(tl.int8), 1)[:, None].to(tl.int1) + # which is needed because tl.reduce doesn't support tl.int1 + accumulator = f"{accumulator}.to(tl.int8)" + result_type = triton_compute_type(dtype) + self.suffix.writeline( + f"{result_var} = {final_reduction(accumulator)}.to({result_type})" + ) + else: + self.suffix.writeline( + f"{result_var} = {final_reduction(accumulator)}" + ) + + self.cse.reduction_cache[cache_key] = result_var + + if isinstance(result_var, tuple): + self.outside_loop_vars |= set(result_var) + else: + self.outside_loop_vars.add(result_var) + + return result_var + + def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable): + assert self.inside_reduction + self.inside_reduction = False + indexing = self.indexing(index, block_ptr=True) + self.inside_reduction = True + var = self.args.output(name) + + if isinstance(indexing, BlockPtrOptions): + self.suffix.writeline( + DeferredLine( + name, + self.codegen_block_ptr_store_line( + name, + indexing, + indexing.format(var), + value, + f", boundary_check={indexing.boundary_check()!r}", + ), + ) + ) + else: + assert isinstance(indexing, IndexingOptions) + self.suffix.writeline( + DeferredLine( + name, + f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})", + ) + ) + + def _lift_helper(self, fn, num_args) -> str: + # Lift IR function into a triton function in the global namespace + helper = IndentedBuffer() + helper.writeline("@triton.jit") + args = [f"arg{n}" for n in range(num_args)] + signature = ", ".join(args) + helper.writeline(f"def {{name}}({signature}):") + + cse = CSE(prefix="", suffix="") + overrides = TritonOverrides(V.MockHandler()) + + class CSEProxy: + def __getattr__(self, name: str) -> Callable[..., CSEVariable]: + def inner(*args, **kwargs): + return cse.generate( + helper, + getattr(overrides, name)(*args, **kwargs), + ) + + return inner + + with helper.indent(), V.set_ops_handler(CSEProxy()): + outputs = fn(*args) + helper.writeline(f"return {outputs}") + + return self.helper_functions.add(helper.getvalue()) + + def scan( + self, + dtype: torch.dtype, + combine_fn: Callable[[CSEVariable, CSEVariable], CSEVariable], + value: CSEVariable, + init: int, + ) -> CSEVariable: + assert self.inside_reduction + masks = {f"{tree.prefix}mask" for tree in self.range_trees} + self.filter_masks(masks) + masks = sorted(masks) + if self._load_mask: + masks.append(self._load_mask) + reduction_range_prefix = self.range_trees[-1].prefix + + value = self.cse.generate( + self.compute, f"tl.broadcast_to({value}, {self.dense_size_str()})" + ) + + default = triton_constant(init) + dim = self.triton_tensor_ndim() - 1 + acc_type = triton_acc_type(dtype) + cond = " & ".join(masks) + + combine_helper_fn = self._lift_helper(combine_fn, 2) + + def where_cond(value): + if not cond: + return value + default_tensor = self.cse.generate( + self.body, + f"tl.full({[1] * 
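# Editor's note: illustrative sketch, not part of this diff. scan() (continuing below)
# lowers to tl.associative_scan over a combine function lifted into a @triton.jit helper
# by _lift_helper() above; the result is the same inclusive scan that itertools.accumulate
# computes for any associative operator.
from itertools import accumulate

combine = lambda a, b: a + b  # any associative op (sum, prod, max, ...)
values = [3, 1, 4, 1, 5]
print(list(accumulate(values, combine)))  # [3, 4, 8, 9, 14]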
self.triton_tensor_ndim()}, {default}, {triton_compute_type(dtype)})", + ) + return self.cse.generate( + self.compute, f"tl.where({cond}, {value}, {default_tensor})" + ) + + if self.persistent_reduction: + masked_value = where_cond(value) + result_var = self.cse.generate( + self.compute, + f"tl.associative_scan({masked_value}, {dim}, {combine_helper_fn})", + ) + else: + accumulator = self.cse.newvar() + reduced_size = self.dense_size_list() + reduced_size[-1] = "1" + reduced_size = f"[{', '.join(reduced_size)}]" + + self.body.writeline( + f"{accumulator} = tl.full({reduced_size}, {default}, {acc_type})" + ) + + masked_value = where_cond(value) + partial_reduce = self.cse.generate( + self.compute, + self.reduction_resize( + f"tl.reduce({value}, {dim}, {combine_helper_fn})" + ), + ) + acc_next = combine_fn(accumulator, partial_reduce) + partial_scan = self.cse.generate( + self.compute, + f"tl.associative_scan({masked_value}, {dim}, {combine_helper_fn})", + ) + result_var = self.cse.generate( + self.compute, combine_fn(accumulator, partial_scan) + ) + self.compute.writeline(f"{accumulator} = {acc_next}") + + result_var.mask_vars = masks # type: ignore[attr-defined] + return result_var + + def codegen_body(self): + """ + Concat output code from index_code, loads, compute, stores, + suffix into self.body. + + For pointwise kernels, this is called just once at the end. + + For reduction kernels, this generates a loop over the reduction + axis. + """ + if not ( + self.indexing_code + or self.loads + or self.stores + or self.compute + or self.suffix + ): + return + + if self.inside_reduction and self.range_trees[-1].is_loop: + self.body.writeline("for roffset in range(0, rnumel, RBLOCK):") + with self.body.indent(): + # last range tree is always reduction + self.range_trees[-1].codegen_header(self.body) + self.body.splice(self.indexing_code) + self.body.splice(self.loads) + self.body.splice(self.compute) + self.body.splice(self.stores) + + # invalidate any caches that came from inside the reduction loop + self.cse.invalidate(self.outside_loop_vars) + self.range_trees[-1].cache_clear() + else: + self.body.splice(self.indexing_code) + self.body.splice(self.loads) + self.body.splice(self.compute) + self.body.splice(self.stores) + self.body.splice(self.suffix) + self.indexing_code.clear() + self.loads.clear() + self.compute.clear() + self.stores.clear() + self.suffix.clear() + + def codegen_kernel_benchmark(self, num_gb, grid=None): + result = IndentedBuffer() + argdefs, call_args, signature = self.args.python_argdefs() + + result.writelines(["", "", "def get_args():"]) + with result.indent(): + name_cnt = itertools.count() + var_names = [] + for arg_name, arg_sig in zip(call_args, signature): + var_name = f"arg_{next(name_cnt)}" + buf = V.graph.get_buffer(arg_name) + if buf: + result.writeline( + f"{var_name} = rand_strided({V.graph.sizevars.size_hints(buf.get_size())}, {V.graph.sizevars.size_hints(buf.get_stride())}, device='{buf.get_device()}', dtype={buf.get_dtype()})" # noqa: B950 line too long + ) + elif arg_name in V.graph.constants: + # note that random seed is put in V.graph.constants + const_tensor = V.graph.constants[arg_name] + result.writeline( + f"{var_name} = rand_strided({V.graph.sizevars.size_hints(const_tensor.size())}, {V.graph.sizevars.size_hints(const_tensor.stride())}, device='{const_tensor.device}', dtype={const_tensor.dtype})" # type: ignore[arg-type] # noqa: B950 line too long + ) + elif isinstance(arg_sig, SizeArg): + symval_hint = V.graph.sizevars.size_hint(arg_sig.expr) + + 
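# Editor's note: illustrative sketch, not part of this diff. For looped (non-persistent)
# reductions the scan above is computed block by block: each RBLOCK chunk is scanned
# locally, the running carry from earlier chunks is combined in, and the carry is advanced
# by the chunk's total. This sketch does the same in pure Python.
from itertools import accumulate
from functools import reduce

def blocked_scan(values, block, combine):
    out, carry = [], None
    for start in range(0, len(values), block):
        chunk = values[start:start + block]
        local = list(accumulate(chunk, combine))            # per-block tl.associative_scan
        if carry is not None:
            local = [combine(carry, v) for v in local]      # fold in carry from prior blocks
            carry = combine(carry, reduce(combine, chunk))  # advance carry (per-block tl.reduce)
        else:
            carry = reduce(combine, chunk)
        out.extend(local)
    return out

vals = [3, 1, 4, 1, 5, 9, 2, 6]
assert blocked_scan(vals, 3, lambda a, b: a + b) == list(accumulate(vals))
print(blocked_scan(vals, 3, lambda a, b: a + b))  # [3, 4, 8, 9, 14, 23, 25, 31]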
# Force the seed_offset to be 0 so calls to the same kernel + # using different seed offset will have the same benchmark harness. + # We can dedup kernel definitions in this case. + if "seed_offset" in arg_sig.name: + symval_hint = 0 + result.writeline(f"{var_name} = {symval_hint}") + else: + raise KeyError( + f"Don't find the buffer or const tensor for {arg_name}" + ) + var_names.append(var_name) + result.writeline(f"return {', '.join(var_names)},") + + result.writelines(["\n", "\n", "def call(args):"]) + if grid is None: + grid = [] + extra_args = [] + extra_args_str = None + for tree in self.active_range_trees(): + expr = pexpr(V.graph.sizevars.size_hint(tree.numel)) + extra_args.append(expr) + if tree.prefix != "r": + grid.append(expr) + if self.need_numel_args(): + extra_args_str = ", ".join(map(str, extra_args)) + ", " + else: + extra_args_str = "" + grid_arg = f"{extra_args_str}grid=grid({', '.join(grid)})" + else: + grid_arg = f"grid={grid}" + index = V.graph.scheduler.current_device.index + with result.indent(): + result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") + with result.indent(): + result.writeline( + V.graph.device_ops.set_device(index) + ) # no-op to ensure context + stream_name = f"stream{index}" + result.writeline(f"{stream_name} = get_raw_stream({index})") + result.writeline( + f"{str(Placeholder.KERNEL_NAME)}.run(*args, {grid_arg}, stream={stream_name})" + ) + + # benchmark all configs + result.writelines(["\n", "\n", "def benchmark_all_configs(args):"]) + with result.indent(): + result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") + with result.indent(): + result.writeline( + V.graph.device_ops.set_device(index) + ) # no-op to ensure context + result.writeline( + f"return {str(Placeholder.KERNEL_NAME)}.benchmark_all_configs(*args, {grid_arg})" + ) + + result.writelines(["\n", "\n", "if __name__ == '__main__':"]) + with result.indent(): + result.writeline("from triton.testing import do_bench") + result.writeline("") + + result.writeline("args = get_args()") + result.writeline( + "ms = do_bench(lambda: call(args), rep=40, fast_flush=True)" + ) + result.writeline(f"num_gb = {num_gb}") + result.writeline("gb_per_s = num_gb / (ms / 1e3)") + result.writeline( + 'print(f"{ms:.3f}ms {num_gb:.3f}GB {gb_per_s:.2f}GB/s")' + ) + + return result + + def imports_for_benchmark_kernel(self): + return textwrap.dedent( + """ + from torch._dynamo.testing import rand_strided + {} + import torch + from torch._inductor.triton_heuristics import grid, split_scan_grid + """.format( + V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") + ) + ) + + def estimate_kernel_num_bytes(self): + """ + Try the best to estimate the total size (in bytes) of the + kernel's inputs and outputs, which is used for estimating the memory + throughput of this kernel. This information is used for checking how + far we are from the peak memory bandwidth. It's important that + we want to avoid overestimating the sizes of the inputs and outputs, + because it can wrongfully give us a very large memory traffic value, + which may be even larger than the theoretical bandwidth and thus + become very misleading. This is particularly problematic for cases + where we slice some inputs. In those cases, we should only count + the size of the "slices" instead of the original inputs, because + only the slices contribute to the real memory traffic. 
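# Editor's note: illustrative sketch, not part of this diff, just the arithmetic that the
# generated benchmark harness above prints: bytes moved divided by runtime gives the
# achieved memory bandwidth, for comparison against the device's peak. The numbers are
# hypothetical.
ms = 0.125                      # hypothetical do_bench result, in milliseconds
num_gb = 0.256                  # hypothetical estimate_kernel_num_bytes() / 1e9
gb_per_s = num_gb / (ms / 1e3)  # 0.256 GB moved in 125 microseconds
print(f"{ms:.3f}ms {num_gb:.3f}GB {gb_per_s:.2f}GB/s")  # 0.125ms 0.256GB 2048.00GB/s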
+ """ + nbytes = [] + ninplace_args = len(unique(self.args.inplace_buffers.values())) + _, call_args, _ = self.args.python_argdefs() + + # For pointwise and reduction kernels, this is the upper-bound numels + # for the output buffer. + # FIXME: This is not exactly right for cases like below: + # def foo(tensor0, tensor1): + # x0 = narrow(tensor0) + # return cat(x0, tensor1) + # For this example, we will end up overestimate the size for the + # slice s0. Potentially, we could have precise inputs information + # if we maintained the original inputs of the Pointwise kernel created + # for the "cat". However, I think it might be a bit overwhelming that + # we add such complexity only for handling some particular cases for + # benchmarking. + out_numel = V.graph.sizevars.size_hint(sympy_product(self.numels)) + for i, arg in enumerate(call_args): + # "buf" may be narrowed. In this case, the number of memory accesses + # should be estimated based on the reinterpreted layout. + # On the other hand, buf may be broadcasted. In this case, + # counting the size of the underline storage would give us + # a better estimation in terms of memory accesses. + if arg not in self.buf_accesses: + nbytes.append(0) + continue + arg_numel = V.graph.get_numel(arg) + buf_size = V.graph.sizevars.size_hint(arg_numel) + if buf_size > out_numel: + # This arg points to a buf that has been sliced. + # We need to count each individual slice to have + # a better estimation. + indices: Set[Any] = set() + no_index_dep_count = 0 + for dep in self.buf_accesses[arg]: + if isinstance(dep, (StarDep, WeakDep)): + indices.add(f"no_index_dep_{no_index_dep_count}") + no_index_dep_count += 1 + else: + indices.add(dep.index) + numel = len(indices) * out_numel + else: + numel = buf_size + dtype = V.graph.get_dtype(arg) + dtype_size = get_dtype_size(dtype) + nbytes.append(numel * dtype_size * (1 + int(i < ninplace_args))) + return sum(nbytes) + + def _get_heuristic(self): + if self.persistent_reduction: + assert self.inside_reduction + return "persistent_reduction" + elif self.inside_reduction: + return "reduction" + return "pointwise" + + def codegen_kernel(self, name=None): + code = IndentedBuffer() + + size_hints = [] + for numel in self.numels: + numel_hint = V.graph.sizevars.symbolic_hint(numel) + if not isinstance(numel_hint, (int, sympy.Integer)): + # This default heuristic hint was picked carefully: it is + # large, to ensure that we don't shrink the block size (since + # if you don't have many elements, it'd be wasteful to pick a + # large block size). Since we don't know how many elements we + # might have, we should be OK with some inefficiency to make + # sure we handle the large case well. 8192 is the largest + # block size we support, so we pick that. + # + # If we have a better hint for unbacked SymInts (e.g., because + # a user told us, or we are tracking upper bounds) we could + # use that here. 
+ size_hint = 8192 + else: + size_hint = next_power_of_2(int(numel_hint)) + size_hints.append(size_hint) + + if not self.inside_reduction: + size_hints.pop() + + heuristics = self._get_heuristic() + + if name is None: + code.splice(gen_common_triton_imports()) + + if config.benchmark_kernel: + code.splice(self.imports_for_benchmark_kernel()) + + argdefs, _, signature = self.args.python_argdefs() + # maps actual expression to SizeArg if it is in sizevars replacements + for i, arg in enumerate(signature): + if isinstance(arg, SizeArg): + # mypy is unhappy about the sympy.Expr + # type for the key of the dict below + symbol = cast(sympy.Symbol, arg.expr) + if symbol in V.graph.sizevars.inv_precomputed_replacements: + signature[i] = SizeArg( + arg.name, V.graph.sizevars.inv_precomputed_replacements[symbol] + ) + + mutated_args = set() + for mutation in self.mutations: + if mutation in self.args.input_buffers: + mutated_args.add(self.args.input_buffers[mutation]) + if ( + mutation in self.args.inplace_buffers + and mutation not in V.graph.removed_buffers + and mutation not in self.removed_buffers + ): + mutated_args.add(self.args.inplace_buffers[mutation].inner_name) + if mutation in self.args.output_buffers: + mutated_args.add(self.args.output_buffers[mutation]) + mutated_args = sorted(mutated_args) + + triton_meta_signature = signature_to_meta( + signature, size_dtype=self.index_dtype + ) + triton_meta = { + "signature": triton_meta_signature, + "device": V.graph.scheduler.current_device.index, + "device_type": V.graph.scheduler.current_device.type, + "constants": {}, + } + + inductor_meta = { + "autotune_hints": set(self.autotune_hints), + "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), + "mutated_arg_names": mutated_args, + "no_x_dim": self.no_x_dim, + "backend_hash": torch.utils._triton.triton_hash_with_backend(), + } + num_gb = None + if config.benchmark_kernel or config.profile_bandwidth: + num_gb = self.estimate_kernel_num_bytes() / 1e9 + inductor_meta["kernel_num_gb"] = num_gb + + for tree in self.active_range_trees(): + sizearg = SizeArg(f"{tree.prefix}numel", tree.numel) + signature.append(sizearg) + triton_meta_signature[len(argdefs)] = signature_of( + sizearg, size_dtype=self.index_dtype + ) + argdefs.append(f"{tree.prefix}numel") + # constexpr version causes issues, see + # https://github.com/pytorch/torchdynamo/pull/1362 + # triton_meta["constants"][len(argdefs)] = V.graph.sizevars.size_hint( + # tree.numel + # ) + # argdefs.append(f"{tree.prefix}numel: tl.constexpr") + triton_meta["configs"] = [config_of(signature)] + + # Triton compiler includes equal_to_1 args into constants even + # when they are not constexpr. otherwise there may be a segfault + # during launching the Inductor-compiled Triton kernel. 
+ # https://github.com/pytorch/pytorch/issues/120478#issuecomment-1962822307 + # https://github.com/openai/triton/blob/231efe9ed2d200be0f69a07c298e4342b08efe3d/python/triton/runtime/jit.py#L384 + for arg_num in triton_meta["configs"][0].equal_to_1: # type: ignore[index] + triton_meta["constants"][arg_num] = 1 # type: ignore[index] + + self.triton_meta = triton_meta + + for tree in self.range_trees: + if tree.prefix == "r" and self.persistent_reduction: + # RBLOCK for persistent_reduction is defined in codegen_static_numels + continue + if tree.tensor_dim is None: + continue + argdefs.append(f"{tree.prefix.upper()}BLOCK : tl.constexpr") + + self.codegen_body() + + for helper in self.helper_functions: + code.writeline("") + code.splice(helper) + + if self.inside_reduction: + reduction_hint = self.reduction_hint + heuristics_line = f""" + @triton_heuristics.{heuristics}( + size_hints={size_hints!r}, + reduction_hint={reduction_hint}, + filename=__file__, + triton_meta={triton_meta!r}, + inductor_meta={inductor_meta!r} + ) + @triton.jit + """ + else: + tile_hint = "" + if len(size_hints) == 2: + if len(signature) == 4: # input, output and 2 args + tile_hint = "tile_hint=TileHint.SQUARE," + else: + tile_hint = "tile_hint=TileHint.DEFAULT," + heuristics_line = f""" + @triton_heuristics.{heuristics}( + size_hints={size_hints!r}, {tile_hint} + filename=__file__, + triton_meta={triton_meta!r}, + inductor_meta={inductor_meta!r}, + min_elem_per_thread={self.min_elem_per_thread} + ) + @triton.jit + """ + code.splice(heuristics_line) + code.writeline( + f"def {name or str(Placeholder.KERNEL_NAME)}({', '.join(argdefs)}):" + ) + with code.indent(): + self.codegen_static_numels(code) + for old, new in self.args.aliases(): + code.writeline(f"{old} = {new}") + code.splice(self.body) + + if config.benchmark_kernel: + code.splice(self.codegen_kernel_benchmark(num_gb)) + + return code.getvalue() + + def codegen_static_numels(self, code): + """ + We get a small speedup from hard coding numels if they are static. + + This code stomps on the passed-in values by writing an constant to the top of the kernel. + + In a kernel like: + def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): + + We would add + xnumel = 4096 + rnumel = 768 + + After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes + a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream + knows that its a static numel, as that you just plop a constant into the kernel. 
+ """ + for tree in self.range_trees: + if tree.prefix != "r" or self.inside_reduction: + simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) + if isinstance(simplified_tree_numel, (sympy.Integer, int)): + code.writeline(f"{tree.prefix}numel = {int(simplified_tree_numel)}") + + if tree.prefix == "r" and self.persistent_reduction: + simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) + if isinstance(simplified_tree_numel, (sympy.Integer, int)): + val = int(simplified_tree_numel) + else: + continue + val = next_power_of_2(val) + code.writeline(f"RBLOCK: tl.constexpr = {val}") + + if tree.prefix == "x" and self.no_x_dim: + code.writeline("XBLOCK: tl.constexpr = 1") + + def triton_tensor_ndim(self): + return sum(int(tree.tensor_dim is not None) for tree in self.range_trees) + + def indexing_size_str(self, i): + sizes = ["None"] * self.triton_tensor_ndim() + sizes[i] = ":" + return f"[{', '.join(sizes)}]" + + def dense_size_list(self) -> List[str]: + sizes = ["1"] * self.triton_tensor_ndim() + for tree in self.range_trees: + if tree.tensor_dim is None: + continue + + if tree.prefix != "r" or self.inside_reduction: + sizes[tree.tensor_dim] = f"{tree.prefix.upper()}BLOCK" + return sizes + + def dense_size_str(self): + sizes = self.dense_size_list() + return f"[{', '.join(sizes)}]" + + def _get_grid_fn(self): + return "grid" + + def add_numel_to_call_args_and_grid(self, name, call_args, grid): + # TODO(jansel): if there are constants, we shouldn't bother passing them as args + for tree in self.range_trees: + if isinstance(tree.numel, (sympy.Integer, sympy.Symbol)): + expr = tree.numel + else: + expr = V.graph.wrapper_code.generate_numel_expr(name, tree) + + if tree.prefix != "r" or self.inside_reduction: + call_args.append(expr) + if tree.grid_dim is not None: + grid.append(expr) + + def get_call_args(self): + _, call_args, _ = self.args.python_argdefs() + # dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar + for i in range(len(call_args)): + if V.graph.is_unspec_arg(call_args[i]): + call_args[i] = call_args[i] + ".item()" + + return call_args + + def call_kernel(self, name: str, node: Optional[IRNode] = None): + wrapper = V.graph.wrapper_code + call_args = self.get_call_args() + grid: List[Any] = [] + self.add_numel_to_call_args_and_grid(name, call_args, grid) + current_device = V.graph.scheduler.current_device + + if self.args.workspace_arg is not None: + ws = self.args.workspace_arg + wrapper.generate_workspace_allocation( + ws.nbytes, current_device, ws.zero_fill + ) + + grid = wrapper.generate_default_grid(name, grid) + wrapper.generate_kernel_call( + name, + call_args, + grid, + current_device.index, + cuda=True, + triton=True, + grid_fn=self._get_grid_fn(), + triton_meta=self.triton_meta, + ) + + if self.args.workspace_arg is not None: + wrapper.writeline(wrapper.make_free_by_names(["workspace"])) + + def codegen_nan_check(self): + wrapper = V.graph.wrapper_code + _, call_args, arg_types = self.args.python_argdefs() + for arg, arg_type in zip(call_args, arg_types): + if isinstance(arg_type, TensorArg): + line = f"assert not {arg}.isnan().any().item()" + wrapper.writeline(line) + line = f"assert not {arg}.isinf().any().item()" + wrapper.writeline(line) + + def warn_mix_layout(self, kernel_name): + """ + Print message if the kernel have mixed layout inputs. + Only care about 4D tensor for now. 
+ """ + if ( + len(self.args.input_buffers) == 1 + and len(self.args.output_buffers) == 1 + and len(self.args.inplace_buffers) == 0 + ): + # even if input buffer and output buffer have different layout, + # this can be a layout conversion kernel. No need to warn for + # the mix layouts. + return + + argdefs, call_args, signature = self.args.python_argdefs() + uniform_stride_order = None + for arg_name in call_args: + buf = V.graph.get_buffer(arg_name) + if buf and len(buf.layout.size) == 4: + # ignore the tensor if only 1 dimension is non-zero + if len([x for x in buf.layout.size if x == 1]) == 3: + continue + stride_order = ir.get_stride_order(buf.layout.stride) + if uniform_stride_order is None: + uniform_stride_order = stride_order + elif uniform_stride_order != stride_order: + msg = yellow_text( + f"Expected stride order {uniform_stride_order}, but found stride order" + + f" {stride_order} for kernel {kernel_name}" + ) + log.warning(msg) + + stride_order_list = [ + ir.get_stride_order(V.graph.get_buffer(name).layout.stride) + if V.graph.get_buffer(name) + else None + for name in call_args + ] + size_list = [ + V.graph.get_buffer(name).layout.size + if V.graph.get_buffer(name) + else None + for name in call_args + ] + source_list = [ + "GraphInput" + if name in V.graph.graph_inputs + else "IntermediateBuffer" + if name in V.graph.name_to_buffer + else None + for name in call_args + ] + + msg = yellow_text( + f" param names {argdefs}\n buf names {call_args}\n strides {stride_order_list}" + + f"\n sizes {size_list}\n sources {source_list}\n" + ) + log.warning(msg) + return + msg = green_text( + f"All the inputs for the triton kernel {kernel_name} have uniform layout" + ) + log.warning(msg) + + def create_cse_var(self, *args, **kwargs): + return TritonCSEVariable(*args, **kwargs) + + +class TritonScheduling(BaseScheduling): + def __init__(self, scheduler): + self.scheduler = scheduler + + def group_fn(self, sizes): + return tuple(V.graph.sizevars.simplify(sympy_product(s)) for s in sizes) + + def can_fuse(self, node1, node2): + """ + Hook called by Scheduler to determine if the Triton backend + can fuse node1 and node2. These nodes might already be + FusedSchedulerNodes. + """ + if isinstance(node1, scheduler.ForeachKernelSchedulerNode) or isinstance( + node2, scheduler.ForeachKernelSchedulerNode + ): + return scheduler.ForeachKernelSchedulerNode.can_fuse(node1, node2) + + _, (numel1, rnumel1) = node1.group + _, (numel2, rnumel2) = node2.group + why = WhyNoFuse(node1, node2) + + if node1.is_split_scan() and not node2.is_split_scan(): + if node2.is_reduction(): + why("Split scan cannot fuse with reductions") + elif node2.is_split_scan() and not node1.is_split_scan(): + if node1.is_reduction(): + why("Split scan cannot fuse with reductions") + + if node1.is_reduction() and node2.is_reduction(): + reduction_can_fuse = numel1 == numel2 and rnumel1 == rnumel2 + if not reduction_can_fuse: + why( + "numel/rnumel mismatch (reduce) (%s, %s), (%s, %s)", + numel1, + numel2, + rnumel1, + rnumel2, + ) + return reduction_can_fuse + + if not node1.is_reduction() and not node2.is_reduction(): + if not (numel1 == numel2 and rnumel1 == rnumel2): + why( + "numel/rnumel mismatch (non-reduce) (%s, %s), (%s, %s)", + numel1, + numel2, + rnumel1, + rnumel2, + ) + return False + + if node1.is_template(): + # Only allow fusion for TritonTemplates for now. + # Fusion for CUDATemplates are not supported. 
+ is_triton_template = isinstance(node1.node, TritonTemplateBuffer) + if not is_triton_template: + why("node1 is not TritonTemplateBuffer") + return is_triton_template + + # check for a bad combined tiling + tiling1 = self.select_tiling(node1.get_nodes(), numel1, rnumel1) + tiling2 = self.select_tiling(node2.get_nodes(), numel1, rnumel1) + tiling3 = self.select_tiling( + node1.get_nodes() + node2.get_nodes(), numel1, rnumel1 + ) + if config.triton.tiling_prevents_pointwise_fusion: + cond = True + if len(tiling1) > 2: + if len(tiling2) > 2: + cond = tiling1 == tiling2 == tiling3 + else: + cond = tiling1 == tiling3 + elif len(tiling2) > 2: + cond = tiling2 == tiling3 + if not cond: + why( + "tiling mismatch (%s, %s, %s)", + tiling1, + tiling2, + tiling3, + ) + return False + + return True + + if not node1.is_reduction() and node2.is_reduction(): + assert rnumel1 == 1 and rnumel2 != 1 + if numel1 == numel2 * rnumel2: + if not all( + TritonKernel.is_compatible((numel2, rnumel2), n.get_ranges()) + for n in node1.get_nodes() + ): + why("nodes numel/rnumel incompatibility") + return False + if ( + config.triton.tiling_prevents_reduction_fusion + and not node1.is_template() + ): + is_reduction_tiling_valid = self.select_tiling( + node1.get_nodes(), numel1 + ) in ( + (numel1, 1), + (numel2, rnumel2, 1), + ) + if not is_reduction_tiling_valid: + why("invalid tiling for reduction") + return is_reduction_tiling_valid + return True + + if numel1 != numel2: + why("nodes numel incompatibility") + return numel1 == numel2 + + assert node1.is_reduction() and not node2.is_reduction() + # swap args to hit the case above + return self.can_fuse_horizontal(node2, node1) + + can_fuse_vertical = can_fuse + can_fuse_horizontal = can_fuse + + def generate_node_schedule(self, nodes, numel, rnumel): + node_schedule: List[Any] = [] + current_loop_writes: Set[str] = set() + + # Writes with a reduced shape, meaning they are only present once the + # reduction loop has ended + current_loop_reduced_writes = set() + current_loop_has_writes = False + done = set() + + def fits_in_main_body(n): + _, (node_numel, node_rnumel) = n.group + return (node_numel == numel and node_rnumel == rnumel) or ( + node_numel == numel * rnumel and node_rnumel == 1 + ) + + def fits_outside_reduction(n): + _, (node_numel, node_rnumel) = n.group + return node_numel == numel and node_rnumel == 1 and rnumel != 1 + + def schedule_node_in_loop(n): + nonlocal current_loop_has_writes + done.add(n) + node_schedule.append(n) + current_loop_has_writes = True + # A scan is modelled as a reduction in the scheduler but has a + # full sized output that can be used inside the loop body + if ( + n.is_reduction() + and isinstance(n, scheduler.SchedulerNode) + and isinstance(n.node, ir.ComputedBuffer) + and not isinstance(n.node.data, ir.Scan) + ): + current_loop_reduced_writes.add(n.get_name()) + + @contextlib.contextmanager + def end_current_reduction_loop(): + nonlocal current_loop_has_writes + if current_loop_has_writes: + # flush out any other runnable nodes to reduce number of loops + for other_node in nodes[index + 1 :]: + if ( + node not in done + and fits_in_main_body(other_node) + and not (current_loop_reduced_writes & other_node.ancestors) + ): + schedule_node_in_loop(node) + + if node_schedule and node_schedule[-1] is EnableReduction: + node_schedule.pop() + else: + node_schedule.append(DisableReduction) + yield + node_schedule.append(EnableReduction) + current_loop_reduced_writes.clear() + current_loop_has_writes = False + + for index, node in 
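# Editor's note: illustrative sketch, not part of this diff. It captures the two basic
# shape gates in can_fuse() above with plain integers: nodes of the same kind fuse only
# when their (numel, rnumel) groups agree, and a pointwise producer can feed a reduction
# consumer when its flat numel equals the reduction's numel * rnumel.
def same_kind_can_fuse(group1, group2):
    return group1 == group2  # (numel, rnumel) must match exactly

def pointwise_feeds_reduction(pw_numel, red_numel, red_rnumel):
    return pw_numel == red_numel * red_rnumel

print(same_kind_can_fuse((4096, 768), (4096, 768)))      # True
print(pointwise_feeds_reduction(4096 * 768, 4096, 768))  # True
print(pointwise_feeds_reduction(4096, 4096, 768))        # False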
enumerate(nodes): + if node in done: + continue + done.add(node) + + def requires_closing_previous_reduction(node, node_schedule): + if rnumel == 1: + return False + if not current_loop_reduced_writes & node.ancestors: + return False + assert node_schedule and not isinstance( + node_schedule[-1], (EnableReduction, DisableReduction) + ) + return bool(current_loop_reduced_writes) + + if fits_in_main_body(node): + if requires_closing_previous_reduction(node, node_schedule): + with end_current_reduction_loop(): + pass # need to start a new reduction loop + + schedule_node_in_loop(node) + elif fits_outside_reduction(node): + with end_current_reduction_loop(): + node_schedule.append(node) + else: + raise NotImplementedError( + f"unexpected group: ({numel}, {rnumel}) != {node.group[1]}" + ) + + return node_schedule + + def codegen_nodes(self, nodes: List[scheduler.SchedulerNode]): + """ + Given a set of pre-fused nodes, generate a Triton kernel. + """ + _, (numel, rnumel) = max(nodes, key=lambda x: int(x.is_reduction())).group + + node_schedule = self.generate_node_schedule(nodes, numel, rnumel) + buf_accesses = collections.defaultdict(list) + for node in nodes: + for access in node.read_writes.reads | node.read_writes.writes: + buf_accesses[access.name].append(access) + + schedule_log.debug("Schedule:\n %s", node_schedule) + + return self.codegen_node_schedule(node_schedule, buf_accesses, numel, rnumel) + + @staticmethod + def reduction_hint(node): + assert node.is_reduction() + if all( + dep.is_contiguous() + for dep in itertools.chain(node.read_writes.reads, node.read_writes.writes) + ): + return ReductionHint.INNER + else: + return node.node.data.reduction_hint + + @staticmethod + def can_use_32bit_indexing( + numel: sympy.Expr, buffers: Iterable[Union[ir.Buffer, ir.TensorBox]] + ) -> bool: + int_max = torch.iinfo(torch.int32).max + size_hint = V.graph.sizevars.size_hint + has_hint = V.graph.sizevars.shape_env.has_hint + + def within_32bit(e): + # Allow for unhinted e as long as we can still statically prove + # (e.g., via ValueRanges) that it is still in bounds + if V.graph.sizevars.is_expr_static_and_true(e <= int_max): + return True + # Otherwise, the hint MUST exist and be in range + return has_hint(e) and size_hint(e) <= int_max + + if not within_32bit(numel): + return False + + # Any use of a MultiOutputLayout will create a buffer with a + # Layout whose sizes are accounted for + buf_sizes = [ + buf.get_layout().storage_size() + for buf in buffers + if not isinstance(buf.get_layout(), ir.MultiOutputLayout) + ] + + if not all(within_32bit(size) for size in buf_sizes): + return False + + # Only install guards for 32-bit indexing as there is no correctness + # issue with using 64-bit for everything + V.graph.sizevars.guard_leq(numel, int_max) # type: ignore[arg-type] + for size in buf_sizes: + V.graph.sizevars.guard_leq(size, int_max) # type: ignore[arg-type] + return True + + @staticmethod + def select_index_dtype(node_schedule, numel, reduction_numel): + # Gather all used buffer names + buffer_names = set() + for node in node_schedule: + if not isinstance(node, scheduler.BaseSchedulerNode): + continue + + buffer_names.update(node.get_names()) + buffer_names.update(node.used_buffer_names()) + + # Get buffers objects + def _get_buffer(name: str) -> Union[ir.Buffer, ir.TensorBox]: + if name in V.graph.name_to_buffer: + return V.graph.name_to_buffer[name] + elif name in V.graph.graph_inputs: + return V.graph.graph_inputs[name] + elif name in V.graph.constants: + data = 
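# Editor's note: illustrative sketch, not part of this diff. It shows the core of
# can_use_32bit_indexing() / select_index_dtype() above with plain integers: the flattened
# iteration size and every buffer's storage size must fit in int32 before "tl.int32" is
# chosen as the kernel's index dtype; otherwise it falls back to "tl.int64".
INT32_MAX = 2**31 - 1  # torch.iinfo(torch.int32).max

def select_index_dtype_sketch(total_numel, buffer_storage_sizes):
    if total_numel <= INT32_MAX and all(s <= INT32_MAX for s in buffer_storage_sizes):
        return "tl.int32"
    return "tl.int64"

print(select_index_dtype_sketch(4096 * 768, [4096 * 768, 4096]))  # tl.int32
print(select_index_dtype_sketch(2**32, [2**32]))                  # tl.int64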
V.graph.constants[name] + return ir.ConstantBuffer( + name, + ir.FixedLayout( + data.device, data.dtype, *V.graph.static_sizes_strides(data) + ), + ) + raise RuntimeError(f"Failed to find buffer matching name {name}") + + buffers = [_get_buffer(name) for name in buffer_names] + + # In theory we can separately check xnumel and rnumel are <= int_max + # but some indexers do use the full linear index so we need to be + # conservative here. + total_numel = numel * reduction_numel + + if TritonScheduling.can_use_32bit_indexing(total_numel, buffers): + return "tl.int32" + return "tl.int64" + + def get_kernel_args(self, node_schedule, numel, reduction_numel): + reductions = list( + filter( + lambda n: n not in (EnableReduction, DisableReduction) + and n.is_reduction(), + node_schedule, + ) + ) + if len(reductions) > 0: + hints = [self.reduction_hint(n) for n in reductions] + if hints.count(hints[0]) == len(hints): + reduction_hint_val = hints[0] + else: + reduction_hint_val = ReductionHint.DEFAULT + else: + reduction_hint_val = ReductionHint.DEFAULT + + mutations = set() + for node in node_schedule: + if hasattr(node, "get_mutations"): + mutations.update(node.get_mutations()) + + index_dtype = self.select_index_dtype(node_schedule, numel, reduction_numel) + + return reduction_hint_val, mutations, index_dtype + + def codegen_comment(self, node_schedule): + wrapper = V.graph.wrapper_code + origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) + if origins: + wrapper.writeline(origins) + + if config.debug_fusion: + from torch._inductor.scheduler import ( + BaseSchedulerNode, + ForeachKernelSchedulerNode, + ) + + if not any( + isinstance(n, ForeachKernelSchedulerNode) for n in node_schedule + ): + # We probably should look what are the nodes inside a foreach + # schedule node + node_names = [ + n.get_name() + for n in node_schedule + if isinstance(n, BaseSchedulerNode) + ] + wrapper.writeline( + f"{wrapper.comment} Fused node name list: {', '.join(node_names)}" + ) + + def codegen_node_schedule( + self, node_schedule, buf_accesses, numel, reduction_numel + ): + from torch._inductor.codegen.triton_split_scan import TritonSplitScanKernel + + tiled_groups = self.select_tiling(node_schedule, numel, reduction_numel) + reduction_hint_val, mutations, index_dtype = self.get_kernel_args( + node_schedule, numel, reduction_numel + ) + + is_split_scan = any( + isinstance(node, BaseSchedulerNode) and node.is_split_scan() + for node in node_schedule + ) + kernel_type = TritonSplitScanKernel if is_split_scan else TritonKernel + kernel_args = tiled_groups + kernel_kwargs = { + "reduction_hint": reduction_hint_val, + "mutations": mutations, + "index_dtype": index_dtype, + } + kernel = kernel_type( + *kernel_args, + **kernel_kwargs, + ) + kernel.buf_accesses = buf_accesses + + self.codegen_node_schedule_with_kernel(node_schedule, kernel) + + with V.set_kernel_handler(kernel): + src_code = kernel.codegen_kernel() + + kernel_name = self.define_kernel(src_code, node_schedule) + log.debug("Generating kernel code with kernel_name: %s", kernel_name) + kernel.kernel_name = kernel_name + kernel.code_hash = code_hash(src_code) + + if kernel.persistent_reduction and config.triton.multi_kernel: + kernel2 = TritonKernel( + *kernel_args, + **kernel_kwargs, + disable_persistent_reduction=True, + ) + self.codegen_node_schedule_with_kernel(node_schedule, kernel2) + with V.set_kernel_handler(kernel2): + src_code2 = kernel2.codegen_kernel() + kernel_name2 = self.define_kernel(src_code2, node_schedule) + 
kernel2.kernel_name = kernel_name2 + kernel2.code_hash = code_hash(src_code2) + + final_kernel = MultiKernel([kernel, kernel2]) + else: + final_kernel = kernel # type: ignore[assignment] + + with V.set_kernel_handler(final_kernel): + for node in node_schedule: + if node not in (EnableReduction, DisableReduction): + node.mark_run() + + self.codegen_comment(node_schedule) + final_kernel.call_kernel(final_kernel.kernel_name) + if config.nan_asserts: + final_kernel.codegen_nan_check() + if config.warn_mix_layout: + final_kernel.warn_mix_layout(kernel_name) + + V.graph.removed_buffers |= final_kernel.removed_buffers + V.graph.inplaced_to_remove |= final_kernel.inplaced_to_remove + + if ( + V.graph.wrapper_code.supports_intermediate_hooks + and config.generate_intermediate_hooks + ): + # Not every node in the schedule will actually be live on output; + # we can't check dead buffers. + live_outs = kernel.args.live_output_buffers() + for node in node_schedule: + if not isinstance(node, scheduler.BaseSchedulerNode): + continue + name = node.get_name() + if name not in live_outs: + continue + origin_node = node.node.get_origin_node() + if origin_node is not None: + counters["inductor"]["intermediate_hooks"] += 1 + V.graph.wrapper_code.writeline( + f"run_intermediate_hooks({origin_node.name!r}, {name})" + ) + + self.scheduler.free_buffers() + + def codegen_node_schedule_with_kernel(self, node_schedule, kernel): + def current_reduction_nodes(nodes): + return itertools.takewhile(lambda n: n is not DisableReduction, nodes) + + with kernel: + stack = contextlib.ExitStack() + kernel.set_last_usage(current_reduction_nodes(node_schedule)) + + for node in node_schedule: + if node not in (EnableReduction, DisableReduction): + node.decide_inplace_update() + for i, node in enumerate(node_schedule): + if node is DisableReduction: + stack.enter_context(kernel.disable_reduction()) + elif node is EnableReduction: + stack.close() + kernel.set_last_usage(current_reduction_nodes(node_schedule[i:])) + else: + # TODO - use split ranges ? + indexing_dtype_strength_reduction(node._body) + index_vars = kernel.split_and_set_ranges(node.get_ranges()) + node.codegen(index_vars) + + def define_kernel(self, src_code, node_schedule): + wrapper = V.graph.wrapper_code + if src_code in wrapper.src_to_kernel: + kernel_name = wrapper.src_to_kernel[src_code] + else: + fused_name = ( + get_fused_kernel_name(node_schedule, config.triton.descriptive_names) + if config.triton.descriptive_names + else "" + ) + kernel_category = get_kernel_category_by_source_code(src_code)[:3] + kernel_name = "_".join( + ["triton", kernel_category, fused_name, wrapper.next_kernel_suffix()] + ) + # use the original src_code as the key + wrapper.src_to_kernel[src_code] = kernel_name + subs_name = kernel_name if config.triton.unique_kernel_names else "triton_" + + # DESCRIPTIVE_NAME is used for profiling purposes; it shows the full kernel name + # even when unique_kernel_names is turned off. Meanwhile, KERNEL_NAME is sometimes set + # to "triton_" to maximize caching opportunities (when unique_kernel_names = False). + src_code = src_code.replace(str(Placeholder.DESCRIPTIVE_NAME), kernel_name) + src_code = src_code.replace(str(Placeholder.KERNEL_NAME), subs_name) + + # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does + # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. 
+ src_code = src_code.replace("#pragma CMT", "#") + + basename, _, kernel_path = get_path(code_hash(src_code.strip()), "py") + + compile_wrapper = IndentedBuffer() + compile_wrapper.writeline(f"async_compile.triton({subs_name!r}, '''") + compile_wrapper.splice(src_code, strip=True) + compile_wrapper.writeline( + f"''', device_str='{V.graph.scheduler.current_device.type}')" + ) + + metadata_comment = f"# kernel path: {kernel_path}" + origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) + metadata_comment += "\n" + origins + "\n" + detailed_origins + wrapper.define_kernel( + kernel_name, compile_wrapper.getvalue(), metadata_comment + ) + + # log kernel metadata for offline analysis. + # E.g. one can find all unaligned inner reduction and check if + # padding helps with the perf kernel by kernel. + if is_metric_table_enabled("kernel_metadata"): + log_kernel_metadata(kernel_name, kernel_path, src_code) + + return kernel_name + + def codegen_template( + self, template_node, epilogue_nodes, only_gen_src_code=False + ) -> Optional[str]: + """ + Codegen a triton template + + If `only_gen_src_code` the src code will be returned instead of codegen'd into the wrapper + """ + _, (numel, rnumel) = template_node.group + assert rnumel == 1 + kernel, render = template_node.node.make_kernel_render(template_node.node) + with kernel: + if not only_gen_src_code: + for node in [template_node, *epilogue_nodes]: + node.mark_run() + partial_code = render() + for node in epilogue_nodes: + node.codegen(kernel.split_and_set_ranges(node.get_ranges())) + + # finalize must be called after adding epilogue above + with V.set_kernel_handler(kernel): + # TODO: Maybe unify CUDATemplateKernel to also use PartialRender for flexible epilogue fusion. + src_code = ( + partial_code + if isinstance(partial_code, str) + else partial_code.finalize() + ) + node_schedule = [template_node, *epilogue_nodes] + + if config.benchmark_kernel: + num_gb = kernel.estimate_kernel_num_bytes() / 1e9 + grid_args = V.graph.sizevars.size_hints(kernel.call_sizes) + assert kernel.meta is not None, "meta is None" + grid = kernel.grid_fn(*grid_args, kernel.meta) + src_code = ( + f"{kernel.imports_for_benchmark_kernel()}\n" + f"{src_code}\n" + f"{kernel.codegen_kernel_benchmark(num_gb, grid).getvalue()}" + ) + + if only_gen_src_code: + return src_code + + kernel_name = self.define_kernel(src_code, node_schedule) + + self.codegen_comment(node_schedule) + kernel.call_kernel(kernel_name, template_node.node) + V.graph.removed_buffers |= kernel.removed_buffers + V.graph.inplaced_to_remove |= kernel.inplaced_to_remove + self.scheduler.free_buffers() + return None + + def codegen_sync(self): + V.graph.wrapper_code.writeline(V.graph.device_ops.synchronize()) + + def codegen_foreach(self, foreach_node): + from .triton_foreach import ForeachKernel + + for partitions_with_metadata in ForeachKernel.horizontal_partition( + foreach_node.get_subkernel_nodes(), self + ): + kernel = ForeachKernel() + for nodes, tiled_groups, numel, rnumel in partitions_with_metadata: + node_schedule = self.generate_node_schedule(nodes, numel, rnumel) + ( + reduction_hint_val, + mutations, + index_dtype, + ) = self.get_kernel_args(node_schedule, numel, rnumel) + + subkernel = kernel.create_sub_kernel( + *tiled_groups, + reduction_hint=reduction_hint_val, + mutations=mutations, + index_dtype=index_dtype, + ) + + self.codegen_node_schedule_with_kernel( + node_schedule, + subkernel, + ) + + with V.set_kernel_handler(subkernel): + for node in node_schedule: + if node not in 
(EnableReduction, DisableReduction): + node.mark_run() + V.graph.removed_buffers |= subkernel.removed_buffers + V.graph.inplaced_to_remove |= subkernel.inplaced_to_remove + + src_code = kernel.codegen_kernel() + kernel_name = self.define_kernel(src_code, [foreach_node]) + self.codegen_comment([foreach_node]) + kernel.call_kernel(V.graph.wrapper_code, kernel_name) + + self.scheduler.free_buffers() + + @staticmethod + @functools.lru_cache(32) + def candidate_tilings(node): + ranges, reduction_ranges = node.get_ranges() + if len(ranges) <= 1: + return () + + rw = node.pointwise_read_writes() + assert len(rw.range_vars) == len(ranges) + + # isinstance(dep, MemoryDep): this filters out StarDeps. StarDeps refer to reads + # that need to access the entire tensor; they don't contribute read indexing + # information (and practically, they don't have dep.index so they can't be used + # for stride_hints below + dep_sources = [rw.reads, rw.writes] + assert all( + isinstance(dep, (MemoryDep, StarDep)) + for dep in itertools.chain.from_iterable(dep_sources) + ) + deps = [ + dep + for dep in itertools.chain.from_iterable(dep_sources) + if dep.name not in V.graph.removed_buffers and isinstance(dep, MemoryDep) + ] + write_names = {dep.name for dep in rw.writes} + + tilings: List[CandidateTiling] = [] + + for dep in deps: + strides = V.graph.sizevars.stride_hints(dep.index, rw.range_vars) + assert len(strides) == len(ranges) + try: + split = strides.index(1) + 1 + if split == len(ranges): + continue + if all(s == 0 for s in strides[split:]): + # if this is a broadcasted tensor and all dimensions after split are broadcast, + # this is not a real split + continue + + except ValueError: + continue + tiled_groups = ( + V.graph.sizevars.simplify(sympy_product(ranges[:split])), + V.graph.sizevars.simplify(sympy_product(ranges[split:])), + ) + # score by number of elements + score = V.graph.sizevars.size_hint( + sympy_product( + size for size, stride in zip(ranges, strides) if stride != 0 + ) + ) + if dep.name in write_names: + # ngimel said contiguous writes is more important than reads + score *= 2 + if CandidateTiling.is_good_size(tiled_groups[0]): + score *= 2 + if CandidateTiling.is_good_size(tiled_groups[1]): + score *= 2 + + if ( + V.graph.sizevars.size_hint( + score - sympy_product(itertools.chain(ranges, reduction_ranges)) + ) + >= 0 + ): + tilings.append(CandidateTiling(tiled_groups, score, dep.name)) + return tilings + + @classmethod + def select_tiling(cls, node_schedule, numel, reduction_numel=sympy.Integer(1)): + """ + Heuristics to decide how to tile kernels. + Currently, we tile based on stride-1 dimensions. + + Returns: + `(tile1, tile2, reduction_numel)` s.t. `tile1 * tile2 == numel` + + """ + if reduction_numel != 1 or config.triton.max_tiles <= 1: + # TODO(jansel): should we tile reductions? 
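candidate_tilings above ranks possible 2D splits of the pointwise ranges: each memory dependency proposes a split right after its stride-1 dimension, scored by the number of elements it actually touches, with the score doubled for writes and for tile sizes that are multiples of 32. A simplified pure-Python restatement of that scoring (a hypothetical helper using plain ints in place of sympy expressions and stride hints):

```python
from math import prod

def is_good_size(s: int) -> bool:
    # "good" tile sizes get their score doubled
    return s >= 32 and s % 32 == 0

def score_candidate(ranges, strides, is_write: bool):
    # Split right after this dependency's stride-1 dimension.
    if 1 not in strides:
        return None
    split = strides.index(1) + 1
    if split == len(ranges) or all(s == 0 for s in strides[split:]):
        return None  # no real second tile (trailing dims are broadcast)
    tile = (prod(ranges[:split]), prod(ranges[split:]))
    # Score by elements actually touched (broadcast dims have stride 0).
    score = prod(size for size, stride in zip(ranges, strides) if stride != 0)
    if is_write:
        score *= 2
    for t in tile:
        if is_good_size(t):
            score *= 2
    return tile, score

print(score_candidate([64, 128], [1, 64], is_write=True))  # ((64, 128), 65536)
```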
+ # do perf hint here if stride-1 dim is not being reduced + if perf_hint_log.level <= logging.WARNING: + for node in EnableReduction.filter(node_schedule): + if len(cls.candidate_tilings(node)) > 0: + perf_hint_log.info("reduction over non-contiguous dims") + break + return (numel, reduction_numel) + + seen_names = set() + candidate_tiles: Counter[Any] = collections.Counter() + for node in EnableReduction.filter(node_schedule): + for tiling in cls.candidate_tilings(node): + if tiling.name in seen_names: + continue + seen_names.add(tiling.name) + candidate_tiles[tiling.tiling] += tiling.score + + ranked_tilings = [tiling for tiling, score in candidate_tiles.most_common()] + + if config.triton.max_tiles >= 3: + # Consider adding a third dimension of tiling, but only + # when a1 is a multiple of b1; otherwise, you have a lot + # of stragglers which is annoying to generate code for. + # + # NB: More than three max tiles is not enabled by default. + + # Add one 3D tiling choice + for i in range(1, len(ranked_tilings)): + a0, a1 = ranked_tilings[0] + b0, b1 = ranked_tilings[i] + if V.graph.sizevars.size_hint(a1 - b1) == 0: + continue + if V.graph.sizevars.size_hint(a1 - b1) < 0: + # swap so a0 is bigger + a0, a1 = ranked_tilings[i] + b0, b1 = ranked_tilings[0] + assert V.graph.sizevars.size_hint(a1 - b1) > 0 + if V.graph.sizevars.statically_known_multiple_of(a1, b1): + tiling = (a0, FloorDiv(a1, b1), b1) + ranked_tilings = [tiling] + ranked_tilings + break # only 1 choice for now + + if len(ranked_tilings) > 1: + perf_hint_log.info("possibly bad tiling: %s", ranked_tilings) + + for tiled_groups in ranked_tilings: + new_groups = (*tiled_groups, reduction_numel) + if all( + TritonKernel.is_compatible(new_groups, node.get_ranges()) + for node in node_schedule + if isinstance(node, scheduler.SchedulerNode) + ): + return new_groups + + return (numel, reduction_numel) + + def flush(self): + pass + + def ready_to_flush(self) -> bool: + return False + + def benchmark_fused_nodes(self, nodes): + # empty last_usage. May cause more aggressive 'evict_last'. Should be fine. 
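benchmark_fused_nodes, whose body follows, compiles the candidate fused kernel, times it, and memoizes the measured latency next to the generated module in a ".kernel_perf" file. A rough sketch of just that on-disk memoization pattern (cached_benchmark is an illustrative helper, not the Inductor API):

```python
import os

def cached_benchmark(module_file: str, run_benchmark) -> float:
    # The measured latency (ms) is stored next to the generated kernel module.
    cache_path = os.path.splitext(module_file)[0] + ".kernel_perf"
    if os.path.exists(cache_path):
        with open(cache_path) as fd:
            return float(fd.read())
    ms = run_benchmark()  # e.g. a do_bench-style timing closure
    with open(cache_path, "w") as fd:
        fd.write(str(ms))
    return ms
```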
+ for n in nodes: + n.last_usage = set() + + if not nodes[0].is_template(): + _, (numel, rnumel) = max(nodes, key=lambda x: int(x.is_reduction())).group + node_schedule = self.generate_node_schedule(nodes, numel, rnumel) + + tiled_groups = self.select_tiling(node_schedule, numel, rnumel) + reduction_hint_val, mutations, index_dtype = self.get_kernel_args( + node_schedule, numel, rnumel + ) + + kernel = TritonKernel( + *tiled_groups, + reduction_hint=reduction_hint_val, + mutations=mutations, + index_dtype=index_dtype, + ) + + self.codegen_node_schedule_with_kernel(node_schedule, kernel) + with config.patch("benchmark_kernel", True), V.set_kernel_handler(kernel): + src_code = kernel.codegen_kernel() + else: + template_node = nodes[0] + epilogue_nodes = nodes[1:] + + with config.patch("benchmark_kernel", True): + src_code = self.codegen_template( + template_node, epilogue_nodes, only_gen_src_code=True + ) + + src_code = src_code.replace(str(Placeholder.KERNEL_NAME), "triton_") + mod = PyCodeCache.load(src_code) + + def cache_file_path(): + assert mod.__file__ is not None + return os.path.splitext(mod.__file__)[0] + ".kernel_perf" + + def load_cache(): + path = cache_file_path() + if os.path.exists(path): + with open(path) as fd: + return float(fd.read()) + return None + + def store_cache(): + path = cache_file_path() + with open(path, "w") as fd: + fd.write(str(ms)) + + log.debug( + "kernel src code for %s written to: %s", + {n.get_name() for n in nodes}, + mod.__file__, + ) + ms = load_cache() + if ms is not None: + return ms, mod.__file__ + + args = mod.get_args() + call = mod.call + wrapped_jit_function = mod.triton_ + + # call once to trigger the compilation + call(wrapped_jit_function.clone_args(*args)[0]) + + launchers = wrapped_jit_function.launchers + assert len(launchers) == 1 + if launchers[0].n_spills > 0: + # skip benchmarking the kernel if there are register spills + ms = float("inf") + else: + # We have to clone the inplace updated arguments to avoid earlier calls + # generating out of range indices for later calls. + ms = do_bench(lambda: call(wrapped_jit_function.clone_args(*args)[0])) + + log.debug( + "The fused kernel for %s took %.3f ms to run", + {n.get_name() for n in nodes}, + ms, + ) + store_cache() + return ms, mod.__file__ + + +@dataclasses.dataclass +class CandidateTiling: + tiling: Tuple[sympy.Expr, sympy.Expr] + score: int # higher is better + name: Optional[str] = None + + @staticmethod + def is_good_size(s): + """Somewhat arbitrary heuristic used to boost scores for some sizes""" + s = V.graph.sizevars.size_hint(s) + return s >= 32 and (s % 32 == 0) + + +class DisableReduction: + """ + Marker to invoke `kernel.disable_reduction()`. This closes a + reduction loop and allows for pointwise ops to occur on the output + of a reduction. + """ + + +class EnableReduction: + """ + Marker to end a DisableReduction block. + """ + + @staticmethod + def filter(node_schedule): + """ + Get the nodes from node_schedule skipping those in a + DisableReduction block. 
+ """ + disabled = False + for node in node_schedule: + if node in (EnableReduction, DisableReduction): + # Don't tile stuff outside the main reduction loop + disabled = node is DisableReduction + elif disabled: + pass + else: + yield node + + +class CantSplit(Exception): + pass diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py new file mode 100644 index 0000000000000000000000000000000000000000..449af125d89e2165c6b9850fa37e3e5c663398ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py @@ -0,0 +1,250 @@ +import itertools +from collections import defaultdict +from dataclasses import dataclass +from typing import Dict, List, Tuple + +from sympy import Integer + +import torch + +from .. import metrics +from ..scheduler import SchedulerNode +from ..utils import ceildiv, Placeholder +from ..virtualized import V +from .common import IndentedBuffer, Kernel +from .triton import gen_common_triton_imports, TritonKernel +from .triton_utils import config_of, signature_to_meta + + +@dataclass +class PartitionState: + partitions: List[ + List[Tuple[List[SchedulerNode], Tuple[Integer, ...], Integer, Integer]] + ] + cur_partition: List[ + Tuple[List[SchedulerNode], Tuple[Integer, ...], Integer, Integer] + ] + cur_count: int + + def finalize(self): + if self.cur_partition: + self.partitions.append(self.cur_partition) + + +class ForeachKernel(Kernel): + MAX_NUM_ARGS = 250 # number where I would no longer get triton errors + + @staticmethod + def _update_partition(partition_state, node_rw_count, node_info): + if partition_state.cur_count + node_rw_count > ForeachKernel.MAX_NUM_ARGS: + partition_state.partitions.append(partition_state.cur_partition) + partition_state.cur_partition = [node_info] + partition_state.cur_count = node_rw_count + else: + partition_state.cur_count += node_rw_count + partition_state.cur_partition.append(node_info) + + @staticmethod + def horizontal_partition(subkernel_nodes, triton_scheduling): + """Generates a list of lists of node info tuples which consist of (fused_nodes, tiling, numel, rnumel) + for each subkernel node where each sublist is guaranteed to not exceed CUDA limits for number of args + (read/writes) and to have the same 2D or 1D blocking strategy.""" + assert len(subkernel_nodes) >= 1 + + partition_state_1d = PartitionState([], [], 0) + yelem_to_partition_state_2d: Dict[Integer, PartitionState] = defaultdict( + lambda: PartitionState([], [], 0) + ) + + for node in subkernel_nodes: + fused_nodes = node.get_nodes() + _, (numel, rnumel) = max( + fused_nodes, key=lambda x: int(x.is_reduction()) + ).group + tiled_groups = triton_scheduling.select_tiling(fused_nodes, numel, rnumel) + node_info = fused_nodes, tiled_groups, numel, rnumel + + read_writes = node.read_writes + read_write_count = len(read_writes.reads) + len(read_writes.writes) + + if tiled_groups[1] == 1: + ForeachKernel._update_partition( + partition_state_1d, read_write_count, node_info + ) + else: + y_elem = tiled_groups[0] + partition_state_2d = yelem_to_partition_state_2d[y_elem] + ForeachKernel._update_partition( + partition_state_2d, read_write_count, node_info + ) + + partition_state_1d.finalize() + all_partitions = partition_state_1d.partitions + for partition_state_2d in yelem_to_partition_state_2d.values(): + partition_state_2d.finalize() + all_partitions.extend(partition_state_2d.partitions) + + return all_partitions + + def __init__(self): + 
super().__init__() + self.blocking_2d = False + self.block_size_1d = 1024 # Try tuning this value + self.block_size_2d = 32 + self.num_warps = 8 + self.sub_kernels = [] + self.iter_vars_count = itertools.count() + self.x_block_count = 0 + self.y_block_count = 0 + + def get_block_size(self): + if self.blocking_2d: + return self.block_size_2d + else: + return self.block_size_1d + + @staticmethod + def codegen_pid_offsets(code, block_count, lower_bound, prefix): + if block_count == 0: + code.splice(f"{prefix}pid_offset = {prefix}pid") + else: + code.splice(f"{prefix}pid_offset = {prefix}pid - {lower_bound}") + + def codegen_pid_range(self, code, x_elems): + num_x_blocks = ceildiv(x_elems, self.get_block_size()) + upper_bound_x_pid = self.x_block_count + num_x_blocks + lower_bound_x_pid = self.x_block_count + + if self.x_block_count == 0: + cond = "if" + else: + cond = "elif" + + x_pid_bounds_check = ( + f"xpid >= {lower_bound_x_pid} and xpid < {upper_bound_x_pid}" + ) + code.splice(f"{cond} {x_pid_bounds_check}:") + + with code.indent(): + ForeachKernel.codegen_pid_offsets( + code, num_x_blocks, lower_bound_x_pid, "x" + ) + self.x_block_count += num_x_blocks + + def create_sub_kernel(self, *groups, index_dtype, mutations, reduction_hint): + sub_kernel = TritonKernel( + *groups, + index_dtype=index_dtype, + mutations=mutations, + pid_cache={ + "tl.program_id(0)": "xpid_offset", + "tl.program_id(1)": "ypid", + }, + reduction_hint=reduction_hint, + ) + if self.blocking_2d: + assert len(groups) == 3 + + self.blocking_2d |= groups[1] != 1 and len(groups) == 3 + metrics.generated_kernel_count -= 1 + sub_kernel.args = self.args + sub_kernel.iter_vars_count = self.iter_vars_count + sub_kernel.cse.iter_buffer_ids = self.cse.iter_buffer_ids + self.sub_kernels.append(sub_kernel) + return sub_kernel + + def jit_lines(self): + can_use_32bit = all(k.index_dtype == "tl.int32" for k in self.sub_kernels) + size_dtype = "tl.int32" if can_use_32bit else "tl.int64" + _, _, signature = self.args.python_argdefs() + triton_meta = { + "signature": signature_to_meta(signature, size_dtype=size_dtype), + "device": V.graph.scheduler.current_device.index, + "device_type": V.graph.scheduler.current_device.type, + "constants": {}, + } + triton_meta["configs"] = [config_of(signature)] + inductor_meta = { + "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), + "backend_hash": torch.utils._triton.triton_hash_with_backend(), + } + return f""" + @triton_heuristics.foreach( + num_warps={self.num_warps}, + triton_meta={triton_meta!r}, + inductor_meta={inductor_meta!r}, + ) + @triton.jit + """ + + def grid(self): + return ( + self.x_block_count, + ceildiv(int(self.sub_kernels[0].numels[0]), self.block_size_2d) + if self.blocking_2d + else 1, + 1, + ) + + def codegen_kernel(self, name=None): + code = IndentedBuffer() + + code.splice(gen_common_triton_imports()) + argdefs, _, _ = self.args.python_argdefs() + code.splice(self.jit_lines()) + code.writeline( + f"def {name or str(Placeholder.KERNEL_NAME)}({', '.join(argdefs)}):" + ) + + with code.indent(): + code.splice("xpid = tl.program_id(0)") + if self.blocking_2d: + code.splice("ypid = tl.program_id(1)") + code.splice(f"XBLOCK: tl.constexpr = {self.block_size_2d}") + code.splice(f"YBLOCK: tl.constexpr = {self.block_size_2d}") + else: + code.splice(f"XBLOCK: tl.constexpr = {self.block_size_1d}") + + for sub_kernel in self.sub_kernels: + assert len(sub_kernel.numels) <= 3 + # TODO mlazos: support dynamic shapes + numel_ind = 0 if not self.blocking_2d else 1 + 
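codegen_pid_range above assigns each sub-kernel a contiguous band of x program ids, so the fused foreach kernel becomes an if/elif chain over xpid. A hypothetical, stripped-down emitter for that dispatch structure, with numels and block size as plain ints:

```python
from math import ceil

def emit_pid_dispatch(sub_kernel_xnumels, block_size):
    lines, lower = [], 0
    for i, xnumel in enumerate(sub_kernel_xnumels):
        num_blocks = ceil(xnumel / block_size)
        cond = "if" if lower == 0 else "elif"
        lines.append(f"{cond} xpid >= {lower} and xpid < {lower + num_blocks}:")
        # Rebase the program id so each sub-kernel sees offsets starting at 0.
        lines.append(f"    xpid_offset = xpid - {lower}" if lower else "    xpid_offset = xpid")
        lines.append(f"    # ... body of sub-kernel {i} ...")
        lower += num_blocks
    lines += ["else:", "    pass"]
    return "\n".join(lines)

print(emit_pid_dispatch([3000, 5000], block_size=1024))
```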
self.codegen_pid_range(code, int(sub_kernel.numels[numel_ind])) + with code.indent(): + if self.blocking_2d: + code.splice(f"ynumel = {sub_kernel.numels[0]}") + code.splice(f"xnumel = {sub_kernel.numels[1]}") + else: + code.splice(f"xnumel = {sub_kernel.numels[0]}") + + sub_kernel.codegen_body() + code.splice(sub_kernel.body) + + code.splice("else:") + with code.indent(): + code.splice("pass") + + return code.getvalue() + + def call_kernel(self, code, name: str): + _, call_args, _ = self.args.python_argdefs() + # dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar + for i in range(len(call_args)): + if V.graph.is_unspec_arg(call_args[i]): + call_args[i] = call_args[i] + ".item()" + if V.graph.cpp_wrapper: + V.graph.wrapper_code.generate_kernel_call( + name, + call_args, + device_index=V.graph.scheduler.current_device.index, + grid=self.grid(), + ) + else: + # TODO: refactor generate_kernel_call + call_args_str = ", ".join(call_args) + stream_name = code.write_get_raw_stream( + V.graph.scheduler.current_device.index + ) + code.writeline( + f"{name}.run({call_args_str}, grid=({self.grid()}), stream={stream_name})" + ) diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/triton_split_scan.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/triton_split_scan.py new file mode 100644 index 0000000000000000000000000000000000000000..7c52b1c0ad329ca0a633c7d66ea5e21ea80c360a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/triton_split_scan.py @@ -0,0 +1,180 @@ +import functools + +from typing import Optional, Set + +from torch._inductor import config, ir + +from torch._inductor.codegen.triton import ( + IterationRangesRoot, + triton_compute_type, + TritonKernel, + TritonKernelOverrides, +) + +from torch._prims_common import prod + +from torch.utils._sympy.functions import CeilDiv + + +class TritonSplitScanKernel(TritonKernel): + """Generates a triton kernel that supports ops.scan calls while also splitting + the reduction dimension over multiple triton programs. + + For this kernel, loop numels will always take the form ``(xdim, rdim)`` + and the grid has the shape ``(CeilDiv(rdim, RBLOCK), xdim)``. Communication + between blocks occurs within a global memory workspace buffer, which + must be zero-filled before launching the kernel. + + Note that generation for ``ops.reduction`` is not supported. 
+ + For details of the communication strategy, see + https://research.nvidia.com/publication/2016-03_single-pass-parallel-prefix-scan-decoupled-look-back + + """ + + def __init__( + self, + *groups, + index_dtype: str, + mutations: Optional[Set[str]] = None, + reduction_hint=ir.ReductionHint.DEFAULT, + min_elem_per_thread=0, + ): + super().__init__( + *groups, + index_dtype=index_dtype, + mutations=mutations, + pid_cache=None, + reduction_hint=reduction_hint, + min_elem_per_thread=min_elem_per_thread, + ) + self.no_x_dim = True + + def initialize_range_tree(self, pid_cache): + prefixes = "yxr" + assert len(self.numels) <= len( + prefixes + ), "z dimension not supported for split scan" + active_prefixes = prefixes[len(prefixes) - len(self.numels) :] + + grid_dims = "rxy" + for numel, prefix in zip(self.numels, active_prefixes): + is_reduction = prefix == "r" + tensor_dim = 0 if is_reduction else None + grid_dim = grid_dims.find(prefix) + self.range_trees.append( + IterationRangesRoot( + f"{prefix}index", + numel, + prefix, + grid_dim, + self, + pid_cache=pid_cache, + is_loop=False, + tensor_dim=tensor_dim, + grid_dim=grid_dim, + ) + ) + for tree in self.range_trees: + tree.codegen_header(self.body) + + def reduction(self, dtype, src_dtype, reduction_type, value): + raise NotImplementedError("NYI TritonSplitDimKernel reductions") + + def scan(self, dtype, combine_fn, value, init): + import triton.language as tl + + compute_type = triton_compute_type(dtype) + compute_type_triton = getattr(tl, compute_type[3:]) + + element_nbits = compute_type_triton.primitive_bitwidth + + scratch_type = "tl.uint32" if element_nbits <= 16 else "tl.uint64" + scratch_type_triton = getattr(tl, scratch_type[3:]) + scratch_elems_per_block = 3 if element_nbits == 64 else 1 + scratch_nbytes_per_block = scratch_elems_per_block * ( + scratch_type_triton.primitive_bitwidth // 8 + ) + + cse_load = functools.partial(self.cse.generate, self.loads) + cse_compute = functools.partial(self.cse.generate, self.compute) + + assert len(self.numels) == 2, "Unexpected tiling" + min_rblock = config.triton.min_split_scan_rblock + max_blocks = prod(self.numels[:-1]) * CeilDiv(self.numels[-1], min_rblock) + nbytes = scratch_nbytes_per_block * max_blocks + scratch_base, offset = self.args.workspace(nbytes=nbytes, zero_fill=True) + if offset != 0: + scratch_base = cse_load(f"{scratch_base} + {self.index_to_str(offset)}") + runtime_rblocks = cse_load(f"tl.num_programs({self.range_trees[-1].index})") + scratch_base = cse_load( + f"{scratch_base}.to(tl.pointer_type({scratch_type})) + xoffset * " + f"{scratch_elems_per_block} * {runtime_rblocks}" + ) + + masks = {f"{tree.prefix}mask" for tree in self.range_trees} + self.filter_masks(masks) + masks = sorted(masks) + if self._load_mask: + masks.append(self._load_mask) + + value = cse_compute(f"{value}.to({compute_type})") + value = cse_compute(f"tl.broadcast_to({value}, {self.dense_size_str()})") + init = cse_compute(f"tl.full([], {init}, {compute_type})") + if masks: + cond = " & ".join(masks) + masked_value = cse_compute(TritonKernelOverrides.where(cond, value, init)) + else: + masked_value = value + + combine_helper_fn = self._lift_helper(combine_fn, 2) + dim = self.triton_tensor_ndim() - 1 + assert dim == 0, "" + + block_sum = cse_compute( + f"tl.reduce({masked_value}, {dim}, {combine_helper_fn})" + ) + exclusive_prefix = self.cse.newvar() + if element_nbits == 64: + self.compute.splice( + f""" + {exclusive_prefix} = triton_helpers.exclusive_scan_decoupled_lookback_64( + {scratch_base}, + 
{block_sum}, + {self.range_trees[-1].get_pid()}, + {combine_helper_fn}, + {init}, + ) + """, + strip=True, + ) + + else: + assert element_nbits <= 32 + value_as_uint_dtype = f"tl.uint{element_nbits}" + + self.compute.splice( + f""" + {exclusive_prefix} = triton_helpers.exclusive_scan_decoupled_lookback( + {scratch_base}, + {block_sum}, + {self.range_trees[-1].get_pid()}, + {combine_helper_fn}, + {init}, + DTYPE_VALUE_AS_UINT={value_as_uint_dtype}, + DTYPE_PACK={scratch_type}, + ) + """, + strip=True, + ) + # Compute final cumsum + block_scan = cse_compute( + f"tl.associative_scan({masked_value}, {dim}, {combine_helper_fn})" + ) + return cse_compute(f"{combine_helper_fn}({exclusive_prefix}, {block_scan})") + + def _get_heuristic(self): + return "split_scan" + + def _get_grid_fn(self): + return "split_scan_grid" diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/triton_utils.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/triton_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d1f58187ca4a4ee3c78b926a1262aee9c079c01b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/triton_utils.py @@ -0,0 +1,130 @@ +from typing import Any, Dict, List, Optional + +import torch + +from .. import config +from ..utils import _type_of, instance_descriptor +from ..virtualized import V +from .common import KernelArgType, SizeArg, TensorArg, WorkspaceArg + + +def signature_of(arg: KernelArgType, *, size_dtype: str) -> str: + if isinstance(arg, TensorArg): + # TODO: Remove fp8 special handling when Triton supports PyTorch fp8 dtypes. + # Related PR: https://github.com/openai/triton/pull/2279/ + if arg.dtype == torch.float8_e4m3fn: + tye = "*fp8e4nv" + elif arg.dtype == torch.float8_e5m2: + tye = "*fp8e5" + elif arg.dtype == torch.float8_e4m3fnuz: + tye = "*fp8e4b8" + elif arg.dtype == torch.float8_e5m2fnuz: + tye = "*fp8e5b16" + else: + tye = _type_of(arg.dtype) + if V.graph.is_unspec_arg(arg.buffer): + # had unwrapped 0d tensor as scalar + new_tye = tye.lstrip("*") + if new_tye in ["fp16", "bf16"]: + return "fp32" + else: + return new_tye + else: + return tye + if isinstance(arg, SizeArg): + if arg.expr is None: + # From triton/runtime/jit.py + # `None` is nullptr. Implicitly convert to *i8. 
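The scan codegen above follows the decoupled look-back scheme: each block computes a local inclusive scan and a block total, reads the exclusive prefix of all preceding block totals through the zero-filled workspace, and combines the two. A sequential pure-Python model of that composition (no Triton, just the algebra):

```python
from itertools import accumulate

def split_scan(blocks, combine, identity):
    # Per-block inclusive scans would run in parallel on the GPU.
    block_totals = [list(accumulate(b, combine))[-1] for b in blocks]
    # Decoupled look-back: exclusive prefix of the preceding block totals.
    prefixes = [identity] + list(accumulate(block_totals, combine))[:-1]
    out = []
    for prefix, block in zip(prefixes, blocks):
        out.extend(combine(prefix, x) for x in accumulate(block, combine))
    return out

assert split_scan([[1, 2], [3, 4], [5]], lambda a, b: a + b, 0) == [1, 3, 6, 10, 15]
```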
+ return "*i8" + elif isinstance(arg.expr, float): + return "fp32" + if size_dtype == "tl.int32": + return "i32" + elif size_dtype == "tl.int64": + return "i64" + else: + raise NotImplementedError(f"unhandled size_dtype {size_dtype}") + if isinstance(arg, WorkspaceArg): + return "*i8" + raise NotImplementedError(f"unhandled {type(arg)}: {arg}") + + +def signature_to_meta( + signature: List[KernelArgType], + *, + size_dtype: str, + indices: Optional[List[int]] = None, +) -> Dict[int, str]: + if indices is None: + indices = list(range(len(signature))) + return { + i: signature_of(arg, size_dtype=size_dtype) + for i, arg in zip(indices, signature) + } + + +def config_of( + args: List[KernelArgType], + *, + indices: Optional[List[int]] = None, +) -> Any: + if indices is None: + indices = list(range(len(args))) + + def is_aligned(x: KernelArgType, alignment: int, include_tensor: bool) -> bool: + """ + Roughly follow triton code here: + https://github.com/openai/triton/blob/5282ed890d453e10b9ee30076ef89115dd197761/python/triton/runtime/jit.py#L208-L222 + """ + if isinstance(x, TensorArg): + if include_tensor: + offset_aligned = V.graph.sizevars.statically_known_multiple_of( + x.offset * x.dtype.itemsize, alignment # type: ignore[arg-type] + ) + return offset_aligned and not V.graph.scheduler.is_unaligned_buffer( + x.buffer + ) + else: + return False + if isinstance(x, SizeArg): + # TODO(voz): These are kinda redundant, if we can solve out statically_known_multiple_of with + # _maybe_evaluate_static... + if x.name.startswith("load_seed_offset"): + return False + if x.expr is None: + return False + if isinstance(x.expr, float): + return False + return V.graph.sizevars.statically_known_multiple_of(x.expr, alignment) # type: ignore[arg-type] + if isinstance(x, WorkspaceArg): + return V.graph.sizevars.statically_known_multiple_of(x.nbytes, alignment) # type: ignore[arg-type] + raise NotImplementedError(f"unhandled {type(x)}: {x}") + + if config.triton.divisible_by_16: + divisible_by_16 = tuple( + i + for i, arg in zip(indices, args) + if is_aligned(arg, alignment=16, include_tensor=True) + ) + else: + divisible_by_16 = () + divisible_by_8 = tuple( + i + for i, arg in zip(indices, args) + if is_aligned(arg, alignment=8, include_tensor=False) + ) + + equal_to_1 = tuple( + i + for i, arg in zip(indices, args) + if isinstance(arg, SizeArg) + and arg.expr is not None + and V.graph.sizevars.statically_known_equals(arg.expr, 1) # type: ignore[arg-type] + ) + # ids_of_folded_args is set from equal_to_1 + # and None args by the Triton compiler + ids_of_folded_args = tuple(equal_to_1) + + return instance_descriptor( + divisible_by_16, equal_to_1, ids_of_folded_args, divisible_by_8 + ) diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/codegen/wrapper.py b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..fa5274f28bbe0034747112fba7e732570249eb8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_inductor/codegen/wrapper.py @@ -0,0 +1,1543 @@ +import collections +import contextlib +import dataclasses +import functools +import inspect +import operator +import re +from itertools import count +from typing import ( + Any, + Callable, + Dict, + Iterator, + List, + Optional, + Set, + Tuple, + TYPE_CHECKING, + Union, +) + +import sympy +from sympy import Expr + +import torch +import torch._ops +from torch._dynamo.utils import counters, dynamo_timed + +from torch._inductor.codegen.multi_kernel import 
MultiKernelState +from torch.fx.experimental.symbolic_shapes import SymTypes +from torch.fx.node import _get_qualified_name +from torch.utils._sympy.singleton_int import SingletonInt + +from .. import codecache, config, ir +from ..ir import ReinterpretView +from ..utils import ( + cache_on_self, + get_benchmark_name, + LineContext, + sympy_product, + sympy_str, +) +from ..virtualized import V +from .common import CodeGen, DeferredLine, IndentedBuffer, PythonPrinter +from .triton_utils import config_of, signature_to_meta + +if TYPE_CHECKING: + import triton + + from ..graph import GraphLowering + + +pexpr = PythonPrinter().doprint + + +ReuseKey = Tuple[torch.device, torch.dtype, str] + + +def buffer_reuse_key(node: ir.Buffer) -> ReuseKey: + return ( + node.get_device(), + node.get_dtype(), + # NB: this is symbolic so that we don't try to reuse a buffer + # for s0 for s1, just because they happen to share the same + # size hint + sympy_str(V.graph.sizevars.simplify(node.layout.storage_size())), + ) + + +def convert_arg_type(arg: torch.Argument) -> str: + from .cpp import CONTAINER_PYTHON_TO_CPP, PYTHON_TO_CPP + + # use x.real_type instead of x.type so that we get ScalarType instead of int + python_type = repr(arg.real_type) # type: ignore[attr-defined] + + if python_type == "Tensor": + # Conversions rules follow https://github.com/pytorch/pytorch/tree/main/aten/src/ATen/native#func + if arg.alias_info is not None and arg.alias_info.is_write: + return f"at::{python_type}&" + else: + return f"at::{python_type} const&" + + if python_type in PYTHON_TO_CPP: + cpp_type = PYTHON_TO_CPP[python_type] + return cpp_type + + # Convert args of container types e.g. Optional[*] + for py_container, cpp_container in CONTAINER_PYTHON_TO_CPP.items(): + container_match = re.findall(py_container + r"\[([a-zA-Z_]+)]", python_type) + if len(container_match) == 1: + contained_type = container_match[0] + assert ( + contained_type in PYTHON_TO_CPP + ), f"unsupported {py_container} type in convert_arg_type: {contained_type}" + cpp_contained_type = PYTHON_TO_CPP[contained_type] + return f"{cpp_container}<{cpp_contained_type}>" + + raise AssertionError(f"unsupport python_type: {python_type}") + + +def convert_return_type(ret: torch.Argument) -> str: + # use x.real_type instead of x.type so that we get ScalarType instead of int + python_type = repr(ret.real_type) # type: ignore[attr-defined] + python_to_cpp = { + "Tensor": "at::Tensor", + "List[Tensor]": "std::vector", + } + + cpp_type = python_to_cpp.get(python_type, None) + assert cpp_type is not None, f"NYI return type: {python_type}" + # An output aliasing an input is returned by reference only when it's a + # Tensor, not when it's a Tensor[]. For example, aten.split.Tensor's output + # aliases the input tensor, but the op returns a vector by value. 
+ if python_type == "Tensor" and ret.alias_info is not None: + cpp_type += "&" + return cpp_type + + +def get_cpp_op_schema(kernel: torch._ops.OpOverload) -> str: + args = kernel._schema.arguments + returns = kernel._schema.returns + + num_returns = len(returns) + assert num_returns > 0, "must have at least one return value" + + if num_returns == 1: + cpp_return_value = convert_return_type(returns[0]) + elif num_returns > 1: + tuple_returns = ", ".join([convert_return_type(r) for r in returns]) + cpp_return_value = f"std::tuple<{tuple_returns}>" + + cpp_arg_type = [f"{convert_arg_type(arg)} {arg.name}" for arg in args] + return f"{cpp_return_value}({', '.join(cpp_arg_type)})" # type: ignore[possibly-undefined] + + +# TODO: Move to a well known place +TritonMetaParams = Dict[str, int] +TritonGrid = Union[ + Tuple[Union[int, sympy.Expr], ...], Callable[[TritonMetaParams], Tuple[int, ...]] +] + + +def user_defined_kernel_grid_fn_code( + name: str, + configs: List["triton.Config"], + grids: List[TritonGrid], + wrapper: Optional["WrapperCodeGen"] = None, +) -> Tuple[str, str]: + output = IndentedBuffer() + + def _convert_to_sympy_expr(item: Union[int, sympy.Expr]) -> sympy.Expr: + return item if isinstance(item, sympy.Expr) else sympy.Integer(item) + + def determine_grid(grid: TritonGrid): + if wrapper is None or callable(grid): + # return as-is when used in eager mode or when grid is callable + return grid + # Grid contains ints/Expr, so utilize wrapper's expr printer for codegen + sympy_grid = tuple(_convert_to_sympy_expr(g) for g in grid) + return wrapper.codegen_shape_tuple(sympy_grid) + + fn_name = f"grid_wrapper_for_{name}" + output.writeline(f"def {fn_name}(meta):") + with output.indent(): + if len(grids) == 1: + grid = determine_grid(grids[0]) + output.writeline(f"return {grid}") + else: + assert len(grids) > 1 + assert len(grids) == len(configs) + seen = set() + for grid, c in zip(grids, configs): + guards = [f"meta['{name}'] == {val}" for name, val in c.kwargs.items()] + guards = " and ".join(guards) + grid = determine_grid(grid) + statement = f"if {guards}: return {grid}" + if statement in seen: + continue + seen.add(statement) + output.writeline(statement) + + return fn_name, output.getvalue() + + +@dataclasses.dataclass +class SymbolicCallArg: + inner: str + # the original symbolic expression represented by inner + inner_expr: sympy.Expr + + def __str__(self): + return str(self.inner) + + +# Default thread stack sizes vary by platform: +# - Linux: 8 MB +# - macOS: 512 KB +# - Windows: 1 MB +# Just pick something comfortably smaller than the smallest for now. 
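user_defined_kernel_grid_fn_code above covers autotuned user-defined Triton kernels whose launch grid differs per config: the emitted wrapper function guards each grid expression on that config's meta-parameters. A hypothetical standalone generator showing the same shape of output:

```python
def grid_wrapper_source(name, configs_and_grids):
    # configs_and_grids: list of (config_kwargs, grid_tuple) pairs
    lines, seen = [f"def grid_wrapper_for_{name}(meta):"], set()
    for kwargs, grid in configs_and_grids:
        guard = " and ".join(f"meta['{k}'] == {v}" for k, v in kwargs.items())
        stmt = f"    if {guard}: return {grid}"
        if stmt not in seen:  # identical guard/grid pairs are emitted once
            seen.add(stmt)
            lines.append(stmt)
    return "\n".join(lines)

print(grid_wrapper_source("my_kernel",
                          [({"BLOCK": 64}, (128, 1, 1)), ({"BLOCK": 128}, (64, 1, 1))]))
```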
+MAX_STACK_ALLOCATION_SIZE = 1024 * 100 + + +class MemoryPlanningState: + def __init__(self): + super().__init__() + self.reuse_pool: Dict[ + ReuseKey, List[FreeIfNotReusedLine] + ] = collections.defaultdict(list) + self.total_allocated_buffer_size: int = 0 + + def __contains__(self, key: ReuseKey) -> bool: + return bool(self.reuse_pool.get(key, None)) + + def pop(self, key: ReuseKey) -> "FreeIfNotReusedLine": + item = self.reuse_pool[key].pop() + assert not item.is_reused + return item + + def push(self, key: ReuseKey, item: "FreeIfNotReusedLine") -> None: + assert not item.is_reused + self.reuse_pool[key].append(item) + + +class WrapperLine: + pass + + +@dataclasses.dataclass +class EnterSubgraphLine(WrapperLine): + wrapper: "WrapperCodeGen" + graph: "GraphLowering" + + def codegen(self, code: IndentedBuffer) -> None: + self.wrapper.push_codegened_graph(self.graph) + code.do_indent() + + +@dataclasses.dataclass +class ExitSubgraphLine(WrapperLine): + wrapper: "WrapperCodeGen" + + def codegen(self, code: IndentedBuffer) -> None: + self.wrapper.pop_codegened_graph() + code.do_unindent() + + +@dataclasses.dataclass +class EnterDeviceContextManagerLine(WrapperLine): + device_idx: int + last_seen_device_guard_index: Optional[int] + + def codegen(self, code: IndentedBuffer) -> None: + if V.graph.cpp_wrapper: + code.writeline("\n") + if V.graph.aot_mode: + # In AOT mode, we have a stream provided as a param. A stream is + # associated with a device, so we never expect the device to change. + # CUDAStreamGuard sets the stream and the device. + if self.last_seen_device_guard_index is None: + if config.abi_compatible: + code.writeline( + "AOTICudaStreamGuard stream_guard(stream, this->device_idx_);" + ) + else: + code.writeline( + "at::cuda::CUDAStreamGuard stream_guard(" + + "at::cuda::getStreamFromExternal(stream, this->device_idx_));" + ) + else: + assert ( + self.last_seen_device_guard_index == self.device_idx + ), "AOTInductor only supports running on one CUDA device" + else: + if self.last_seen_device_guard_index is None: + code.writeline( + f"AOTICudaGuard device_guard({self.device_idx});" + if config.abi_compatible + else f"at::cuda::CUDAGuard device_guard({self.device_idx});" + ) + else: + code.writeline(f"device_guard.set_index({self.device_idx});") + else: + # Note _DeviceGuard has less overhead than device, but only accepts + # integers + code.writeline(f"with {V.graph.device_ops.device_guard(self.device_idx)}:") + code.do_indent() + code.writeline(V.graph.device_ops.set_device(self.device_idx)) + + +class ExitDeviceContextManagerLine(WrapperLine): + def codegen(self, code: IndentedBuffer) -> None: + if not V.graph.cpp_wrapper: + code.do_unindent() + + +@dataclasses.dataclass +class MemoryPlanningLine(WrapperLine): + wrapper: "WrapperCodeGen" + + def plan(self, state: MemoryPlanningState) -> "MemoryPlanningLine": + """First pass to find reuse""" + return self + + def codegen(self, code: IndentedBuffer) -> None: + """Second pass to output code""" + pass + + def __str__(self) -> str: + """ + Emits a string representation that fits on one line. 
+ """ + args: List[str] = [] + for field in dataclasses.fields(self): + if field.name == "wrapper": + continue + val = getattr(self, field.name) + args.append( + f"{field.name}={val.get_name() if field.type is ir.Buffer else val}" + ) + return f"{type(self).__name__}({', '.join(args)})" + + +@dataclasses.dataclass +class AllocateLine(MemoryPlanningLine): + node: ir.Buffer + + def plan(self, state: MemoryPlanningState) -> MemoryPlanningLine: + if self.node.get_name() in V.graph.removed_buffers: + return NullLine(self.wrapper) + + # try to reuse a recently freed buffer + key = buffer_reuse_key(self.node) + if config.allow_buffer_reuse and key in state: + free_line = state.pop(key) + free_line.is_reused = True + return ReuseLine(self.wrapper, free_line.node, self.node) + + if self.node.get_device().type == "cpu": + static_shape = self.wrapper.static_shape_for_buffer_or_none(self.node) + if static_shape is not None: + state.total_allocated_buffer_size += int( + functools.reduce(operator.mul, static_shape, 1) + ) + + return self + + def codegen(self, code: IndentedBuffer) -> None: + assert self.node.get_name() not in V.graph.removed_buffers + line = self.wrapper.make_buffer_allocation(self.node) + code.writeline(line) + + +@dataclasses.dataclass +class FreeIfNotReusedLine(MemoryPlanningLine): + node: ir.Buffer + is_reused: bool = False + + def plan(self, state: MemoryPlanningState) -> MemoryPlanningLine: + if isinstance(self.node.layout, (ir.AliasedLayout, ir.MultiOutputLayout)): + return self + assert not self.is_reused + if self.node.get_name() in V.graph.removed_buffers: + return NullLine(self.wrapper) + if config.allow_buffer_reuse: + state.push(buffer_reuse_key(self.node), self) + return self + + def codegen(self, code: IndentedBuffer) -> None: + assert self.node.get_name() not in V.graph.removed_buffers + if not self.is_reused: + code.writeline(self.wrapper.make_buffer_free(self.node)) + + +@dataclasses.dataclass +class ReuseLine(MemoryPlanningLine): + node: ir.Buffer + reused_as: ir.Buffer + delete_old: bool = True + + def plan(self, state: MemoryPlanningState) -> MemoryPlanningLine: + if self.node.get_name() in V.graph.removed_buffers: + assert self.reused_as.get_name() in V.graph.removed_buffers + return NullLine(self.wrapper) + assert self.reused_as.get_name() not in V.graph.removed_buffers + return self + + def codegen(self, code: IndentedBuffer) -> None: + assert self.node.get_name() not in V.graph.removed_buffers + assert self.reused_as.get_name() not in V.graph.removed_buffers + code.writeline( + self.wrapper.make_buffer_reuse(self.node, self.reused_as, self.delete_old) + ) + + +class NullLine(MemoryPlanningLine): + pass + + +BufferName = str + + +class WrapperCodeGen(CodeGen): + """ + Generate outer wrapper in Python that calls the kernels. 
+ """ + + def __init__(self): + super().__init__() + self._names_iter: Iterator[int] = count() + self.header = IndentedBuffer() + self.prefix = IndentedBuffer() + self.suffix = IndentedBuffer() + self.wrapper_call = IndentedBuffer() + # If the generated source code is exactly the same, reuse the + # pre-existing kernel for it + self.src_to_kernel: Dict[str, str] = {} + self.kernel_numel_expr: Set[Tuple[str, "GraphLowering"]] = set() + self.lines: List[Union[MemoryPlanningLine, LineContext]] = [] + self.declare = "" + self.declare_maybe_reference = "" + self.ending = "" + self.open_bracket = "[" + self.closed_bracket = "]" + self.comment = "#" + self.namespace = "" + self.none_str = "None" + self.size = "size()" + self.stride = "stride()" + self.last_seen_device_guard_index: Optional[int] = None + self.supports_intermediate_hooks = True + self.expr_printer = pexpr + self.user_defined_kernel_cache: Dict[Tuple[Any, ...], Tuple[str, Any]] = {} + self.unbacked_symbol_decls: Set[str] = set() # str of sympy.Symbol + self.allow_stack_allocation: Optional[bool] = None + self.stack_allocated_buffers: Dict[BufferName, ir.Buffer] = {} + self.computed_sizes: Set[sympy.Symbol] = set() + + # this is used for tracking which GraphLowering instance---parent graph + # or (nested) subgraph---is currently codegened; the primary use case is + # including the graph instance into a cache key to avoid cross-graph + # caching during lowering of nested subgraphs + self.codegened_graph_stack = [V.graph] + + self.write_header() + self.write_prefix() + + if not V.graph.aot_mode: + for name, hashed in V.graph.constant_reprs.items(): + # include a hash so our code cache puts different constants into different files + self.write_constant(name, hashed) + + self.allocated: Set[BufferName] = set() + self.freed: Set[BufferName] = set() + + # maps from reusing buffer to reused buffer + self.reuses: Dict[BufferName, BufferName] = dict() + + self.write_get_raw_stream = functools.lru_cache(None)( # type: ignore[assignment] + self.write_get_raw_stream + ) + + @functools.lru_cache(None) + def add_import_once(line: str) -> None: + self.header.writeline(line) + + self.add_import_once = add_import_once + self._metas: Dict[str, str] = {} + self.multi_kernel_state = MultiKernelState() + + def write_constant(self, name: str, hashed: str) -> None: + self.header.writeline(f"{name} = None # {hashed}") + + def write_header(self) -> None: + self.header.splice( + f""" + from ctypes import c_void_p, c_long + import torch + import math + import random + import os + import tempfile + from math import inf, nan + from torch._inductor.hooks import run_intermediate_hooks + from torch._inductor.utils import maybe_profile + from torch._inductor.codegen.memory_planning import _align as align + + from torch import device, empty_strided + from {codecache.__name__} import AsyncCompile + from torch._inductor.select_algorithm import extern_kernels + from torch._inductor.codegen.multi_kernel import MultiKernelCall + + aten = torch.ops.aten + inductor_ops = torch.ops.inductor + assert_size_stride = torch._C._dynamo.guards.assert_size_stride + empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu + empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda + alloc_from_pool = torch.ops.inductor._alloc_from_pool + reinterpret_tensor = torch.ops.inductor._reinterpret_tensor + async_compile = AsyncCompile() + + """ + ) + + @cache_on_self + def write_triton_header_once(self) -> None: + self.header.splice( + """ + import triton + import 
triton.language as tl + from torch._inductor.triton_heuristics import grid, split_scan_grid, start_graph, end_graph + {} + """.format( + V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") + ) + ) + + def add_meta_once(self, meta: TritonMetaParams) -> str: + meta = repr(meta) + if meta not in self._metas: + var = f"meta{len(self._metas)}" + self._metas[meta] = var + self.header.writeline(f"{var} = {meta}") + return self._metas[meta] + + @cache_on_self + def get_output_refs(self) -> List[str]: + return [x.codegen_reference(self.wrapper_call) for x in V.graph.graph_outputs] + + def mark_output_type(self) -> None: + return + + def codegen_input_size_asserts(self) -> None: + for name, buf in V.graph.graph_inputs.items(): + if isinstance(buf, sympy.Expr): + continue + + # comparing strides for 0 size tensor is tricky. Ignore them for now. + if sympy_product(buf.get_size()) == 0: + continue + size = self.codegen_shape_tuple(buf.get_size()) + stride = self.codegen_shape_tuple(buf.get_stride()) + self.prefix.writeline(f"assert_size_stride({name}, {size}, {stride})") + + def codegen_input_nan_asserts(self) -> None: + self.prefix.writeline("# make sure graph inputs are not nan/inf") + for name, buf in V.graph.graph_inputs.items(): + if isinstance(buf, sympy.Expr): + continue + + line = f"assert not {name}.isnan().any().item()" + self.prefix.writeline(line) + line = f"assert not {name}.isinf().any().item()" + self.prefix.writeline(line) + + def write_prefix(self) -> None: + self.prefix.splice( + """ + + async_compile.wait(globals()) + del async_compile + + def call(args): + """ + ) + with self.prefix.indent(): + if config.triton.debug_sync_graph: + self.prefix.writeline(V.graph.device_ops.synchronize()) + if V.graph.graph_inputs: + lhs = ", ".join(V.graph.graph_input_names) + if len(V.graph.graph_input_names) == 1: + lhs += "," + self.prefix.writeline(f"{lhs} = args") + self.prefix.writeline("args.clear()") + + self.codegen_inputs(self.prefix, V.graph.graph_inputs) + if config.size_asserts: + self.codegen_input_size_asserts() + if config.nan_asserts: + self.codegen_input_nan_asserts() + + # this function (and below) takes a graph as input so + # that stream caching happens per graph instance. this + # is important for nested subgraph codegening. 
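add_meta_once above interns repeated Triton meta dictionaries as module-level variables so identical metadata is written into the generated wrapper only once. A small standalone model of that caching (MetaCache is a hypothetical stand-in, not part of Inductor):

```python
class MetaCache:
    def __init__(self):
        self._metas = {}        # repr(meta) -> variable name
        self.header_lines = []  # lines that would go into the wrapper header

    def add_meta_once(self, meta: dict) -> str:
        key = repr(meta)
        if key not in self._metas:
            var = f"meta{len(self._metas)}"
            self._metas[key] = var
            self.header_lines.append(f"{var} = {key}")
        return self._metas[key]

c = MetaCache()
assert c.add_meta_once({"BLOCK": 64}) == c.add_meta_once({"BLOCK": 64}) == "meta0"
```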
+ def write_get_raw_stream(self, device_idx: int, graph=None) -> str: + self.write_triton_header_once() + name = f"stream{device_idx}" + self.writeline(f"{name} = get_raw_stream({device_idx})") + return name + + def get_codegened_graph(self): + return self.codegened_graph_stack[-1] + + def push_codegened_graph(self, graph): + self.codegened_graph_stack.append(graph) + + def pop_codegened_graph(self): + return self.codegened_graph_stack.pop() + + def next_kernel_suffix(self) -> str: + return f"{next(self._names_iter)}" + + def codegen_device_guard_enter(self, device_idx: int) -> None: + self.writeline( + EnterDeviceContextManagerLine(device_idx, self.last_seen_device_guard_index) + ) + self.last_seen_device_guard_index = device_idx + + def codegen_device_guard_exit(self) -> None: + self.writeline(ExitDeviceContextManagerLine()) + + def generate_return(self, output_refs: List[str]) -> None: + if output_refs: + self.wrapper_call.writeline("return (" + ", ".join(output_refs) + ", )") + else: + self.wrapper_call.writeline("return ()") + + def generate_before_suffix(self, result: IndentedBuffer) -> None: + return + + def generate_end(self, result: IndentedBuffer) -> None: + return + + def generate_fallback_kernel(self, fallback_kernel, args): + self.generate_extern_kernel_alloc(fallback_kernel, args) + + def generate_extern_kernel_alloc(self, extern_kernel, args): + output_name = extern_kernel.get_name() + origin_node = extern_kernel.get_origin_node() + kernel_name = extern_kernel.get_kernel_name() + ending = self.ending + if config.memory_planning and "view_as_complex" in kernel_name: + # view operation fallbacks cause issues since inductor + # doesn't know the memory is still needed and might reuse it. + ending = f".clone(){ending}" + self.writeline( + f"{self.declare}{output_name} = {kernel_name}({', '.join(args)}){ending}" + ) + if ( + self.supports_intermediate_hooks + and config.generate_intermediate_hooks + and origin_node is not None + ): + counters["inductor"]["intermediate_hooks"] += 1 + self.writeline( + f"run_intermediate_hooks({origin_node.name!r}, {output_name})" + ) + + def generate_extern_kernel_out(self, output_view, codegen_reference, args, kernel): + if output_view: + args.append(f"out={output_view.codegen_reference()}") + else: + args.append(f"out={codegen_reference}") + self.writeline(f"{kernel}({', '.join(args)})") + + def generate_user_defined_triton_kernel( + self, kernel_name, grid, configs, args, triton_meta + ): + grid, code = user_defined_kernel_grid_fn_code( + kernel_name, configs, grid, wrapper=self + ) + # Must happen after free symbols are already codegened + # Emit the grid wrapper function right before the call + for line in code.split("\n"): + self.writeline(line) + + stream_name = self.write_get_raw_stream( + V.graph.scheduler.current_device.index, V.graph + ) + self.writeline( + f"{kernel_name}.run({', '.join(args)}, grid={grid}, stream={stream_name})" + ) + + def generate_scatter_fallback( + self, output, inputs, kernel, python_kernel_name, src_is_tensor, reduce, kwargs + ): + line = f"{kernel}({','.join(map(str, inputs))}" + if kernel == "aten.scatter_": + if reduce: + line += f", reduce={repr(reduce)}" + else: + line += ", ".join([""] + kwargs) + line += f"){self.ending}" + self.writeline(line) + + def generate_index_put_fallback(self, kernel, x, indices, values, accumulate): + indices_str = f"{self.open_bracket}{', '.join(indices)}{self.closed_bracket}" + args = [x, indices_str, values, accumulate] + self.writeline(self.wrap_kernel_call(kernel, args)) + + 
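The MemoryPlanningLine machinery above (AllocateLine, FreeIfNotReusedLine, ReuseLine) is resolved by a planning pass: freed buffers are pooled under a (device, dtype, symbolic storage size) reuse key, and a later allocation with a matching key becomes a reuse instead of a fresh allocation. A simplified pure-Python model of that pass, using a hypothetical tuple encoding of the wrapper lines; the real pass also marks the free as reused so it emits nothing:

```python
from collections import defaultdict

def plan_reuse(lines):
    # lines: sequence of ("alloc" | "free", buffer_name, reuse_key) tuples
    pool = defaultdict(list)   # reuse_key -> recently freed buffer names
    planned = []
    for op, name, key in lines:
        if op == "free":
            pool[key].append(name)
            planned.append(("free_if_not_reused", name))
        elif pool[key]:
            planned.append(("reuse", pool[key].pop(), name))
        else:
            planned.append(("alloc", name))
    return planned

print(plan_reuse([("alloc", "buf0", "k"), ("free", "buf0", "k"), ("alloc", "buf1", "k")]))
# [('alloc', 'buf0'), ('free_if_not_reused', 'buf0'), ('reuse', 'buf0', 'buf1')]
```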
def generate_extern_kernel_alloc_and_find_schema_if_needed( + self, + name, + kernel, + codegen_args, + cpp_op_schema, + cpp_kernel_key, + cpp_kernel_overload_name="", + op_overload=None, + raw_args=None, + outputs=None, + ): + self.writeline(f"{name} = {kernel}({', '.join(codegen_args)})") + + def generate_inf_and_nan_checker(self, node): + # TODO: Add check for python too. + pass + + @dynamo_timed + def generate(self, is_inference): + if config.profile_bandwidth: + self.write_triton_header_once() + result = IndentedBuffer() + result.splice(self.header) + + with contextlib.ExitStack() as stack: + stack.enter_context(self.wrapper_call.indent()) + if config.profiler_mark_wrapper_call: + self.generate_profiler_mark_wrapper_call(stack) + if config.profile_bandwidth: + self.generate_start_graph() + + # We disable planning during training because it presently increases peak memory consumption. + if is_inference and config.memory_planning: + self.memory_plan() + # TODO: integrate memory planning & stack allocation? + self.allow_stack_allocation = False + else: + self.memory_plan_reuse() + + if config.triton.store_cubin: + self.generate_reset_kernel_saved_flags() + + for line in self.lines: + if isinstance(line, WrapperLine): + line.codegen(self.wrapper_call) + else: + self.wrapper_call.writeline(line) + + output_refs = self.get_output_refs() + self.mark_output_type() + if config.triton.debug_sync_graph: + self.wrapper_call.writeline(V.graph.device_ops.synchronize()) + + if config.profile_bandwidth: + self.generate_end_graph() + + if config.triton.store_cubin: + self.generate_save_uncompiled_kernels() + + self.generate_return(output_refs) + + self.finalize_prefix() + result.splice(self.prefix) + + with result.indent(): + result.splice(self.wrapper_call) + + self.generate_before_suffix(result) + result.splice(self.suffix) + + self.generate_end(result) + + self.add_benchmark_harness(result) + + return result.getvaluewithlinemap() + + def memory_plan(self): + from .memory_planning import MemoryPlanner + + self.lines = MemoryPlanner(self).plan(self.lines) + + def memory_plan_reuse(self): + out_names = V.graph.get_output_names() + + while ( + self.lines + and isinstance(self.lines[-1], MemoryPlanningLine) + # TODO: this seems legit, NullLine has no node + and self.lines[-1].node.name not in out_names # type: ignore[attr-defined] + ): + # these lines will be pointless + self.lines.pop() + + # codegen allocations in two passes + planning_states = [MemoryPlanningState()] + past_planning_states = [] + for i in range(len(self.lines)): + line = self.lines[i] + if isinstance(line, MemoryPlanningLine): + self.lines[i] = line.plan(planning_states[-1]) + elif isinstance(line, EnterSubgraphLine): + planning_states.append(MemoryPlanningState()) + elif isinstance(line, ExitSubgraphLine): + past_planning_states.append(planning_states.pop()) + past_planning_states.append(planning_states.pop()) + assert len(planning_states) == 0 + + # conservatively use the sum of all allocated buffer sizes + # in potentially nested scopes as the total allocated size + total_allocated_buffer_size = sum( + s.total_allocated_buffer_size for s in past_planning_states + ) + + self.allow_stack_allocation = ( + self.allow_stack_allocation is not False + and config.allow_stack_allocation + and total_allocated_buffer_size <= MAX_STACK_ALLOCATION_SIZE + ) + + def codegen_input_size_var_decl(self, code: IndentedBuffer, name): + code.writeline(f"{self.declare}{name}_size = {name}.{self.size}{self.ending}") + + def 
codegen_input_stride_var_decl(self, code: IndentedBuffer, name): + code.writeline( + f"{self.declare}{name}_stride = {name}.{self.stride}{self.ending}" + ) + + def codegen_inputs( + self, code: IndentedBuffer, graph_inputs: Dict[str, ir.TensorBox] + ): + """Assign all symbolic shapes to locals""" + + @functools.lru_cache(None) + def sizeof(name): + self.codegen_input_size_var_decl(code, name) + return f"{name}_size" + + @functools.lru_cache(None) + def strideof(name): + self.codegen_input_stride_var_decl(code, name) + return f"{name}_stride" + + # Assign all symbolic shapes needed to local variables + needed = V.graph.sizevars.free_symbols() + + def is_expr(x): + return isinstance(x[1], sympy.Expr) + + graph_inputs_expr = list(filter(is_expr, graph_inputs.items())) + graph_inputs_tensors = list( + filter(lambda x: not is_expr(x), graph_inputs.items()) + ) + + for name, shape in graph_inputs_expr: + shape = V.graph.sizevars.simplify(shape) # type: ignore[arg-type] + if shape in needed: + needed.remove(shape) # type: ignore[arg-type] + code.writeline(f"{self.declare}{shape} = {name}{self.ending}") + + for name, value in graph_inputs_tensors: + shapes = value.get_size() + for dim, shape in enumerate(shapes): + shape = V.graph.sizevars.simplify(shape) # type: ignore[arg-type] + if shape in needed: + needed.remove(shape) # type: ignore[arg-type] + code.writeline( + f"{self.declare}{shape} = {sizeof(name)}[{dim}]{self.ending}" + ) + + for name, value in graph_inputs_tensors: + shapes = value.get_stride() + for dim, shape in enumerate(shapes): + shape = V.graph.sizevars.simplify(shape) # type: ignore[arg-type] + if shape in needed: + needed.remove(shape) # type: ignore[arg-type] + code.writeline( + f"{self.declare}{shape} = {strideof(name)}[{dim}]{self.ending}" + ) + + def ensure_size_computed(self, sym: sympy.Symbol): + if isinstance(sym, sympy.Symbol) and sym.name.startswith("ps"): + if sym in self.computed_sizes: + return + self.computed_sizes.add(sym) + expr = V.graph.sizevars.inv_precomputed_replacements[sym] + self.writeline( + f"{self.declare}{sym} = {self.expr_printer(expr)}{self.ending}" + ) + + def finalize_prefix(self): + pass + + def codegen_python_sizevar(self, x: Expr) -> str: + return pexpr(V.graph.sizevars.simplify(x)) + + def codegen_sizevar(self, x: Expr) -> str: + return self.codegen_python_sizevar(x) + + def codegen_tuple_access(self, basename: str, name: str, index: str) -> str: + return f"{basename}[{index}]" + + def codegen_python_shape_tuple(self, shape: Tuple[Expr, ...]) -> str: + parts = list(map(self.codegen_python_sizevar, shape)) + if len(parts) == 0: + return "()" + if len(parts) == 1: + return f"({parts[0]}, )" + return f"({', '.join(parts)})" + + def codegen_shape_tuple(self, shape: Tuple[Expr, ...]) -> str: + return self.codegen_python_shape_tuple(shape) + + def codegen_alloc_from_pool(self, name, offset, dtype, shape, stride) -> str: + return "alloc_from_pool({})".format( + ", ".join( + [ + name, + pexpr(offset), # bytes not numel + str(dtype), + self.codegen_shape_tuple(shape), + self.codegen_shape_tuple(stride), + ] + ) + ) + + def codegen_reinterpret_view(self, data, size, stride, offset, writer) -> str: + size = self.codegen_shape_tuple(size) + stride = self.codegen_shape_tuple(stride) + offset = self.codegen_sizevar(offset) + return f"reinterpret_tensor({data.get_name()}, {size}, {stride}, {offset})" + + def codegen_device_copy(self, src, dst): + self.writeline(f"{dst}.copy_({src})") + + def codegen_multi_output(self, name, value): + 
self.writeline(f"{self.declare}{name} = {value}{self.ending}") + + def codegen_dynamic_scalar(self, node): + (data,) = (t.codegen_reference() for t in node.inputs) + if node.is_bool: + self.writeline(f"{node.sym} = 1 if {data}.item() else 0") + else: + self.writeline(f"{node.sym} = {data}.item()") + # No one should ever use this buffer, but for uniformity + # define the variable and assign it None + self.writeline(f"{node.get_name()} = None") + + def benchmark_compiled_module(self, output): + def add_fake_input(name, shape, stride, device, dtype): + output.writeline( + f"{name} = rand_strided(" + f"{self.codegen_python_shape_tuple(shape)}, " + f"{self.codegen_python_shape_tuple(stride)}, " + f"device='{device}', dtype={dtype})" + ) + + def add_expr_input(name, val): + output.writeline(f"{name} = {val}") + + output.writelines( + ["", "", "def benchmark_compiled_module(times=10, repeat=10):"] + ) + with output.indent(): + output.splice( + """ + from torch._dynamo.testing import rand_strided + from torch._inductor.utils import print_performance + """, + strip=True, + ) + + for name, value in V.graph.constants.items(): + # all the constants are global variables, that's why we need + # these 'global var_name' lines + output.writeline(f"global {name}") + add_fake_input( + name, value.size(), value.stride(), value.device, value.dtype + ) + + for name, value in V.graph.graph_inputs.items(): + if isinstance(value, sympy.Symbol) and isinstance( + V.graph.sizevars.var_to_val.get(value, None), SingletonInt + ): + # Inductor should only work with dense -> dense graph, and + # SingletonInts belong to metadata that should only live on + # the subclass. + continue + if isinstance(value, sympy.Expr): # Don't need to add symbolic + add_expr_input(name, V.graph.sizevars.size_hint(value)) + else: + shape = [V.graph.sizevars.size_hint(x) for x in value.get_size()] + stride = [V.graph.sizevars.size_hint(x) for x in value.get_stride()] + add_fake_input( + name, shape, stride, value.get_device(), value.get_dtype() + ) + + call_str = f"call([{', '.join(V.graph.graph_inputs.keys())}])" + output.writeline(f"fn = lambda: {call_str}") + output.writeline("return print_performance(fn, times=times, repeat=repeat)") + + def add_benchmark_harness(self, output): + """ + Append a benchmark harness to generated code for debugging + """ + if not config.benchmark_harness: + return + + self.benchmark_compiled_module(output) + + output.writelines(["", "", 'if __name__ == "__main__":']) + with output.indent(): + output.writelines( + [ + "from torch._inductor.wrapper_benchmark import compiled_module_main", + f"compiled_module_main('{get_benchmark_name()}', benchmark_compiled_module)", + ] + ) + + def define_kernel( + self, name: str, kernel: str, metadata: Optional[str] = None, cuda=True + ): + metadata_comment = f"{metadata}\n" if metadata else "" + self.header.splice(f"\n\n{metadata_comment}{name} = {kernel}") + + def define_user_defined_triton_kernel(self, kernel, configs, kwargs): + original_name = kernel.__name__ + + from .common import KernelArgType, SizeArg, TensorArg + + signature: List[KernelArgType] = [] + constants: Dict[int, Any] = {} + non_constant_indices = [] + equal_to_1_arg_idx: List[int] = [] + for idx, key in enumerate(kernel.arg_names): + if key not in kwargs: + continue + arg = kwargs[key] + if idx in kernel.constexprs: + constants[idx] = arg + else: + non_constant_indices.append(idx) + if isinstance(arg, ir.Buffer): + signature.append( + TensorArg( + name=key, + buffer=arg.get_name(), + dtype=arg.get_dtype(), 
+ ) + ) + elif isinstance(arg, ir.ReinterpretView): + # for ReinterpretView we use the underlying + # buffer name and note the (possibly non-zero) + # offset relative to the underlying buffer + signature.append( + TensorArg( + name=key, + buffer=arg.data.get_name(), + dtype=arg.get_dtype(), + offset=arg.layout.offset, + ) + ) + else: + signature.append(SizeArg(key, arg)) + if arg is not None and V.graph.sizevars.statically_known_equals(arg, 1): # type: ignore[arg-type] + equal_to_1_arg_idx.append(idx) + index_dtype = "tl.int32" + triton_meta = { + "signature": signature_to_meta( + signature, + size_dtype=index_dtype, + indices=non_constant_indices, + ), + "device": V.graph.scheduler.current_device.index, + "device_type": V.graph.scheduler.current_device.type, + # Triton compiler includes equal_to_1 args into constants even + # when they are not constexpr. otherwise there may be a segfault + # during launching the Inductor-compiled Triton kernel. + # TODO(aakhundov): add None args to constants, too. currently, this + # causes CUDA errors in test_aot_inductor.test_triton_kernel_with_none_input. + # https://github.com/pytorch/pytorch/issues/120478#issuecomment-1962822307 + # https://github.com/openai/triton/blob/231efe9ed2d200be0f69a07c298e4342b08efe3d/python/triton/runtime/jit.py#L384 + "constants": { + **constants, + **{idx: 1 for idx in equal_to_1_arg_idx}, + }, + "configs": [ + config_of( + signature, + indices=non_constant_indices, + ) + ], + } + + # Distinguish between different functions using function id + cache_key: List[Any] = [id(kernel.fn)] + if len(configs) > 0: + for arg in kwargs.values(): + # We need to key on non tensor arg only in autotune mode + if not isinstance(arg, (ir.Buffer, ir.ReinterpretView)): + cache_key.append(arg) + cache_key.append(str(triton_meta)) + cache_key = tuple(cache_key) + + if cache_key in self.user_defined_kernel_cache: + return self.user_defined_kernel_cache[cache_key] + + name = f"{original_name}_{len(self.user_defined_kernel_cache)}" + # Add to the cache for the next use + self.user_defined_kernel_cache[cache_key] = (name, triton_meta) + + compile_wrapper = IndentedBuffer() + compile_wrapper.writeline(f"async_compile.triton({original_name!r}, '''") + + from .triton import gen_common_triton_imports + + compile_wrapper.splice(gen_common_triton_imports()) + + inductor_meta = { + "kernel_name": name, + "backend_hash": torch.utils._triton.triton_hash_with_backend(), + } + + configs = [ + { + "kwargs": config.kwargs, + "num_warps": config.num_warps, + "num_stages": config.num_stages, + } + for config in configs + ] + + compile_wrapper.splice( + f""" + @triton_heuristics.user_autotune( + configs={configs!r}, + inductor_meta={inductor_meta!r}, + triton_meta={triton_meta!r}, + filename=__file__, + custom_kernel=True, + ) + @triton.jit + """ + ) + compile_wrapper.splice(kernel.src, strip=True) + + # Also include any possible kernel being called indirectly + from triton import JITFunction + + symbols_included = {original_name} + + def traverse(cur_kernel): + for symbol_name in cur_kernel.fn.__code__.co_names: + if symbol_name in symbols_included: + continue + if symbol_name in cur_kernel.fn.__globals__: + symbol = cur_kernel.fn.__globals__[symbol_name] + if isinstance(symbol, JITFunction): + compile_wrapper.newline() + compile_wrapper.writeline("@triton.jit") + compile_wrapper.splice(symbol.src, strip=True) + symbols_included.add(symbol_name) + traverse(symbol) + elif isinstance(symbol, (int, str, bool)): + compile_wrapper.newline() + 
compile_wrapper.writeline(f"{symbol_name} = {symbol!r}") + symbols_included.add(symbol_name) + + traverse(kernel) + + compile_wrapper.writeline( + f"''', device_str='{V.graph.scheduler.current_device.type}')" + ) + _, lineno = inspect.getsourcelines(kernel.fn) + srcfile = inspect.getsourcefile(kernel.fn) + metadata = f"# Original path: {srcfile}:{lineno}" + self.define_kernel( + name, + compile_wrapper.getvalue(), + metadata, + ) + return name, triton_meta + + def generate_numel_expr(self, kernel_name: str, tree): + expr = f"{kernel_name}_{tree.prefix}numel" + if (expr, V.graph) not in self.kernel_numel_expr: + # declare expr once in each graph (scope) + self.kernel_numel_expr.add((expr, V.graph)) + self.writeline( + f"{self.declare}{expr} = {self.expr_printer(tree.numel)}{self.ending}" + ) + else: + self.writeline(f"{expr} = {self.expr_printer(tree.numel)}{self.ending}") + # We can get symbolic expressions here, like s0*64 + # It is fine to have them here, but we need to handle them correctly as their own type + # This is tricky to do, so we wrap in a custom type, distinct from scalars, but also from sympy* + # scalars as well. + # This is handled in `generate_args_decl` which has a correct comment of: TODO: only works for + # constant now, need type info. I agree, this needs type info, and while this is not true type info + # it suffices as a type hint for the purposes of producing the correct code for this type. + return SymbolicCallArg(expr, tree.numel) + + def generate_workspace_allocation(self, nbytes, device, zero_fill): + line = self.make_allocation( + "workspace", device, torch.uint8, shape=(nbytes,), stride=(1,) + ) + self.writeline(line) + if zero_fill: + self.writeline(f"workspace.zero_(){self.ending}") + + def wrap_kernel_call(self, name, call_args): + return f"{name}({', '.join(call_args)}){self.ending}" + + def generate_profiler_mark_wrapper_call(self, stack): + self.wrapper_call.writeline("from torch.profiler import record_function") + self.wrapper_call.writeline( + f"with record_function('graph_{V.graph.graph_id}_inductor_wrapper_call'):" + ) + stack.enter_context(self.wrapper_call.indent()) + + def generate_start_graph(self): + self.wrapper_call.writeline("start_graph()") + + def generate_end_graph(self): + self.wrapper_call.writeline("end_graph()") + + def generate_reset_kernel_saved_flags(self): + self.wrapper_call.splice( + """ + for kernel in globals().values(): + if isinstance(kernel, torch._inductor.triton_heuristics.CachingAutotuner): + kernel.cuda_kernel_saved = False + """ + ) + + def generate_save_uncompiled_kernels(self): + """ + Precompile and save the CUBINs of the Triton kernels that haven't + been precompiled and saved as a side effect of running the generated + JIT model (Python wrapper). This can happen when the model contains + control flow: only one pass through the control flow operators covers + the kernels that are saved, the remaining kernels are not launched, + hence not saved. The main purpose of this codegen is to compile and + save the Triton kernels outside the active control flow path for + subsequent AOTInductor code generation and compilation. 
+ """ + self.wrapper_call.splice( + """ + for kernel in globals().values(): + if isinstance(kernel, torch._inductor.triton_heuristics.CachingAutotuner): + if not kernel.cuda_kernel_saved: + if len(kernel.launchers) == 0: + kernel.precompile() + kernel.save_cuda_kernel( + grid=(0, 0, 0), # use dummy grid + stream="stream", # use dummy stream + launcher=kernel.launchers[0], + ) + """ + ) + + def generate_default_grid(self, name: str, grid_args: List[Any]): + return grid_args + + def generate_kernel_call( + self, + name, + call_args, + grid=None, + device_index=None, + cuda=True, + triton=True, + arg_types=None, + grid_fn: str = "grid", + triton_meta=None, + ): + """ + Generates kernel call code. + + cuda: Defines whether the backend is GPU. Otherwise the backend is CPU. + + triton: Defines whether the GPU backend uses Triton for codegen. + Otherwise it uses the CUDA language for codegen. + Only valid when cuda == True. + """ + if cuda: + call_args_str = ", ".join(pexpr(item) for item in call_args) + stream_name = self.write_get_raw_stream( + V.graph.scheduler.current_device.index, V.graph + ) + if triton: + grid_str = ", ".join(pexpr(item) for item in grid) + grid_str = f"{grid_fn}({grid_str})" + self.writeline( + f"{name}.run({call_args_str}, grid={grid_str}, stream={stream_name})" + ) + else: + stream_ptr = f"c_void_p({stream_name})" + self.writeline(f"{name}.{name}({call_args_str}, {stream_ptr})") + else: + self.writeline(self.wrap_kernel_call(name, call_args)) + + def writeline(self, line): + self.lines.append(line) + + def enter_context(self, ctx): + self.lines.append(LineContext(ctx)) + + def val_to_cpp_arg_str(self, type_, val, is_legacy_abi) -> str: + raise NotImplementedError() + + def val_to_arg_str(self, s): + if isinstance(s, SymTypes): + return pexpr(sympy.expand(repr(s))) + elif isinstance(s, sympy.Expr): + return pexpr(s) + elif isinstance(s, (tuple, list)): + + @dataclasses.dataclass + class Shim: + ref: Any + + def __repr__(self): + return self.ref + + return repr(type(s)(Shim(self.val_to_arg_str(a)) for a in s)) + elif isinstance(s, torch._ops.OpOverload): + return _get_qualified_name(s) + elif isinstance(s, (ir.Buffer, ReinterpretView)): + return s.codegen_reference() + else: + return repr(s) + + # The following methods are for memory management + def make_buffer_allocation(self, buffer): + device = buffer.get_device() + dtype = buffer.get_dtype() + shape = tuple(buffer.get_size()) + stride = tuple(buffer.get_stride()) + return self.make_allocation(buffer.get_name(), device, dtype, shape, stride) + + def make_allocation(self, name, device, dtype, shape, stride): + if device.type in ("cpu", "cuda"): + # optimized path for faster allocations, saving ~2us versus the stuff below + return ( + f"{name} = empty_strided_{device.type}(" + f"{self.codegen_shape_tuple(shape)}, " + f"{self.codegen_shape_tuple(stride)}, " + f"{dtype})" + ) + # all other devices: + return ( + f"{name} = empty_strided(" + f"{self.codegen_shape_tuple(shape)}, " + f"{self.codegen_shape_tuple(stride)}, " + f"device='{device.type}', dtype={dtype})" + ) + + def make_tensor_alias(self, new_name, old_name, comment=""): + return f"{self.declare}{new_name} = {old_name}{self.ending} {self.comment} {comment}" + + def make_buffer_free(self, buffer): + return f"del {buffer.get_name()}" + + def make_free_by_names(self, names_to_del: List[str]): + return f"del {', '.join(name for name in names_to_del)}" + + def codegen_exact_buffer_reuse(self, old_name: str, new_name: str, del_line: str): + return 
f"{self.declare_maybe_reference}{new_name} = {old_name}{del_line}{self.ending} {self.comment} reuse" + + def make_buffer_reuse(self, old, new, delete_old: bool): + assert old.get_dtype() == new.get_dtype() + old_name = old.get_name() + new_name = new.get_name() + del_line = ";" + if old_name not in V.graph.get_output_names() and delete_old: + del_line = f"; {self.make_buffer_free(old)}" + + if old.get_size() == new.get_size() and old.get_stride() == new.get_stride(): + if old_name in self.stack_allocated_buffers: + self.stack_allocated_buffers[new_name] = new + return self.codegen_exact_buffer_reuse(old_name, new_name, del_line) + + reinterpret_view = self.codegen_reinterpret_view( + old, new.get_size(), new.get_stride(), 0, self.wrapper_call + ) + if reinterpret_view in self.stack_allocated_buffers: + self.stack_allocated_buffers[new_name] = new + return f"{self.declare_maybe_reference}{new_name} = {reinterpret_view}{del_line} {self.comment} reuse" + + def codegen_deferred_allocation(self, name, layout): + self.writeline( + DeferredLine( + name, + f"{self.declare_maybe_reference}{name} = {layout.view.codegen_reference()}{self.ending} " + f"{self.comment} alias", + ) + ) + + def codegen_allocation(self, buffer): + assert ( + buffer.get_workspace_size() == 0 + ), "Only support zero workspace size for now!" + + name = buffer.get_name() + + if name in V.graph.removed_buffers or name in self.allocated: + return + self.allocated.add(name) + if isinstance( + buffer, + (ir.ExternKernelAlloc, ir.MultiOutput), + ): + return + + layout = buffer.get_layout() + if isinstance(layout, ir.MutationLayout): + return + if isinstance(layout, ir.AliasedLayout): + assert isinstance( + layout.view, ir.ReinterpretView + ), f"unexpected {type(layout.view)}: {layout.view}" + self.codegen_allocation(layout.view.data) + self.codegen_deferred_allocation(name, layout) + return + + self.writeline(AllocateLine(self, buffer)) + + def codegen_free(self, buffer): + assert ( + buffer.get_workspace_size() == 0 + ), "Only support zero workspace size for now!" + + name = buffer.get_name() + + # can be freed but not reused + if isinstance(buffer, ir.InputBuffer): + self.writeline(self.make_buffer_free(buffer)) + return + + if not self.can_reuse(buffer): + return + self.freed.add(name) + + self.writeline(FreeIfNotReusedLine(self, buffer)) + + def can_reuse(self, input_buffer, output_buffer=None): + name = input_buffer.get_name() + if ( + name in V.graph.removed_buffers + or name in V.graph.graph_inputs + or name in V.graph.constants + or name in V.graph.never_reuse_buffers + or name in self.freed + ): + return False + + return True + + def did_reuse(self, buffer, reused_buffer): + # Check whether a given buffer was reused by a possible reuser in the wrapper codegen + # Can be consulted from inside ir codegen, e.g. 
to determine whether a copy is needed + return ( + buffer.get_name() in self.reuses + and self.reuses[buffer.get_name()] == reused_buffer.get_name() + ) + + def codegen_inplace_reuse(self, input_buffer, output_buffer): + assert buffer_reuse_key(input_buffer) == buffer_reuse_key(output_buffer) + self.codegen_allocation(input_buffer) + self.freed.add(input_buffer.get_name()) + self.allocated.add(output_buffer.get_name()) + self.reuses[output_buffer.get_name()] = input_buffer.get_name() + self.writeline(ReuseLine(self, input_buffer, output_buffer)) + + def codegen_unbacked_symbol_decl(self, symbol): + name = str(symbol) + if name in self.unbacked_symbol_decls: + return name + else: + # When in CppWrapperCpu, we should only generate the declaration once + self.unbacked_symbol_decls.add(name) + return self.declare + name + + def codegen_subgraph_prefix(self, subgraph, outer_inputs, outer_outputs): + for inner_input, outer_input in zip(subgraph.graph.graph_inputs, outer_inputs): + self.writeline(f"{self.declare}{inner_input} = {outer_input}{self.ending}") + + def codegen_subgraph_suffix(self, subgraph, outer_inputs, outer_outputs): + for inner_output, outer_output in zip( + subgraph.graph.graph_outputs, outer_outputs + ): + self.writeline( + f"{outer_output} = {inner_output.codegen_reference()}{self.ending}" + ) + + def codegen_subgraph(self, subgraph, outer_inputs, outer_outputs): + try: + self.push_codegened_graph(subgraph.graph) + self.writeline(f"{self.comment} subgraph: {subgraph.name}") + self.codegen_subgraph_prefix(subgraph, outer_inputs, outer_outputs) + parent_graph = V.graph + with V.set_graph_handler(subgraph.graph): + subgraph.graph.codegen_subgraph( + parent_graph=parent_graph, + ) + self.codegen_subgraph_suffix(subgraph, outer_inputs, outer_outputs) + finally: + self.pop_codegened_graph() + + def codegen_conditional(self, conditional): + name = conditional.get_name() + outer_inputs = [buf.codegen_reference() for buf in conditional.operands] + outer_outputs = [f"{name}[{i}]" for i in range(len(conditional.outputs))] + + self.writeline(f"{name} = [None] * {len(conditional.outputs)}") + self.writeline(f"if {conditional.predicate.codegen_reference()}.item():") + self.writeline(EnterSubgraphLine(self, conditional.true_subgraph.graph)) + self.codegen_subgraph(conditional.true_subgraph, outer_inputs, outer_outputs) + self.writeline(ExitSubgraphLine(self)) + self.writeline("else:") + self.writeline(EnterSubgraphLine(self, conditional.false_subgraph.graph)) + self.codegen_subgraph(conditional.false_subgraph, outer_inputs, outer_outputs) + self.writeline(ExitSubgraphLine(self)) + + @staticmethod + def statically_known_int_or_none(x): + try: + val = V.graph._shape_env._maybe_evaluate_static(x) + return int(val) + except Exception: + return None + + @staticmethod + def statically_known_list_of_ints_or_none(lst): + result = [] + for x in lst: + num = WrapperCodeGen.statically_known_int_or_none(x) + if num is None: + return None + result.append(num) + return result + + @staticmethod + def is_statically_known_list_of_ints(lst): + return WrapperCodeGen.statically_known_list_of_ints_or_none(lst) is not None + + @staticmethod + def static_shape_for_buffer_or_none(buffer): + return WrapperCodeGen.statically_known_list_of_ints_or_none(buffer.get_size()) + + @staticmethod + def can_prove_buffer_has_static_shape(buffer): + return WrapperCodeGen.static_shape_for_buffer_or_none(buffer) is not None diff --git
a/venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_3.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82f40dbc48e18822cc7649f9076f5bcd91a5e431 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_3.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_7.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_7.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4a87e0a66faf0d359c629e2e342c3477967431c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_7.cpython-310.pyc differ
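Note on the wrapper-codegen hunk above: when config.benchmark_harness is enabled, the benchmark_compiled_module and add_benchmark_harness methods append a small standalone benchmark to every generated wrapper file. The sketch below is a hand-written approximation of roughly what that emitted harness looks like, not actual compiler output; the input name arg0_1, its shape and stride, and the stub call() are made up for illustration, and the real generated file dispatches through torch._inductor.wrapper_benchmark.compiled_module_main rather than calling the function directly.

# Hand-written sketch of the harness WrapperCodeGen appends to generated wrapper code.
# Assumptions: `arg0_1`, its (8, 16)/(16, 1) shape and stride, and the stub `call()`
# are hypothetical stand-ins for the real compiled graph entry point.
import torch
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance


def call(args):
    # Stand-in for the compiled graph's call() emitted by the wrapper.
    (arg0_1,) = args
    return (arg0_1 * 2,)


def benchmark_compiled_module(times=10, repeat=10):
    # The real harness materializes every graph input (and every constant, as a
    # global) with rand_strided(...) using the recorded shape, stride, device, dtype.
    arg0_1 = rand_strided((8, 16), (16, 1), device="cpu", dtype=torch.float32)
    fn = lambda: call([arg0_1])
    # Times the call and prints the median per-iteration latency.
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    # The generated file instead routes through compiled_module_main(...), which
    # wraps this same function with argument parsing and optional profiling.
    benchmark_compiled_module()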