diff --git a/ckpts/universal/global_step120/zero/12.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/12.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..096fa1d4061e51b95a9ed4754d1e29b9df148c56 --- /dev/null +++ b/ckpts/universal/global_step120/zero/12.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f05b11c06efbb6011e46324aeff91d6969c207cfbb4d3f538988ba16202597d +size 9372 diff --git a/ckpts/universal/global_step120/zero/12.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/12.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..01c6dee4ec13b54a484298cdf834fee923a2a635 --- /dev/null +++ b/ckpts/universal/global_step120/zero/12.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7acaaa34c1c1a2fa21ffeba8bb02a1e5e3c53df61fec46e5e720bc2503086377 +size 9387 diff --git a/ckpts/universal/global_step120/zero/12.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/12.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..6e1feb2cf1d10d4f527ecbb1d04bfa4884f880e0 --- /dev/null +++ b/ckpts/universal/global_step120/zero/12.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5440dd1c78e8a135fde05c1aafdfc6ca2addb712ea8ba21ea6775829fec29d71 +size 9293 diff --git a/ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..75cb93fe4cb4a369c36c22cff3823df9c7200def --- /dev/null +++ b/ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44799c5eca0f7b814a36ada8a2a985441812e58a613f68fbf4a527c11bf0f9da +size 33555612 diff --git a/ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..5521f36ac193300715ce4100563f8b02b32e310a --- /dev/null +++ b/ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d4ff2ab006f3da15b9bfe65118d2106bd3d58e3948524bd1543866e56ef9957 +size 33555627 diff --git a/ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..752557b4f992f74c4b1aaf28d0f7a30794087a9a --- /dev/null +++ b/ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3108e7a90cec5a3472de3afd1e14002a69023393be351abf71a3efbcd21582c1 +size 33555533 diff --git a/ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..b9446adb95ff353a8624b3a3b34fe3b5805616bf --- /dev/null +++ b/ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 
+oid sha256:7b67e13b1af7d8d5c6d522c93188eb1b59b42d6562bc2eb65983c44e8f96a127 +size 9372 diff --git a/ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..2e71d3ac2a2e9a748302574ba533d1258db46161 --- /dev/null +++ b/ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22fcdc17c849cd6418aae8ea5104e5c4fa7e7741611e1946c5644b2a5dcfad48 +size 9387 diff --git a/ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..1b712e1a1262f093c8091993c6a8f4a941bceed2 --- /dev/null +++ b/ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98d25990e3302cd65f6895327519b14c5fceb54e98ba62d0c1bff59dd9a53d10 +size 9293 diff --git a/ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..7bc05e2ef4c8b5ac3cf1c4be3a5e6071c7f25332 --- /dev/null +++ b/ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b21979a3ecb173071fbb2f4162e2729cd7d7c076edf8738243484e51c01ee749 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..3702d6a22b1cd9b326c0049d7a8c2ac458e06ff6 --- /dev/null +++ b/ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc1efd776071f65554aacd4cb630512eab6db181ec866f208bbd1f699d90d303 +size 33555627 diff --git a/ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..586c43e9be0f73e7a60d1c3d12cebdf2083fd5ef --- /dev/null +++ b/ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e436534b1d579979b5dc8db187e435a1fea6fc1713ca7dd5b8e412a90a4723e +size 33555533 diff --git a/ckpts/universal/global_step120/zero/21.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/21.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..735d957ecae8d8c687d7848c298c23f65ef73a9e --- /dev/null +++ b/ckpts/universal/global_step120/zero/21.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6e2d7fd20d792282caae21ed4736b2acfcfa2641a4d52061cd2d59b7818a484 +size 16778396 diff --git a/ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..e2a956543922148cf43f82d6bde226d9b8d7a0e8 --- /dev/null +++ 
b/ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d25a4818c7340186511af210b151ebe56995bd46863faea70e4c5a923fc21386 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..06d31871342f7b4ec0a686300907bd7322df1d93 --- /dev/null +++ b/ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33a92271a223d91e65ca0abe5a87b9b03f404d0a4ab1d8895d05084be74421c6 +size 33555627 diff --git a/ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..68ca97d5932db738720e42cdf171a0c6b4ac0228 --- /dev/null +++ b/ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05c9f973e08ceaedabdf8dc136cfd9579b6dfeca3d437125d515cbd1c91d15cb +size 33555533 diff --git a/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..dee7c18a7f3b6a943260b5d7d34a38e77865d288 --- /dev/null +++ b/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41200e7c76d1d8d858393328284a376692c2ebdc13db3055a2ce3fa15723c866 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..c483002f4c779e0f6fdba3fee43faae7e1d2bcaf --- /dev/null +++ b/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f66caeb5b4e74302196d38db5cd119b791be8b6d1811ad3099489c5c9f3b5d46 +size 33555627 diff --git a/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..ac22fcbbce0c41afed27a7c23211085f15170d64 --- /dev/null +++ b/ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82b02754a37e50b45da0958d132c2b74fb1c7d56bbaca856ac59800b72a7194c +size 33555533 diff --git a/ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..06775fd6afa9503e928444c1b9ae88db46f96e24 --- /dev/null +++ b/ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b52420175eca60bb5df5d9e84e2bbf745fb9a25a992fe84d0f7eefcd6ad7ae8e +size 33555612 diff --git a/ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/exp_avg_sq.pt 
b/ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..4c23ef8918c4a5ae3fec90a967c07a18e8ef6903 --- /dev/null +++ b/ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d79a52574022fc5d55a24ca3ba3164f445138534a2203641d17d34fad714a0ef +size 33555627 diff --git a/ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..af2ba6cf9e6b97463512caaf7548727a4822526a --- /dev/null +++ b/ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acd0074ae34fd37c15d1b698532bdc2101a5d5949484b38a55573c3f7a00f4d9 +size 33555533 diff --git a/ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..8cffb4feccccd19f71854dd944e8a93419769479 --- /dev/null +++ b/ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9acd645cb2197f7697afb3eade5575c1e41c0fd53f2bb5449d04c0a9ebafd8e0 +size 9372 diff --git a/ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..692d6ec2d79178ebaf3a57960000439b999980f5 --- /dev/null +++ b/ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56dc8af36b686f4bd61b94bf34ab79073c751c5f1f3939c9b985246c445e4062 +size 9387 diff --git a/ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..d35dfcc4fe431399af7b4675461f1984b519bb5f --- /dev/null +++ b/ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd2df67d75555ac4678a88f301d379694bbafbb8ee6ffc6a47acc1f43827f063 +size 9293 diff --git a/ckpts/universal/global_step120/zero/8.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/8.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..f70217ee1dafe470403bd490511250c3f5022ea1 --- /dev/null +++ b/ckpts/universal/global_step120/zero/8.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9474660312ed211cad82f8dcb9fda0c509688ebc5c56192005f0d55cab49621f +size 16778396 diff --git a/ckpts/universal/global_step120/zero/8.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/8.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..5a7f9c38d97695ccc0a472517ddebcfccd202ba1 --- /dev/null +++ b/ckpts/universal/global_step120/zero/8.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:162c890f9519502cdab7a9f1f7a770cbaba48bb56d2baa58bd5b682ba3898fa5 +size 16778411 diff --git a/ckpts/universal/global_step120/zero/8.attention.dense.weight/fp32.pt b/ckpts/universal/global_step120/zero/8.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..9c6476b82886be5d2d9a98ccc08cf7c90dfdbd73 --- /dev/null +++ b/ckpts/universal/global_step120/zero/8.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60c4e65e4bceb984b2965f7e6cfc0ed6b59369efbe4bc7b9aa2b0ec63aff7506 +size 16778317 diff --git a/ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..512672a78f172ef823dc56cf62adda7ed2bad021 --- /dev/null +++ b/ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ea3507607b2c129631b16b1a49d6699d411f01dcc57d598f117e53f4967efb4 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..8700bc724f844e732b29c3df57b3675da5416605 --- /dev/null +++ b/ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fd36949e611fd74b5fbfcd4f0c6bcd04fd97f37ab3ac449a94827e7e5b2a87e +size 33555627 diff --git a/ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..d01b1c0ec6732552273727b647a39bc64feada48 --- /dev/null +++ b/ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:846a3b0c05469d767c79a0e0edb03ce465bf10046b8a0651f2f6a366339a0a51 +size 33555533 diff --git a/ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..9d08ed70aca6aa46ff1bd79c2191c76c60b697bd --- /dev/null +++ b/ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd8e20433c17f091607c9862366643b5085e338b08d0a411b7d94da49e389dd3 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..159ad4a3dc06f59c24c31b5332750e960ba7d8e2 --- /dev/null +++ b/ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c877cbf4a06255c7db9d34dc5ec8a425822ad1a94fee46f54ab9e51cca3461e0 +size 33555627 diff --git a/ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..3c636080f56929ad634725f2f3b70ed098e601ff --- /dev/null +++ 
b/ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d26c26cf100853a6b3cfc71917f8dab485919dc440cdbf2ee3eb6ef9ec634ee2 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py new file mode 100644 index 0000000000000000000000000000000000000000..9c8aeadc45ae291f363bb4850b30bab4fb14214d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py @@ -0,0 +1,26 @@ +import torch + +from torch._export.db.case import export_case + + +class MyAutogradFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + return x.clone() + + @staticmethod + def backward(ctx, grad_output): + return grad_output + 1 + + +@export_case( + example_inputs=(torch.randn(3, 2),), +) +class AutogradFunction(torch.nn.Module): + """ + TorchDynamo does not keep track of backward() on autograd functions. We recommend to + use `allow_in_graph` to mitigate this problem. + """ + + def forward(self, x): + return MyAutogradFunction.apply(x) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_closed_over_variable.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_closed_over_variable.py new file mode 100644 index 0000000000000000000000000000000000000000..b201c5d679b8eab6e9a3a74705772acf3a9a5af8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_closed_over_variable.py @@ -0,0 +1,23 @@ +import torch + +from torch._export.db.case import export_case +from functorch.experimental.control_flow import cond + + +@export_case( + example_inputs=(torch.tensor(True), torch.ones(3, 2)), + tags={"torch.cond", "python.closure"}, +) +class CondClosedOverVariable(torch.nn.Module): + """ + torch.cond() supports branches closed over arbitrary variables. + """ + + def forward(self, pred, x): + def true_fn(val): + return x * 2 + + def false_fn(val): + return x - 2 + + return cond(pred, true_fn, false_fn, [x + 1]) diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_value_example.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_value_example.py new file mode 100644 index 0000000000000000000000000000000000000000..3844c7227a365ceb157222c91d179296e73a3522 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_value_example.py @@ -0,0 +1,30 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.tensor(4), torch.randn(5, 5)), + tags={ + "torch.dynamic-value", + "torch.escape-hatch", + }, +) +class ConstrainAsValueExample(torch.nn.Module): + """ + If the value is not known at tracing time, you can provide hint so that we + can trace further. Please look at constrain_as_value and constrain_as_size APIs. + constrain_as_value is used for values that don't need to be used for constructing + tensor. 
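For illustration, an example like this might be exported roughly as follows (a sketch assuming the `torch.export` API; the name `ep` is hypothetical)::

    from torch.export import export

    ep = export(ConstrainAsValueExample(), (torch.tensor(4), torch.randn(5, 5)))
    # The range hint on `a` is what lets tracing continue past the
    # data-dependent branch on `a < 6`.
    print(ep)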
+ """ + + def __init__(self): + super().__init__() + + def forward(self, x, y): + a = x.item() + torch._constrain_as_value(a, min=0, max=5) + + if a < 6: + return y.sin() + return y.cos() diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py new file mode 100644 index 0000000000000000000000000000000000000000..7d6a50320f5baba5843e6e4831789c1993b5e6ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py @@ -0,0 +1,24 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel +from torch.export import Dim + +x = torch.ones(3, 2) +dim0_x = Dim("dim0_x") + +@export_case( + example_inputs=(x,), + tags={"torch.dynamic-shape", "python.builtin"}, + support_level=SupportLevel.NOT_SUPPORTED_YET, + dynamic_shapes={"x": {0: dim0_x}}, +) +class DynamicShapeRound(torch.nn.Module): + """ + Calling round on dynamic shapes is not supported. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + return x[: round(x.shape[0] / 2)] diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py new file mode 100644 index 0000000000000000000000000000000000000000..16b7e54613d7fe405d28878ff45bd3a6ce5a3f4a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py @@ -0,0 +1,21 @@ +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"torch.dynamic-shape", "python.data-structure", "python.assert"}, +) +class ListContains(torch.nn.Module): + """ + List containment relation can be checked on a dynamic shape or constants. + """ + def __init__(self): + super().__init__() + + def forward(self, x): + assert x.size(-1) in [6, 2] + assert x.size(0) not in [4, 5, 6] + assert "monkey" not in ["cow", "pig"] + return x + x diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/null_context_manager.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/null_context_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..1689537db833a90bf09122221dde47aad79ebf34 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/null_context_manager.py @@ -0,0 +1,26 @@ +import contextlib + +import torch + +from torch._export.db.case import export_case + + +@export_case( + example_inputs=(torch.ones(3, 2),), + tags={"python.context-manager"}, +) +class NullContextManager(torch.nn.Module): + """ + Null context manager in Python will be traced out. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + """ + Null context manager in Python will be traced out. 
+ """ + ctx = contextlib.nullcontext() + with ctx: + return x.sin() + x.cos() diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py new file mode 100644 index 0000000000000000000000000000000000000000..4a06207b6eaf8f24d673c7ec227c3a5643c2d6a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py @@ -0,0 +1,19 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel + + +@export_case( + example_inputs=(torch.randn(2, 3),), + tags={"python.object-model"}, + support_level=SupportLevel.NOT_SUPPORTED_YET, +) +class OptionalInput(torch.nn.Module): + """ + Tracing through optional input is not supported yet + """ + + def forward(self, x, y=torch.ones(2, 3)): + if y is not None: + return x + y + return x diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py new file mode 100644 index 0000000000000000000000000000000000000000..743a357fc13ca984369cdddadf31bb4ee27e9109 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py @@ -0,0 +1,29 @@ +from enum import Enum + +import torch + +from torch._export.db.case import export_case + + +class Animal(Enum): + COW = "moo" + + +@export_case( + example_inputs=(torch.ones(3, 2),), +) +class SpecializedAttribute(torch.nn.Module): + """ + Model attributes are specialized. + """ + + def __init__(self): + super().__init__() + self.a = "moo" + self.b = 4 + + def forward(self, x): + if self.a == Animal.COW.value: + return x * x + self.b + else: + raise ValueError("bad") diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py new file mode 100644 index 0000000000000000000000000000000000000000..fae18fb1cf934bf1a9437b70578d58cf10130a4e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py @@ -0,0 +1,17 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel + + +@export_case( + example_inputs=(torch.randn(3, 2), "attr"), + tags={"python.builtin"}, + support_level=SupportLevel.SUPPORTED, +) +class TensorSetattr(torch.nn.Module): + """ + setattr() call onto tensors is not supported. + """ + def forward(self, x, attr): + setattr(x, attr, torch.randn(3, 2)) + return x + 4 diff --git a/venv/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py b/venv/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py new file mode 100644 index 0000000000000000000000000000000000000000..a0d78703e2d5ff96c15bd5b772fea10e044ffbfc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py @@ -0,0 +1,41 @@ +import torch + +from torch._export.db.case import export_case, SupportLevel, export_rewrite_case + + +class A: + @classmethod + def func(cls, x): + return 1 + x + + +@export_case( + example_inputs=(torch.ones(3, 4),), + tags={"python.builtin"}, + support_level=SupportLevel.SUPPORTED, +) +class TypeReflectionMethod(torch.nn.Module): + """ + type() calls on custom objects followed by attribute accesses are not allowed + due to its overly dynamic nature. 
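A workaround, shown in the rewrite case further below, is to call the classmethod directly on the class (e.g. `A.func(x)`) rather than going through `type(a)`.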
+ """ + + def __init__(self): + super().__init__() + + def forward(self, x): + a = A() + return type(a).func(x) + + +@export_rewrite_case(parent=TypeReflectionMethod) +class TypeReflectionMethodRewrite(torch.nn.Module): + """ + Custom object class methods will be inlined. + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + return A.func(x) diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__init__.py b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ac132d9db588e95fb3ce327344081dcdd2e7d51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__init__.py @@ -0,0 +1 @@ +from .cond import cond diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/auto_functionalize.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/auto_functionalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46a09764ba2ebcedcb6f8401b05c9be3b8be004f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/auto_functionalize.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/cond.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/cond.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f557c0aa8bc52f0a4cd30e2e7872e0fbce763783 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/cond.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/triton_kernel_wrap.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/triton_kernel_wrap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e241247f17dbe17394f95e2ea29cc652627aaa6c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/triton_kernel_wrap.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c08b65458860a9ed756b6e99a7f1b6aba03e71a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/auto_functionalize.py b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/auto_functionalize.py new file mode 100644 index 0000000000000000000000000000000000000000..55567ac4c99e29bfca2a9e8df0048924100ebe50 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/auto_functionalize.py @@ -0,0 +1,261 @@ +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import torch.utils._pytree as pytree +from torch import Tensor +from torch._C import DispatchKey +from torch._ops import HigherOrderOperator +from torch._prims_common import clone_preserve_strides +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + ProxyTorchDispatchMode, + track_tensor_tree, +) + + +# NOTE: [auto-functionalizing custom ops] +# Users may wish to torch.compile custom ops that 
mutate their inputs. +# torch.compile will automatically support this op without anyone needing +# to provide a functionalization kernel for it. Here's how. +# +# Let's say we have a hypothetical mylib::sin_(Tensor(a!) x) -> () +# op. First, when FakeTensor sees this op: +# - If the schema says it returns nothing, we can generate a trivial +# FakeTensor rule for it (that returns nothing). +# - Otherwise, the user needs to provide a FakeTensor rule (abstract impl) +# +# Next, when Python FunctionalTensor sees the op, it will functionalize +# it by emitting a call to an auto_functionalize(op, ["x"], {"x": ...}) +# HOP and replacing the mutated inputs with corresponding outputs of this HOP. +# This HOP effectively runs the functional version of the op when +# called: it clones inputs that will be mutated, runs the op, and +# then returns (output, Tensors with the new values) + + +class AutoFunctionalized(HigherOrderOperator): + """auto_functionalized(_mutable_op, **kwargs) + + This HOP runs a "functional" version of _mutable_op. + + Concretely, it looks at all the arguments that are mutable through + _mutable_op's operator schema, clones those kwargs, runs + `out = _mutable_op(**kwargs)` with the cloned values, and then returns the + operator output concatenated with the cloned values that were mutated. + + We have some restrictions on `_mutable_op`. + See `can_auto_functionalize` for the restrictions. We can likely lift + many of these if users request it. + + The reason why _mutable_op is prefixed with an + underscore is to prevent collisions with kwarg names in **kwargs. + """ + + def __init__(self): + super().__init__("auto_functionalized") + + def __call__( + self, + _mutable_op: torch._ops.OpOverload, + **kwargs: Dict[str, Any], + ) -> Tuple[Any, Tuple[Tensor, ...]]: + assert can_auto_functionalize(_mutable_op) + assert isinstance(kwargs, dict) + return super().__call__(_mutable_op, **kwargs) + + +auto_functionalized = AutoFunctionalized() + + +def can_auto_functionalize(op: torch._ops.OperatorBase) -> bool: + if not isinstance(op, torch._ops.OpOverload): + return False + + if torch._library.utils.is_builtin(op): + # We control the built-ins. These may (in rare cases) + # do input metadata mutation (which we have banned on custom ops) + return False + schema = op._schema + if not schema.is_mutable: + return False + schema = op._schema + + for arg in schema.arguments: + if arg.alias_info is None: + continue + if not arg.alias_info.is_write: + continue + if type(arg.type) is torch.TensorType: + continue + if ( + type(arg.type) is torch.OptionalType + and type(arg.type.getElementType()) is torch.TensorType + ): + continue + # Not yet supported: other Tensor types. This includes things like + # Tensor[], Tensor?[], Tensor[]?. + return False + + # The returns must not alias anything + for ret in schema.returns: + if ret.alias_info is None and type(ret.type) is torch.TensorType: + continue + # Not yet supported: List[Tensor] return. 
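# (For example, a hypothetical schema such as "mylib::split_(Tensor(a!) x) -> Tensor[]"
# would be rejected here, while "mylib::sin_(Tensor(a!) x) -> ()" passes both loops.)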
+ return False + return True + + +@auto_functionalized.py_impl(DispatchKey.CompositeExplicitAutograd) +def auto_functionalized_dense( + _mutable_op: torch._ops.OpOverload, + _only_clone_these_tensors: Optional[Tuple[str, ...]] = None, + **kwargs: Dict[str, Any], +) -> Tuple[Any, Tuple[Tensor, ...]]: + new_kwargs = dict(**kwargs) + result = [] + + _mutable_args_names = get_mutable_arg_names(_mutable_op) + for name in _mutable_args_names: + if ( + _only_clone_these_tensors is not None + and name not in _only_clone_these_tensors + ): + new_kwargs[name] = kwargs[name] + else: + new_kwargs[name] = ( + clone_preserve_strides(kwargs[name]) + if kwargs[name] is not None + else None + ) + result.append(new_kwargs[name]) + out = _mutable_op(**new_kwargs) + + if isinstance(out, tuple): + return (*out, *result) # type: ignore[return-value] + else: + return (out, *result) # type: ignore[return-value] + + +@auto_functionalized.py_impl(FakeTensorMode) +def auto_functionalized_fake( + mode, + _mutable_op: torch._ops.OpOverload, + **kwargs: Dict[str, Any], +) -> Tuple[Any, Tuple[Tensor, ...]]: + with mode: + result = auto_functionalized_dense(_mutable_op, **kwargs) + return result + + +@auto_functionalized.py_impl(ProxyTorchDispatchMode) +def auto_functionalized_proxy( + mode, + _mutable_op: torch._ops.OpOverload, + **kwargs: Dict[str, Any], +) -> Tuple[Any, Tuple[Tensor, ...]]: + if not mode.enable_tracing: + return auto_functionalized(_mutable_op, **kwargs) + + with disable_proxy_modes_tracing(): + out = auto_functionalized(_mutable_op, **kwargs) + + proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs) + out_proxy = mode.tracer.create_proxy( + "call_function", + auto_functionalized, + (_mutable_op,), + proxy_kwargs, + ) + result = track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer) + return result + + +auto_functionalized.fallthrough(DispatchKey.AutogradCPU) +auto_functionalized.fallthrough(DispatchKey.AutogradCUDA) + + +def get_mutable_arg_names(op: torch._ops.OpOverload) -> List[str]: + """ + Returns the list of argument names that get mutated according to the + schema. + """ + mutable_args_names = [ + arg.name + for arg in op._schema.arguments + if arg.alias_info is not None and arg.alias_info.is_write + ] + return mutable_args_names + + +def do_auto_functionalize( + op: torch._ops.OpOverload, args: Tuple[Any, ...], kwargs: Dict[str, Any] +) -> Any: + """Functionalizes a call to op(*args, **kwargs) by emitting a call to + `outs = auto_functionalized(op, normalized_kwargs)` + and replacing the mutated (args, kwargs) with the corresponding outputs. + + The normalized_kwargs are just the (args, kwargs), but all in kwarg form. + This makes handling easier for the auto_functionalized HOP. + """ + from torch._subclasses.functional_tensor import PythonFunctionalizeAPI + + ctx = PythonFunctionalizeAPI() + + # All of the (args, kwargs), but all as kwargs. The names for the + # args come from the schema. This makes it easier for us to work with them. 
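# For example, for the hypothetical mylib::sin_(Tensor(a!) x) -> () op mentioned
# above, a call op(t) is normalized to {"x": t} before being handed to
# auto_functionalized.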
+ normalized_kwargs = {} + schema = op._schema + for idx, arg in enumerate(schema.arguments): + # NB: torch_dispatch kwargs are the args defined as kwarg-only in the schema + if arg.name in kwargs: + normalized_kwargs[arg.name] = kwargs[arg.name] + elif idx < len(args): + # if its out of bounds we don't need to do anything + # as it means the the optional arg was passed with its default + # value + normalized_kwargs[arg.name] = args[idx] + else: + normalized_kwargs[arg.name] = arg.default_value + + unwrapped_kwargs = ctx.unwrap_tensors(normalized_kwargs) # type: ignore[arg-type] + with ctx.redispatch_to_next(): + unwrapped_outs = auto_functionalized( + op, **unwrapped_kwargs # type: ignore[arg-type] + ) + + # List of the name of args that get mutated (according to the schema) + mutable_args_names = get_mutable_arg_names(op) + + unwrapped_actual_out: Union[Any, Tuple[Any]] = unwrapped_outs[ + : -len(mutable_args_names) + ] + unwrapped_mutable_out = unwrapped_outs[-len(mutable_args_names) :] + + if len(op._schema.returns) == 0: + assert unwrapped_actual_out[0] is None + unwrapped_actual_out = None + elif len(op._schema.returns) == 1: + assert len(unwrapped_actual_out) == 1 + unwrapped_actual_out = unwrapped_actual_out[0] + else: + assert len(unwrapped_actual_out) == len(op._schema.returns) + + for name, unwrapped_out in zip(mutable_args_names, unwrapped_mutable_out): + # Can be None if input was `Tensor(a!)?` + if unwrapped_out is None: + continue + assert isinstance(unwrapped_out, torch.Tensor) + orig_arg = normalized_kwargs[name] + ctx.replace(orig_arg, unwrapped_out) + ctx.commit_update(orig_arg) + ctx.sync(orig_arg) + + return ctx.wrap_tensors(unwrapped_actual_out) # type: ignore[arg-type] + + +@auto_functionalized.py_functionalize_impl +def auto_functionalized_func(ctx, _mutable_op, **kwargs): + unwrapped_kwargs = ctx.unwrap_tensors(kwargs) + with ctx.redispatch_to_next(): + result = auto_functionalized(_mutable_op, **unwrapped_kwargs) + return ctx.wrap_tensors(result) diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/cond.py b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/cond.py new file mode 100644 index 0000000000000000000000000000000000000000..ae4dba02bac1ec4f086b86e87dd8d6a6fafde40c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/cond.py @@ -0,0 +1,349 @@ +import torch +import torch._subclasses.functional_tensor + +import torch.utils._pytree as pytree + +from torch._C import DispatchKey +from torch._C._functorch import ( + _add_batch_dim, + get_unwrapped, + is_batchedtensor, + maybe_get_bdim, +) +from torch._functorch.utils import exposed_in + +from torch._higher_order_ops.utils import ( + _has_potential_branch_input_alias, + _has_potential_branch_input_mutation, + _set_compilation_env, + autograd_not_implemented, + reenter_make_fx, + UnsupportedAliasMutationException, +) + +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + ProxyTorchDispatchMode, + track_tensor_tree, +) +from torch.fx.passes.shape_prop import _extract_tensor_metadata +from torch.utils._python_dispatch import _get_current_dispatch_mode + + +@exposed_in("torch") +def cond(pred, true_fn, false_fn, operands): + r""" + Conditionally applies `true_fn` or `false_fn`. + + .. warning:: + `torch.cond` is a prototype feature in PyTorch. It has limited support for input and output types and + doesn't support training currently. 
Please look forward to a more stable implementation in a future version of PyTorch. + Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype + + `cond` is structured control flow operator. That is, it is like a Python if-statement, + but has restrictions on `true_fn`, `false_fn`, and `operands` that enable it to be + capturable using torch.compile and torch.export. + + Assuming the constraints on `cond`'s arguments are met, `cond` is equivalent to the following:: + + def cond(pred, true_branch, false_branch, operands): + if pred: + return true_branch(*operands) + else: + return false_branch(*operands) + + Args: + pred (Union[bool, torch.Tensor]): A boolean expression or a tensor with one element, + indicating which branch function to apply. + + true_fn (Callable): A callable function (a -> b) that is within the + scope that is being traced. + + false_fn (Callable): A callable function (a -> b) that is within the + scope that is being traced. The true branch and false branch must + have consistent input and outputs, meaning the inputs have to be + the same, and the outputs have to be the same type and shape. + + operands (Tuple of possibly nested dict/list/tuple of torch.Tensor): A tuple of inputs to the true/false functions. + + Example:: + + def true_fn(x: torch.Tensor): + return x.cos() + def false_fn(x: torch.Tensor): + return x.sin() + return cond(x.shape[0] > 4, true_fn, false_fn, (x,)) + + Restrictions: + - The conditional statement (aka `pred`) must meet one of the following constraints: + + - It's a `torch.Tensor` with only one element, and torch.bool dtype + + - It's a boolean expression, e.g. `x.shape[0] > 10` or `x.dim() > 1 and x.shape[1] > 10` + + - The branch function (aka `true_fn`/`false_fn`) must meet all of the following constraints: + + - The function signature must match with operands. + + - The function must return a tensor with the same metadata, e.g. shape, + dtype, etc. + + - The function cannot have in-place mutations on inputs or global variables. + (Note: in-place tensor operations such as `add_` for intermediate results + are allowed in a branch) + + .. warning:: + Temporal Limitations: + + - `cond` only supports **inference** right now. Autograd will be supported in the future. + + - The **output** of branches must be a **single Tensor**. Pytree of tensors will be supported in the future. + + """ + + if torch.compiler.is_dynamo_compiling(): + return cond_op(pred, true_fn, false_fn, operands) + + def _validate_input(pred, true_fn, false_fn, operands): + if not isinstance(pred, (bool, torch.Tensor, torch.SymBool)): + raise RuntimeError(f"Expected pred to be bool or tensor, but got {pred}.") + + if isinstance(pred, torch.Tensor) and pred.numel() != 1: + raise RuntimeError( + f"Expected pred to be bool or single-element tensor, but got {pred}." + ) + + if not callable(true_fn) or not callable(false_fn): + raise RuntimeError("Expect both branches to be callbale.") + + if not isinstance(operands, (tuple, list)) or pytree.tree_any( + lambda t: not isinstance(t, torch.Tensor), operands + ): + raise RuntimeError( + "Expect operands to be a tuple of possibly nested dict/list/tuple that only" + f"consists of tensor leaves, but got {operands}." 
+ ) + + _validate_input(pred, true_fn, false_fn, operands) + + if not torch._dynamo.is_dynamo_supported(): + raise RuntimeError("torch.cond requires dynamo support.") + + with _set_compilation_env(): + with torch._dynamo.utils.disable_cache_limit(): + return torch.compile(cond_op, backend="eager", fullgraph=True)( + pred, true_fn, false_fn, operands + ) + + +""" +We're going to define a `cond_op` operation. +In order to do this, we need implementations for each of the dispatch keys. +""" +cond_op = HigherOrderOperator("cond") + + +def trace_cond(proxy_mode, func_overload, pred, true_fn, false_fn, operands): + assert isinstance( + operands, (list, tuple) + ), "Cond operands must be a list or tuple of tensors" + assert all( + isinstance(o, torch.Tensor) for o in operands + ), "Cond operands must be a list of tensors" + + pre_dispatch = getattr(proxy_mode, "pre_dispatch", False) + + with disable_proxy_modes_tracing(): + true_graph = reenter_make_fx(true_fn, pre_dispatch)(*operands) + false_graph = reenter_make_fx(false_fn, pre_dispatch)(*operands) + + true_outs = [] + false_outs = [] + for node in true_graph.graph.nodes: + if node.op == "output": + true_outs.extend(node.args) + + for node in false_graph.graph.nodes: + if node.op == "output": + false_outs.extend(node.args) + + flat_true_outs = pytree.arg_tree_leaves(*true_outs) + flat_false_outs = pytree.arg_tree_leaves(*false_outs) + if len(flat_true_outs) != len(flat_false_outs): + raise torch._dynamo.exc.CondOpArgsMismatchError( + f"Expected to return same number of outputs but got:" + f"\n {true_fn.__name__} returns {len(flat_true_outs)} item(s)" + f"\n {false_fn.__name__} returns {len(flat_false_outs)} item(s)" + ) + + for i in range(0, len(flat_true_outs)): + true_out = flat_true_outs[i] + false_out = flat_false_outs[i] + if true_out.meta["tensor_meta"] != false_out.meta["tensor_meta"]: + raise torch._dynamo.exc.CondOpArgsMismatchError( + f"Expected each tensor to have same metadata but got:" + f"\n {true_fn.__name__} returns {true_out.meta['tensor_meta']}" + f"\n {false_fn.__name__} returns {false_out.meta['tensor_meta']}" + ) + + # There are probably better ways - I know that create_arg has some self incrementing name + # magic to it, but since we explicitly have to get the name for register_module, + # I was not sure how to do that. This kinda simulates it. + next_name = None + i = 0 + while not next_name: + candidate = f"true_graph_{i}" + if hasattr(proxy_mode.tracer.root, candidate): + i += 1 + else: + next_name = candidate + + true_name = next_name + false_name = f"false_graph_{i}" + assert not hasattr(proxy_mode.tracer.root, false_name) + + proxy_mode.tracer.root.register_module(true_name, true_graph) + proxy_mode.tracer.root.register_module(false_name, false_graph) + + args = (pred, true_graph, false_graph, operands) + + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, args) + + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", func_overload, proxy_args, {}, name="conditional" + ) + + # At this point, we're *guaranteed* that whether an output came from the + # true or false branch is indistinguishable. So, as this is just for tracing + # purposes, choose the true branch. + + # TODO: Uhh.... it shouldn't matter, but changing this to true_fn results in + # a FakeTensorMode error : + # `Current active mode not registered` + # TODO Sometimes the operands are not completely FakeTensor, something seems went wrong in + # dynamo? 
Because of that it runs real computation sometimes and re-triggering downstream dispatch keys. + out = false_fn(*operands) + + return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer) + + +@cond_op.py_impl(DispatchKey.CompositeExplicitAutograd) +def cond_op_dense(pred, true_fn, false_fn, operands): + mode = _get_current_dispatch_mode() + assert mode is None, "Mode should never be enabled for CPU/CUDA key" + if pred: + return true_fn(*operands) + else: + return false_fn(*operands) + + +cond_op.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(cond_op, deferred_error=True) +) + + +@cond_op.py_impl(ProxyTorchDispatchMode) +def inner(mode, pred, true_fn, false_fn, operands): + if mode.enable_tracing: + return trace_cond(mode, cond_op, pred, true_fn, false_fn, operands) + else: + return cond_op(pred, true_fn, false_fn, operands) + + +@cond_op.py_impl(FakeTensorMode) +def cond_fake_tensor_mode(mode, pred, true_fn, false_fn, operands): + with mode: + true_outs = true_fn(*operands) + flat_true_outs = pytree.tree_leaves(true_outs) + flat_false_outs = pytree.tree_leaves(false_fn(*operands)) + if len(flat_true_outs) != len(flat_false_outs): + raise RuntimeError("Unmatched number of outputs from cond() branches.") + + for true_out, false_out in zip(flat_true_outs, flat_false_outs): + true_meta = _extract_tensor_metadata(true_out) + false_meta = _extract_tensor_metadata(false_out) + if true_meta != false_meta: + raise torch._dynamo.exc.CondOpArgsMismatchError( + f"Expected each tensor to have same metadata but got:" + f"\n {true_fn.__name__} returns {true_meta}" + f"\n {false_fn.__name__} returns {false_meta}" + ) + return true_outs + + +@cond_op.py_functionalize_impl +def cond_func(ctx, pred, true_fn, false_fn, inputs): + unwrapped_inputs = ctx.unwrap_tensors(inputs) + unwrapped_pred = ctx.unwrap_tensors(pred) + with ctx.redispatch_to_next() as m: + functional_true = ctx.functionalize(true_fn) + functional_false = ctx.functionalize(false_fn) + pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch + for branch in [functional_true, functional_false]: + if _has_potential_branch_input_mutation( + branch, unwrapped_inputs, pre_dispatch=pre_dispatch + ): + raise UnsupportedAliasMutationException( + "One of torch.cond branch might be modifying the input!" + ) + for branch in [true_fn, false_fn]: + if _has_potential_branch_input_alias( + branch, unwrapped_inputs, pre_dispatch=pre_dispatch + ): + raise UnsupportedAliasMutationException( + "One of torch.cond branch might be aliasing the input!" 
+ ) + + cond_return = cond_op( + unwrapped_pred, functional_true, functional_false, unwrapped_inputs + ) + return ctx.wrap_tensors(cond_return) + + +@cond_op.py_impl(torch._C._functorch.TransformType.Vmap) +def cond_batch_rule(interpreter, pred, true_fn, false_fn, inputs): + assert isinstance( + inputs, (list, tuple) + ), "Cond inputs must be a list or tuple of tensors" + assert all( + isinstance(i, torch.Tensor) for i in inputs + ), "Cond inputs must be a list of tensors" + + pred_ = get_unwrapped(pred) if is_batchedtensor(pred) else pred + + # unbatched tensors are not vmapped + tensors, in_dims = zip( + *[ + (get_unwrapped(t), maybe_get_bdim(t)) if is_batchedtensor(t) else (t, None) + for t in inputs + ] + ) + + if is_batchedtensor(pred): + # prepend "pred" and vmap everything + tensors = (pred_,) + tensors + in_dims = (0,) + in_dims + + def fn(p, *args): + t = true_fn(*args) + f = false_fn(*args) + return torch.where(p, t[0], f[0]) + + with interpreter.lower(): + result = torch.vmap(fn, in_dims=in_dims)(*tensors) + + else: + # predicate is known at this stage and it is a boolean expression or a + # tensor with one element. + true_fn = torch.vmap(true_fn, in_dims=in_dims) + false_fn = torch.vmap(false_fn, in_dims=in_dims) + + with interpreter.lower(): + result = cond_op(pred, true_fn, false_fn, tensors) + + if not isinstance(result, tuple): + result = (result,) + lvl = interpreter.level() + return tuple([_add_batch_dim(r, 0, lvl) for r in result]) diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/effects.py b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/effects.py new file mode 100644 index 0000000000000000000000000000000000000000..08c49c964631e5794e2783c3351d5f10fae29a94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/effects.py @@ -0,0 +1,204 @@ +from enum import Enum +from typing import Any, Dict, Optional, Tuple + +import torch +import torch.utils._pytree as pytree +from torch._C import DispatchKey +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + ProxyTorchDispatchMode, + track_tensor_tree, +) + + +class _EffectType(Enum): + ORDERED = "Ordered" + + +SIDE_EFFECTS: Dict[torch._ops.OpOverload, _EffectType] = { + torch.ops.aten._print.default: _EffectType.ORDERED, +} + + +class WithEffects(HigherOrderOperator): + """ + with_effects(token, op, args, kwargs) -> (new_token, op_results) + + This HOP helps ensure ordering between side effectful ops like prints or ops + using torchbind objects. This is needed to ensure a traced graph from + AOTAutograd is functional so that future optimization passes do not reorder + these operators. This is done through threading "effect tokens" through the + graph to enforce data dependence between side effectful ops. + + The tokens are basically dummy values (torch.tensor([])). We create a token + per "effect type", which are enumerated in the _EffectType enum. 
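For illustration, after functionalization a graph containing two ordered side effects might look roughly like this (a sketch, not literal emitted IR; `token0` stands for the initial dummy token)::

    token1, _ = with_effects(token0, torch.ops.aten._print.default, "first")
    token2, _ = with_effects(token1, torch.ops.aten._print.default, "second")

Each call consumes the previous token and produces a new one, which is what prevents later passes from reordering the two prints.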
+ """ + + def __init__(self): + super().__init__("with_effects") + + def __call__( + self, + token, + op: torch._ops.OpOverload, + *args: Tuple[Any, ...], + **kwargs: Dict[str, Any], + ) -> Tuple[Any, ...]: + assert isinstance(op, torch._ops.OpOverload) + assert not has_aliasing(op), "Ops with aliasing is not supported" + assert has_effects(op, args, kwargs) + assert isinstance(kwargs, dict) + return super().__call__(token, op, *args, **kwargs) + + +with_effects = WithEffects() + + +def has_aliasing(op: torch._ops.OpOverload): + for arg in op._schema.arguments: + if arg.alias_info is not None: + return True + for arg in op._schema.returns: + if arg.alias_info is not None: + return True + return False + + +def has_effects(op, args, kwargs) -> bool: + return ( + isinstance(op, torch._ops.OpOverload) + and not has_aliasing(op) + and get_effect_key(op, args, kwargs) is not None + ) + + +def get_effect_key(op, args, kwargs) -> Optional[_EffectType]: + if op in SIDE_EFFECTS: + return SIDE_EFFECTS[op] + + for arg in args: + if isinstance(arg, torch.ScriptObject): + return _EffectType.ORDERED + + return None + + +@with_effects.py_impl(DispatchKey.CompositeExplicitAutograd) +def with_effects_dense( + token: torch.Tensor, + op: torch._ops.OpOverload, + *args: Tuple[Any, ...], + **kwargs: Dict[str, Any], +) -> Tuple[torch.Tensor, ...]: + out = op(*args, **kwargs) + new_token = torch.tensor([]) + if isinstance(out, tuple): + return (new_token, *out) + return (new_token, out) + + +@with_effects.py_impl(FakeTensorMode) +def with_effects_fake( + mode, + token: torch.Tensor, + op: torch._ops.OpOverload, + *args: Tuple[Any, ...], + **kwargs: Dict[str, Any], +) -> Tuple[torch.Tensor, ...]: + with mode: + result = with_effects_dense(token, op, *args, **kwargs) + return result + + +@with_effects.py_impl(ProxyTorchDispatchMode) +def with_effects_proxy( + mode, + token: torch.Tensor, + op: torch._ops.OpOverload, + *args: Tuple[Any, ...], + **kwargs: Dict[str, Any], +) -> Tuple[torch.Tensor, ...]: + if not mode.enable_tracing: + return with_effects(token, op, *args, **kwargs) + + with disable_proxy_modes_tracing(): + out = with_effects(token, op, *args, **kwargs) + + proxy_token = mode.tracer.unwrap_proxy(token) + proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, args) + proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs) + + out_proxy = mode.tracer.create_proxy( + "call_function", + with_effects, + (proxy_token, op, *proxy_args), + proxy_kwargs, + ) + result = track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer) + return result + + +with_effects.fallthrough(DispatchKey.AutogradCPU) +with_effects.fallthrough(DispatchKey.AutogradCUDA) + + +def handle_effects( + allow_token_discovery: bool, + tokens: Dict[_EffectType, torch.Tensor], + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], +) -> Any: + """ + Args: + allow_token_discovery: Whether or not we are discovering tokens. If this + is true, we will create a token for every side effect type seen that + does not have a token assigned yet. If this is false, the tokens + should've all been created ahead of time, so we will error if there is + no token mapping to every effect type. + + tokens: Map of effect type to tokens. This is to chain operators of the + same effects together so that they do not get reordered in later + optimization passes. + """ + + # Get a token. 
We can't do `tokens.get(op, torch.tensor([]))` because + # this will create an empty tensor during proxy mode tracing if the token + # doesn't exist. But the tokens should always exist during proxy mode tracing. + key = get_effect_key(op, args, kwargs) + assert key is not None + if key not in tokens: + assert allow_token_discovery, f"Could not find a token for effect {key}" + tokens[key] = torch.tensor([]) + token = tokens[key] + + from torch._subclasses.functional_tensor import PythonFunctionalizeAPI + + ctx = PythonFunctionalizeAPI() + + unwrapped_token = ctx.unwrap_tensors([token])[0] # type: ignore[arg-type] + unwrapped_args = ctx.unwrap_tensors(args) # type: ignore[arg-type] + unwrapped_kwargs = ctx.unwrap_tensors(kwargs) # type: ignore[arg-type] + with ctx.redispatch_to_next(): + (new_token, *unwrapped_outs) = with_effects( + unwrapped_token, op, *unwrapped_args, **unwrapped_kwargs # type: ignore[arg-type] + ) + + if len(op._schema.returns) == 0: + assert unwrapped_outs[0] is None + unwrapped_outs = None # type: ignore[assignment] + elif len(op._schema.returns) == 1: + assert len(unwrapped_outs) == 1 + unwrapped_outs = unwrapped_outs[0] + else: + assert len(unwrapped_outs) == len(op._schema.returns) + + # Add the newly created token into the tokens map for a following call to + # use this token. + wrapped_token = ctx.wrap_tensors(new_token) + assert isinstance(wrapped_token, torch.Tensor) + tokens[key] = wrapped_token + + return ctx.wrap_tensors(unwrapped_outs) # type: ignore[arg-type] diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/map.py b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/map.py new file mode 100644 index 0000000000000000000000000000000000000000..76f4b89532c86a614a9f0179b32f2f380d0d1d73 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/map.py @@ -0,0 +1,358 @@ +import torch +import torch.utils._pytree as pytree +from torch._C import DispatchKey +from torch._dispatch.python import suspend_functionalization +from torch._functorch.aot_autograd import AOTConfig, create_joint, from_fun + +from torch._higher_order_ops.utils import ( + _has_potential_branch_input_alias, + _has_potential_branch_input_mutation, + reenter_make_fx, + UnsupportedAliasMutationException, +) +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch._subclasses.functional_tensor import ( + disable_functional_mode, + FunctionalTensor, +) +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + make_fx, + ProxyTorchDispatchMode, + track_tensor_tree, +) +from torch.multiprocessing.reductions import StorageWeakRef + + +# TODO: We add this to prevent dymamo from tracing into map_wrapper, +# remove the wrapper call when it's ready. +class MapWrapper(HigherOrderOperator): + def __call__(self, xs, *args): + return map_wrapper(xs, *args) + + +map = MapWrapper("map") +map_impl = HigherOrderOperator("map_impl") + +dummy_aot_config = AOTConfig( + fw_compiler=None, # type: ignore[arg-type] + bw_compiler=None, # type: ignore[arg-type] + partition_fn=None, # type: ignore[arg-type] + decompositions={}, + num_params_buffers=0, + aot_id=0, + keep_inference_input_mutations=False, +) + + +def create_fw_bw_graph(f, num_mapped_args, *args): + mapped_xs = args[:num_mapped_args] + pos_args = args[num_mapped_args:] + + # Note: We create "clean" environments for make_fx by suspending all dispatch keys + # between Autograd and Python key. 
Currently, we only suspend functionalization but more can be + # added when required. Will encounter two problems if we don't suspend functionalization: + # + # 1. make_fx fails to capture operations on input: the inputs are wrapped as _to_functional_tensor_wrapper, + # but they will be unwrapped before entering ProxyTorchDispatchMode as part of the dispatching. + # However, it's the outside wrapper that tracer creates proxies for. This casuses tracer fail to + # fetch the proxy for the inputs and fail to capture any operations on them. + # + # 2. make_fx fails to capture output: the outputs after ProxyTorchDispatchMode are further + # wrapped as FunctionalTensorWrapper in Functionalize key after return. However, the tracer + # only associates the inner tensor with proxy in ProxyTorchDispatchMode. Therefore, + # when creating the output node, it fails to associate the wrapped tensor with its proxy. + # Instead, it will create _tensor_constant as output. + + with suspend_functionalization(), disable_functional_mode(): + with disable_proxy_modes_tracing(): + + def _from_fun(t): + if isinstance(t, torch.Tensor): + if t.dtype != torch.bool: + return torch.empty_strided( + t.size(), + t.stride(), + dtype=t.dtype, + requires_grad=t.requires_grad, + ) + else: + # clone of a functional tensor produces a functional tensor + # but we want to avoid it so we clone a non-functional version + maybe_unfunc_t = t + if isinstance(t, FunctionalTensor): + torch._sync(t) + maybe_unfunc_t = from_fun(t) + elif torch._is_functional_tensor(t): + # need to handle both types of functionalization here: + # these are the tensors that came from the user, + # which could be either FunctionalTensorWrapper or FunctionalTensor + torch._sync(t) + maybe_unfunc_t = torch._from_functional_tensor(t) + return maybe_unfunc_t.clone() + return t + + unwrapped_mapped_xs = pytree.tree_map(_from_fun, mapped_xs) + example_xs = _unstack_pytree(unwrapped_mapped_xs)[0] + + example_pos_args = [ + _from_fun(arg) if isinstance(arg, torch.Tensor) else arg + for arg in pos_args + ] + example_flat_out = pytree.tree_map( + _from_fun, f(*example_xs, *example_pos_args) + ) + if any( + not isinstance(out, torch.Tensor) + for out in example_flat_out + if out is not None + ): + raise RuntimeError( + "Expect outputs of map only contains tensors or None. " + f"Got types {[type(out) for out in example_flat_out]}." 
+ ) + example_grad = [_from_fun(out) for out in example_flat_out] + + fw_graph = make_fx(f)(*example_xs, *example_pos_args) + + def joint_f(*example_args): + joint_mapped_args = example_args[:joint_num_mapped] + args = example_args[joint_num_mapped:] + + mapped_input = joint_mapped_args[:num_mapped_args] + mapped_grads = joint_mapped_args[num_mapped_args:] + + def fw_with_masks(*args): + fw_out = f(*args) + return fw_out, [ + True + if isinstance(ret, torch.Tensor) and ret.requires_grad + else False + for ret in fw_out + ] + + joint = create_joint(fw_with_masks, aot_config=dummy_aot_config) + _, grads = joint( + list(mapped_input) + list(args), + [ + grad + for grad in mapped_grads + if grad is not None and grad.requires_grad + ], + ) + + # In order to keep map functional for backward graph, + # we clone outputs that are aliasing inputs + input_storage = { + StorageWeakRef(arg._typed_storage()) + for arg in example_args + if isinstance(arg, torch.Tensor) + } + + def maybe_clone(t): + if ( + isinstance(t, torch.Tensor) + and StorageWeakRef(t._typed_storage()) in input_storage + ): + return t.clone() + return t + + return pytree.tree_map(maybe_clone, grads) + + joint_num_mapped = len(example_grad) + len(example_xs) + joint_graph = make_fx(joint_f)(*example_xs, *example_grad, *example_pos_args) + return fw_graph, joint_graph + + +def map_wrapper(f, xs, *args): + flat_xs, xs_spec = pytree.tree_flatten(xs) + if not all(isinstance(t, torch.Tensor) for t in flat_xs): + raise RuntimeError(f"Mapped xs can only consist of tensors. Got xs {flat_xs}.") + + num_mapped_args = len(flat_xs) + shapes = [xs.shape for xs in flat_xs] + leading_dim_size = shapes[0][0] + if leading_dim_size == 0: + raise RuntimeError("Leading dimensions of mapped xs cannot be 0.") + + if any(cur_shape[0] != leading_dim_size for cur_shape in shapes): + raise RuntimeError( + f"Leading dimensions of mapped xs must be consistent. Got shapes {shapes}." 
+ ) + + out_spec = None + + def flat_fn(*flat_args): + xs = pytree.tree_unflatten(list(flat_args[:num_mapped_args]), xs_spec) + unflattened_out = f(xs, *flat_args[num_mapped_args:]) + flat_out, tmp_out_spec = pytree.tree_flatten(unflattened_out) + + nonlocal out_spec + out_spec = tmp_out_spec + return flat_out + + return pytree.tree_unflatten( + map_impl(flat_fn, flat_xs, args), out_spec # type: ignore[arg-type] + ) + + +class MapAutogradOp(torch.autograd.Function): + @staticmethod + def forward(ctx, fw_graph, joint_graph, num_mapped_args, *flat_args): + ctx.save_for_backward(*flat_args) + ctx._joint_graph = joint_graph + ctx._num_mapped_args = num_mapped_args + with torch._C._AutoDispatchBelowAutograd(): + return ( + *map_impl( + fw_graph, flat_args[:num_mapped_args], flat_args[num_mapped_args:] + ), + ) + + @staticmethod + def backward(ctx, *flat_grads): + fw_args = ctx.saved_tensors + fw_mapped_args = fw_args[: ctx._num_mapped_args] + pos_args = fw_args[ctx._num_mapped_args :] + + grads = map_impl( + ctx._joint_graph, + fw_mapped_args + flat_grads, + pos_args, + ) + return None, None, None, *grads + + +def trace_map(proxy_mode, func_overload, f, xs, pos_args): + leading_dim_size = xs[0].shape[0] + + example_input = _unstack_pytree(xs)[0] + body_graph = f + + pre_dispatch = getattr(proxy_mode, "pre_dispatch", False) + body_graph = reenter_make_fx(body_graph, pre_dispatch)(*example_input, *pos_args) + + next_name = None + i = 0 + while not next_name: + candidate = f"body_graph_{i}" + if hasattr(proxy_mode.tracer.root, candidate): + i += 1 + else: + next_name = candidate + + proxy_mode.tracer.root.register_module(next_name, body_graph) + + with disable_proxy_modes_tracing(): + example_outs = body_graph(*example_input, *pos_args) + + def expand_tensor(t): + if isinstance(t, torch.Tensor): + return t.expand(leading_dim_size, *t.shape) + return t + + expanded_outs = pytree.tree_map(expand_tensor, example_outs) + + node_args = (body_graph, list(xs), list(pos_args)) + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args) + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", func_overload, proxy_args, {}, name="map_impl" + ) + return track_tensor_tree( + expanded_outs, out_proxy, constant=None, tracer=proxy_mode.tracer + ) + + +def _unstack_pytree(xs): + flat_xs, inspec = pytree.tree_flatten(xs) + if not all(isinstance(xs, torch.Tensor) for xs in flat_xs): + raise RuntimeError(f"Leaves of xs must be Tensor {flat_xs}") + + if not all(xs.shape[0] == flat_xs[0].shape[0] for xs in flat_xs): + raise RuntimeError( + f"Leaves of xs must have same leading dimension size {[xs.shape for xs in flat_xs]}" + ) + + a = zip(*flat_xs) + + pytrees = [] + for tuple in a: + pytrees.append(pytree.tree_unflatten(tuple, inspec)) + return pytrees + + +def _stack_pytree(pytrees): + flat_out = [] + out_spec = None + for pt in pytrees: + flat_pt, out_spec = pytree.tree_flatten(pt) + flat_out.append(flat_pt) + assert out_spec is not None + b = zip(*flat_out) + stacked_out = [] + for leaves in b: + if all(isinstance(leaf, torch.Tensor) for leaf in leaves): + stacked_out.append(torch.stack(leaves)) + elif all(leaf is None for leaf in leaves): + # Backward graph can return None output when forward inputs doesn't require grad. + # When we eagerly execute backward graph, we need to call _stack_pytree on its output, + # therefore we need to deal with None output. 
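+ # A column whose leaves are all None collapses to a single None in the
+ # stacked result, matching autograd's convention of returning None for
+ # inputs that do not require grad.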
+ stacked_out.append(None) # type: ignore[arg-type] + else: + raise RuntimeError(f"Cannot stack {leaves}.") + return pytree.tree_unflatten(stacked_out, out_spec) + + +@map_impl.py_impl(DispatchKey.CompositeExplicitAutograd) +def map_dense(f, xs, pos_args): + pytrees = [] + for inp in _unstack_pytree(xs): + pytrees.append(f(*inp, *pos_args)) + return _stack_pytree(pytrees) + + +@map_impl.py_impl(DispatchKey.Autograd) +def map_autograd(f, xs, pos_args): + num_mapped_args = len(xs) + fw_graph, bw_graph = create_fw_bw_graph(f, num_mapped_args, *xs, *pos_args) + flat_out = MapAutogradOp.apply(fw_graph, bw_graph, num_mapped_args, *xs, *pos_args) + return flat_out + + +@map_impl.py_impl(ProxyTorchDispatchMode) +def map_proxy_torch_dispatch_mode(mode, f, xs, args): + if mode.enable_tracing: + return trace_map(mode, map_impl, f, xs, args) + else: + return map_impl(f, xs, args) + + +@map_impl.py_impl(FakeTensorMode) +def map_fake_tensor_mode(mode, f, xs, args): + with mode: + return map_dense(f, xs, args) + + +@map_impl.py_functionalize_impl +def map_functionalize(ctx, f, xs, pos_args): + unwrapped_xs = ctx.unwrap_tensors(xs) + unwrapped_args = ctx.unwrap_tensors(pos_args) + wrapped_fn = ctx.functionalize(f) + + with ctx.redispatch_to_next(): + with disable_proxy_modes_tracing(): + example_inputs = (*_unstack_pytree(unwrapped_xs)[0], *unwrapped_args) + pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch + if _has_potential_branch_input_mutation( + f, example_inputs, pre_dispatch=pre_dispatch + ): + raise UnsupportedAliasMutationException("torch.map is mutating the input!") + + if _has_potential_branch_input_alias( + f, example_inputs, pre_dispatch=pre_dispatch + ): + raise UnsupportedAliasMutationException("torch.map is aliasing the input!") + + map_return = map_impl(wrapped_fn, unwrapped_xs, unwrapped_args) + return ctx.wrap_tensors(map_return) diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/out_dtype.py b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/out_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..f675519ee18294bca908f29ccd553f05f4cd0971 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/out_dtype.py @@ -0,0 +1,170 @@ + +import torch +import torch.utils._pytree as pytree +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + ProxyTorchDispatchMode, + track_tensor_tree, + maybe_handle_decomp, +) +from torch._C import DispatchKey +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch._prims_common import elementwise_dtypes, ELEMENTWISE_TYPE_PROMOTION_KIND +from torch._higher_order_ops.utils import autograd_not_implemented + +# TODO to figure out a more generic approach +ALLOWABLE_OPS = [ + torch.ops.aten.linear.default, + torch.ops.aten.mm.default, + torch.ops.aten.conv2d.default, + torch.ops.aten.convolution.default, + torch.ops.aten.mul.Tensor, + torch.ops.aten.mul.Scalar, + torch.ops.aten.div.Tensor, + torch.ops.aten.div.Scalar, +] + + +class OutDtypeOperator(HigherOrderOperator): + """ + The out_dtype operator takes an existing ATen functional operator, an + `out_dtype` argument, and arguments to the original operator, and executes + the original operator and returns a Tensor with the `out_dtype` precision. + This operator does not mandate a compute precision so it allows the + representation to not be opinionated about the exact implementation. 
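+
+ For example (illustrative), `out_dtype(torch.ops.aten.mm.default, torch.int32, a_int8, b_int8)`
+ describes an int8 matmul whose result is returned as int32 while leaving the
+ accumulation precision up to the backend; the dense implementation below routes
+ exactly this pattern to `torch._int_mm` on CUDA inputs.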
+ + The general implementation for all operators will be the following: + 1. Promote inputs dtypes based on default PyTorch dtype promotion rules, + using the dtypes of all input Tensors/Scalars and the `out_dtype` + arugument. + 2. Execute the operator + 3. Cast the output to `out_dtype` + """ + + + def __init__(self): + super().__init__("out_dtype") + # TODO(ydwu4): Subclassing HigherOrderOperator causes __module__ to + # become different (torch._higher_order_ops.out_dtype) which will result + # in torch.fx to record the op incorrectly in the graph. + self.__module__ = "torch.ops.higher_order" + + def __call__(self, op, output_dtype, *args): + if not isinstance(op, torch._ops.OpOverload): + raise ValueError("out_dtype's first argument must be an OpOverload") + if op._schema.is_mutable: + raise ValueError("out_dtype's first argument needs to be a functional operator") + if not ( + len(op._schema.returns) == 1 and + isinstance(op._schema.returns[0].type, torch.TensorType) + ): + raise ValueError( + "out_dtype's can only apply to ops that return a single tensor" + f"Instead got {[r.type for r in op._schema.returns]}" + ) + + if op not in ALLOWABLE_OPS: + raise ValueError( + f"out_dtype only allows the following operators: {ALLOWABLE_OPS}." + ) + + res = super().__call__(op, output_dtype, *args) + + return res + + +out_dtype = OutDtypeOperator() + +def trace_out_dtype(proxy_mode, func_overload, op, output_dtype, *args): + # NB: Long-term we should put the decomposition logic into + # ProxyTorchDispatchMode so that people do not need to call maybe_handle_decomp + # in all HigherOrderOp proxy implementations. + r = maybe_handle_decomp(proxy_mode, func_overload, (op, output_dtype, *args), {}) + if r is not NotImplemented: + return r + + with disable_proxy_modes_tracing(): + # This is a simplified implementation of this operator just for tracing. 
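+ # The eagerly computed `out` only supplies example outputs/metadata for
+ # track_tensor_tree below; it is not the code path used at execution time.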
+ # Actual implementation may also first promote the arguments + out = op(*args).to(dtype=output_dtype) + + node_args = (op, output_dtype, *args) + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args) + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", func_overload, proxy_args, {}, name="out_dtype" + ) + return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer) + + +@out_dtype.py_impl(DispatchKey.CompositeExplicitAutograd) +def out_dtype_dense( + op: torch._ops.OpOverload, + output_dtype: torch.dtype, + *args +): + if is_int_mm(op, output_dtype, args): + return torch._int_mm(*args) + return out_dtype_fallback(op, output_dtype, *args) + + +def is_int_mm(op, output_dtype, args): + return ( + op == torch.ops.aten.mm.default and + output_dtype == torch.int32 and + len(args) == 2 and + args[0].dtype == torch.int8 and + args[1].dtype == torch.int8 and + args[0].is_cuda and + args[1].is_cuda + ) + + +def out_dtype_fallback(op, output_dtype, *args): + flat_inputs = pytree.arg_tree_leaves(*args) + [torch.ones(1, dtype=output_dtype)] + promote_dtype: torch.dtype = elementwise_dtypes( + *flat_inputs, + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + )[0] + + casted_args = pytree.tree_map_only( + torch.Tensor, lambda arg: arg.to(dtype=promote_dtype), args + ) + res = op(*casted_args).to(dtype=output_dtype) + return res + + +out_dtype.py_impl(DispatchKey.Autograd)(autograd_not_implemented(out_dtype, deferred_error=True)) + + +@out_dtype.py_impl(ProxyTorchDispatchMode) +def out_dtype_proxy( + mode: ProxyTorchDispatchMode, + op: torch._ops.OpOverload, + output_dtype: torch.dtype, + *args +): + if mode.enable_tracing: + return trace_out_dtype(mode, out_dtype, op, output_dtype, *args) + else: + return out_dtype(op, output_dtype, *args) + + +@out_dtype.py_impl(FakeTensorMode) +def out_dtype_fake_tensor_mode( + mode: FakeTensorMode, + op: torch._ops.OpOverload, + output_dtype: torch.dtype, + *args +): + with mode: + return out_dtype_dense(op, output_dtype, *args) + + +@out_dtype.py_functionalize_impl +def out_dtype_func(ctx, op, output_dtype, *args): + unwrapped_args = tuple(ctx.unwrap_tensors(arg) for arg in args) + + with ctx.redispatch_to_next(): + res = out_dtype(op, output_dtype, *unwrapped_args) + return ctx.wrap_tensors(res) diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/strict_mode.py b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/strict_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..57e319230a4abc1bb2dec0763f098023a43dda40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/strict_mode.py @@ -0,0 +1,100 @@ +import torch +import torch._subclasses.functional_tensor + +import torch.utils._pytree as pytree + +from torch._C import DispatchKey +from torch._functorch.utils import exposed_in + +from torch._higher_order_ops.utils import _set_compilation_env, autograd_not_implemented +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + make_fx, + ProxyTorchDispatchMode, + track_tensor_tree, +) +from torch.utils._python_dispatch import _get_current_dispatch_mode + + +@exposed_in("torch") +def strict_mode(callable, operands): + if torch.compiler.is_dynamo_compiling(): + return strict_mode_op(callable, operands) + + with _set_compilation_env(): + with torch._dynamo.utils.disable_cache_limit(): + return 
torch.compile(strict_mode_op, backend="eager", fullgraph=True)( + callable, operands + ) + + +strict_mode_op = HigherOrderOperator("strict_mode") + + +@strict_mode_op.py_impl(DispatchKey.CompositeExplicitAutograd) +def strict_mode_op_dense(callable, operands): + mode = _get_current_dispatch_mode() + assert mode is None, "Mode should never be enabled for CPU/CUDA key" + return callable(*operands) + + +strict_mode_op.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(strict_mode_op, deferred_error=True) +) + + +@strict_mode_op.py_impl(ProxyTorchDispatchMode) +def inner(mode, callable, operands): + if mode.enable_tracing: + return trace_strict_mode(mode, strict_mode_op, callable, operands) + else: + return strict_mode_op(callable, operands) + + +def trace_strict_mode(mode, strict_mode_op, callable, operands): + pre_dispatch = getattr(mode, "pre_dispatch", False) + + with disable_proxy_modes_tracing(): + graph = make_fx(callable, pre_dispatch=pre_dispatch)(*operands) + + next_name = None + i = 0 + while not next_name: + candidate = f"strict_graph_{i}" + if hasattr(mode.tracer.root, candidate): + i += 1 + else: + next_name = candidate + + graph_name = next_name + mode.tracer.root.register_module(graph_name, graph) + + args = (graph, operands) + + proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, args) + + out_proxy = mode.tracer.create_proxy( + "call_function", strict_mode_op, proxy_args, {}, name="strict_mode" + ) + + out = graph(*operands) + return track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer) + + +@strict_mode_op.py_impl(FakeTensorMode) +def strict_mode_fake_tensor_mode(mode, callable, operands): + with mode: + true_outs = callable(*operands) + return true_outs + + +@strict_mode_op.py_functionalize_impl +def strict_mode_func(ctx, callable, inputs): + unwrapped_inputs = ctx.unwrap_tensors(inputs) + with ctx.redispatch_to_next(): + functional_callable = ctx.functionalize(callable) + + cond_return = strict_mode_op(functional_callable, unwrapped_inputs) + return ctx.wrap_tensors(cond_return) diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/triton_kernel_wrap.py b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/triton_kernel_wrap.py new file mode 100644 index 0000000000000000000000000000000000000000..89b94561affddf51b5992ab30d52c9c930668a3e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/triton_kernel_wrap.py @@ -0,0 +1,842 @@ +import dataclasses +import logging +import threading +import warnings +from collections import defaultdict +from typing import Any, Dict, List, Optional, Union + +import torch.utils._pytree as pytree +from torch import Tensor +from torch._C import DispatchKey +from torch._ops import HigherOrderOperator +from torch._prims_common import clone_preserve_strides +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + ProxyTorchDispatchMode, + track_tensor_tree, +) + +log = logging.getLogger("torch._dynamo") + + +############################################################################### +# Kernel Side Table + + +# We cannot put Triton Kernels into the FX graph as the graph nodes +# do not support arbitrary functions. +# Use a side table. 
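+# add_kernel() registers a kernel and returns an integer index; the FX graph
+# carries only that index, and get_kernel() recovers the kernel from it later.
+# Illustrative usage (`my_jit_kernel` is a stand-in for a user @triton.jit fn):
+#   idx = kernel_side_table.add_kernel(my_jit_kernel)
+#   ... the graph node stores `idx` as `kernel_idx` ...
+#   kernel = kernel_side_table.get_kernel(idx)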
+# We use two dicts so that fetching both the kernel and id are O(1) +class KernelSideTable: + id_to_kernel: Dict[int, Any] = dict() + kernel_to_id: Dict[Any, int] = dict() + lock = threading.Lock() + + # Returns index on the table + def add_kernel(self, kernel) -> int: + with self.lock: + if kernel in self.kernel_to_id: + return self.kernel_to_id[kernel] + + idx = len(self.id_to_kernel) + self.id_to_kernel[idx] = kernel + self.kernel_to_id[kernel] = idx + return idx + + # Returns the triton kernel at the given index + def get_kernel(self, idx: int): + # No need to lock here as fetching from dict is atomic + assert idx in self.id_to_kernel + return self.id_to_kernel[idx] + + # Resets the table (only meant to be used in unit tests) + # This is only safe assuming single threaded execution + def reset_table(self) -> None: + self.id_to_kernel = dict() + self.kernel_to_id = dict() + + +kernel_side_table = KernelSideTable() + + +############################################################################### +# Mutation Tracker + + +@dataclasses.dataclass(frozen=True) +class Param: + idx: int + + +@dataclasses.dataclass(frozen=True) +class Intermediate: + idx: int + + def fake(self): + return self.idx < 0 + + +@dataclasses.dataclass(frozen=True) +class Op: + name: str + fn_call_name: Optional[str] + args: List[Union[Param, Intermediate]] + ret: Intermediate = dataclasses.field(repr=False) + + def __post_init__(self): + if self.name == "tt.call": + assert self.fn_call_name is not None + else: + assert self.fn_call_name is None + + +def generate_ttir(kernel, kwargs): + """ + Uses Triton's internal code generation to create TTIR + """ + from triton.compiler.compiler import ASTSource + from triton.runtime.autotuner import Autotuner + from triton.runtime.jit import JITFunction + + import torch + from torch._subclasses.fake_tensor import FakeTensor + + if isinstance(kernel, Autotuner): + if len(kernel.configs) > 0: + # If we are autotuning, then it doesn't matter which version gets + # picked for tracing purposes, so lets pick the first one + kwargs = {**kwargs, **kernel.configs[0].kwargs} + kernel = kernel.fn + + assert isinstance(kernel, JITFunction) + + if len(kwargs) != len(kernel.arg_names): + raise Exception("Incorrect number of arguments passed to kernel") + + # Replace all SymExprs with a regular value for TTIR generation + # Replace all FakeTensor with real tensors + # These replacements are needed for triton's type, key and config functions + ordered_args: Dict[str, Any] = {} + for name in kernel.arg_names: + a = kwargs[name] + if isinstance(a, (torch.SymInt, torch.SymFloat, torch.SymBool)): + ordered_args[name] = 2 + elif isinstance(a, FakeTensor): + ordered_args[name] = torch.empty(2, dtype=a.dtype) + else: + ordered_args[name] = a + + ordered_tensor_names = [ + name for name, arg in ordered_args.items() if isinstance(arg, Tensor) + ] + specialization = kernel._get_config(*ordered_args.values()) + constants = { + i: arg + for i, arg in enumerate(ordered_args.values()) + if not isinstance(arg, Tensor) + } + + # Build kernel signature -- doesn't include constexpr arguments. 
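+ # (maps positional index -> the Triton type string of each remaining argument,
+ # e.g. a float32 tensor argument becomes the pointer type '*fp32')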
+ signature = { + i: kernel._type_of(kernel._key_of(arg)) + for i, arg in enumerate(ordered_args.values()) + if i not in kernel.constexprs + } + + def get_backend(): + from triton.compiler.backends.cuda import CUDABackend + from triton.runtime.driver import driver + + target = driver.get_current_target() + return CUDABackend(target) + + backend = get_backend() + + options = backend.parse_options(dict()) + # triton._C.libtriton.triton.ir.load_dialects(context) + # backend.load_dialects(context) + + src = ASTSource(kernel, signature, constants, specialization) + ttir_module = src.make_ir(options) + if not ttir_module.verify(): + raise Exception("Verification for TTIR module has failed") + + return ttir_module, ordered_tensor_names + + +def ttir_to_functions(ttir_module) -> Dict[str, Dict[Intermediate, List[Op]]]: + """ + Walk the `ttir_module` bottom up to mine the `functions` from + the structured MLIR entities representing the Triton kernel + (mlir::Operation, mlir::Block, mlir::Region). + """ + functions: Dict[str, Dict[Intermediate, List[Op]]] = {} + + # block id --> op result (Intermediate) --> one or more ops + op_stack: Dict[int, Dict[Intermediate, List[Op]]] = defaultdict( + lambda: defaultdict(list) + ) + region_id_to_block_ids: Dict[int, List[int]] = defaultdict(list) + block_id_to_block_arg_ids: Dict[int, List[int]] = {} + replacements: Dict[int, Union[Intermediate, Param]] = {} + reindex_map: Dict[int, int] = {} + next_fake_intermediate = 0 + + def reindex(idx): + if idx not in reindex_map: + reindex_map[idx] = len(reindex_map) + return reindex_map[idx] + + def mlir_to_functions(op) -> None: + name: str = op.get_name() + if name == "builtin.module": + # this wraps all tt.func ops + return + + operand_ids: List[int] = [ + reindex(op.get_operand(i).id()) for i in range(op.get_num_operands()) + ] + result_ids: List[int] = [ + reindex(op.get_result(i).id()) for i in range(op.get_num_results()) + ] + + child_block_ids: List[int] = [] + for i in [op.get_region(i).id() for i in range(op.get_num_regions())]: + # as the walk is bottom-up, the region_id_to_block_ids[i] + # must be populated by the time we process the enclosing op + child_block_ids.extend(region_id_to_block_ids[i]) + + parent_block_id = -1 + parent_block = op.get_block() + if parent_block is not None: + parent_block_id = parent_block.id() + if parent_block_id not in block_id_to_block_arg_ids: + block_id_to_block_arg_ids[parent_block_id] = [] + for i in range(parent_block.get_num_arguments()): + block_id_to_block_arg_ids[parent_block_id].append( + reindex(parent_block.get_argument(i).id()), + ) + # the region info is collected via ops' parent blocks to be + # used later when the region's encloding op is traversed + parent_region = parent_block.get_parent() + if parent_region is not None: + region_id_to_block_ids[parent_region.id()].append(parent_block_id) + + nonlocal next_fake_intermediate + + if name == "tt.func": + # for function ops: gather and inline + # the ops from all child blocks + fn_ops = defaultdict(list) + for child_block_id in child_block_ids: + for result, block_fn_ops in op_stack.pop(child_block_id).items(): + for block_fn_op in block_fn_ops: + fn_ops[result].append(block_fn_op) + + # replace the corresponding Intermediates in the + # child op args with the function args (Params) + for i, idx in enumerate(block_id_to_block_arg_ids[child_block_ids[0]]): + replacements[idx] = Param(i) + + for fn_op_list in fn_ops.values(): + for fn_op in fn_op_list: + for i in range(len(fn_op.args)): + arg = fn_op.args[i] + if 
isinstance(arg, Intermediate) and arg.idx in replacements: + fn_op.args[i] = replacements[arg.idx] + + # next function capture starts + # with empty replacements + replacements.clear() + + fn_name = op.get_str_attr("sym_name") + functions[fn_name] = fn_ops + elif child_block_ids: + if name in ("scf.if", "scf.for", "scf.while"): + # for blocked control flow ops: inline the enclosed + # ops into the parent block + rewire the last op in + # each child block (yield) to return the scf result + yield_ops = [] + for block_id in child_block_ids: + # the block args used as operands of the ops in the block + # (and nested blocks inlined in the current block by now) + # are replaced by new fake Intermediates to avoid "this + # operand is not returned by anything other op in the fn" + # error in the downstream analysis + for idx in block_id_to_block_arg_ids[block_id]: + next_fake_intermediate -= 1 + replacements[idx] = Intermediate(next_fake_intermediate) + + if block_id in op_stack: + block_ops = op_stack.pop(block_id) + if not block_ops: + continue + last_ret, last_ops = block_ops.popitem() + if all(op.name == "scf.yield" for op in last_ops): + # if last_ops are scf.yield, treat them separately + yield_ops.extend(last_ops) + else: + # otherwise, return last_ops to the block + block_ops[last_ret] = last_ops + for op_result, child_ops in block_ops.items(): + op_stack[parent_block_id][op_result].extend(child_ops) + + scf_results = [Intermediate(idx) for idx in result_ids] + for scf_result in scf_results: + for yield_op in yield_ops: + op_stack[parent_block_id][scf_result].append(yield_op) + else: + # TODO(oulgen): add support for tt.reduce + raise Exception( + f"Unknown blocked function: {name}. Can't capture the TTIR." + ) + else: + callee = None + if name == "tt.call": + callee = op.get_flat_symbol_ref_attr("callee") + args: List[Union[Param, Intermediate]] = [ + Intermediate(operand) for operand in operand_ids + ] + block_ops = op_stack[parent_block_id] + if result_ids: + for result_id in result_ids: + res = Intermediate(result_id) + block_ops[res].append(Op(name, callee, args, res)) + else: + next_fake_intermediate -= 1 + fake_res = Intermediate(next_fake_intermediate) + block_ops[fake_res].append(Op(name, callee, args, fake_res)) + + ttir_module.walk(mlir_to_functions) + + return functions + + +def parse_ttir(ttir, kwargs): + """ + Given a Triton emitted TTIR text, this function lexes and parses the + code using a minimal grammar defined inside. During the lexing/parsing, + we drop any constant value and type information as they are not + necessary to us. + Being able to choose what we need makes this not a general purpose TTIR + parser which further makes parsing much simpler. + """ + # TODO(oulgen): + # - Support closures (e.g. "tt.reduce") + + try: + import lark # type: ignore[import-not-found] + from lark import Lark, Transformer, v_args + except ModuleNotFoundError: + warnings.warn( + "Using slow path for user-defined Triton kernels. `pip install lark` to fix this." 
+ ) + raise + + # Ops looks like one of the following forms: + # + # %14 = tt.addptr %13, %4 : tensor<4x!tt.ptr>, tensor<4xi32> + # tt.store %14, %12, %5 {cache = 1 : i32, evict = 1 : i32} : tensor<4xf32> + # %15 = "tt.atomic_rmw"(%14, %12, %5) <{atomic_rmw_op = 5 : i32, scope = 1 : i32, sem = 4 : i32}> : (tensor<4x!tt.ptr>, tensor<4xf32>, tensor<4xi1>) -> tensor<4xf32> # noqa: B950 + grammar = """ + start: (module_block | loc_line)+ + + loc_line: "#loc" /.+/ NEWLINE + + module_block: "module" "{" func_block+ "}" LOC + + func_block: "tt.func" ("public"|"private") FN_NAME "(" /.+/ NEWLINE stmt* "}" LOC -> process_func + + ?stmt: op | if | for | while | condition_stmt | label_stmt | cf_stmt + + if: [assign_lhs "="] "scf.if" args rest stmt* "}" "else" "{" stmt* "}" LOC -> process_if + for: [assign_lhs "="] "scf.for" args rest stmt* "}" divisibility_annot? LOC -> process_for + while: [assign_lhs "="] "scf.while" args rest stmt* "}" "do" "{" stmt* "}" LOC -> process_while + + condition_stmt: "scf.condition" "(" arg ")" args rest + label_stmt: LABEL ":" "// pred:" LABEL + | LABEL "(" /.+/ NEWLINE + cf_stmt: "cf" "." NAME /.+/ NEWLINE + + op: OP_NAME LOC + | [assign_lhs "="] OP_NAME [FN_NAME] args rest? -> process_op + + ?rest: (":" | "{" | "\\"" | "->" | "<" | "=") /.+/ NEWLINE + divisibility_annot: "{" "tt.divisibility_arg1" /[^}]+/ "}" + + args: | "(" ")" | "("? arg ("," arg)* ")"? + + ?arg: INTERMEDIATE + | INTERMEDIATE_CONSTANT + | CONSTANT + | PARAM + | "[" args "]" + | arg_with_index + + ?arg_with_index: arg "#" DIGIT+ + + ?assign_lhs: (INTERMEDIATE | INTERMEDIATE_CONSTANT) [":" DIGIT+] + + PARAM.5: "%arg" DIGIT+ + INTERMEDIATE.4: "%" DIGIT+ + INTERMEDIATE_CONSTANT.3: "%" NAME + CONSTANT: FLOAT | DIGIT+ | NAME ("<" DIGIT+ ">")? + LABEL: "^bb" DIGIT+ + + NAME: (LETTER | DIGIT | "_")+ + NON_CF_NAME: /(?!(cf))/ NAME + FN_NAME: "@" (NAME | ESCAPED_STRING) + OP_NAME: "\\""? NON_CF_NAME ("." NAME)+ "\\""? + + LOC.5: "loc(#loc" DIGIT* ")" + + %import common.LETTER + %import common.DIGIT + %import common.WS + %import common.NEWLINE + %import common.ESCAPED_STRING + %import common.FLOAT + %ignore WS + """ + + next_fake_intermediate = 0 + + def convert(token): + if isinstance(token, lark.tree.Tree): + if token.data == "args": + res = [] + for a in token.children: + c = convert(a) + if isinstance(c, list): + res.extend(c) + else: + res.append(c) + return res + elif token.data in {"assign_lhs", "arg_with_index"}: + # Drop length/index qualifier + return convert(token.children[0]) + else: + raise AssertionError(f"Tree node with {token.data}") + + if token is None or ( + isinstance(token, lark.lexer.Token) + and token.type in ("CONSTANT", "INTERMEDIATE_CONSTANT") + ): + nonlocal next_fake_intermediate + next_fake_intermediate -= 1 + return Intermediate(next_fake_intermediate) + + assert isinstance(token, lark.lexer.Token) + + if token.type == "INTERMEDIATE": + return Intermediate(int(token.value[len("%") :])) + if token.type == "PARAM": + return Param(int(token.value[len("%arg") :])) + + raise AssertionError(f"{type(token.type)} => {token.value} invalid") + + # In alternative representation, function names are quoted. + # It should be possible to move this into the grammar alltogether. 
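+ # convert_name() below strips that surrounding pair of double quotes when present.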
+ def convert_name(token): + if token is None: + return None + s = token.value + if len(s) > 2 and s[0] == '"' and s[-1] == '"': + return s[1:-1] + return s + + functions: Dict[str, Dict[Intermediate, List[Op]]] = {} + + def extend_dict_list(d1, d2): + for key, values in d2.items(): + d1[key].extend(values) + + @v_args(inline=True) + class TransformOps(Transformer): + def process_op(self, ret, op_name, fn_name, args, *rest): + return Op( + convert_name(op_name), + convert_name(fn_name), + convert(args), + convert(ret), + ) + + def process_func(self, name, _args, *stmts): + ops: Dict[Intermediate, List[Op]] = defaultdict(list) + for e in stmts: + if isinstance(e, Op): + ops[e.ret].append(e) + elif isinstance(e, dict): + extend_dict_list(ops, e) + functions[name.value] = ops + + def _process_scf(self, ret, stmts): + ret = convert(ret) + ops: Dict[Intermediate, List[Op]] = defaultdict(list) + for e in stmts: + if isinstance(e, Op): + if e.name == "scf.yield": + ops[ret].append(Op(e.name, None, e.args, ret)) + else: + ops[e.ret].append(e) + elif isinstance(e, dict): + extend_dict_list(ops, e) + return ops + + def process_if(self, ret, _args, _rest, *stmts): + return self._process_scf(ret, stmts) + + def process_for(self, ret, _args, _rest, *stmts): + return self._process_scf(ret, stmts) + + def process_while(self, ret, _args, _rest, *stmts): + return self._process_scf(ret, stmts) + + parser = Lark( + grammar, parser="lalr", maybe_placeholders=True, transformer=TransformOps() + ) + parser.parse(ttir) + return functions + + +class MemoizeWithCycleCheck: + def __init__(self, fn): + self.fn = fn + self.reset() + + def __call__(self, functions, fn_name, num_args): + key = (fn_name, num_args) + if key not in self.cache: + self.cache[key] = None + self.cache[key] = self.fn(functions, fn_name, num_args) + if self.cache[key] is None: + raise Exception("Recursion is not supported") + return self.cache[key] + + def reset(self): + self.cache = {} + + +@MemoizeWithCycleCheck +def analyze_kernel_mutations(functions, fn_name, num_args): + """ + Analyzes the graph to detect all sinks from a predefined list of sinks + by using triton's MemWrite trait list. NOTE: What if triton exposed this? + From each sink, it traverses the CFG backwards to identify all the input + pointers that are mutated. + """ + # Name of mutation op to mutated parameter indices + # List from Triton Github include/triton/Dialect/Triton/IR/TritonOps.td + # All the OPs that have MemWrite trait. + # What if Triton exposed this? 
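+ # For example, with the TTIR shown in parse_ttir's comment above:
+ #   tt.store %14, %12, %5      -> operand 0 (%14) is a mutation sink
+ #   %14 = tt.addptr %13, %4    -> walking backwards marks %13, %4, ...
+ # and whenever the walk reaches a Param %argN, kernel argument N is mutated.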
+ MUTATION_OPS = {"tt.store": [0], "tt.atomic_cas": [0], "tt.atomic_rmw": [0]} + # Ops that we want to bail out on + UNKNOWN_OPS = {"tt.elementwise_inline_asm"} + + stack: List[Union[Param, Intermediate]] = [] + visited = set() + ops = functions[fn_name] + for op_list in ops.values(): + for op in op_list: + if op.name in UNKNOWN_OPS: + raise Exception( + f"ttir analysis hit an op we do not know how to analyze: {op.name}" + ) + + if op.name == "tt.call": + assert op.fn_call_name in functions + mutations = analyze_kernel_mutations( + functions, op.fn_call_name, len(op.args) + ) + stack.extend(arg for arg, mutated in zip(op.args, mutations) if mutated) + else: + for idx in MUTATION_OPS.get(op.name, []): + stack.append(op.args[idx]) + + # The following is an iterative DFS algorithm + mutated = [False] * num_args + while stack: + arg = stack.pop() + if arg in visited: + continue + + visited.add(arg) + + if isinstance(arg, Param): + if arg.idx >= num_args: + # This is an argument defined in the kernel, not passed in + continue + mutated[arg.idx] = True + elif isinstance(arg, Intermediate) and not arg.fake(): + for op in ops[arg]: + # Skip arguments to load + if op.name != "tt.load": + stack.extend(op.args) + return mutated + + +def identify_mutated_tensors(kernel, kwargs): + """ + Given a triton kernel and the arguments for this kernel, this function + 1) Retrieves the TTIR converted version of the kernel from Triton's API. + 2) Parses the TTIR and creates a control flow graph + 3) Analyzes the graph to detect all input tensor mutations + """ + + ttir_module = None + functions = None + try: + from torch._dynamo import config + + if not config.optimize_user_defined_triton_kernels: + raise Exception("optimize_user_defined_triton_kernels is False") + + ttir_module, ordered_tensor_names = generate_ttir(kernel, kwargs) + + # extract functions from TTIR + if hasattr(ttir_module, "walk"): + # use MLIR bindings exposed by Triton code + functions = ttir_to_functions(ttir_module) + else: + # parse string representation of Triton IR + functions = parse_ttir(str(ttir_module), kwargs) + + assert functions is not None + kernel_name = next(iter(functions.keys())) + # Triton codegen modifies the name + assert kernel.fn.__name__ in kernel_name + # Reset the cache between top level invocations + # The cache for analyze kernel mutations is mainly used for cycle + # detection, so each top level invocation needs a clean cache + analyze_kernel_mutations.reset() + mutations = analyze_kernel_mutations( + functions, kernel_name, len(ordered_tensor_names) + ) + + return [ + ordered_tensor_names[i] for i, mutated in enumerate(mutations) if mutated + ] + except Exception as e: + import traceback + + warnings.warn( + "Encountered an exception in identify_mutated_tensors, " + "assuming every input is mutated:\n" + "".join( + traceback.TracebackException.from_exception(e).format() # noqa: G001 + ) + ) + if ttir_module is not None: + log.debug("TTIR:\n%s", str(ttir_module)) + if functions is not None: + log.debug("functions:") + for name, fn in functions.items(): + log.debug("===\t%s\t===", name) + for ret, ops in fn.items(): + log.debug("%s\t=>\t%s", ret, ops) + return [key for key, value in kwargs.items() if isinstance(value, Tensor)] + + +############################################################################### +# Triton Kernel Wrappers + + +# Used for wrapping a Triton Kernel +class TritonKernelWrapperMutation(HigherOrderOperator): + def __init__(self): + super().__init__("triton_kernel_wrapper_mutation") + + 
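+# Called as triton_kernel_wrapper_mutation(kernel_idx=..., grid=..., kwargs=...);
+# it launches the kernel in-place on the tensors inside `kwargs` (see the
+# CompositeExplicitAutograd impl below).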
+triton_kernel_wrapper_mutation = TritonKernelWrapperMutation() + + +# Used for wrapping a Triton Kernel in a functional manner +class TritonKernelWrapperFunctional(HigherOrderOperator): + def __init__(self): + super().__init__("triton_kernel_wrapper_functional") + + +triton_kernel_wrapper_functional = TritonKernelWrapperFunctional() + + +@triton_kernel_wrapper_mutation.py_impl(DispatchKey.CompositeExplicitAutograd) +def triton_kernel_wrapper_mutation_dense(*, kernel_idx, grid, kwargs): + from torch._inductor.codegen.wrapper import user_defined_kernel_grid_fn_code + + kernel = kernel_side_table.get_kernel(kernel_idx) + + if len(grid) == 1: + grid_fn = grid[0] + else: + fn_name, code = user_defined_kernel_grid_fn_code( + kernel.fn.__name__, kernel.configs, grid + ) + namespace: Dict[str, Any] = {} + exec(code, namespace) + grid_fn = namespace[fn_name] + + kernel[grid_fn](**kwargs) + + +@triton_kernel_wrapper_mutation.py_impl(FakeTensorMode) +def triton_kernel_wrapper_mutation_fake_tensor_mode(mode, *, kernel_idx, grid, kwargs): + with mode: + return None + + +def trace_triton_kernel_wrapper(proxy_mode, func_overload, node_args): + with disable_proxy_modes_tracing(): + out = func_overload(**node_args) + + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args) + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", + func_overload, + (), + proxy_args, + name=func_overload.__name__ + "_proxy", + ) + return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer) + + +@triton_kernel_wrapper_mutation.py_impl(ProxyTorchDispatchMode) +def triton_kernel_wrapper_mutation_proxy_torch_dispatch_mode( + mode, *, kernel_idx, grid, kwargs +): + if mode.enable_tracing: + trace_triton_kernel_wrapper( + mode, + triton_kernel_wrapper_mutation, + {"kernel_idx": kernel_idx, "grid": grid, "kwargs": kwargs}, + ) + else: + triton_kernel_wrapper_mutation(kernel_idx=kernel_idx, grid=grid, kwargs=kwargs) + + return None + + +@triton_kernel_wrapper_mutation.py_functionalize_impl +def triton_kernel_wrapper_mutation_functionalize(ctx, kernel_idx, grid, kwargs): + unwrapped_kwargs = ctx.unwrap_tensors(kwargs) + kernel = kernel_side_table.get_kernel(kernel_idx) + # TODO(oulgen): Preexisting bug, if two kernel inputs are views of each + # other, and one gets mutated in kernel, and later another gets mutated, + # they are no longer equal. Fix this by graph breaking on this condition + # earlier in dynamo. 
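+ # Only the kwargs that the TTIR analysis reports as mutated are cloned; the
+ # functional variant returns those clones, and they are written back into the
+ # original tensors below via ctx.replace / ctx.commit_update.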
+ tensors_to_clone = identify_mutated_tensors(kernel, unwrapped_kwargs) + with ctx.redispatch_to_next(): + unwrapped_outputs = triton_kernel_wrapper_functional( + kernel_idx=kernel_idx, + grid=grid, + kwargs=unwrapped_kwargs, + tensors_to_clone=tensors_to_clone, + ) + + assert set(unwrapped_outputs.keys()).issubset(set(kwargs.keys())) + for key, output_arg in unwrapped_outputs.items(): + if not isinstance(output_arg, Tensor): + continue + input_arg = kwargs[key] + assert isinstance(input_arg, Tensor) + + ctx.replace(input_arg, output_arg) + # indicate that above replace is hidden from autograd + ctx.mark_mutation_hidden_from_autograd(input_arg) + ctx.commit_update(input_arg) + ctx.sync(input_arg) + # sync calls replace_ under the hood, so again indicate that + # this indirect replace is hidden from autograd + ctx.mark_mutation_hidden_from_autograd(input_arg) + return None + + +@triton_kernel_wrapper_functional.py_impl(DispatchKey.CompositeExplicitAutograd) +def triton_kernel_wrapper_functional_dense( + *, kernel_idx, grid, kwargs, tensors_to_clone +): + # TODO(oulgen): For performance reasons, we want to ensure that these + # `clone_preserve_strides` calls are never executed at runtime + # (inductor should always optimize them away). + # Requires https://github.com/pytorch/pytorch/issues/109240 + kwargs = { + key: (clone_preserve_strides(val) if key in tensors_to_clone else val) + for key, val in kwargs.items() + } + triton_kernel_wrapper_mutation(kernel_idx=kernel_idx, grid=grid, kwargs=kwargs) + return {key: val for key, val in kwargs.items() if key in tensors_to_clone} + + +@triton_kernel_wrapper_functional.py_impl(FakeTensorMode) +def triton_kernel_wrapper_functional_fake_tensor_mode( + mode, *, kernel_idx, grid, kwargs, tensors_to_clone +): + # TODO(oulgen): For performance reasons, we want to ensure that these + # `clone_preserve_strides` calls are never executed at runtime + # (inductor should always optimize them away). 
+ # Requires https://github.com/pytorch/pytorch/issues/109240 + with mode: + return { + key: clone_preserve_strides(val) + for key, val in kwargs.items() + if key in tensors_to_clone + } + + +@triton_kernel_wrapper_functional.py_impl(ProxyTorchDispatchMode) +def triton_kernel_wrapper_functional_proxy_torch_dispatch_mode( + mode, *, kernel_idx, grid, kwargs, tensors_to_clone +): + if mode.enable_tracing: + return trace_triton_kernel_wrapper( + mode, + triton_kernel_wrapper_functional, + { + "kernel_idx": kernel_idx, + "grid": grid, + "kwargs": kwargs, + "tensors_to_clone": tensors_to_clone, + }, + ) + else: + return triton_kernel_wrapper_functional( + kernel_idx=kernel_idx, + grid=grid, + kwargs=kwargs, + tensors_to_clone=tensors_to_clone, + ) + + +@triton_kernel_wrapper_functional.py_functionalize_impl +def triton_kernel_wrapper_functional_functionalize( + ctx, kernel_idx, grid, kwargs, tensors_to_clone +): + unwrapped_kwargs = ctx.unwrap_tensors(kwargs) + with ctx.redispatch_to_next(): + outputs = triton_kernel_wrapper_functional( + kernel_idx=kernel_idx, + grid=grid, + kwargs=unwrapped_kwargs, + tensors_to_clone=tensors_to_clone, + ) + return ctx.wrap_tensors(outputs) + + +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.PythonDispatcher) # type: ignore[attr-defined] +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.PythonTLSSnapshot) # type: ignore[attr-defined] +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.ADInplaceOrView) +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.BackendSelect) +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutocastCPU) # type: ignore[attr-defined] +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutocastCUDA) # type: ignore[attr-defined] +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutogradCUDA) +triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutogradCPU) + +triton_kernel_wrapper_functional.fallthrough(DispatchKey.PythonDispatcher) # type: ignore[attr-defined] +triton_kernel_wrapper_functional.fallthrough(DispatchKey.PythonTLSSnapshot) # type: ignore[attr-defined] +triton_kernel_wrapper_functional.fallthrough(DispatchKey.ADInplaceOrView) +triton_kernel_wrapper_functional.fallthrough(DispatchKey.BackendSelect) +triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutocastCPU) # type: ignore[attr-defined] +triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutocastCUDA) # type: ignore[attr-defined] +triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutogradCUDA) +triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutogradCUDA) +triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutogradCPU) diff --git a/venv/lib/python3.10/site-packages/torch/_higher_order_ops/while_loop.py b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/while_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..2ee4b51f2da6975b50b7df7ba0f3555bd77b21fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_higher_order_ops/while_loop.py @@ -0,0 +1,232 @@ +import torch +import torch.utils._pytree as pytree + +from torch._C import DispatchKey + +from torch._higher_order_ops.utils import ( + _has_potential_branch_input_alias, + _has_potential_branch_input_mutation, + _set_compilation_env, + autograd_not_implemented, + reenter_make_fx, + UnsupportedAliasMutationException, +) +from torch._ops import HigherOrderOperator +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + 
ProxyTorchDispatchMode, + track_tensor_tree, +) + + +class WhileLoopOp(HigherOrderOperator): + def __call__(self, cond_fn, body_fn, operands): + if not isinstance(cond_fn, torch.fx.GraphModule) or not isinstance( + body_fn, torch.fx.GraphModule + ): + raise RuntimeError( + "cond_fn and body_fn must be torch.fx.GraphModule, got " + f"{type(cond_fn)} and {type(body_fn)}" + ) + if not isinstance(operands, tuple): + raise RuntimeError("operands must be a tuple, got " f"{type(operands)}") + if not all(isinstance(t, (torch.Tensor, int, float, bool)) for t in operands): + raise RuntimeError( + "operands must be a tuple of tensors, ints, floats, or bools, got " + f"{operands}" + ) + return super().__call__(cond_fn, body_fn, operands) + + +while_loop_op = HigherOrderOperator("while_loop") + + +def while_loop(cond_fn, body_fn, operands): + r""" + Run body_fn(*operands) while cond_fn(*operands) returns a True scalar tensor. Returns the output of body_fn or + initial operands. + + .. warning:: + `torch.while_loop` is a prototype feature in PyTorch. It has limited support for input and output types and + doesn't support training currently. Please look forward to a more stable implementation in a future version of PyTorch. + Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype + + `while_loop` is a structured control flow operator. It preserves the loop semantic across the torch.compile and torch.export. + + `while_loop` is equivalent to the following: + + def while_loop(cond_fn, body_fn, operands): + val = operands + while cond_fn(*val): + val = body_fn(*val) + return val + + Args: + cond_fn (Callable): A callable function that returns a boolean Scalar tensor. + + body_fn (Callable): A callable function that takes the same inputs as `cond_fn` and returns a tuple of tensors + + operands (Tuple of possibly nested dict/list/tuple of tensors): A tuple of inputs to cond_fn and body_fn. It's also + the initial value of states that are carried across iterations. + + Example: + + def cond_fn(iter, x): + return iter.sum() < 10 + + def body_fn(iter, x): + return iter + 1, x.sin() + + while_loop(cond_fn, body_fn, (torch.zeros(1), torch.randn(3, 4))) + + Restrictions: + + - body_fn must return tensors with the same metadata (e.g.shape, dtype) as inputs. + + - body_fn and cond_fn must not in-place mutate the operands. A clone before the mutation is required. + + - body_fn and cond_fn must not mutate python varialbles (e.g. list/dict) created outside of the body_fn. + + - body_fn and cond_fn's output cannot aliase any of the inputs. A clone is required. + + .. warning:: + Temporal Limitations: + + - 'while_loop' only supports **inference** right now. Autograd will be supported in the future. + + """ + if torch.compiler.is_dynamo_compiling(): + return while_loop_op(cond_fn, body_fn, operands) + + def _validate_input(cond_fn, body_fn, operands): + if not callable(cond_fn) or not callable(body_fn): + raise RuntimeError("Expect cond_fn and body_fn to be callbale.") + + if not isinstance(operands, (tuple, list)) or pytree.tree_any( + lambda t: not isinstance(t, torch.Tensor), operands + ): + raise RuntimeError( + "Expect operands to be a tuple of possibly nested dict/list/tuple that only" + f"consists of tensor leaves, but got {operands}." 
+ ) + + _validate_input(cond_fn, body_fn, operands) + + with _set_compilation_env(), torch._dynamo.utils.disable_cache_limit(): + return torch.compile(while_loop_op, backend="eager", fullgraph=True)( + cond_fn, body_fn, operands + ) + + +@while_loop_op.py_impl(DispatchKey.CompositeExplicitAutograd) +def while_loop_dense(cond_fn, body_fn, operands): + init_val = operands + + def _is_boolean_scalar_tensor(pred): + return ( + isinstance(pred, torch.Tensor) + and pred.size() == torch.Size([]) + and pred.dtype == torch.bool + ) + + if not isinstance(operands, tuple): + raise RuntimeError(f"operands must be a tuple but got {type(operands)}") + + while pred := cond_fn(*init_val): + if not _is_boolean_scalar_tensor(pred): + raise RuntimeError( + f"cond_fn must return a boolean scalar tensor but got {pred}" + ) + out = body_fn(*init_val) + assert isinstance( + out, tuple + ), f"body_fn should return a tuple but got {type(out)}" + assert len(out) == len( + init_val + ), "body_fn should return the same number of elements as operands" + init_val = out + return init_val + + +while_loop_op.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(while_loop_op, deferred_error=True) +) + + +@while_loop_op.py_impl(ProxyTorchDispatchMode) +def while_loop_tracing(mode, cond_fn, body_fn, operands): + def _trace_while_loop(proxy_mode, while_loop_op, cond_fn, body_fn, operands): + pre_dispatch = getattr(proxy_mode, "pre_dispatch", False) + with disable_proxy_modes_tracing(): + cond_graph = reenter_make_fx(cond_fn, pre_dispatch)(*operands) + body_graph = reenter_make_fx(body_fn, pre_dispatch)(*operands) + + next_name = None + i = 0 + while not next_name: + candidate = f"while_loop_cond_graph_{i}" + if hasattr(proxy_mode.tracer.root, candidate): + i += 1 + else: + next_name = candidate + cond_graph_name = next_name + body_graph_name = f"while_loop_body_graph_{i}" + assert not hasattr(proxy_mode.tracer.root, body_graph_name) + + proxy_mode.tracer.root.register_module(cond_graph_name, cond_graph) + proxy_mode.tracer.root.register_module(body_graph_name, body_graph) + + args = (cond_graph, body_graph, operands) + + proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, args) + + out_proxy = proxy_mode.tracer.create_proxy( + "call_function", while_loop_op, proxy_args, {}, name="while_loop" + ) + + # body_fn return output with the same pytree and tensor meta data as operands + # so we could just return the output after one iteration. + out = body_fn(*operands) + return track_tensor_tree( + out, out_proxy, constant=None, tracer=proxy_mode.tracer + ) + + if mode.enable_tracing: + return _trace_while_loop(mode, while_loop_op, cond_fn, body_fn, operands) + else: + return while_loop_op(cond_fn, body_fn, operands) + + +@while_loop_op.py_impl(FakeTensorMode) +def while_loop_fake_tensor_mode(mode, cond_fn, body_fn, operands): + return body_fn(*operands) + + +@while_loop_op.py_functionalize_impl +def while_loop_func(ctx, cond_fn, body_fn, operands): + unwrapped_operands = ctx.unwrap_tensors(operands) + with ctx.redispatch_to_next() as m: + functional_cond_fn = ctx.functionalize(cond_fn) + functional_body_fn = ctx.functionalize(body_fn) + pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch + for fn, fn_name in [ + (functional_cond_fn, "cond_fn"), + (functional_body_fn, "body_fn"), + ]: + if _has_potential_branch_input_mutation( + fn, unwrapped_operands, pre_dispatch=pre_dispatch + ): + raise UnsupportedAliasMutationException( + f"torch.while_loop's {fn_name} might be modifying the input!" 
+ ) + + for fn in [functional_cond_fn, functional_body_fn]: + if _has_potential_branch_input_alias( + fn, unwrapped_operands, pre_dispatch=pre_dispatch + ): + raise UnsupportedAliasMutationException( + f"torch.while_loop's {fn_name} might be aliasing the input!" + ) + ret = while_loop_op(functional_cond_fn, functional_body_fn, unwrapped_operands) + return ctx.wrap_tensors(ret) diff --git a/venv/lib/python3.10/site-packages/torch/lib/libc10_cuda.so b/venv/lib/python3.10/site-packages/torch/lib/libc10_cuda.so new file mode 100644 index 0000000000000000000000000000000000000000..b30b31cf11226792d8db78f460f338e6d32213da Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/lib/libc10_cuda.so differ diff --git a/venv/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 b/venv/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 new file mode 100644 index 0000000000000000000000000000000000000000..346f88be2766ed671dd3f9d187dedd3fcd6f9e59 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 differ diff --git a/venv/lib/python3.10/site-packages/torch/lib/libshm.so b/venv/lib/python3.10/site-packages/torch/lib/libshm.so new file mode 100644 index 0000000000000000000000000000000000000000..b0af7c66061e84734143787864a4f59e2a9197a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/lib/libshm.so differ diff --git a/venv/lib/python3.10/site-packages/torch/lib/libtorch.so b/venv/lib/python3.10/site-packages/torch/lib/libtorch.so new file mode 100644 index 0000000000000000000000000000000000000000..786719178e9d741938cbfcd36b4c4c5fe84df5df Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/lib/libtorch.so differ diff --git a/venv/lib/python3.10/site-packages/torch/special/__init__.py b/venv/lib/python3.10/site-packages/torch/special/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a25f0f7c03682ed1e11a869be6551fafdad40f34 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/special/__init__.py @@ -0,0 +1,1283 @@ +import torch +from torch._C import _add_docstr, _special # type: ignore[attr-defined] +from torch._torch_docs import common_args, multi_dim_common + +__all__ = [ + 'airy_ai', + 'bessel_j0', + 'bessel_j1', + 'bessel_y0', + 'bessel_y1', + 'chebyshev_polynomial_t', + 'chebyshev_polynomial_u', + 'chebyshev_polynomial_v', + 'chebyshev_polynomial_w', + 'digamma', + 'entr', + 'erf', + 'erfc', + 'erfcx', + 'erfinv', + 'exp2', + 'expit', + 'expm1', + 'gammainc', + 'gammaincc', + 'gammaln', + 'hermite_polynomial_h', + 'hermite_polynomial_he', + 'i0', + 'i0e', + 'i1', + 'i1e', + 'laguerre_polynomial_l', + 'legendre_polynomial_p', + 'log1p', + 'log_ndtr', + 'log_softmax', + 'logit', + 'logsumexp', + 'modified_bessel_i0', + 'modified_bessel_i1', + 'modified_bessel_k0', + 'modified_bessel_k1', + 'multigammaln', + 'ndtr', + 'ndtri', + 'polygamma', + 'psi', + 'round', + 'shifted_chebyshev_polynomial_t', + 'shifted_chebyshev_polynomial_u', + 'shifted_chebyshev_polynomial_v', + 'shifted_chebyshev_polynomial_w', + 'scaled_modified_bessel_k0', + 'scaled_modified_bessel_k1', + 'sinc', + 'softmax', + 'spherical_bessel_j0', + 'xlog1py', + 'xlogy', + 'zeta', +] + +Tensor = torch.Tensor + +entr = _add_docstr(_special.special_entr, + r""" +entr(input, *, out=None) -> Tensor +Computes the entropy on :attr:`input` (as defined below), elementwise. + +.. 
math:: + \begin{align} + \text{entr(x)} = \begin{cases} + -x * \ln(x) & x > 0 \\ + 0 & x = 0.0 \\ + -\infty & x < 0 + \end{cases} + \end{align} +""" + """ + +Args: + input (Tensor): the input tensor. + +Keyword args: + out (Tensor, optional): the output tensor. + +Example:: + >>> a = torch.arange(-0.5, 1, 0.5) + >>> a + tensor([-0.5000, 0.0000, 0.5000]) + >>> torch.special.entr(a) + tensor([ -inf, 0.0000, 0.3466]) +""") + +psi = _add_docstr(_special.special_psi, + r""" +psi(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.digamma`. +""") + +digamma = _add_docstr(_special.special_digamma, + r""" +digamma(input, *, out=None) -> Tensor + +Computes the logarithmic derivative of the gamma function on `input`. + +.. math:: + \digamma(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)} +""" + r""" +Args: + input (Tensor): the tensor to compute the digamma function on + +Keyword args: + {out} + +.. note:: This function is similar to SciPy's `scipy.special.digamma`. + +.. note:: From PyTorch 1.8 onwards, the digamma function returns `-Inf` for `0`. + Previously it returned `NaN` for `0`. + +Example:: + + >>> a = torch.tensor([1, 0.5]) + >>> torch.special.digamma(a) + tensor([-0.5772, -1.9635]) + +""".format(**common_args)) + +gammaln = _add_docstr(_special.special_gammaln, + r""" +gammaln(input, *, out=None) -> Tensor + +Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`. + +.. math:: + \text{out}_{i} = \ln \Gamma(|\text{input}_{i}|) +""" + """ +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.arange(0.5, 2, 0.5) + >>> torch.special.gammaln(a) + tensor([ 0.5724, 0.0000, -0.1208]) + +""".format(**common_args)) + +polygamma = _add_docstr(_special.special_polygamma, + r""" +polygamma(n, input, *, out=None) -> Tensor + +Computes the :math:`n^{th}` derivative of the digamma function on :attr:`input`. +:math:`n \geq 0` is called the order of the polygamma function. + +.. math:: + \psi^{(n)}(x) = \frac{d^{(n)}}{dx^{(n)}} \psi(x) + +.. note:: + This function is implemented only for nonnegative integers :math:`n \geq 0`. +""" + """ +Args: + n (int): the order of the polygamma function + {input} + +Keyword args: + {out} + +Example:: + >>> a = torch.tensor([1, 0.5]) + >>> torch.special.polygamma(1, a) + tensor([1.64493, 4.9348]) + >>> torch.special.polygamma(2, a) + tensor([ -2.4041, -16.8288]) + >>> torch.special.polygamma(3, a) + tensor([ 6.4939, 97.4091]) + >>> torch.special.polygamma(4, a) + tensor([ -24.8863, -771.4742]) +""".format(**common_args)) + +erf = _add_docstr(_special.special_erf, + r""" +erf(input, *, out=None) -> Tensor + +Computes the error function of :attr:`input`. The error function is defined as follows: + +.. math:: + \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.erf(torch.tensor([0, -1., 10.])) + tensor([ 0.0000, -0.8427, 1.0000]) +""".format(**common_args)) + +erfc = _add_docstr(_special.special_erfc, + r""" +erfc(input, *, out=None) -> Tensor + +Computes the complementary error function of :attr:`input`. +The complementary error function is defined as follows: + +.. 
math:: + \mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.erfc(torch.tensor([0, -1., 10.])) + tensor([ 1.0000, 1.8427, 0.0000]) +""".format(**common_args)) + +erfcx = _add_docstr(_special.special_erfcx, + r""" +erfcx(input, *, out=None) -> Tensor + +Computes the scaled complementary error function for each element of :attr:`input`. +The scaled complementary error function is defined as follows: + +.. math:: + \mathrm{erfcx}(x) = e^{x^2} \mathrm{erfc}(x) +""" + r""" + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.erfcx(torch.tensor([0, -1., 10.])) + tensor([ 1.0000, 5.0090, 0.0561]) +""".format(**common_args)) + +erfinv = _add_docstr(_special.special_erfinv, + r""" +erfinv(input, *, out=None) -> Tensor + +Computes the inverse error function of :attr:`input`. +The inverse error function is defined in the range :math:`(-1, 1)` as: + +.. math:: + \mathrm{erfinv}(\mathrm{erf}(x)) = x +""" + r""" + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.erfinv(torch.tensor([0, 0.5, -1.])) + tensor([ 0.0000, 0.4769, -inf]) +""".format(**common_args)) + +logit = _add_docstr(_special.special_logit, + r""" +logit(input, eps=None, *, out=None) -> Tensor + +Returns a new tensor with the logit of the elements of :attr:`input`. +:attr:`input` is clamped to [eps, 1 - eps] when eps is not None. +When eps is None and :attr:`input` < 0 or :attr:`input` > 1, the function will yields NaN. + +.. math:: + \begin{align} + y_{i} &= \ln(\frac{z_{i}}{1 - z_{i}}) \\ + z_{i} &= \begin{cases} + x_{i} & \text{if eps is None} \\ + \text{eps} & \text{if } x_{i} < \text{eps} \\ + x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\ + 1 - \text{eps} & \text{if } x_{i} > 1 - \text{eps} + \end{cases} + \end{align} +""" + r""" +Args: + {input} + eps (float, optional): the epsilon for input clamp bound. Default: ``None`` + +Keyword args: + {out} + +Example:: + + >>> a = torch.rand(5) + >>> a + tensor([0.2796, 0.9331, 0.6486, 0.1523, 0.6516]) + >>> torch.special.logit(a, eps=1e-6) + tensor([-0.9466, 2.6352, 0.6131, -1.7169, 0.6261]) +""".format(**common_args)) + +logsumexp = _add_docstr(_special.special_logsumexp, + r""" +logsumexp(input, dim, keepdim=False, *, out=None) + +Alias for :func:`torch.logsumexp`. +""".format(**multi_dim_common)) + +expit = _add_docstr(_special.special_expit, + r""" +expit(input, *, out=None) -> Tensor + +Computes the expit (also known as the logistic sigmoid function) of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}} +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> t = torch.randn(4) + >>> t + tensor([ 0.9213, 1.0887, -0.8858, -1.7683]) + >>> torch.special.expit(t) + tensor([ 0.7153, 0.7481, 0.2920, 0.1458]) +""".format(**common_args)) + +exp2 = _add_docstr(_special.special_exp2, + r""" +exp2(input, *, out=None) -> Tensor + +Computes the base two exponential function of :attr:`input`. + +.. math:: + y_{i} = 2^{x_{i}} + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.exp2(torch.tensor([0, math.log2(2.), 3, 4])) + tensor([ 1., 2., 8., 16.]) +""".format(**common_args)) + +expm1 = _add_docstr(_special.special_expm1, + r""" +expm1(input, *, out=None) -> Tensor + +Computes the exponential of the elements minus 1 +of :attr:`input`. + +.. math:: + y_{i} = e^{x_{i}} - 1 + +.. 
note:: This function provides greater precision than exp(x) - 1 for small values of x. + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.expm1(torch.tensor([0, math.log(2.)])) + tensor([ 0., 1.]) +""".format(**common_args)) + +xlog1py = _add_docstr(_special.special_xlog1py, + r""" +xlog1py(input, other, *, out=None) -> Tensor + +Computes ``input * log1p(other)`` with the following cases. + +.. math:: + \text{out}_{i} = \begin{cases} + \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\ + 0 & \text{if } \text{input}_{i} = 0.0 \text{ and } \text{other}_{i} != \text{NaN} \\ + \text{input}_{i} * \text{log1p}(\text{other}_{i})& \text{otherwise} + \end{cases} + +Similar to SciPy's `scipy.special.xlog1py`. + +""" + r""" + +Args: + input (Number or Tensor) : Multiplier + other (Number or Tensor) : Argument + +.. note:: At least one of :attr:`input` or :attr:`other` must be a tensor. + +Keyword args: + {out} + +Example:: + + >>> x = torch.zeros(5,) + >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')]) + >>> torch.special.xlog1py(x, y) + tensor([0., 0., 0., 0., nan]) + >>> x = torch.tensor([1, 2, 3]) + >>> y = torch.tensor([3, 2, 1]) + >>> torch.special.xlog1py(x, y) + tensor([1.3863, 2.1972, 2.0794]) + >>> torch.special.xlog1py(x, 4) + tensor([1.6094, 3.2189, 4.8283]) + >>> torch.special.xlog1py(2, y) + tensor([2.7726, 2.1972, 1.3863]) +""".format(**common_args)) + +xlogy = _add_docstr(_special.special_xlogy, + r""" +xlogy(input, other, *, out=None) -> Tensor + +Computes ``input * log(other)`` with the following cases. + +.. math:: + \text{out}_{i} = \begin{cases} + \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\ + 0 & \text{if } \text{input}_{i} = 0.0 \\ + \text{input}_{i} * \log{(\text{other}_{i})} & \text{otherwise} + \end{cases} + +Similar to SciPy's `scipy.special.xlogy`. + +""" + r""" + +Args: + input (Number or Tensor) : Multiplier + other (Number or Tensor) : Argument + +.. note:: At least one of :attr:`input` or :attr:`other` must be a tensor. + +Keyword args: + {out} + +Example:: + + >>> x = torch.zeros(5,) + >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')]) + >>> torch.special.xlogy(x, y) + tensor([0., 0., 0., 0., nan]) + >>> x = torch.tensor([1, 2, 3]) + >>> y = torch.tensor([3, 2, 1]) + >>> torch.special.xlogy(x, y) + tensor([1.0986, 1.3863, 0.0000]) + >>> torch.special.xlogy(x, 4) + tensor([1.3863, 2.7726, 4.1589]) + >>> torch.special.xlogy(2, y) + tensor([2.1972, 1.3863, 0.0000]) +""".format(**common_args)) + +i0 = _add_docstr(_special.special_i0, + r""" +i0(input, *, out=None) -> Tensor + +Computes the zeroth order modified Bessel function of the first kind for each element of :attr:`input`. + +.. math:: + \text{out}_{i} = I_0(\text{input}_{i}) = \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2} + +""" + r""" +Args: + input (Tensor): the input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.i0(torch.arange(5, dtype=torch.float32)) + tensor([ 1.0000, 1.2661, 2.2796, 4.8808, 11.3019]) + +""".format(**common_args)) + +i0e = _add_docstr(_special.special_i0e, + r""" +i0e(input, *, out=None) -> Tensor +Computes the exponentially scaled zeroth order modified Bessel function of the first kind (as defined below) +for each element of :attr:`input`. + +.. 
math:: + \text{out}_{i} = \exp(-|x|) * i0(x) = \exp(-|x|) * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2} + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.i0e(torch.arange(5, dtype=torch.float32)) + tensor([1.0000, 0.4658, 0.3085, 0.2430, 0.2070]) +""".format(**common_args)) + +i1 = _add_docstr(_special.special_i1, + r""" +i1(input, *, out=None) -> Tensor +Computes the first order modified Bessel function of the first kind (as defined below) +for each element of :attr:`input`. + +.. math:: + \text{out}_{i} = \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!} + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.i1(torch.arange(5, dtype=torch.float32)) + tensor([0.0000, 0.5652, 1.5906, 3.9534, 9.7595]) +""".format(**common_args)) + +i1e = _add_docstr(_special.special_i1e, + r""" +i1e(input, *, out=None) -> Tensor +Computes the exponentially scaled first order modified Bessel function of the first kind (as defined below) +for each element of :attr:`input`. + +.. math:: + \text{out}_{i} = \exp(-|x|) * i1(x) = + \exp(-|x|) * \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!} + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.i1e(torch.arange(5, dtype=torch.float32)) + tensor([0.0000, 0.2079, 0.2153, 0.1968, 0.1788]) +""".format(**common_args)) + +ndtr = _add_docstr(_special.special_ndtr, + r""" +ndtr(input, *, out=None) -> Tensor +Computes the area under the standard Gaussian probability density function, +integrated from minus infinity to :attr:`input`, elementwise. + +.. math:: + \text{ndtr}(x) = \frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3])) + tensor([0.0013, 0.0228, 0.1587, 0.5000, 0.8413, 0.9772, 0.9987]) +""".format(**common_args)) + +ndtri = _add_docstr(_special.special_ndtri, + r""" +ndtri(input, *, out=None) -> Tensor +Computes the argument, x, for which the area under the Gaussian probability density function +(integrated from minus infinity to x) is equal to :attr:`input`, elementwise. + +.. math:: + \text{ndtri}(p) = \sqrt{2}\text{erf}^{-1}(2p - 1) + +.. note:: + Also known as quantile function for Normal Distribution. + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.ndtri(torch.tensor([0, 0.25, 0.5, 0.75, 1])) + tensor([ -inf, -0.6745, 0.0000, 0.6745, inf]) +""".format(**common_args)) + +log_ndtr = _add_docstr(_special.special_log_ndtr, + r""" +log_ndtr(input, *, out=None) -> Tensor +Computes the log of the area under the standard Gaussian probability density function, +integrated from minus infinity to :attr:`input`, elementwise. + +.. math:: + \text{log\_ndtr}(x) = \log\left(\frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt \right) + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.log_ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3])) + tensor([-6.6077 -3.7832 -1.841 -0.6931 -0.1728 -0.023 -0.0014]) +""".format(**common_args)) + +log1p = _add_docstr(_special.special_log1p, + r""" +log1p(input, *, out=None) -> Tensor + +Alias for :func:`torch.log1p`. +""") + +sinc = _add_docstr(_special.special_sinc, + r""" +sinc(input, *, out=None) -> Tensor + +Computes the normalized sinc of :attr:`input.` + +.. 
math:: + \text{out}_{i} = + \begin{cases} + 1, & \text{if}\ \text{input}_{i}=0 \\ + \sin(\pi \text{input}_{i}) / (\pi \text{input}_{i}), & \text{otherwise} + \end{cases} +""" + r""" + +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> t = torch.randn(4) + >>> t + tensor([ 0.2252, -0.2948, 1.0267, -1.1566]) + >>> torch.special.sinc(t) + tensor([ 0.9186, 0.8631, -0.0259, -0.1300]) +""".format(**common_args)) + +round = _add_docstr(_special.special_round, + r""" +round(input, *, out=None) -> Tensor + +Alias for :func:`torch.round`. +""") + +softmax = _add_docstr(_special.special_softmax, + r""" +softmax(input, dim, *, dtype=None) -> Tensor + +Computes the softmax function. + +Softmax is defined as: + +:math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}` + +It is applied to all slices along dim, and will re-scale them so that the elements +lie in the range `[0, 1]` and sum to 1. + +Args: + input (Tensor): input + dim (int): A dimension along which softmax will be computed. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is cast to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + +Examples:: + >>> t = torch.ones(2, 2) + >>> torch.special.softmax(t, 0) + tensor([[0.5000, 0.5000], + [0.5000, 0.5000]]) + +""") + +log_softmax = _add_docstr(_special.special_log_softmax, + r""" +log_softmax(input, dim, *, dtype=None) -> Tensor + +Computes softmax followed by a logarithm. + +While mathematically equivalent to log(softmax(x)), doing these two +operations separately is slower and numerically unstable. This function +is computed as: + +.. math:: + \text{log\_softmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right) +""" + r""" + +Args: + input (Tensor): input + dim (int): A dimension along which log_softmax will be computed. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is cast to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + +Example:: + >>> t = torch.ones(2, 2) + >>> torch.special.log_softmax(t, 0) + tensor([[-0.6931, -0.6931], + [-0.6931, -0.6931]]) +""") + +zeta = _add_docstr(_special.special_zeta, + r""" +zeta(input, other, *, out=None) -> Tensor + +Computes the Hurwitz zeta function, elementwise. + +.. math:: + \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x} + +""" + r""" +Args: + input (Tensor): the input tensor corresponding to `x`. + other (Tensor): the input tensor corresponding to `q`. + +.. note:: + The Riemann zeta function corresponds to the case when `q = 1` + +Keyword args: + {out} + +Example:: + >>> x = torch.tensor([2., 4.]) + >>> torch.special.zeta(x, 1) + tensor([1.6449, 1.0823]) + >>> torch.special.zeta(x, torch.tensor([1., 2.])) + tensor([1.6449, 0.0823]) + >>> torch.special.zeta(2, torch.tensor([1., 2.])) + tensor([1.6449, 0.6449]) +""".format(**common_args)) + +multigammaln = _add_docstr(_special.special_multigammaln, + r""" +multigammaln(input, p, *, out=None) -> Tensor + +Computes the `multivariate log-gamma function +`_ with dimension +:math:`p` element-wise, given by + +.. math:: + \log(\Gamma_{p}(a)) = C + \displaystyle \sum_{i=1}^{p} \log\left(\Gamma\left(a - \frac{i - 1}{2}\right)\right) + +where :math:`C = \log(\pi) \cdot \frac{p (p - 1)}{4}` and :math:`\Gamma(-)` is the Gamma function. 
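# Editor's note (illustrative sketch, not part of the original patch): the
# multivariate log-gamma formula above can be cross-checked against
# torch.special.gammaln by summing over i = 1..p; the tensor values below are
# arbitrary and chosen to be greater than (p - 1) / 2.
import math
import torch

a = torch.tensor([1.5, 2.0, 3.0])
p = 2
C = math.log(math.pi) * p * (p - 1) / 4
manual = C + sum(torch.special.gammaln(a - (i - 1) / 2) for i in range(1, p + 1))
# torch.special.multigammaln(a, p) agrees with `manual` up to floating-point error.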
+ +All elements must be greater than :math:`\frac{p - 1}{2}`, otherwise the behavior is undefiend. +""" + """ + +Args: + input (Tensor): the tensor to compute the multivariate log-gamma function + p (int): the number of dimensions + +Keyword args: + {out} + +Example:: + + >>> a = torch.empty(2, 3).uniform_(1, 2) + >>> a + tensor([[1.6835, 1.8474, 1.1929], + [1.0475, 1.7162, 1.4180]]) + >>> torch.special.multigammaln(a, 2) + tensor([[0.3928, 0.4007, 0.7586], + [1.0311, 0.3901, 0.5049]]) +""".format(**common_args)) + +gammainc = _add_docstr(_special.special_gammainc, + r""" +gammainc(input, other, *, out=None) -> Tensor + +Computes the regularized lower incomplete gamma function: + +.. math:: + \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_0^{\text{other}_i} t^{\text{input}_i-1} e^{-t} dt + +where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive +and at least one is strictly positive. +If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`. +:math:`\Gamma(\cdot)` in the equation above is the gamma function, + +.. math:: + \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt. + +See :func:`torch.special.gammaincc` and :func:`torch.special.gammaln` for related functions. + +Supports :ref:`broadcasting to a common shape ` +and float inputs. + +.. note:: + The backward pass with respect to :attr:`input` is not yet supported. + Please open an issue on PyTorch's Github to request it. + +""" + r""" +Args: + input (Tensor): the first non-negative input tensor + other (Tensor): the second non-negative input tensor + +Keyword args: + {out} + +Example:: + + >>> a1 = torch.tensor([4.0]) + >>> a2 = torch.tensor([3.0, 4.0, 5.0]) + >>> a = torch.special.gammaincc(a1, a2) + tensor([0.3528, 0.5665, 0.7350]) + tensor([0.3528, 0.5665, 0.7350]) + >>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2) + tensor([1., 1., 1.]) + +""".format(**common_args)) + +gammaincc = _add_docstr(_special.special_gammaincc, + r""" +gammaincc(input, other, *, out=None) -> Tensor + +Computes the regularized upper incomplete gamma function: + +.. math:: + \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_{\text{other}_i}^{\infty} t^{\text{input}_i-1} e^{-t} dt + +where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive +and at least one is strictly positive. +If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`. +:math:`\Gamma(\cdot)` in the equation above is the gamma function, + +.. math:: + \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt. + +See :func:`torch.special.gammainc` and :func:`torch.special.gammaln` for related functions. + +Supports :ref:`broadcasting to a common shape ` +and float inputs. + +.. note:: + The backward pass with respect to :attr:`input` is not yet supported. + Please open an issue on PyTorch's Github to request it. + +""" + r""" +Args: + input (Tensor): the first non-negative input tensor + other (Tensor): the second non-negative input tensor + +Keyword args: + {out} + +Example:: + + >>> a1 = torch.tensor([4.0]) + >>> a2 = torch.tensor([3.0, 4.0, 5.0]) + >>> a = torch.special.gammaincc(a1, a2) + tensor([0.6472, 0.4335, 0.2650]) + >>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2) + tensor([1., 1., 1.]) + +""".format(**common_args)) + +airy_ai = _add_docstr(_special.special_airy_ai, + r""" +airy_ai(input, *, out=None) -> Tensor + +Airy function :math:`\text{Ai}\left(\text{input}\right)`. 
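# Editor's note (assumed usage, not from the original patch): the regularized
# lower and upper incomplete gamma functions documented above are complementary,
# so gammainc + gammaincc equals 1 wherever both are defined.
import torch

a = torch.tensor([4.0])
x = torch.tensor([3.0, 4.0, 5.0])
total = torch.special.gammainc(a, x) + torch.special.gammaincc(a, x)
# total == tensor([1., 1., 1.]) up to floating-point error.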
+ +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +bessel_j0 = _add_docstr(_special.special_bessel_j0, + r""" +bessel_j0(input, *, out=None) -> Tensor + +Bessel function of the first kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +bessel_j1 = _add_docstr(_special.special_bessel_j1, + r""" +bessel_j1(input, *, out=None) -> Tensor + +Bessel function of the first kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +bessel_y0 = _add_docstr(_special.special_bessel_y0, + r""" +bessel_y0(input, *, out=None) -> Tensor + +Bessel function of the second kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +bessel_y1 = _add_docstr(_special.special_bessel_y1, + r""" +bessel_y1(input, *, out=None) -> Tensor + +Bessel function of the second kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +chebyshev_polynomial_t = _add_docstr(_special.special_chebyshev_polynomial_t, + r""" +chebyshev_polynomial_t(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the first kind :math:`T_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. If :math:`n < 6` or :math:`|\text{input}| > 1` the recursion: + +.. math:: + T_{n + 1}(\text{input}) = 2 \times \text{input} \times T_{n}(\text{input}) - T_{n - 1}(\text{input}) + +is evaluated. Otherwise, the explicit trigonometric formula: + +.. math:: + T_{n}(\text{input}) = \text{cos}(n \times \text{arccos}(x)) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +chebyshev_polynomial_u = _add_docstr(_special.special_chebyshev_polynomial_u, + r""" +chebyshev_polynomial_t(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the second kind :math:`U_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, +:math:`2 \times \text{input}` is returned. If :math:`n < 6` or +:math:`|\text{input}| > 1`, the recursion: + +.. math:: + T_{n + 1}(\text{input}) = 2 \times \text{input} \times T_{n}(\text{input}) - T_{n - 1}(\text{input}) + +is evaluated. Otherwise, the explicit trigonometric formula: + +.. math:: + \frac{\text{sin}((n + 1) \times \text{arccos}(\text{input}))}{\text{sin}(\text{arccos}(\text{input}))} + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +chebyshev_polynomial_v = _add_docstr(_special.special_chebyshev_polynomial_v, + r""" +chebyshev_polynomial_v(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +chebyshev_polynomial_w = _add_docstr(_special.special_chebyshev_polynomial_w, + r""" +chebyshev_polynomial_w(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. 
+ +Keyword args: + {out} +""".format(**common_args)) + +hermite_polynomial_h = _add_docstr(_special.special_hermite_polynomial_h, + r""" +hermite_polynomial_h(input, n, *, out=None) -> Tensor + +Physicist’s Hermite polynomial :math:`H_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. Otherwise, the recursion: + +.. math:: + H_{n + 1}(\text{input}) = 2 \times \text{input} \times H_{n}(\text{input}) - H_{n - 1}(\text{input}) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +hermite_polynomial_he = _add_docstr(_special.special_hermite_polynomial_he, + r""" +hermite_polynomial_he(input, n, *, out=None) -> Tensor + +Probabilist’s Hermite polynomial :math:`He_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. Otherwise, the recursion: + +.. math:: + He_{n + 1}(\text{input}) = 2 \times \text{input} \times He_{n}(\text{input}) - He_{n - 1}(\text{input}) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +laguerre_polynomial_l = _add_docstr(_special.special_laguerre_polynomial_l, + r""" +laguerre_polynomial_l(input, n, *, out=None) -> Tensor + +Laguerre polynomial :math:`L_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. Otherwise, the recursion: + +.. math:: + L_{n + 1}(\text{input}) = 2 \times \text{input} \times L_{n}(\text{input}) - L_{n - 1}(\text{input}) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +legendre_polynomial_p = _add_docstr(_special.special_legendre_polynomial_p, + r""" +legendre_polynomial_p(input, n, *, out=None) -> Tensor + +Legendre polynomial :math:`P_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. Otherwise, the recursion: + +.. math:: + P_{n + 1}(\text{input}) = 2 \times \text{input} \times P_{n}(\text{input}) - P_{n - 1}(\text{input}) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +modified_bessel_i0 = _add_docstr(_special.special_modified_bessel_i0, + r""" +modified_bessel_i0(input, *, out=None) -> Tensor + +Modified Bessel function of the first kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +modified_bessel_i1 = _add_docstr(_special.special_modified_bessel_i1, + r""" +modified_bessel_i1(input, *, out=None) -> Tensor + +Modified Bessel function of the first kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +modified_bessel_k0 = _add_docstr(_special.special_modified_bessel_k0, + r""" +modified_bessel_k0(input, *, out=None) -> Tensor + +Modified Bessel function of the second kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +modified_bessel_k1 = _add_docstr(_special.special_modified_bessel_k1, + r""" +modified_bessel_k1(input, *, out=None) -> Tensor + +Modified Bessel function of the second kind of order :math:`1`. 
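# Editor's note (hedged sketch, not part of the original patch): the polynomial
# helpers above broadcast a point tensor against a degree tensor n; a low degree
# can be sanity-checked against its closed form, e.g. P_2(x) = (3x^2 - 1) / 2.
import torch

x = torch.linspace(-1.0, 1.0, 5)
n = torch.full_like(x, 2.0)
p2 = torch.special.legendre_polynomial_p(x, n)
closed_form = (3 * x**2 - 1) / 2
# p2 and closed_form agree up to floating-point error.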
+ +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +scaled_modified_bessel_k0 = _add_docstr(_special.special_scaled_modified_bessel_k0, + r""" +scaled_modified_bessel_k0(input, *, out=None) -> Tensor + +Scaled modified Bessel function of the second kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +scaled_modified_bessel_k1 = _add_docstr(_special.special_scaled_modified_bessel_k1, + r""" +scaled_modified_bessel_k1(input, *, out=None) -> Tensor + +Scaled modified Bessel function of the second kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +shifted_chebyshev_polynomial_t = _add_docstr(_special.special_shifted_chebyshev_polynomial_t, + r""" +shifted_chebyshev_polynomial_t(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the first kind :math:`T_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +shifted_chebyshev_polynomial_u = _add_docstr(_special.special_shifted_chebyshev_polynomial_u, + r""" +shifted_chebyshev_polynomial_u(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the second kind :math:`U_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +shifted_chebyshev_polynomial_v = _add_docstr(_special.special_shifted_chebyshev_polynomial_v, + r""" +shifted_chebyshev_polynomial_v(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +shifted_chebyshev_polynomial_w = _add_docstr(_special.special_shifted_chebyshev_polynomial_w, + r""" +shifted_chebyshev_polynomial_w(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +spherical_bessel_j0 = _add_docstr(_special.special_spherical_bessel_j0, + r""" +spherical_bessel_j0(input, *, out=None) -> Tensor + +Spherical Bessel function of the first kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) diff --git a/venv/lib/python3.10/site-packages/torch/special/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/special/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b63b16f0570e0191c182ad211ac1ad7c809d89d3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/special/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ccdad48eca97dccf5c5930a86ec09c58d1a4ce00 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/__init__.py @@ -0,0 +1,68 @@ +import os.path as _osp +import torch + +from .throughput_benchmark import ThroughputBenchmark +from .cpp_backtrace import get_cpp_backtrace +from .backend_registration import rename_privateuse1_backend, generate_methods_for_privateuse1_backend +from . import deterministic +from . 
import collect_env +import weakref +import copyreg + +def set_module(obj, mod): + """ + Set the module attribute on a python object for a given object for nicer printing + """ + if not isinstance(mod, str): + raise TypeError("The mod argument should be a string") + obj.__module__ = mod + +if torch._running_with_deploy(): + # not valid inside torch_deploy interpreter, no paths exists for frozen modules + cmake_prefix_path = None +else: + cmake_prefix_path = _osp.join(_osp.dirname(_osp.dirname(__file__)), 'share', 'cmake') + +def swap_tensors(t1, t2): + """ + This function swaps the content of the two Tensor objects. + At a high level, this will make t1 have the content of t2 while preserving + its identity. + + This will not work if t1 and t2 have different slots. + """ + # Ensure there are no weakrefs + if weakref.getweakrefs(t1): + raise RuntimeError("Cannot swap t1 because it has weakref associated with it") + if weakref.getweakrefs(t2): + raise RuntimeError("Cannot swap t2 because it has weakref associated with it") + t1_slots = set(copyreg._slotnames(t1.__class__)) # type: ignore[attr-defined] + t2_slots = set(copyreg._slotnames(t2.__class__)) # type: ignore[attr-defined] + if t1_slots != t2_slots: + raise RuntimeError("Cannot swap t1 and t2 if they have different slots") + + def swap_attr(name): + tmp = getattr(t1, name) + setattr(t1, name, (getattr(t2, name))) + setattr(t2, name, tmp) + + # Swap the types + # Note that this will fail if there are mismatched slots + swap_attr("__class__") + + # Swap the dynamic attributes + swap_attr("__dict__") + + # Swap the slots + for slot in t1_slots: + if hasattr(t1, slot) and hasattr(t2, slot): + swap_attr(slot) + elif hasattr(t1, slot): + setattr(t2, slot, (getattr(t1, slot))) + delattr(t1, slot) + elif hasattr(t2, slot): + setattr(t1, slot, (getattr(t2, slot))) + delattr(t2, slot) + + # Swap the at::Tensor they point to + torch._C._swap_tensor_impl(t1, t2) diff --git a/venv/lib/python3.10/site-packages/torch/utils/_config_module.py b/venv/lib/python3.10/site-packages/torch/utils/_config_module.py new file mode 100644 index 0000000000000000000000000000000000000000..ef0478535772c9523f743675a95a1a9fbb0998d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_config_module.py @@ -0,0 +1,369 @@ +import contextlib + +import copy +import hashlib +import inspect +import io +import pickle +import tokenize +import unittest +import warnings +from types import FunctionType, ModuleType +from typing import Any, Dict, Optional, Set, Union +from unittest import mock + +# Types saved/loaded in configs +CONFIG_TYPES = (int, float, bool, type(None), str, list, set, tuple, dict) + + +def install_config_module(module): + """ + Converts a module-level config into a `ConfigModule()`. + + See _config_typing.pyi for instructions on how to get the converted module to typecheck. 
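# Editor's note (illustrative only, not part of the original patch): typical use
# of swap_tensors from torch/utils/__init__.py above -- the two tensors exchange
# their contents while both Python identities are preserved.
import torch
from torch.utils import swap_tensors

t1 = torch.ones(3)
t2 = torch.zeros(3)
swap_tensors(t1, t2)
# t1 is now all zeros and t2 all ones; id(t1) and id(t2) are unchanged.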
+ """ + + class ConfigModuleInstance(ConfigModule): + _bypass_keys = set({"_is_dirty", "_hash_digest"}) + + def visit(source, dest, prefix): + """Walk the module structure and move everything to module._config""" + for key, value in list(source.__dict__.items()): + if ( + key.startswith("__") + or isinstance(value, (ModuleType, FunctionType)) + or (hasattr(value, "__module__") and value.__module__ == "typing") + ): + continue + + name = f"{prefix}{key}" + if isinstance(value, CONFIG_TYPES): + config[name] = value + default[name] = value + if dest is module: + delattr(module, key) + elif isinstance(value, type): + assert value.__module__ == module.__name__ + # a subconfig with `class Blah:` syntax + proxy = SubConfigProxy(module, f"{name}.") + visit(value, proxy, f"{name}.") + setattr(dest, key, proxy) + else: + raise AssertionError(f"Unhandled config {key}={value} ({type(value)})") + + config: Dict[str, Any] = dict() + default: Dict[str, Any] = dict() + + compile_ignored_keys = get_assignments_with_compile_ignored_comments(module) + + visit(module, module, "") + module._config = config + module._default = default + module._allowed_keys = set(config.keys()) + module._compile_ignored_keys = compile_ignored_keys + module.__class__ = ConfigModuleInstance + module._is_dirty = True + module._hash_digest = None + + +COMPILE_IGNORED_MARKER = "@compile_ignored" + + +# Gets all the keys (i.e. assignments) with a @compile_ignored comment +def get_assignments_with_compile_ignored_comments(module): + source_code = inspect.getsource(module) + assignments = set() + + # Tokenize the source code to retrieve comments + tokens = tokenize.tokenize(io.BytesIO(source_code.encode("utf-8")).readline) + current_comment = "", -1 + prev_name = "" + + for token in tokens: + if token.type == tokenize.COMMENT: + prev_name = "" + maybe_current = token.string.strip() + if COMPILE_IGNORED_MARKER in maybe_current: + assert current_comment == ( + "", + -1, + ), f"unconsumed {COMPILE_IGNORED_MARKER}" + current_comment = maybe_current, token.start[0] + elif token.type == tokenize.NAME: + # Only accept the first name token, to handle if you have + # something like foo: Bar = ... + if not prev_name: + prev_name = token.string + elif token.type == tokenize.OP and token.string == "=": + # Check if the current assignment follows a comment + # with COMPILE_IGNORED_MARKER + if ( + COMPILE_IGNORED_MARKER in current_comment[0] + and current_comment[1] == token.start[0] - 1 + ): + assignments.add(prev_name) + current_comment = "", -1 # reset + prev_name = "" + assert current_comment == ("", -1), f"unconsumed {COMPILE_IGNORED_MARKER}" + return assignments + + +class ConfigModule(ModuleType): + # NOTE: This should be kept in sync with _config_typing.pyi. + + # The default values of the configuration settings. This can be used to + # determine if the config has been changed or not. + _default: Dict[str, Any] + # The actual configuration settings. 
E.g., torch._dynamo.config.debug + # would live as "debug" in the key, and torch._inductor.config.triton.cudagraphs + # maps as "triton.cudagraphs" + _config: Dict[str, Any] + _allowed_keys: Set[str] + _bypass_keys: Set[str] + _compile_ignored_keys: Set[str] + _is_dirty: bool + _hash_digest: Optional[bytes] + + def __init__(self): + raise NotImplementedError( + f"use {__name__}.install_config_module(sys.modules[__name__])" + ) + + def __setattr__(self, name, value): + if name in self._bypass_keys: + super().__setattr__(name, value) + elif name not in self._allowed_keys: + raise AttributeError(f"{self.__name__}.{name} does not exist") + else: + self._config[name] = value + + def __getattr__(self, name): + try: + return self._config[name] + except KeyError as e: + # make hasattr() work properly + raise AttributeError(f"{self.__name__}.{name} does not exist") from e + + def __delattr__(self, name): + # must support delete because unittest.mock.patch deletes + # then recreate things + del self._config[name] + + def save_config(self) -> bytes: + """Convert config to a pickled blob""" + config = dict(self._config) + for key in config.get("_save_config_ignore", ()): + config.pop(key) + return pickle.dumps(config, protocol=2) + + def codegen_config(self) -> str: + """Convert config to Python statements that replicate current config. + This does NOT include config settings that are at default values. + """ + lines = [] + mod = self.__name__ + for k, v in self._config.items(): + if k in self._config.get("_save_config_ignore", ()): + continue + if v == self._default[k]: + continue + lines.append(f"{mod}.{k} = {v!r}") + return "\n".join(lines) + + def get_hash(self) -> bytes: + """Hashes the configs that are not compile_ignored""" + if self._is_dirty or self._hash_digest is None: + dict_to_hash = { + k: v + for k, v in self._config.items() + if k not in self._compile_ignored_keys + } + string_to_hash = repr(sorted(dict_to_hash.items())) + self._hash_digest = hashlib.md5(string_to_hash.encode("utf-8")).digest() + self._is_dirty = False + return self._hash_digest + + def to_dict(self) -> Dict[str, Any]: + warnings.warn( + "config.to_dict() has been deprecated. It may no longer change the underlying config." + " use config.shallow_copy_dict() or config.get_config_copy() instead", + DeprecationWarning, + ) + return self.shallow_copy_dict() + + def shallow_copy_dict(self) -> Dict[str, Any]: + return {**self._config} + + def load_config(self, maybe_pickled_config: Union[bytes, Dict[str, Any]]) -> None: + """Restore from a prior call to save_config() or shallow_copy_dict()""" + if not isinstance(maybe_pickled_config, dict): + config = pickle.loads(maybe_pickled_config) + else: + config = maybe_pickled_config + self._config.update(config) + + def get_config_copy(self) -> Dict[str, Any]: + return copy.deepcopy(self._config) + + def patch( + self, + arg1: Optional[Union[str, Dict[str, Any]]] = None, + arg2: Any = None, + **kwargs, + ): + """ + Decorator and/or context manager to make temporary changes to a config. + + As a decorator: + + @config.patch("name", val) + @config.patch(name1=val1, name2=val2) + @config.patch({"name1": val1, "name2", val2}) + def foo(...): + ... + + As a context manager: + + with config.patch("name", val): + ... 
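# Editor's note: hypothetical contents of a config module (module and key names
# are made up) in the pattern install_config_module expects -- plain
# module-level assignments become config keys, and the final call swaps the
# module's class for a ConfigModule subclass as implemented above.
import sys

debug = False                 # becomes _config["debug"]
# @compile_ignored
log_file_name = None          # excluded from get_hash(), per the marker above

from torch.utils._config_module import install_config_module

install_config_module(sys.modules[__name__])
# Afterwards `config.debug = True` routes through ConfigModule.__setattr__,
# and `with config.patch(debug=True): ...` gives a scoped, reverted override.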
+ """ + changes: Dict[str, Any] + if arg1 is not None: + if arg2 is not None: + assert isinstance(arg1, str) + # patch("key", True) syntax + changes = {arg1: arg2} + else: + assert isinstance(arg1, dict) + # patch({"key": True}) syntax + changes = arg1 + assert not kwargs + else: + # patch(key=True) syntax + changes = kwargs + assert arg2 is None + assert isinstance(changes, dict), f"expected `dict` got {type(changes)}" + prior: Dict[str, Any] = {} + config = self + dirty = False + + class ConfigPatch(ContextDecorator): + def __enter__(self): + assert not prior + nonlocal dirty + for key in changes.keys(): + # KeyError on invalid entry + prior[key] = config._config[key] + dirty = key not in config._compile_ignored_keys + config._config.update(changes) + config._is_dirty = dirty + + def __exit__(self, exc_type, exc_val, exc_tb): + nonlocal dirty + config._config.update(prior) + config._is_dirty = dirty + prior.clear() + + return ConfigPatch() + + def _make_closure_patcher(self, **changes): + """ + A lower-overhead version of patch() for things on the critical path. + + Usage: + + # do this off the critical path + change_fn = config.make_closure_patcher(foo=True) + + ... + + revert = change_fn() + try: + ... + finally: + revert() + + """ + config = self._config + + def change(): + prior = {k: config[k] for k in changes} + config.update(changes) + + def revert(): + config.update(prior) + + return revert + + return change + + +class ContextDecorator(contextlib.ContextDecorator): + """ + Same as contextlib.ContextDecorator, but with support for + `unittest.TestCase` + """ + + def __enter__(self): + raise NotImplementedError("NYI") + + def __exit__(self, exc_type, exc_val, exc_tb): + raise NotImplementedError("NYI") + + def __call__(self, func): + if isinstance(func, type) and issubclass(func, unittest.TestCase): + + class _TestCase(func): # type: ignore[valid-type, misc] + @classmethod + def setUpClass(cls): + self.__enter__() + try: + super().setUpClass() + except Exception: + self.__exit__(None, None, None) + raise + + @classmethod + def tearDownClass(cls): + try: + super().tearDownClass() + finally: + self.__exit__(None, None, None) + + _TestCase.__name__ = func.__name__ + _TestCase.__qualname__ = func.__qualname__ + _TestCase.__module__ = func.__module__ + + return _TestCase + + return super().__call__(func) + + +class SubConfigProxy: + """ + Shim to redirect to main config. 
+ `config.triton.cudagraphs` maps to _config["triton.cudagraphs"] + """ + + def __init__(self, config, prefix): + # `super().__setattr__` to bypass custom `__setattr__` + super().__setattr__("_config", config) + super().__setattr__("_prefix", prefix) + + def __setattr__(self, name, value): + return self._config.__setattr__(self._prefix + name, value) + + def __getattr__(self, name): + return self._config.__getattr__(self._prefix + name) + + def __delattr__(self, name): + return self._config.__delattr__(self._prefix + name) + + +def patch_object(obj, name, value): + """ + Workaround `mock.patch.object` issue with ConfigModule + """ + if isinstance(obj, ConfigModule): + return obj.patch(name, value) + return mock.patch.object(obj, name, value) diff --git a/venv/lib/python3.10/site-packages/torch/utils/_content_store.py b/venv/lib/python3.10/site-packages/torch/utils/_content_store.py new file mode 100644 index 0000000000000000000000000000000000000000..f36837ed674e9e21511fb6a22834cab0ca1a0602 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_content_store.py @@ -0,0 +1,238 @@ +# This module provides a FAST (on GPU) content addressable store for storages +# (and tensors on top of them) with VERY WEAK portability guarantees (e.g., +# don't expect CPU/CUDA to address to the same hash, don't expect it to be +# portable across devices) that is NOT cryptographically secure. In return, +# we are able to hash 40G of tensor data on GPU in less than a second, +# compared to running SHA-1 in CPU which would a minute or so. The primary +# use case is for efficiently snapshotting intermediate tensor data for +# offline debugging, but it's been put in this module in case you think of +# another use case for it. The hash function could be replaced with a +# straight reimplementation of SHA-1, which would give us much stronger +# portability guarantees. +# +# WARNING: THERE IS NO BC/FC GUARANTEE FOR THIS FORMAT! If you need to format +# shift the result, consider packing it into a single torch.save object +# with traditional view sharing. +# +# Because of the weak portability guarantees, you can only write to the +# content store from a single process; we don't provide any capability +# of "reopening" a content store to add more things to it. But we don't +# assume that you can keep all of the tensors you want to add to the store +# in memory at once, because you probably can't! Nor do we assume that +# you know a priori whether or not two storages can be deduplicated or not. +# +# Note: only storages are content-addressed; tensors are name addressed +# +# Note: our padding strategy means that [1, 0] and [1] int16 tensors would +# map to the same (padded) storage. We think this will be immaterial for most +# users. + +import ctypes +import functools +import hashlib +import os.path +import struct +from collections import defaultdict +from typing import Dict, Optional, Set + +import torch +import torch._prims as prims +import torch._utils +import torch.nn.functional as F +from torch._C import default_generator + +from torch.multiprocessing.reductions import StorageWeakRef + + +def lazy_compile(**compile_kwargs): + """Lazily wrap a function with torch.compile on the first call + + This avoids eagerly importing dynamo. 
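# Editor's note (assumed usage, not from the original patch): hashing a storage
# with the helper defined below.  stable_hash=True takes the SHA-1 path, so the
# digest is reproducible across processes; the fast default path is not.
import torch
from torch.utils._content_store import hash_storage

t = torch.arange(16, dtype=torch.uint8)
digest = hash_storage(t.untyped_storage(), stable_hash=True)  # 40-char hex digest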
+ """ + + def decorate_fn(fn): + @functools.wraps(fn) + def compile_hook(*args, **kwargs): + compiled_fn = torch.compile(fn, **compile_kwargs) + globals()[fn.__name__] = functools.wraps(fn)(compiled_fn) + return compiled_fn(*args, **kwargs) + + return compile_hook + + return decorate_fn + + +# Use of torch.compile is mandatory for (1) good memory usage +# and (2) xor_sum implementation. This is our first instance of +# using PT2 to implement a kernel in PyTorch; if we get AOT capabilities +# it would be good to apply it here. +@lazy_compile(dynamic=True) +def hash_storage_kernel(x): + # The randint calls are carefully written to hit things we + # have lowerings for in inductor. Lack of unsigned 32-bit integer + # is a pain. + a = torch.randint( + -(2**31), 2**31, x.shape, device=x.device, dtype=torch.int32 + ).abs() + a = ((a % (2**31 - 1)) + 1).long() + b = ( + torch.randint(-(2**31), 2**31, x.shape, device=x.device, dtype=torch.int32) + .abs() + .long() + ) + # This is a standard shift-multiply universal hash family + # plus xor sum hash, using Philox to generate random numbers. + # Our Philox RNG is not deterministic across devices so + # don't use this for stable hashing. + # + # This assumes fixed length so you're also obligated to bucket + # by the length of tensor as well + return prims.xor_sum((a * x + b).int(), [0]) + + +# Returns a hex digest of the data in the storage. Guaranteed to be +# SHA-1 if stable_hash=True, otherwise it will consistent for a single +# process run but not necessarily across processes. +def hash_storage(storage: torch.UntypedStorage, *, stable_hash: bool = False) -> str: + import torch._dynamo + from torch._dynamo.utils import is_compile_supported + + device_type = storage.device.type + if stable_hash or not is_compile_supported(device_type): + cpu_storage = storage.cpu() + # TODO: make storage support buffer protocol so this isn't + # necessary + buf = (ctypes.c_byte * cpu_storage.nbytes()).from_address( + cpu_storage.data_ptr() + ) + sha1 = hashlib.sha1() + sha1.update(buf) + return sha1.hexdigest() + + # TODO: factor this into a random utility + if device_type == "cpu": + generator = default_generator + elif device_type == "cuda": + import torch.cuda + + generator = torch.cuda.default_generators[storage.device.index] + else: + raise AssertionError(f"unhandled device type {device_type}") + state = generator.get_state() + try: + generator.manual_seed(0) + x = torch.empty(0, dtype=torch.uint8, device=storage.device).set_(storage) # type: ignore[call-overload] + # The dtype-casting view cannot be compiled, and so the + # padding/reshaping also needs to be done externally even + # though it could be profitably fused + pad = -x.numel() % 4 + if pad > 0: + x = F.pad(x, (0, pad), "constant", 0) + x = x.view(torch.int32) + # We run the 32-bit hash five times with differing parameters to + # reduce chance of collision + ITER = 5 + cs = [hash_storage_kernel(x).item() for _ in range(ITER)] + return struct.pack(">" + "i" * ITER, *cs).hex() + finally: + generator.set_state(state) + + +class ContentStoreWriter: + # Structure: + # storages/ + # 00/ + # 0000..00 + # tensors/ + # name + def __init__(self, loc: str, stable_hash: bool = False) -> None: + self.loc: str = loc + self.seen_storage_hashes: Set[str] = set() + self.stable_hash = stable_hash + + # TODO: offer some sort of non-blocking API to speed things up + def write_storage(self, storage: torch.UntypedStorage) -> str: + h = hash_storage(storage, stable_hash=self.stable_hash) + if h in self.seen_storage_hashes: 
+ return h + # TODO: consider not using torch.save for this; we don't actually + # need any metadata for the storage + subfolder = os.path.join(self.loc, "storages") + os.makedirs(subfolder, exist_ok=True) + target = os.path.join(subfolder, h) + if os.path.exists(target): + return h + torch.save(storage, target) + self.seen_storage_hashes.add(h) + return h + + def compute_tensor_metadata(self, t: torch.Tensor, h=None): + if h is None: + h = hash_storage(t.untyped_storage(), stable_hash=self.stable_hash) + return ( + t.dtype, + h, + t.storage_offset(), + tuple(t.shape), + t.stride(), + torch._utils.get_tensor_metadata(t), + ) + + def write_tensor(self, name: str, t: torch.Tensor) -> None: + storage = t.untyped_storage() + h = self.write_storage(storage) + # TODO: Support more advanced snapshotting of requires_grad/grad/etc + d, f = os.path.split(name) + payload = self.compute_tensor_metadata(t, h=h) + subfolder = os.path.join(self.loc, "tensors", d) + os.makedirs(subfolder, exist_ok=True) + torch.save(payload, os.path.join(subfolder, f)) + + +class ContentStoreReader: + def __init__(self, loc: str, *, cache=True) -> None: + self.loc = loc + self.storage_cache: Optional[ + Dict[Optional[torch.device], Dict[str, StorageWeakRef]] + ] = None + if cache: + self.storage_cache = defaultdict(dict) + + def read_storage(self, h: str, *, device=None) -> torch.UntypedStorage: + if device is not None: + device = torch.device(device) + ws = ( + self.storage_cache[device].get(h) + if self.storage_cache is not None + else None + ) + s: Optional[torch.UntypedStorage] + if ws is not None: + s = torch.UntypedStorage._new_with_weak_ptr(ws.cdata) + if s is not None: + return s + s = torch.load( + os.path.join(self.loc, "storages", h), + weights_only=True, + map_location=device, + )._untyped_storage + assert s is not None + if self.storage_cache is not None: + self.storage_cache[device][h] = StorageWeakRef(s) + return s + + def read_tensor_metadata(self, name: str): + fn = os.path.join(self.loc, "tensors", name) + if not os.path.exists(fn): + raise FileNotFoundError(fn) + return torch.load(fn, weights_only=True) + + def read_tensor(self, name: str, *, device=None) -> torch.Tensor: + dtype, h, storage_offset, size, stride, metadata = self.read_tensor_metadata( + name + ) + storage = self.read_storage(h, device=device) + t = torch.tensor([], dtype=dtype, device=storage.device) + t.set_(storage, storage_offset, size, stride) + torch._utils.set_tensor_metadata(t, metadata) + return t diff --git a/venv/lib/python3.10/site-packages/torch/utils/_contextlib.py b/venv/lib/python3.10/site-packages/torch/utils/_contextlib.py new file mode 100644 index 0000000000000000000000000000000000000000..c55e6961857524d705dac1d9e6baefe5d19d95c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_contextlib.py @@ -0,0 +1,152 @@ +# Extra utilities for working with context managers that should have been +# in the standard library but are not + +import functools +import inspect +import warnings +import sys +from typing import Any, Callable, TypeVar, cast + +# Used for annotating the decorator usage of _DecoratorContextManager (e.g., +# 'no_grad' and 'enable_grad'). +# See https://mypy.readthedocs.io/en/latest/generics.html#declaring-decorators +FuncType = Callable[..., Any] +F = TypeVar('F', bound=FuncType) + + +def _wrap_generator(ctx_factory, func): + """ + Wrap each generator invocation with the context manager factory. 
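# Editor's note (hypothetical paths and names, not part of the original patch):
# round-tripping a tensor through the content-store classes defined above.
import torch
from torch.utils._content_store import ContentStoreReader, ContentStoreWriter

writer = ContentStoreWriter("/tmp/debug_snapshots", stable_hash=True)
writer.write_tensor("step0/activations", torch.randn(4, 4))

reader = ContentStoreReader("/tmp/debug_snapshots")
restored = reader.read_tensor("step0/activations")  # same values, on CPU by default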
+ + The input should be a function that returns a context manager, + not a context manager itself, to handle one-shot context managers. + """ + @functools.wraps(func) + def generator_context(*args, **kwargs): + gen = func(*args, **kwargs) + + # Generators are suspended and unsuspended at `yield`, hence we + # make sure the grad mode is properly set every time the execution + # flow returns into the wrapped generator and restored when it + # returns through our `yield` to our caller (see PR #49017). + try: + # Issuing `None` to a generator fires it up + with ctx_factory(): + response = gen.send(None) + + while True: + try: + # Forward the response to our caller and get its next request + request = yield response + + except GeneratorExit: + # Inform the still active generator about its imminent closure + with ctx_factory(): + gen.close() + raise + + except BaseException: + # Propagate the exception thrown at us by the caller + with ctx_factory(): + response = gen.throw(*sys.exc_info()) + + else: + # Pass the last request to the generator and get its response + with ctx_factory(): + response = gen.send(request) + + # We let the exceptions raised above by the generator's `.throw` or + # `.send` methods bubble up to our caller, except for StopIteration + except StopIteration as e: + # The generator informed us that it is done: take whatever its + # returned value (if any) was and indicate that we're done too + # by returning it (see docs for python's return-statement). + return e.value + + return generator_context + + +def context_decorator(ctx, func): + """ + Like contextlib.ContextDecorator. + + But with the following differences: + 1. Is done by wrapping, rather than inheritance, so it works with context + managers that are implemented from C and thus cannot easily inherit from + Python classes + 2. Wraps generators in the intuitive way (c.f. https://bugs.python.org/issue37743) + 3. Errors out if you try to wrap a class, because it is ambiguous whether + or not you intended to wrap only the constructor + + The input argument can either be a context manager (in which case it must + be a multi-shot context manager that can be directly invoked multiple times) + or a callable that produces a context manager. + """ + assert not (callable(ctx) and hasattr(ctx, '__enter__')), ( + f"Passed in {ctx} is both callable and also a valid context manager " + "(has __enter__), making it ambiguous which interface to use. If you " + "intended to pass a context manager factory, rewrite your call as " + "context_decorator(lambda: ctx()); if you intended to pass a context " + "manager directly, rewrite your call as context_decorator(lambda: ctx)" + ) + + if not callable(ctx): + def ctx_factory(): + return ctx + else: + ctx_factory = ctx + + if inspect.isclass(func): + raise RuntimeError( + "Cannot decorate classes; it is ambiguous whether or not only the " + "constructor or all methods should have the context manager applied; " + "additionally, decorating a class at definition-site will prevent " + "use of the identifier as a conventional type. " + "To specify which methods to decorate, decorate each of them " + "individually." 
+ ) + + if inspect.isgeneratorfunction(func): + return _wrap_generator(ctx_factory, func) + + @functools.wraps(func) + def decorate_context(*args, **kwargs): + with ctx_factory(): + return func(*args, **kwargs) + + return decorate_context + + +class _DecoratorContextManager: + """Allow a context manager to be used as a decorator.""" + + def __call__(self, orig_func: F) -> F: + if inspect.isclass(orig_func): + warnings.warn("Decorating classes is deprecated and will be disabled in " + "future versions. You should only decorate functions or methods. " + "To preserve the current behavior of class decoration, you can " + "directly decorate the `__init__` method and nothing else.") + func = cast(F, lambda *args, **kwargs: orig_func(*args, **kwargs)) + else: + func = orig_func + + return cast(F, context_decorator(self.clone, func)) + + def __enter__(self) -> None: + raise NotImplementedError + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + raise NotImplementedError + + def clone(self): + # override this method if your children class takes __init__ parameters + return self.__class__() + + +class _NoParamDecoratorContextManager(_DecoratorContextManager): + """Allow a context manager to be used as a decorator without parentheses.""" + + def __new__(cls, orig_func=None): + if orig_func is None: + return super().__new__(cls) + return cls()(orig_func) diff --git a/venv/lib/python3.10/site-packages/torch/utils/_cpp_extension_versioner.py b/venv/lib/python3.10/site-packages/torch/utils/_cpp_extension_versioner.py new file mode 100644 index 0000000000000000000000000000000000000000..0c09a82413fec8ceb9c277d3e036b7f8061fc3da --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_cpp_extension_versioner.py @@ -0,0 +1,58 @@ +import collections + + +Entry = collections.namedtuple('Entry', 'version, hash') + + +def update_hash(seed, value): + # Good old boost::hash_combine + # https://www.boost.org/doc/libs/1_35_0/doc/html/boost/hash_combine_id241013.html + return seed ^ (hash(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2)) + + +def hash_source_files(hash_value, source_files): + for filename in source_files: + with open(filename) as file: + hash_value = update_hash(hash_value, file.read()) + return hash_value + + +def hash_build_arguments(hash_value, build_arguments): + for group in build_arguments: + if group: + for argument in group: + hash_value = update_hash(hash_value, argument) + return hash_value + + +class ExtensionVersioner: + def __init__(self): + self.entries = {} + + def get_version(self, name): + entry = self.entries.get(name) + return None if entry is None else entry.version + + def bump_version_if_changed(self, + name, + source_files, + build_arguments, + build_directory, + with_cuda, + is_python_module, + is_standalone): + hash_value = 0 + hash_value = hash_source_files(hash_value, source_files) + hash_value = hash_build_arguments(hash_value, build_arguments) + hash_value = update_hash(hash_value, build_directory) + hash_value = update_hash(hash_value, with_cuda) + hash_value = update_hash(hash_value, is_python_module) + hash_value = update_hash(hash_value, is_standalone) + + entry = self.entries.get(name) + if entry is None: + self.entries[name] = entry = Entry(0, hash_value) + elif hash_value != entry.hash: + self.entries[name] = entry = Entry(entry.version + 1, hash_value) + + return entry.version diff --git a/venv/lib/python3.10/site-packages/torch/utils/_cuda_trace.py b/venv/lib/python3.10/site-packages/torch/utils/_cuda_trace.py new file mode 
100644 index 0000000000000000000000000000000000000000..18c8ba4e4a99a82b360c67325893b4b307d70fc1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_cuda_trace.py @@ -0,0 +1,99 @@ +import logging +from typing import Callable, Generic, List + +from typing_extensions import ParamSpec # Python 3.10+ + +logger = logging.getLogger(__name__) +P = ParamSpec("P") + + +class CallbackRegistry(Generic[P]): + def __init__(self, name: str): + self.name = name + self.callback_list: List[Callable[P, None]] = [] + + def add_callback(self, cb: Callable[P, None]) -> None: + self.callback_list.append(cb) + + def fire_callbacks(self, *args: P.args, **kwargs: P.kwargs) -> None: + for cb in self.callback_list: + try: + cb(*args, **kwargs) + except Exception as e: + logger.exception( + "Exception in callback for %s registered with CUDA trace", self.name + ) + + +CUDAEventCreationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA event creation" +) +CUDAEventDeletionCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA event deletion" +) +CUDAEventRecordCallbacks: "CallbackRegistry[int, int]" = CallbackRegistry( + "CUDA event record" +) +CUDAEventWaitCallbacks: "CallbackRegistry[int, int]" = CallbackRegistry( + "CUDA event wait" +) +CUDAMemoryAllocationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA memory allocation" +) +CUDAMemoryDeallocationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA memory deallocation" +) +CUDAStreamCreationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA stream creation" +) +CUDADeviceSynchronizationCallbacks: "CallbackRegistry[[]]" = CallbackRegistry( + "CUDA device synchronization" +) +CUDAStreamSynchronizationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA stream synchronization" +) +CUDAEventSynchronizationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA event synchronization" +) + + +def register_callback_for_cuda_event_creation(cb: Callable[[int], None]) -> None: + CUDAEventCreationCallbacks.add_callback(cb) + + +def register_callback_for_cuda_event_deletion(cb: Callable[[int], None]) -> None: + CUDAEventDeletionCallbacks.add_callback(cb) + + +def register_callback_for_cuda_event_record(cb: Callable[[int, int], None]) -> None: + CUDAEventRecordCallbacks.add_callback(cb) + + +def register_callback_for_cuda_event_wait(cb: Callable[[int, int], None]) -> None: + CUDAEventWaitCallbacks.add_callback(cb) + + +def register_callback_for_cuda_memory_allocation(cb: Callable[[int], None]) -> None: + CUDAMemoryAllocationCallbacks.add_callback(cb) + + +def register_callback_for_cuda_memory_deallocation(cb: Callable[[int], None]) -> None: + CUDAMemoryDeallocationCallbacks.add_callback(cb) + + +def register_callback_for_cuda_stream_creation(cb: Callable[[int], None]) -> None: + CUDAStreamCreationCallbacks.add_callback(cb) + + +def register_callback_for_cuda_device_synchronization(cb: Callable[[], None]) -> None: + CUDADeviceSynchronizationCallbacks.add_callback(cb) + + +def register_callback_for_cuda_stream_synchronization( + cb: Callable[[int], None] +) -> None: + CUDAStreamSynchronizationCallbacks.add_callback(cb) + + +def register_callback_for_cuda_event_synchronization(cb: Callable[[int], None]) -> None: + CUDAEventSynchronizationCallbacks.add_callback(cb) diff --git a/venv/lib/python3.10/site-packages/torch/utils/_cxx_pytree.py b/venv/lib/python3.10/site-packages/torch/utils/_cxx_pytree.py new file mode 100644 index 
0000000000000000000000000000000000000000..93605d3b0ba8490b1cf1892839b27a4c95b6713c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_cxx_pytree.py @@ -0,0 +1,970 @@ +""" +Contains utility functions for working with nested python data structures. + +A *pytree* is Python nested data structure. It is a tree in the sense that +nodes are Python collections (e.g., list, tuple, dict) and the leaves are +Python values. Furthermore, a pytree should not contain reference cycles. + +pytrees are useful for working with nested collections of Tensors. For example, +one can use `tree_map` to map a function over all Tensors inside some nested +collection of Tensors and `tree_leaves` to get a flat list of all Tensors +inside some nested collection. pytrees are helpful for implementing nested +collection support for PyTorch APIs. +""" + +import functools +import sys +import types +import warnings +from typing import ( + Any, + Callable, + Iterable, + List, + Optional, + overload, + Tuple, + Type, + TypeVar, + Union, +) + +import torch + +if torch._running_with_deploy(): # type: ignore[no-untyped-call] + raise ImportError("C++ pytree utilities do not work with torch::deploy.") + +import optree +from optree import PyTreeSpec # direct import for type annotations + +from torch.utils._pytree import KeyEntry + + +__all__ = [ + "PyTree", + "Context", + "FlattenFunc", + "UnflattenFunc", + "DumpableContext", + "ToDumpableContextFn", + "FromDumpableContextFn", + "TreeSpec", + "LeafSpec", + "keystr", + "key_get", + "register_pytree_node", + "tree_flatten", + "tree_flatten_with_path", + "tree_unflatten", + "tree_leaves", + "tree_leaves_with_path", + "tree_structure", + "tree_map", + "tree_map_with_path", + "tree_map_", + "tree_map_only", + "tree_map_only_", + "tree_all", + "tree_any", + "tree_all_only", + "tree_any_only", + "treespec_dumps", + "treespec_loads", + "treespec_pprint", +] + + +T = TypeVar("T") +S = TypeVar("S") +U = TypeVar("U") +R = TypeVar("R") + + +Context = Any +PyTree = Any +TreeSpec = PyTreeSpec +FlattenFunc = Callable[[PyTree], Tuple[List[Any], Context]] +UnflattenFunc = Callable[[Iterable[Any], Context], PyTree] +OpTreeUnflattenFunc = Callable[[Context, Iterable[Any]], PyTree] +DumpableContext = Any # Any json dumpable text +ToDumpableContextFn = Callable[[Context], DumpableContext] +FromDumpableContextFn = Callable[[DumpableContext], Context] +KeyPath = Tuple[KeyEntry, ...] +FlattenWithKeysFunc = Callable[[PyTree], Tuple[List[Tuple[KeyEntry, Any]], Any]] + + +def _reverse_args(func: UnflattenFunc) -> OpTreeUnflattenFunc: + @functools.wraps(func) + def wrapped(*args: Any, **kwargs: Any) -> Any: + return func(*reversed(args), **kwargs) + + return wrapped + + +def register_pytree_node( + cls: Type[Any], + flatten_fn: FlattenFunc, + unflatten_fn: UnflattenFunc, + *, + serialized_type_name: Optional[str] = None, + to_dumpable_context: Optional[ToDumpableContextFn] = None, + from_dumpable_context: Optional[FromDumpableContextFn] = None, + flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None, +) -> None: + """Register a container-like type as pytree node. + + Args: + cls (type): A Python type to treat as an internal pytree node. + flatten_fn (callable): A function to be used during flattening, taking an instance of + ``cls`` and returning a pair, with (1) an iterable for the children to be flattened + recursively, and (2) some hashable auxiliary data to be stored in the treespec and to be + passed to the ``unflatten_fn``. 
+ unflatten_fn (callable): A function taking two arguments: the auxiliary data that was + returned by ``flatten_fn`` and stored in the treespec, and the unflattened children. + The function should return an instance of ``cls``. + serialized_type_name (str, optional): A keyword argument used to specify the fully + qualified name used when serializing the tree spec. + to_dumpable_context (callable, optional): An optional keyword argument to custom specify how + to convert the context of the pytree to a custom json dumpable representation. This is + used for json serialization, which is being used in :mod:`torch.export` right now. + from_dumpable_context (callable, optional): An optional keyword argument to custom specify + how to convert the custom json dumpable representation of the context back to the + original context. This is used for json deserialization, which is being used in + :mod:`torch.export` right now. + + Example:: + + >>> # xdoctest: +SKIP + >>> # Registry a Python type with lambda functions + >>> register_pytree_node( + ... set, + ... lambda s: (sorted(s), None, None), + ... lambda children, _: set(children), + ... ) + """ + if flatten_with_keys_fn is not None: + raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.") + + _private_register_pytree_node( + cls, + flatten_fn, + unflatten_fn, + serialized_type_name=serialized_type_name, + to_dumpable_context=to_dumpable_context, + from_dumpable_context=from_dumpable_context, + ) + + from . import _pytree as python + + python._private_register_pytree_node( + cls, + flatten_fn, + unflatten_fn, + serialized_type_name=serialized_type_name, + to_dumpable_context=to_dumpable_context, + from_dumpable_context=from_dumpable_context, + ) + + +def _register_pytree_node( + cls: Type[Any], + flatten_fn: FlattenFunc, + unflatten_fn: UnflattenFunc, + *, + serialized_type_name: Optional[str] = None, + to_dumpable_context: Optional[ToDumpableContextFn] = None, + from_dumpable_context: Optional[FromDumpableContextFn] = None, +) -> None: + """Register a container-like type as pytree node for the C++ pytree only. + + The ``namespace`` argument is used to avoid collisions that occur when different libraries + register the same Python type with different behaviors. It is recommended to add a unique prefix + to the namespace to avoid conflicts with other libraries. Namespaces can also be used to specify + the same class in different namespaces for different use cases. + + .. warning:: + For safety reasons, a ``namespace`` must be specified while registering a custom type. It is + used to isolate the behavior of flattening and unflattening a pytree node type. This is to + prevent accidental collisions between different libraries that may register the same type. + + Args: + cls (type): A Python type to treat as an internal pytree node. + flatten_fn (callable): A function to be used during flattening, taking an instance of + ``cls`` and returning a pair, with (1) an iterable for the children to be flattened + recursively, and (2) some hashable auxiliary data to be stored in the treespec and to be + passed to the ``unflatten_fn``. + unflatten_fn (callable): A function taking two arguments: the auxiliary data that was + returned by ``flatten_fn`` and stored in the treespec, and the unflattened children. + The function should return an instance of ``cls``. + serialized_type_name (str, optional): A keyword argument used to specify the fully + qualified name used when serializing the tree spec. 
+ to_dumpable_context (callable, optional): An optional keyword argument to custom specify how + to convert the context of the pytree to a custom json dumpable representation. This is + used for json serialization, which is being used in :mod:`torch.export` right now. + from_dumpable_context (callable, optional): An optional keyword argument to custom specify + how to convert the custom json dumpable representation of the context back to the + original context. This is used for json deserialization, which is being used in + :mod:`torch.export` right now. + """ + warnings.warn( + "torch.utils._cxx_pytree._register_pytree_node is deprecated. " + "Please use torch.utils._cxx_pytree.register_pytree_node instead.", + stacklevel=2, + ) + + _private_register_pytree_node( + cls, + flatten_fn, + unflatten_fn, + serialized_type_name=serialized_type_name, + to_dumpable_context=to_dumpable_context, + from_dumpable_context=from_dumpable_context, + ) + + +def _private_register_pytree_node( + cls: Type[Any], + flatten_fn: FlattenFunc, + unflatten_fn: UnflattenFunc, + *, + serialized_type_name: Optional[str] = None, + to_dumpable_context: Optional[ToDumpableContextFn] = None, + from_dumpable_context: Optional[FromDumpableContextFn] = None, +) -> None: + """This is an internal function that is used to register a pytree node type + for the C++ pytree only. End-users should use :func:`register_pytree_node` + instead. + """ + # TODO(XuehaiPan): remove this condition when we make Python pytree out-of-box support + # PyStructSequence types + if not optree.is_structseq_class(cls): + optree.register_pytree_node( + cls, + flatten_fn, + _reverse_args(unflatten_fn), + namespace="torch", + ) + + +def tree_flatten( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> Tuple[List[Any], TreeSpec]: + """Flatten a pytree. + + See also :func:`tree_unflatten`. + + The flattening order (i.e., the order of elements in the output list) is deterministic, + corresponding to a left-to-right depth-first tree traversal. + + >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5} + >>> tree_flatten(tree) + ([1, 2, 3, 4, None, 5], PyTreeSpec({'a': *, 'b': (*, [*, *]), 'c': *, 'd': *}, NoneIsLeaf)) + >>> tree_flatten(1) + ([1], PyTreeSpec(*, NoneIsLeaf)) + >>> tree_flatten(None) + ([None], PyTreeSpec(*, NoneIsLeaf)) + + For unordered dictionaries, :class:`dict` and :class:`collections.defaultdict`, the order is + dependent on the **sorted** keys in the dictionary. Please use :class:`collections.OrderedDict` + if you want to keep the keys in the insertion order. + + >>> from collections import OrderedDict + >>> tree = OrderedDict([('b', (2, [3, 4])), ('a', 1), ('c', None), ('d', 5)]) + >>> tree_flatten(tree) + ([2, 3, 4, 1, None, 5], PyTreeSpec(OrderedDict([('b', (*, [*, *])), ('a', *), ('c', *), ('d', *)]), NoneIsLeaf)) + + Args: + tree (pytree): A pytree to flatten. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + A pair ``(leaves, treespec)`` where the first element is a list of leaf values and the + second element is a treespec representing the structure of the pytree. 
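+
+    A common follow-up, shown here only as an illustrative sketch, is to transform the
+    flattened leaves and rebuild the tree with :func:`tree_unflatten`:
+
+    >>> # xdoctest: +SKIP
+    >>> leaves, treespec = tree_flatten({'a': 1, 'b': (2, 3)})
+    >>> tree_unflatten([x * 10 for x in leaves], treespec)
+    {'a': 10, 'b': (20, 30)}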
+ """ + return optree.tree_flatten( # type: ignore[return-value] + tree, + is_leaf=is_leaf, + none_is_leaf=True, + namespace="torch", + ) + + +def tree_unflatten(leaves: Iterable[Any], treespec: TreeSpec) -> PyTree: + """Reconstruct a pytree from the treespec and the leaves. + + The inverse of :func:`tree_flatten`. + + >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5} + >>> leaves, treespec = tree_flatten(tree) + >>> tree == tree_unflatten(leaves, treespec) + True + + Args: + leaves (iterable): The list of leaves to use for reconstruction. The list must match the + number of leaves of the treespec. + treespec (TreeSpec): The treespec to reconstruct. + + Returns: + The reconstructed pytree, containing the ``leaves`` placed in the structure described by + ``treespec``. + """ + if not isinstance(treespec, TreeSpec): + raise TypeError( + f"tree_unflatten(values, spec): Expected `spec` to be instance of " + f"TreeSpec but got item of type {type(treespec)}." + ) + return optree.tree_unflatten(treespec, leaves) # type: ignore[arg-type] + + +def tree_leaves( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> List[Any]: + """Get the leaves of a pytree. + + See also :func:`tree_flatten`. + + >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5} + >>> tree_leaves(tree) + [1, 2, 3, 4, None, 5] + >>> tree_leaves(1) + [1] + >>> tree_leaves(None) + [None] + + Args: + tree (pytree): A pytree to flatten. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + A list of leaf values. + """ + return optree.tree_leaves( + tree, + is_leaf=is_leaf, + none_is_leaf=True, + namespace="torch", + ) + + +def tree_structure( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> TreeSpec: + """Get the treespec for a pytree. + + See also :func:`tree_flatten`. + + >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5} + >>> tree_structure(tree) + PyTreeSpec({'a': *, 'b': (*, [*, *]), 'c': *, 'd': *}, NoneIsLeaf) + >>> tree_structure(1) + PyTreeSpec(*, NoneIsLeaf) + >>> tree_structure(None) + PyTreeSpec(*, NoneIsLeaf) + + Args: + tree (pytree): A pytree to flatten. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + A treespec object representing the structure of the pytree. + """ + return optree.tree_structure( # type: ignore[return-value] + tree, + is_leaf=is_leaf, + none_is_leaf=True, + namespace="torch", + ) + + +def tree_map( + func: Callable[..., Any], + tree: PyTree, + *rests: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + """Map a multi-input function over pytree args to produce a new pytree. + + See also :func:`tree_map_`. 
+ + >>> tree_map(lambda x: x + 1, {'x': 7, 'y': (42, 64)}) + {'x': 8, 'y': (43, 65)} + >>> tree_map(lambda x: x is None, {'x': 7, 'y': (42, 64), 'z': None}) + {'x': False, 'y': (False, False), 'z': True} + + If multiple inputs are given, the structure of the tree is taken from the first input; + subsequent inputs need only have ``tree`` as a prefix: + + >>> tree_map(lambda x, y: [x] + y, [5, 6], [[7, 9], [1, 2]]) + [[5, 7, 9], [6, 1, 2]] + + Args: + func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the + corresponding leaves of the pytrees. + tree (pytree): A pytree to be mapped over, with each leaf providing the first positional + argument to function ``func``. + rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as + ``tree`` or has ``tree`` as a prefix. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + A new pytree with the same structure as ``tree`` but with the value at each leaf given by + ``func(x, *xs)`` where ``x`` is the value at the corresponding leaf in ``tree`` and ``xs`` + is the tuple of values at corresponding nodes in ``rests``. + """ + return optree.tree_map( + func, + tree, + *rests, + is_leaf=is_leaf, + none_is_leaf=True, + namespace="torch", + ) + + +def tree_map_( + func: Callable[..., Any], + tree: PyTree, + *rests: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + """Like :func:`tree_map`, but do an inplace call on each leaf and return the original tree. + + See also :func:`tree_map`. + + Args: + func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the + corresponding leaves of the pytrees. + tree (pytree): A pytree to be mapped over, with each leaf providing the first positional + argument to function ``func``. + rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as + ``tree`` or has ``tree`` as a prefix. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + The original ``tree`` with the value at each leaf is given by the side-effect of function + ``func(x, *xs)`` (not the return value) where ``x`` is the value at the corresponding leaf + in ``tree`` and ``xs`` is the tuple of values at values at corresponding nodes in ``rests``. 
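+
+    A minimal sketch of an in-place update over tensor leaves (illustrative only;
+    the tensors used here are arbitrary):
+
+    >>> # xdoctest: +SKIP
+    >>> tree = {'a': torch.ones(2), 'b': (torch.ones(3),)}
+    >>> _ = tree_map_(lambda t: t.mul_(2.0), tree)
+    >>> tree['a']
+    tensor([2., 2.])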
+ """ + return optree.tree_map_( + func, + tree, + *rests, + is_leaf=is_leaf, + none_is_leaf=True, + namespace="torch", + ) + + +Type2 = Tuple[Type[T], Type[S]] +Type3 = Tuple[Type[T], Type[S], Type[U]] +if sys.version_info >= (3, 10): + TypeAny = Union[Type[Any], Tuple[Type[Any], ...], types.UnionType] +else: + TypeAny = Union[Type[Any], Tuple[Type[Any], ...]] + +Fn2 = Callable[[Union[T, S]], R] +Fn3 = Callable[[Union[T, S, U]], R] +Fn = Callable[[T], R] +FnAny = Callable[[Any], R] + +MapOnlyFn = Callable[[T], Callable[[Any], Any]] + + +# These specializations help with type inference on the lambda passed to this +# function +@overload +def map_only(__type_or_types_or_pred: Type2[T, S]) -> MapOnlyFn[Fn2[T, S, Any]]: + ... + + +@overload +def map_only(__type_or_types_or_pred: Type3[T, S, U]) -> MapOnlyFn[Fn3[T, S, U, Any]]: + ... + + +@overload +def map_only(__type_or_types_or_pred: Type[T]) -> MapOnlyFn[Fn[T, Any]]: + ... + + +# This specialization is needed for the implementations below that call +@overload +def map_only(__type_or_types_or_pred: TypeAny) -> MapOnlyFn[FnAny[Any]]: + ... + + +@overload +def map_only(__type_or_types_or_pred: Callable[[Any], bool]) -> MapOnlyFn[FnAny[Any]]: + ... + + +def map_only( + __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]] +) -> MapOnlyFn[FnAny[Any]]: + """ + Suppose you are writing a tree_map over tensors, leaving everything + else unchanged. Ordinarily you would have to write: + + def go(t): + if isinstance(t, Tensor): + return ... + else: + return t + + With this function, you only need to write: + + @map_only(Tensor) + def go(t): + return ... + + You can also directly use 'tree_map_only' + """ + if isinstance(__type_or_types_or_pred, (type, tuple)) or ( + sys.version_info >= (3, 10) + and isinstance(__type_or_types_or_pred, types.UnionType) + ): + + def pred(x: Any) -> bool: + return isinstance(x, __type_or_types_or_pred) # type: ignore[arg-type] + + elif callable(__type_or_types_or_pred): + pred = __type_or_types_or_pred # type: ignore[assignment] + else: + raise TypeError("Argument must be a type, a tuple of types, or a callable.") + + def wrapper(func: Callable[[T], Any]) -> Callable[[Any], Any]: + @functools.wraps(func) + def wrapped(x: T) -> Any: + if pred(x): + return func(x) + return x + + return wrapped + + return wrapper + + +@overload +def tree_map_only( + __type_or_types_or_pred: Type[T], + func: Fn[T, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only( + __type_or_types_or_pred: Type2[T, S], + func: Fn2[T, S, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only( + __type_or_types_or_pred: Type3[T, S, U], + func: Fn3[T, S, U, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only( + __type_or_types_or_pred: Callable[[Any], bool], + func: FnAny[Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +def tree_map_only( + __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]], + func: FnAny[Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + return tree_map(map_only(__type_or_types_or_pred)(func), tree, is_leaf=is_leaf) + + +@overload +def tree_map_only_( + __type_or_types_or_pred: Type[T], + func: Fn[T, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... 
+ + +@overload +def tree_map_only_( + __type_or_types_or_pred: Type2[T, S], + func: Fn2[T, S, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only_( + __type_or_types_or_pred: Type3[T, S, U], + func: Fn3[T, S, U, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only_( + __type_or_types_or_pred: Callable[[Any], bool], + func: FnAny[Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +def tree_map_only_( + __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]], + func: FnAny[Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + return tree_map_(map_only(__type_or_types_or_pred)(func), tree, is_leaf=is_leaf) + + +def tree_all( + pred: Callable[[Any], bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + flat_args = tree_leaves(tree, is_leaf=is_leaf) + return all(map(pred, flat_args)) + + +def tree_any( + pred: Callable[[Any], bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + flat_args = tree_leaves(tree, is_leaf=is_leaf) + return any(map(pred, flat_args)) + + +@overload +def tree_all_only( + __type_or_types: Type[T], + pred: Fn[T, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +@overload +def tree_all_only( + __type_or_types: Type2[T, S], + pred: Fn2[T, S, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +@overload +def tree_all_only( + __type_or_types: Type3[T, S, U], + pred: Fn3[T, S, U, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +def tree_all_only( + __type_or_types: TypeAny, + pred: FnAny[bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + flat_args = tree_leaves(tree, is_leaf=is_leaf) + return all(pred(x) for x in flat_args if isinstance(x, __type_or_types)) + + +@overload +def tree_any_only( + __type_or_types: Type[T], + pred: Fn[T, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +@overload +def tree_any_only( + __type_or_types: Type2[T, S], + pred: Fn2[T, S, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +@overload +def tree_any_only( + __type_or_types: Type3[T, S, U], + pred: Fn3[T, S, U, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +def tree_any_only( + __type_or_types: TypeAny, + pred: FnAny[bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + flat_args = tree_leaves(tree, is_leaf=is_leaf) + return any(pred(x) for x in flat_args if isinstance(x, __type_or_types)) + + +def broadcast_prefix( + prefix_tree: PyTree, + full_tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> List[Any]: + """Return a list of broadcasted leaves in ``prefix_tree`` to match the number of leaves in ``full_tree``. + + If a ``prefix_tree`` is a prefix of a ``full_tree``, this means the ``full_tree`` can be + constructed by replacing the leaves of ``prefix_tree`` with appropriate **subtrees**. + + This function returns a list of leaves with the same size as ``full_tree``. The leaves are + replicated from ``prefix_tree``. 
The number of replicas is determined by the corresponding + subtree in ``full_tree``. + + >>> broadcast_prefix(1, [1, 2, 3]) + [1, 1, 1] + >>> broadcast_prefix([1, 2, 3], [1, 2, 3]) + [1, 2, 3] + >>> broadcast_prefix([1, 2, 3], [1, 2, 3, 4]) + Traceback (most recent call last): + ... + ValueError: list arity mismatch; expected: 3, got: 4; list: [1, 2, 3, 4]. + >>> broadcast_prefix([1, 2, 3], [1, 2, (3, 4)]) + [1, 2, 3, 3] + >>> broadcast_prefix([1, 2, 3], [1, 2, {'a': 3, 'b': 4, 'c': (None, 5)}]) + [1, 2, 3, 3, 3, 3] + + Args: + prefix_tree (pytree): A pytree with the same structure as a prefix of ``full_tree``. + full_tree (pytree): A pytree with the same structure as a suffix of ``prefix_tree``. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + A list of leaves in ``prefix_tree`` broadcasted to match the number of leaves in ``full_tree``. + """ + return optree.broadcast_prefix( + prefix_tree, + full_tree, + is_leaf=is_leaf, + none_is_leaf=True, + namespace="torch", + ) + + +# Broadcasts a pytree to the provided TreeSpec and returns the flattened +# values. If this is not possible, then this function returns None. +# +# For example, given pytree=0 and spec=TreeSpec(list, None, [LeafSpec(), LeafSpec()]), +# would return [0, 0]. This is useful for part of the vmap implementation: +# a user can pass in vmap(fn, in_dims)(*inputs). `in_dims` should be +# broadcastable to the tree structure of `inputs` and we use +# _broadcast_to_and_flatten to check this. +def _broadcast_to_and_flatten( + tree: PyTree, + treespec: TreeSpec, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> Optional[List[Any]]: + assert isinstance(treespec, TreeSpec) + full_tree = tree_unflatten([0] * treespec.num_leaves, treespec) + try: + return broadcast_prefix(tree, full_tree, is_leaf=is_leaf) + except ValueError: + return None + + +def treespec_dumps(treespec: TreeSpec, protocol: Optional[int] = None) -> str: + """Serialize a treespec to a JSON string.""" + if not isinstance(treespec, TreeSpec): + raise TypeError( + f"treespec_dumps(spec): Expected `spec` to be instance of " + f"TreeSpec but got item of type {type(treespec)}." 
+ ) + from ._pytree import ( + tree_structure as _tree_structure, + treespec_dumps as _treespec_dumps, + ) + + orig_treespec = _tree_structure(tree_unflatten([0] * treespec.num_leaves, treespec)) + return _treespec_dumps(orig_treespec, protocol=protocol) + + +def treespec_loads(serialized: str) -> TreeSpec: + """Deserialize a treespec from a JSON string.""" + from ._pytree import ( + tree_unflatten as _tree_unflatten, + treespec_loads as _treespec_loads, + ) + + orig_treespec = _treespec_loads(serialized) + dummy_tree = _tree_unflatten([0] * orig_treespec.num_leaves, orig_treespec) + treespec = tree_structure(dummy_tree) + return treespec + + +class _DummyLeaf: + def __repr__(self) -> str: + return "*" + + +def treespec_pprint(treespec: TreeSpec) -> str: + dummy_tree = tree_unflatten( + [_DummyLeaf() for _ in range(treespec.num_leaves)], + treespec, + ) + return repr(dummy_tree) + + +class LeafSpecMeta(type(TreeSpec)): # type: ignore[misc] + def __instancecheck__(self, instance: object) -> bool: + return isinstance(instance, TreeSpec) and instance.is_leaf() + + +class LeafSpec(TreeSpec, metaclass=LeafSpecMeta): + def __new__(cls) -> "LeafSpec": + return optree.treespec_leaf(none_is_leaf=True) # type: ignore[return-value] + + +def tree_flatten_with_path( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> Tuple[List[Tuple[KeyPath, Any]], TreeSpec]: + """Flattens a pytree like :func:`tree_flatten`, but also returns each leaf's key path. + + Args: + tree: a pytree to flatten. If it contains a custom type, that type must be + registered with an appropriate `tree_flatten_with_path_fn` when registered + with :func:`register_pytree_node`. + is_leaf: An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + Returns: + A tuple where the first element is a list of (key path, leaf) pairs, and the + second element is a :class:`TreeSpec` representing the structure of the flattened + tree. + """ + raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.") + + +def tree_leaves_with_path( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> List[Tuple[KeyPath, Any]]: + """Gets the leaves of a pytree like ``tree_leaves`` and returns each leaf's key path. + + Args: + tree: a pytree. If it contains a custom type, that type must be + registered with an appropriate `tree_flatten_with_path_fn` when registered + with :func:`register_pytree_node`. + is_leaf: An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + Returns: + A list of (key path, leaf) pairs. 
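+
+    .. note::
+        Key paths are not yet supported by this C++-backed implementation, so calling
+        this function currently raises ``NotImplementedError``.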
+ """ + raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.") + + +def tree_map_with_path( + func: Callable[..., Any], + tree: PyTree, + *rests: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + """Like :func:`tree_map`, but the provided callable takes an additional key path argument. + + Args: + func: A function that takes ``2 + len(rests)`` arguments, to be applied at the + corresponding leaves of the pytrees. The first positional argument + to ``func`` is the key path of the leaf in question. The second + positional argument is the value of the leaf. + tree: A pytree to be mapped over, with each leaf providing the first positional + argument to function ``func``. + rests: A tuple of pytrees, each of which has the same structure as + ``tree`` or has ``tree`` as a prefix. + is_leaf: An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns + A new pytree with the same structure as ``tree`` but with the value at each leaf given by + ``func(keypath, x, *xs)`` where ``keypath`` is the key path at the + corresponding leaf in ``tree``, ``x`` is the value at that leaf, and + ``xs`` is the tuple of values at corresponding nodes in ``rests``. + """ + raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.") + + +def keystr(kp: KeyPath) -> str: + """Given a key path, return a pretty-printed representation.""" + raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.") + + +def key_get(obj: Any, kp: KeyPath) -> Any: + """Given an object and a key path, return the value at the key path.""" + raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.") diff --git a/venv/lib/python3.10/site-packages/torch/utils/_device.py b/venv/lib/python3.10/site-packages/torch/utils/_device.py new file mode 100644 index 0000000000000000000000000000000000000000..d4909e54c267c8daac6dd37c52196c9870178140 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_device.py @@ -0,0 +1,91 @@ +from typing import Optional +import torch +from torch.overrides import TorchFunctionMode +from torch.utils._contextlib import context_decorator +import functools + +CURRENT_DEVICE: Optional[torch.device] = None + +@functools.lru_cache(1) +def _device_constructors(): + return { + # standard ones + torch.empty, + torch.empty_permuted, + torch.empty_strided, + torch.empty_quantized, + torch.ones, + torch.arange, + torch.bartlett_window, + torch.blackman_window, + torch.eye, + torch.fft.fftfreq, + torch.fft.rfftfreq, + torch.full, + torch.fill, + torch.hamming_window, + torch.hann_window, + torch.kaiser_window, + torch.linspace, + torch.logspace, + torch.nested.nested_tensor, + # This function doesn't actually take a device argument + # torch.normal, + torch.ones, + torch.rand, + torch.randn, + torch.randint, + torch.randperm, + torch.range, + torch.sparse_coo_tensor, + torch.sparse_compressed_tensor, + torch.sparse_csr_tensor, + torch.sparse_csc_tensor, + torch.sparse_bsr_tensor, + torch.sparse_bsc_tensor, + torch.tril_indices, + torch.triu_indices, + torch.vander, + torch.zeros, + torch.asarray, + # weird ones + torch.tensor, + torch.as_tensor, + torch.scalar_tensor, + 
torch.asarray, + } + +# NB: This is directly called from C++ in torch/csrc/Device.cpp +class DeviceContext(TorchFunctionMode): + def __init__(self, device): + self.device = torch.device(device) + + def __enter__(self): + global CURRENT_DEVICE + self.old_device = CURRENT_DEVICE + CURRENT_DEVICE = self.device + return super().__enter__() + + def __exit__(self, exc_type, exc_val, exc_tb): + global CURRENT_DEVICE + CURRENT_DEVICE = self.old_device + return super().__exit__(exc_type, exc_val, exc_tb) + + def __torch_function__(self, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + if func in _device_constructors() and kwargs.get('device') is None: + kwargs['device'] = self.device + return func(*args, **kwargs) + +# NB: This is directly called from C++ in torch/csrc/Device.cpp +def device_decorator(device, func): + return context_decorator(lambda: device, func) + +def set_device(device): + """ + Set the default device inside of the wrapped function by decorating it with this function. + + If you would like to use this as a context manager, use device as a + context manager directly, e.g., ``with torch.device(device)``. + """ + return lambda func: device_decorator(torch.device(device), func) diff --git a/venv/lib/python3.10/site-packages/torch/utils/_foreach_utils.py b/venv/lib/python3.10/site-packages/torch/utils/_foreach_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9a9a6a15cada17805f12f36ea8c932a33f4606b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_foreach_utils.py @@ -0,0 +1,47 @@ +from typing import List, Dict, Tuple, Optional + +import torch +from torch import Tensor +from torch.autograd.grad_mode import no_grad +from typing_extensions import TypeAlias + +def _get_foreach_kernels_supported_devices() -> List[str]: + r"""Return the device type list that supports foreach kernels.""" + return ["cuda", "xpu", torch._C._get_privateuse1_backend_name()] + +def _get_fused_kernels_supported_devices() -> List[str]: + r"""Return the device type list that supports fused kernels in optimizer.""" + return ["cuda", "xpu", torch._C._get_privateuse1_backend_name()] + +TensorListList: TypeAlias = List[List[Optional[Tensor]]] +Indices: TypeAlias = List[int] + +# This util function splits tensors into groups by device and dtype, which is useful before sending +# tensors off to a foreach implementation, which requires tensors to be on one device and dtype. +# If tensorlistlist contains more than one tensorlist, the following assumptions are made BUT NOT verified: +# - tensorlists CAN be None +# - all tensors in the first specified list cannot be None +# - given an index i, all specified tensorlist[i]s match in dtype and device +# with_indices (bool, optional): whether to track previous indices as the last list per dictionary entry. +# It comes in handy if there are Nones or literals in the tensorlists that are getting scattered out. +# Whereas mutating a tensor in the resulting split-up tensorlists WILL propagate changes back to the +# original input tensorlists, changing up Nones/literals WILL NOT propagate, and manual propagation +# may be necessary. Check out torch/optim/sgd.py for an example. 
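+#
+# A rough usage sketch (hypothetical tensors, kept as a comment since the helper
+# is only defined just below):
+#
+#   params = [torch.randn(2, device="cuda"), torch.randn(3)]
+#   grads = [torch.randn(2, device="cuda"), torch.randn(3)]
+#   grouped = _group_tensors_by_device_and_dtype([params, grads])
+#   # keys are (device, dtype) pairs such as (device('cuda:0'), torch.float32);
+#   # each value bundles the matching tensors from every input list.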
+@no_grad() +def _group_tensors_by_device_and_dtype( + tensorlistlist: TensorListList, + with_indices: bool = False, +) -> Dict[Tuple[torch.device, torch.dtype], Tuple[TensorListList, Indices]]: + return { + (device, getattr(torch, str_dtype)): value + for (device, str_dtype), value in + torch._C._group_tensors_by_device_and_dtype(tensorlistlist, with_indices).items() + } + + +def _device_has_foreach_support(device: torch.device) -> bool: + return device.type in (_get_foreach_kernels_supported_devices() + ["cpu"]) and not torch.jit.is_scripting() + + +def _has_foreach_support(tensors: List[Tensor], device: torch.device) -> bool: + return _device_has_foreach_support(device) and all(t is None or type(t) == torch.Tensor for t in tensors) diff --git a/venv/lib/python3.10/site-packages/torch/utils/_freeze.py b/venv/lib/python3.10/site-packages/torch/utils/_freeze.py new file mode 100644 index 0000000000000000000000000000000000000000..c7be90a4baee6d0f8e70d6d12a197eb160146975 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_freeze.py @@ -0,0 +1,289 @@ +""" +Freeze Python packages. + +Freezing makes it possible to ship arbitrary Python modules as part of a C++ +library. The Python source of the module is compiled to bytecode and written +to `.c` files, to be imported by Python's built-in FrozenImporter. + +In a normal Python installation, FrozenImporter is only used to bootstrap the +initialization of the import machinery. Python's importers are defined in +Python (see `_bootstrap.py` and `_bootstrap_external.py`) but need to be +retrieved before any importers are available. Freezing the module bytecode +resolves this circular dependency. + +This script will freeze the Python standard library. It produces two things: +- Bytecode files: A set of `.c` that define C variables containing Python bytecode. +- Main file: A `main.c` file listing all of these modules in the right form to be + consumed by FrozenImporter. + +The library that wishes to these modules make them available to the local +Python instance by extending `PyImport_FrozenModules` appropriately (see +https://docs.python.org/3/c-api/import.html#c.PyImport_FrozenModules). +""" + +import argparse +import functools +import itertools +import marshal +import os +import types +from dataclasses import dataclass +from pathlib import Path +from typing import List + + +PATH_MARKER = "" +MAIN_INCLUDES = """#include + +""" + +MAIN_PREFIX_TEMPLATE = """ +// Compiled standard library modules. These should be appended to the existing +// `PyImport_FrozenModules` that ships with CPython. +struct _frozen {}[] = {{ +""" + +FAKE_PREFIX = MAIN_PREFIX_TEMPLATE.format("_PyImport_FrozenModules") + +MAIN_SUFFIX = """\ + {0, 0, 0} /* sentinel */ +}; +""" + +# Exclude some standard library modules to: +# 1. Slim down the final frozen lib. +# 2. Remove functionality we don't want to support. +DENY_LIST = [ + # Interface to unix databases + "dbm", + # ncurses bindings (terminal interfaces) + "curses", + # Tcl/Tk GUI + "tkinter", + "tkinter", + # Tests for the standard library + "test", + "tests", + "idle_test", + "__phello__.foo.py", + # importlib frozen modules. These are already baked into CPython. + "_bootstrap.py", + "_bootstrap_external.py", +] + +NUM_BYTECODE_FILES = 5 + + +def indent_msg(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + args[0].indent += 1 + ret = fn(*args, **kwargs) + args[0].indent -= 1 + return ret + + return wrapper + + +@dataclass +class FrozenModule: + # The fully qualified module name, e.g. 
'foo.bar.baz' + module_name: str + # The name of the C variable that holds the bytecode, e.g. 'M_foo__bar__baz' + c_name: str + # The size of the C variable. Negative if this module is a package. + size: int + # The frozen bytecode + bytecode: bytes + + +class Freezer: + def __init__(self, verbose: bool): + self.frozen_modules: List[FrozenModule] = [] + self.indent: int = 0 + self.verbose: bool = verbose + + def msg(self, path: Path, code: str): + if not self.verbose: + return + # P: package dir + # F: python file + # S: skipped (not a package dir) + # X: skipped (deny-listed) + # N: skipped (not a python file) + for i in range(self.indent): + print(" ", end="") + print(f"{code} {path}") + + def write_bytecode(self, install_root): + """ + Write the `.c` files containing the frozen bytecode. + + Shared frozen modules evenly across the files. + """ + bytecode_file_names = [f"bytecode_{i}.c" for i in range(NUM_BYTECODE_FILES)] + bytecode_files = [ + open(os.path.join(install_root, name), "w") for name in bytecode_file_names + ] + it = itertools.cycle(bytecode_files) + for m in self.frozen_modules: + self.write_frozen(m, next(it)) + + for f in bytecode_files: + f.close() + + def write_main(self, install_root, oss, symbol_name): + """Write the `main.c` file containing a table enumerating all the frozen modules.""" + with open(os.path.join(install_root, "main.c"), "w") as outfp: + outfp.write(MAIN_INCLUDES) + for m in self.frozen_modules: + outfp.write(f"extern unsigned char {m.c_name}[];\n") + + outfp.write(MAIN_PREFIX_TEMPLATE.format(symbol_name)) + for m in self.frozen_modules: + outfp.write(f'\t{{"{m.module_name}", {m.c_name}, {m.size}}},\n') + outfp.write(MAIN_SUFFIX) + if oss: + outfp.write(FAKE_PREFIX) + outfp.write(MAIN_SUFFIX) + + def write_frozen(self, m: FrozenModule, outfp): + """Write a single frozen module's bytecode out to a C variable.""" + outfp.write(f"unsigned char {m.c_name}[] = {{") + for i in range(0, len(m.bytecode), 16): + outfp.write("\n\t") + for c in bytes(m.bytecode[i : i + 16]): + outfp.write("%d," % c) + outfp.write("\n};\n") + + def compile_path(self, path: Path, top_package_path: Path): + """Entry point for compiling a Path object.""" + if path.is_dir(): + self.compile_package(path, top_package_path) + else: + self.compile_file(path, top_package_path) + + @indent_msg + def compile_package(self, path: Path, top_package_path: Path): + """Compile all the files within a Python package dir.""" + assert path.is_dir() + if path.name in DENY_LIST: + self.msg(path, "X") + return + + # Python packages are directories that have __init__.py in them. + is_package_dir = any(child.name == "__init__.py" for child in path.iterdir()) + if not is_package_dir: + self.msg(path, "S") + return + + self.msg(path, "P") + # Recursively compile all children in this dir + for child in path.iterdir(): + self.compile_path(child, top_package_path) + + def get_module_qualname(self, file_path: Path, top_package_path: Path) -> List[str]: + # `path` looks like 'Lib/foo/bar/baz.py' + + # chop off 'Lib/' to get something that represents a Python module hierarchy. + # e.g. 'foo/bar/baz.py', which maps to 'foo.bar.baz' + normalized_path = file_path.relative_to(top_package_path.parent) + + if normalized_path.name == "__init__.py": + # Special handling for `__init__.py`. In this case, this file + # specifies that the containing directory should be treated as a package. 
+ # For 'foo/bar/baz/__init__.py': + # - The module name is 'baz' + module_basename = normalized_path.parent.name + # - The parent is foo.bar (need to shave off the 'baz') + module_parent = normalized_path.parent.parent.parts + else: + module_basename = normalized_path.stem + module_parent = normalized_path.parent.parts + return list(module_parent) + [module_basename] + + def compile_string(self, file_content: str) -> types.CodeType: + # instead of passing in the real build time path to 'compile', we + # pass in a marker instead. This prevents the build time path being + # leaked to runtime. That path may not be available at runtime. + # Setting the path to a mark make sure it's a hard error rather + # than a flaky error when inspect module tries to retrieve python source + # code during torchscripting. + path_marker = PATH_MARKER + return compile(file_content, path_marker, "exec") + + @indent_msg + def compile_file(self, path: Path, top_package_path: Path): + """ + Compile a Python source file to frozen bytecode. + + Append the result to `self.frozen_modules`. + """ + assert path.is_file() + if path.suffix != ".py": + self.msg(path, "N") + return + + if path.name in DENY_LIST: + self.msg(path, "X") + return + + self.msg(path, "F") + module_qualname = self.get_module_qualname(path, top_package_path) + module_mangled_name = "__".join(module_qualname) + c_name = "M_" + module_mangled_name + + with open(path) as src_file: + co = self.compile_string(src_file.read()) + + bytecode = marshal.dumps(co) + size = len(bytecode) + if path.name == "__init__.py": + # Python packages are signified by negative size. + size = -size + self.frozen_modules.append( + FrozenModule(".".join(module_qualname), c_name, size, bytecode) + ) + + +def main() -> None: + parser = argparse.ArgumentParser(description="Compile py source") + parser.add_argument("paths", nargs="*", help="Paths to freeze.") + parser.add_argument("--verbose", action="store_true", help="Print debug logs") + parser.add_argument( + "--install-dir", "--install_dir", help="Root directory for all output files" + ) + parser.add_argument( + "--oss", + action="store_true", + help="If it's OSS build, add a fake _PyImport_FrozenModules", + ) + parser.add_argument( + "--symbol-name", + "--symbol_name", + help="The name of the frozen module array symbol to generate", + default="_PyImport_FrozenModules_torch", + ) + + args = parser.parse_args() + + f = Freezer(args.verbose) + + for p in args.paths: + path = Path(p) + if path.is_dir() and not Path.exists(path / "__init__.py"): + # this 'top level path p' is a standard directory containing modules, + # not a module itself + # each 'mod' could be a dir containing __init__.py or .py file + # NB: sorted to make sure this is deterministic + for mod in sorted(path.glob("*")): + f.compile_path(mod, mod) + else: + f.compile_path(path, path) + + f.write_bytecode(args.install_dir) + f.write_main(args.install_dir, args.oss, args.symbol_name) + + +if __name__ == "__main__": + main() # pragma: no cover diff --git a/venv/lib/python3.10/site-packages/torch/utils/_import_utils.py b/venv/lib/python3.10/site-packages/torch/utils/_import_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b7756a6fa62f94d76ad0149a93511aa56c4468d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_import_utils.py @@ -0,0 +1,42 @@ +import functools +import importlib.util + +import torch + + +def _check_module_exists(name: str) -> bool: + r"""Returns if a top-level module with :attr:`name` exists *without** 
+ importing it. This is generally safer than try-catch block around a + `import X`. It avoids third party libraries breaking assumptions of some of + our tests, e.g., setting multiprocessing start method when imported + (see librosa/#747, torchvision/#544). + """ + try: + spec = importlib.util.find_spec(name) + return spec is not None + except ImportError: + return False + + +@functools.lru_cache +def dill_available(): + return ( + _check_module_exists("dill") + # dill fails to import under torchdeploy + and not torch._running_with_deploy() + ) + + +@functools.lru_cache +def import_dill(): + if not dill_available(): + return None + + import dill + + # XXX: By default, dill writes the Pickler dispatch table to inject its + # own logic there. This globally affects the behavior of the standard library + # pickler for any user who transitively depends on this module! + # Undo this extension to avoid altering the behavior of the pickler globally. + dill.extend(use_dill=False) + return dill diff --git a/venv/lib/python3.10/site-packages/torch/utils/_mode_utils.py b/venv/lib/python3.10/site-packages/torch/utils/_mode_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c6e3cbb5e9403cb3dda102a85665da15a4f10482 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_mode_utils.py @@ -0,0 +1,10 @@ +import torch +from typing import TypeVar + +T = TypeVar('T') + +# returns if all are the same mode +def all_same_mode(modes): + return all(tuple(mode == modes[0] for mode in modes)) + +no_dispatch = torch._C._DisableTorchDispatch diff --git a/venv/lib/python3.10/site-packages/torch/utils/_python_dispatch.py b/venv/lib/python3.10/site-packages/torch/utils/_python_dispatch.py new file mode 100644 index 0000000000000000000000000000000000000000..4c774f2d0e16dfb1de6813a6abcdd30d4e251d33 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_python_dispatch.py @@ -0,0 +1,495 @@ +import contextlib +from typing import Optional, Union, List, Set, Dict, Any + +import warnings +from dataclasses import dataclass +import torch +import torchgen +from torch._C import _len_torch_dispatch_stack, _get_dispatch_stack_at, \ + _pop_torch_dispatch_stack, _push_on_torch_dispatch_stack, DispatchKey + + +# TODO: Limitations and things about enable_torch_dispatch_mode we should fix before exposing it: +# - We need a better user-facing api for _DisableTorchDispatch that +# is able to selectively disable __torch_dispatch__ of a particular class. +# - It doesn't work with the tensor constructors (torch.tensor, torch.Tensor) +# - Better name (see https://github.com/pytorch/pytorch/pull/63496#discussion_r694091694) + +class TorchDispatchMode: + """ + A ``TorchDispatchMode`` allows you to override the meaning of all + ``__torch_dispatch__`` overrideable functions within a dynamic scope, + without having to actually create a tensor subclass or manually + monkey-patch functions in the PyTorch API. Some common situations + where you should use a mode: + + * You want to override the meaning of factory functions, or other + functions that do not otherwise take a tensor as an argument + (these cannot be overridden with tensor subclasses). + + * You want to override the behavior of all functions without needing + to wrap your inputs in tensor subclasses; e.g., if you are just + interested in logging intermediate computations. + + * You want to control the order of execution of various tensor + subclasses explicitly, rather than implicitly via the return of + ``NotImplemented``. 
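+
+    A minimal sketch of such a mode (``LoggingMode`` is only an illustrative name,
+    not something defined in this module):
+
+    >>> # xdoctest: +SKIP
+    >>> class LoggingMode(TorchDispatchMode):
+    ...     def __torch_dispatch__(self, func, types, args=(), kwargs=None):
+    ...         print(f"dispatching {func}")
+    ...         return func(*args, **(kwargs or {}))
+    >>> with LoggingMode():
+    ...     _ = torch.ones(2) + 1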
+ + Independent subclasses of :class:`TorchDispatchMode` are compositional: + modes can be pushed onto a stack using ``with MyMode():``. + When you call functions in the PyTorch API inside your + ``__torch_dispatch__`` implementation, by default, they will forward on to + the next mode on the mode stack. If you want recursively call back into + your current ``__torch_dispatch__`` implementation, either explicitly + invoke ``self.__torch_dispatch__(...)``, or use the context manager + ``__torch_dispatch__(self)`` to make PyTorch + API self-referential (beware of infinite loops, in this case!) + """ + + def __init__(self, _dispatch_key=None): + if _dispatch_key is not None: + assert isinstance(_dispatch_key, torch._C.DispatchKey) + self.__dict__['_dispatch_key'] = _dispatch_key + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + raise NotImplementedError() + + def __enter__(self): + _push_mode(self) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + mb_dk_or_mode_key = self.__dict__.get("_dispatch_key", None) + if mb_dk_or_mode_key is None: + # Today, mode keys are not used at all in the per-dispatch-key-mode logic (for pre-dispatch) + # We should probably revisit this. + mb_dk_or_mode_key = self.__dict__.get("_mode_key", None) + _pop_mode(mb_dk_or_mode_key) + + @classmethod + def push(cls, *args, **kwargs): + warnings.warn("`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`") + instance = cls(*args, **kwargs) + return instance + +def _get_current_dispatch_mode(): + stack_len = _len_torch_dispatch_stack() + # Return a user mode on the stack if there are any + if stack_len > 0: + return _get_dispatch_stack_at(stack_len - 1) + return None + + +def _detect_functional_mode(): + from torch._ops import _get_dispatch_mode_pre_dispatch + pre_dispatch_functional_mode = _get_dispatch_mode_pre_dispatch(torch._C._TorchDispatchModeKey.FUNCTIONAL) + post_dispatch_functional_mode = torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FUNCTIONAL) + + assert (pre_dispatch_functional_mode is None) or (post_dispatch_functional_mode is None) + + if pre_dispatch_functional_mode is None: + return post_dispatch_functional_mode + + return pre_dispatch_functional_mode + +def _unset_infra_mode(key): + from torch._ops import unset_mode_pre_dispatch, _get_dispatch_mode_pre_dispatch + pre_dispatch_mode = _get_dispatch_mode_pre_dispatch(key) + post_dispatch_mode = torch._C._get_dispatch_mode(key) + if pre_dispatch_mode and post_dispatch_mode: + raise AssertionError("Can't have active infra mode on both pre and post dispatch mode stack") + + if pre_dispatch_mode: + mode = unset_mode_pre_dispatch(key) + return mode + if post_dispatch_mode: + return torch._C._unset_dispatch_mode(key) + + +def _disable_infra_mode(key): + assert key in (torch._C._TorchDispatchModeKey.FUNCTIONAL, torch._C._TorchDispatchModeKey.PROXY) + mode_unset = _unset_infra_mode(key) + try: + yield mode_unset + finally: + if mode_unset is not None: + _push_mode(mode_unset) + + +def _get_current_dispatch_mode_stack(): + stack_len = _len_torch_dispatch_stack() + return [_get_dispatch_stack_at(i) for i in range(stack_len)] + + +def _push_mode(mode): + k = mode._dispatch_key if hasattr(mode, "_dispatch_key") else None + assert k is None or k == torch._C.DispatchKey.PreDispatch + if k is None: + _push_on_torch_dispatch_stack(mode) + return + + from torch._ops import get_cached_ops, _set_mode_pre_dispatch + # See Note [Not Caching Per-Dispatch-Key Mode Handlers] + # Clear the cache of every 
op that has been used so far, for this particular key. + ks = torch._C._functionality_to_backend_keys(k) + for op in get_cached_ops(): + for key in ks: + op._uncache_dispatch(key) + _set_mode_pre_dispatch(mode) + + +def _pop_mode(k: Optional[Union[DispatchKey, torch._C._TorchDispatchModeKey]] = None): + if k == torch._C.DispatchKey.PreDispatch: # type: ignore[attr-defined] + from torch._ops import _pop_mode_from_pre_dispatch + return _pop_mode_from_pre_dispatch() + + if k is None or isinstance(k, torch._C._TorchDispatchModeKey): + return _pop_torch_dispatch_stack(k) + +@contextlib.contextmanager +def _pop_mode_temporarily(k: Optional[DispatchKey] = None): + old = _pop_mode(k) + try: + yield old + finally: + _push_mode(old) + +@contextlib.contextmanager +def _disable_current_modes(): + from torch._ops import _len_torch_dispatch_stack_pre_dispatch, _pop_mode_from_pre_dispatch + from torch._subclasses.functional_tensor import FunctionalTensorMode + from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode + mode_len_pre_dispatch = _len_torch_dispatch_stack_pre_dispatch() + old_pre_dispatch_modes = [_pop_mode_from_pre_dispatch() for _ in range(mode_len_pre_dispatch)] + + has_proxy_mode_in_pre_dispatch = False + has_functional_mode_in_pre_dispatch = False + + for i in old_pre_dispatch_modes: + if isinstance(i, ProxyTorchDispatchMode): + has_proxy_mode_in_pre_dispatch = True + if isinstance(i, FunctionalTensorMode): + has_functional_mode_in_pre_dispatch = True + + mode_len = _len_torch_dispatch_stack() + old_modes = [_pop_mode() for _ in range(mode_len)] + + for old in old_modes: + if isinstance(old, FunctionalTensorMode) and has_functional_mode_in_pre_dispatch: + raise AssertionError("Can't have FunctionalMode available both in PreDispatch and Python Key") + if isinstance(old, ProxyTorchDispatchMode) and has_proxy_mode_in_pre_dispatch: + raise AssertionError("Can't have ProxyTorchDispatchMode available both in PreDispatch and Python Key") + + # Manually disable proxy and fake modes, if any are active + try: + yield old_pre_dispatch_modes + old_modes + finally: + for mode in reversed(old_modes): + _push_mode(mode) + for mode in reversed(old_pre_dispatch_modes): + _push_mode(mode) + + +class BaseTorchDispatchMode(TorchDispatchMode): + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + return func(*args, **kwargs) + +def is_traceable_wrapper_subclass(t): + """ + Returns whether or not a tensor subclass that implements __torch_dispatch__ + is 'traceable' with torch.compile. + In order for a tensor subclass to support TorchDispatchMode-style tracing in PT2, + It must implement two magic methods: __tensor_flatten__ and __tensor_unflatten__. + It is also expected to obey some restrictions around traceability and aliasing: + * The subclass's __torch_dispatch__() implementation should desugar into pytorch + dispatcher operations that can be traced into a graph. + * The subclass should use return_and_correct_aliasing(). This is needed today to make + sure that torch.compile does the right thing in a few cases around input mutation + and output aliasing. 
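+
+    A bare-bones sketch of the two methods (illustrative only; ``MyWrapper`` is a
+    hypothetical subclass whose constructor is assumed to take the inner tensor)::
+
+        class MyWrapper(torch.Tensor):
+            def __tensor_flatten__(self):
+                # one inner tensor stored on the "elem" attribute, no extra metadata
+                return ["elem"], None
+
+            @staticmethod
+            def __tensor_unflatten__(inner_tensors, ctx, outer_size, outer_stride):
+                return MyWrapper(inner_tensors["elem"])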
+ + Expected magic method signatures: + attrs, ctx = t.__tensor_flatten__() + attrs: list of attribute name strings for inner tensors + ctx: dict containing any other subclass-specific metadata needed for unflattening + + t = MySubClass.__tensor_unflatten__(inner_tensors, ctx, outer_size, outer_stride) + inner_tensors: dict mapping attribute name -> tensor for each inner tensor + ctx: dict with subclass metadata in the form that __tensor_flatten__() produces + outer_size: expected (possibly symbolic) size that the returned subclass + instance should have. Note that this arg is useful for certain subclasses + that require the shape info to be constructed. In most cases, this arg can be + safely ignored. + outer_stride: expected (possibly symbolic) stride that the returned subclass + instance should have. Note that this arg is useful for certain subclasses + that require the stride info to be constructed. In most cases, this arg can be + safely ignored. + """ + is_subclass = isinstance(t, torch.Tensor) and type(t) != torch.Tensor + return is_subclass and hasattr(t, "__tensor_flatten__") and hasattr(t, "__tensor_unflatten__") + +def transform_subclass(t, callback, outer_size=None, outer_stride=None): + """ + Given a traceable, wrapper tensor subclass ``t`` that implements + ``__torch_dispatch__`` and holds some inner tensors, + and a callback of type ``Callable[[str, torch.Tensor], torch.Tensor]``, + `transform_subclass` will construct a fresh instance of the wrapper tensor subclass. + It will do so by grabbing each inner tensor attribute from the wrapper, + passing them into ``callback`` to get a transformed tensor, + and putting each transformed tensor into the fresh tensor subclass instance. + + Note: this function will not handle ensuring that the fresh subclass + gets the same (autograd, and aliasing) metadata as the original tensor. + This is generally handled in other subsystems like AOTAutograd. + """ + outer_size = outer_size if outer_size is not None else t.size() + outer_stride = outer_stride if outer_stride is not None else t.stride() + + attrs, ctx = t.__tensor_flatten__() + transformed_tensors_dict = {} + for attr in attrs: + transformed_tensors_dict[attr] = callback(attr, getattr(t, attr)) + sub = type(t).__tensor_unflatten__( + transformed_tensors_dict, ctx, outer_size, outer_stride + ) + + # NB: Purposefully guard here to simplify the inner / outer symbols. + # Using sym_eq() for symbolic comparison can result in an expression that's too + # difficult to guard on, so we use == here. + assert sub.shape == outer_size, \ + f"Expected return value from {type(t)}__tensor_unflatten__() to have " \ + f"shape equal to {outer_size}, but got: {sub.shape}" + assert sub.stride() == outer_stride, \ + f"Expected return value from {type(t)}__tensor_unflatten__() to have " \ + f"stride equal to {outer_stride}, but got: {sub.stride()}" + + return sub + +def _correct_storage_aliasing(func, schema_info, args, outs): + """ + Given: an OpOverload, a SchemaInfo (cached information from torchgen about schema), + and the inputs/outputs to the OpOverload, + this function checks to see if func is a view operator + (by checking if any of the outputs in the op's schema + are immutable aliases of inputs). + If so, this function manually aliases the storage of the output tensor + with its corresponding input tensor alias. + It does this by unsafely overwriting the storage field of the output tensor + to be the same storage as the input. 
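+
+    For example (illustrative): for a view op such as
+    ``out = torch.ops.aten.view.default(inp, [-1])``, the storage of the returned
+    ``out`` is swapped (via ``aten.set_``) to alias ``inp``'s storage, while
+    ``out``'s sizes and strides are left untouched.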
+ """ + assert isinstance(func, torch._ops.OpOverload) + assert isinstance(args, tuple) + assert isinstance(outs, (list, tuple)) + flat_outs = torch.utils._pytree.tree_leaves(outs) + + def alias_non_inplace_storage(arg, ret): + # This is hopefully a reasonable assert: + # subclasses that rely on this API for output aliasing + # should always return wrapper tensor subclasses for us to manually alias. + # in theory if a subclass that needs this API wants to sometimes return + # plain tensors, we could remove the assert and just not perform the aliasing, + # but it seems safer to learn more about this case first. + if is_traceable_wrapper_subclass(arg) or is_traceable_wrapper_subclass(ret): + ret_list = ret if isinstance(ret, list) else [ret] + for r in ret_list: + assert type(arg) == type(r), f"""Called {str(func)} with input of type {type(arg)} +and output of type {type(ret)}. But expected types to match.""" + # Need to run under no_dispatch, because we explicitly do **not** + # want our subclass to intercept the set_() call. + # instead, our subclass should directly have its storage swapped out. + with torch.utils._mode_utils.no_dispatch(): + # See Note: [Fake Tensor Dispatch Keys] + # we're borrowing the way it modifies dispatch key TLS. + meta_in_tls = torch._C._meta_in_tls_dispatch_include() + torch._C._set_meta_in_tls_dispatch_include(True) + try: + # directly calling this overload, and passing ret.shape, because we **explicitly** + # don't want to reset the sizes on ret, if the storage implies a size change. + # Why? + # The purpose of this API is *not* to change the size/strides of our output- we assume it's already correct. + # We just want to "fix up" the storage aliasing, without modifying or output's metadata. + # Example: out = inp.expand(inp.shape[0], inp.shape[0]) + # This requires swapping the storage of out to be the same as inp, + # but we do *not* want it to change the sizes/strides that were compute for out. + + if isinstance(ret, list): + for r in ret: + torch.ops.aten.set_.source_Storage_storage_offset( + r, arg.untyped_storage(), r.storage_offset(), r.shape, r.stride()) + else: + assert isinstance(ret, torch.Tensor), f"type: {type(ret)}" + torch.ops.aten.set_.source_Storage_storage_offset( + ret, arg.untyped_storage(), ret.storage_offset(), ret.shape, ret.stride() + ) + finally: + torch._C._set_meta_in_tls_dispatch_include(meta_in_tls) + + def is_read_only_alias_match(arg, ret): + shared_aliases = arg.alias_set & ret.alias_set + return len(shared_aliases) > 0 and not arg.is_write + + num_args = len(func._schema.arguments) + num_returns = len(func._schema.returns) + for arg_idx in range(num_args): + for return_idx in range(num_returns): + if is_read_only_alias_match(schema_info.args[arg_idx], schema_info.outs[return_idx]): + alias_non_inplace_storage(args[arg_idx], outs[return_idx]) + +# This abstracts over the fact that in return_and_correct_aliasing, +# we sometimes use torchgen schema parsing (for aten ops, since torchscript's schema parsing is sometimes buggy), +# and sometimes use torchscript schema parsing (for custom ops, for which torchgen parsing is untested). +@dataclass +class AliasInfo: + alias_set: Set[str] + is_write: bool + name: Optional[str] + +@dataclass +class SchemaInfo: + args: List[AliasInfo] + outs: List[AliasInfo] + +# Can't import torch._ops.OpOverload due to circular reference +parsed_schema_map: Dict[Any, SchemaInfo] = {} + +# Given an OpOverload, returns schema information on it. 
+# This is cached for efficiency, since it can involve running torchgen +def get_alias_info(func) -> SchemaInfo: + if func in parsed_schema_map: + return parsed_schema_map[func] + # For ATen ops: use torchgen (since torchscript parser doesn't handle alias annotations + # properly for some ops that output tensorlists) + if func.namespace == "aten": + torchgen_schema_str = str(func._schema) + assert torchgen_schema_str.startswith("aten::") + # remove the aten:: namespace, which is added by the torchscript parser, + # and torchgen doesn't know how to handle + torchgen_schema_str = torchgen_schema_str[6:] + import re + # the torchscript parser ends up converting int[2]=1 into int[2]=[1, 1], + # which torchgen chokes on. + torchgen_schema_str = re.sub(r'=\[[0, ]+\]', '=0', torchgen_schema_str) + torchgen_schema_str = re.sub(r'=\[[1, ]+\]', '=1', torchgen_schema_str) + # for aten::rot90 + torchgen_schema_str = torchgen_schema_str.replace("=[0, 1]", "=[0,1]") + torchgen_schema = torchgen.model.FunctionSchema.parse(torchgen_schema_str) + arg_schemas = [AliasInfo( + alias_set=set() if a.annotation is None else set(a.annotation.alias_set), + is_write=a.annotation is not None and a.annotation.is_write, + name=a.name, + ) for a in torchgen_schema.arguments.flat_all] + out_schemas = [AliasInfo( + alias_set=set() if a.annotation is None else set(a.annotation.alias_set), + is_write=a.annotation is not None and a.annotation.is_write, + name=a.name, + ) for a in torchgen_schema.returns] + else: + # For non-aten ops, torchgen is untested so we rely on torchscript schema parsing + arg_schemas = [AliasInfo( + alias_set=set() if a.alias_info is None else set(a.alias_info.before_set), + is_write=a.alias_info is not None and a.alias_info.is_write, + name=a.name, + ) for a in func._schema.arguments] + out_schemas = [AliasInfo( + alias_set=set() if a.alias_info is None else set(a.alias_info.before_set), + is_write=a.alias_info is not None and a.alias_info.is_write, + name=a.name, + ) for a in func._schema.returns] + schema_info = SchemaInfo(args=arg_schemas, outs=out_schemas) + parsed_schema_map[func] = schema_info + return schema_info + +def return_and_correct_aliasing(func, args, kwargs, out): + """ + This function should be used by wrapper tensor ``__torch_dispatch__`` subclasses + that would like to work with torch.compile. It ensures that the subclass + properly implements the aliasing behavior of every op, + which is needed for correctness in AOTAutograd. + This function will handle: + + * When we see a view op, we will alias the storages of any + input and output tensor subclasses + + * When we see an inplace or out= op, we will directly + return the corresponding input tensor, instead of returning + a (potentially) fresh output tensor. + """ + + # Caching here because torchgen parsing is definitely not fast, and this function is called + # once for every op in the graph during functionalization. 
+ schema_info = get_alias_info(func) + + def get_write_alias(x): + if len(x.alias_set) == 0: + return None + alias_set = list(x.alias_set) + # torchscript allows for complicated alias sets, but our dispatcher ops only really involve simple aliasing + assert len(alias_set) == 1 + if x.is_write: + return alias_set[0] + return None + + def get_arg_from_alias(output_alias, schema_info, args, kwargs): + new_args, new_kwargs = torch.fx.operator_schemas.normalize_function(func, args=args, kwargs=kwargs) + + arg_indices = [ + i for i, a in enumerate(schema_info.args) + if output_alias in a.alias_set + ] + # For any dispatcher op with an output alias, we expect it to map to exactly one alias in the schema's input arguments. + assert len(arg_indices) == 1 + idx = arg_indices[0] + arg_info = schema_info.args[idx] + if arg_info.name is not None and arg_info.name in new_kwargs: + return new_kwargs[arg_info.name] + return new_args[idx] + + # Fix up the storages of any outs so that they point to the same storage as the input, + # if func is a view op. + _correct_storage_aliasing(func, schema_info, args, (out,) if not isinstance(out, tuple) else out) + + # For inplace_view ops in particular, we'll try hard to make sure that the wrapper subclass's + # metadata is set correctly. + if torch.Tag.inplace_view in func.tags: + # no_dispatch() to make sure that we secretly change the metadata on the wrapper, + # but don't end up dispatching the op anywhere else. + mutated_args = [x for i, x in enumerate(args) if get_write_alias(schema_info.args[i]) is not None] + # Assumption: we have a very small number of inplace_view ops that follow a strict schema: + # there is only a single argument that gets its metadata mutated. + assert len(mutated_args) == 1 + # This check exists because we generally *do* want to update the metadata of any wrapper subclasses, + # but FunctionalTensor is special: it overrides all size/stride calls to plumb to the inner tensor. + # so we don't actually need to update the metadata (and attempting to do so causes errors) + from torch._subclasses.functional_tensor import FunctionalTensor + if not isinstance(mutated_args[0], FunctionalTensor): + with torch.utils._mode_utils.no_dispatch(): + # See Note: [Fake Tensor Dispatch Keys] + # we're borrowing the way it modifies dispatch key TLS. + meta_in_tls = torch._C._meta_in_tls_dispatch_include() + torch._C._set_meta_in_tls_dispatch_include(True) + try: + func(*args, **kwargs) + finally: + torch._C._set_meta_in_tls_dispatch_include(meta_in_tls) + + # Next: we need to make sure to return inputs directly, if the output is a mutable alias (e.g. add_()). + + # simple case: none of our outputs have mutable aliases, so we can return the output as-is + if not any(get_write_alias(r) is not None for r in schema_info.outs): + return out + + # simplifying assumption: we don't have **any** ops with return types like "-> (Tensor(a!), Tensor)" + if not all(get_write_alias(r) is not None for r in schema_info.outs): + raise RuntimeError("Unsupported schema: " + str(func._schema)) + + if len(func._schema.returns) == 1: + return get_arg_from_alias(get_write_alias(schema_info.outs[0]), schema_info, args, kwargs) + + # In the multi-return case, all aten ops return a tuple / list, so cast accordingly. 
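+    # (Illustrative: for an op like ``aten.aminmax.out``, both schema returns are
+    #  write-aliases of the ``min``/``max`` out= arguments, so the very tensors the
+    #  caller passed in are returned here.)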
+ outs_to_return = type(out)([ + get_arg_from_alias(get_write_alias(schema_info.outs[i]), schema_info, args, kwargs) + if get_write_alias(r) is not None else o + for ((i, r), o) in zip(enumerate(schema_info.outs), out) + ]) + return outs_to_return diff --git a/venv/lib/python3.10/site-packages/torch/utils/_pytree.py b/venv/lib/python3.10/site-packages/torch/utils/_pytree.py new file mode 100644 index 0000000000000000000000000000000000000000..861e8875d4bdc44c06c62963b3bd172d62d77b69 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_pytree.py @@ -0,0 +1,1550 @@ +""" +Contains utility functions for working with nested python data structures. + +A *pytree* is Python nested data structure. It is a tree in the sense that +nodes are Python collections (e.g., list, tuple, dict) and the leaves are +Python values. Furthermore, a pytree should not contain reference cycles. + +pytrees are useful for working with nested collections of Tensors. For example, +one can use `tree_map` to map a function over all Tensors inside some nested +collection of Tensors and `tree_leaves` to get a flat list of all Tensors +inside some nested collection. pytrees are helpful for implementing nested +collection support for PyTorch APIs. + +This pytree implementation is not very performant due to Python overhead +To improve the performance we can move parts of the implementation to C++. +""" + +import dataclasses +import importlib +import json +import sys +import threading +import types +import warnings +from collections import defaultdict, deque, namedtuple, OrderedDict +from typing import ( + Any, + Callable, + cast, + DefaultDict, + Deque, + Dict, + FrozenSet, + Generic, + Hashable, + Iterable, + List, + Mapping, + NamedTuple, + Optional, + OrderedDict as GenericOrderedDict, + overload, + Protocol, + Sequence, + Tuple, + Type, + TypeVar, + Union, +) + + +__all__ = [ + "PyTree", + "Context", + "FlattenFunc", + "UnflattenFunc", + "DumpableContext", + "ToDumpableContextFn", + "FromDumpableContextFn", + "TreeSpec", + "LeafSpec", + "keystr", + "key_get", + "register_pytree_node", + "tree_flatten", + "tree_flatten_with_path", + "tree_unflatten", + "tree_leaves", + "tree_leaves_with_path", + "tree_structure", + "tree_map", + "tree_map_with_path", + "tree_map_", + "tree_map_only", + "tree_map_only_", + "tree_all", + "tree_any", + "tree_all_only", + "tree_any_only", + "treespec_dumps", + "treespec_loads", + "treespec_pprint", +] + + +T = TypeVar("T") +S = TypeVar("S") +U = TypeVar("U") +R = TypeVar("R") + + +DEFAULT_TREESPEC_SERIALIZATION_PROTOCOL = 1 +NO_SERIALIZED_TYPE_NAME_FOUND = "NO_SERIALIZED_TYPE_NAME_FOUND" + + +class KeyEntry(Protocol): + def __hash__(self) -> int: + ... + + def __eq__(self, other: object) -> bool: + ... + + def __str__(self) -> str: + ... + + def get(self, parent: Any) -> Any: + ... + + +Context = Any +PyTree = Any +FlattenFunc = Callable[[PyTree], Tuple[List[Any], Context]] +UnflattenFunc = Callable[[Iterable[Any], Context], PyTree] +DumpableContext = Any # Any json dumpable text +ToDumpableContextFn = Callable[[Context], DumpableContext] +FromDumpableContextFn = Callable[[DumpableContext], Context] +ToStrFunc = Callable[["TreeSpec", List[str]], str] +MaybeFromStrFunc = Callable[[str], Optional[Tuple[Any, Context, str]]] +KeyPath = Tuple[KeyEntry, ...] +FlattenWithKeysFunc = Callable[[PyTree], Tuple[List[Tuple[KeyEntry, Any]], Any]] + + +# A NodeDef holds two callables: +# - flatten_fn should take the collection and return a flat list of values. 
+# It can also return some context that is used in reconstructing the +# collection. +# - unflatten_fn should take a flat list of values and some context +# (returned by flatten_fn). It returns the collection by reconstructing +# it from the list and the context. +# - flatten_with_keys_fn, which is a callable that takes a +# pytree and returns a list of (keypath, value) pairs and a context. +class NodeDef(NamedTuple): + type: Type[Any] + flatten_fn: FlattenFunc + unflatten_fn: UnflattenFunc + flatten_with_keys_fn: Optional[FlattenWithKeysFunc] + + +_NODE_REGISTRY_LOCK = threading.Lock() +SUPPORTED_NODES: Dict[Type[Any], NodeDef] = {} + + +# _SerializeNodeDef holds the following: +# - typ: the type of the node (e.g., "Dict", "List", etc) +# - serialized_type_name: the fully qualified name of the type, e.g. "collections.OrderedDict" +# - to_dumpable_context takes a TreeSpec, and returns a serialized string format of the +# context, and the version number +# - from_dumpable_context takes in a string representation of the context, and the +# version, and returns the deserialized context +class _SerializeNodeDef(NamedTuple): + typ: Type[Any] + serialized_type_name: str + to_dumpable_context: Optional[ToDumpableContextFn] + from_dumpable_context: Optional[FromDumpableContextFn] + + +SUPPORTED_SERIALIZED_TYPES: Dict[Type[Any], _SerializeNodeDef] = {} +SERIALIZED_TYPE_TO_PYTHON_TYPE: Dict[str, Type[Any]] = {} + + +def register_pytree_node( + cls: Type[Any], + flatten_fn: FlattenFunc, + unflatten_fn: UnflattenFunc, + *, + serialized_type_name: Optional[str] = None, + to_dumpable_context: Optional[ToDumpableContextFn] = None, + from_dumpable_context: Optional[FromDumpableContextFn] = None, + flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None, +) -> None: + """Register a container-like type as pytree node. + + Args: + cls: the type to register + flatten_fn: A callable that takes a pytree and returns a flattened + representation of the pytree and additional context to represent the + flattened pytree. + unflatten_fn: A callable that takes a flattened version of the pytree, + additional context, and returns an unflattened pytree. + serialized_type_name: A keyword argument used to specify the fully qualified + name used when serializing the tree spec. + to_dumpable_context: An optional keyword argument to custom specify how + to convert the context of the pytree to a custom json dumpable + representation. This is used for json serialization, which is being + used in torch.export right now. + from_dumpable_context: An optional keyword argument to custom specify how + to convert the custom json dumpable representation of the context + back to the original context. This is used for json deserialization, + which is being used in torch.export right now. + flatten_with_keys_fn: An optional keyword argument to specify how to + access each pytree leaf's keypath when flattening and tree-mapping. + Like ``flatten_fn``, but in place of a List[leaf], it should return + a List[(keypath, leaf)]. + """ + with _NODE_REGISTRY_LOCK: + if cls in SUPPORTED_NODES: + raise ValueError(f"{cls} is already registered as pytree node.") + + _private_register_pytree_node( + cls, + flatten_fn, + unflatten_fn, + serialized_type_name=serialized_type_name, + to_dumpable_context=to_dumpable_context, + from_dumpable_context=from_dumpable_context, + flatten_with_keys_fn=flatten_with_keys_fn, + ) + + try: + from . 
import _cxx_pytree as cxx + except ImportError: + pass + else: + cxx._private_register_pytree_node( + cls, + flatten_fn, + unflatten_fn, + serialized_type_name=serialized_type_name, + to_dumpable_context=to_dumpable_context, + from_dumpable_context=from_dumpable_context, + ) + + +def _register_pytree_node( + cls: Type[Any], + flatten_fn: FlattenFunc, + unflatten_fn: UnflattenFunc, + to_str_fn: Optional[ToStrFunc] = None, # deprecated + maybe_from_str_fn: Optional[MaybeFromStrFunc] = None, # deprecated + *, + serialized_type_name: Optional[str] = None, + to_dumpable_context: Optional[ToDumpableContextFn] = None, + from_dumpable_context: Optional[FromDumpableContextFn] = None, + flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None, +) -> None: + """Register a container-like type as pytree node for the Python pytree only. + + Args: + cls: the type to register + flatten_fn: A callable that takes a pytree and returns a flattened + representation of the pytree and additional context to represent the + flattened pytree. + unflatten_fn: A callable that takes a flattened version of the pytree, + additional context, and returns an unflattened pytree. + serialized_type_name: A keyword argument used to specify the fully qualified + name used when serializing the tree spec. + to_dumpable_context: An optional keyword argument to custom specify how + to convert the context of the pytree to a custom json dumpable + representation. This is used for json serialization, which is being + used in torch.export right now. + from_dumpable_context: An optional keyword argument to custom specify how + to convert the custom json dumpable representation of the context + back to the original context. This is used for json deserialization, + which is being used in torch.export right now. + flatten_with_keys_fn: An optional keyword argument to specify how to + access each pytree leaf's keypath when flattening and tree-mapping. + Like ``flatten_fn``, but in place of a List[leaf], it should return + a List[(keypath, leaf)]. + """ + warnings.warn( + "torch.utils._pytree._register_pytree_node is deprecated. " + "Please use torch.utils._pytree.register_pytree_node instead.", + stacklevel=2, + ) + + if to_str_fn is not None or maybe_from_str_fn is not None: + warnings.warn( + "to_str_fn and maybe_from_str_fn is deprecated. " + "Please use to_dumpable_context and from_dumpable_context instead." + ) + + _private_register_pytree_node( + cls, + flatten_fn, + unflatten_fn, + serialized_type_name=serialized_type_name, + to_dumpable_context=to_dumpable_context, + from_dumpable_context=from_dumpable_context, + flatten_with_keys_fn=flatten_with_keys_fn, + ) + + +def _private_register_pytree_node( + cls: Type[Any], + flatten_fn: FlattenFunc, + unflatten_fn: UnflattenFunc, + *, + serialized_type_name: Optional[str] = None, + to_dumpable_context: Optional[ToDumpableContextFn] = None, + from_dumpable_context: Optional[FromDumpableContextFn] = None, + flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None, +) -> None: + """This is an internal function that is used to register a pytree node type + for the Python pytree only. End-users should use :func:`register_pytree_node` + instead. + """ + with _NODE_REGISTRY_LOCK: + if cls in SUPPORTED_NODES: + # TODO: change this warning to an error after OSS/internal stabilize + warnings.warn( + f"{cls} is already registered as pytree node. 
" + "Overwriting the previous registration.", + ) + + node_def = NodeDef(cls, flatten_fn, unflatten_fn, flatten_with_keys_fn) + SUPPORTED_NODES[cls] = node_def + + if (to_dumpable_context is None) ^ (from_dumpable_context is None): + raise ValueError( + f"Both to_dumpable_context and from_dumpable_context for {cls} must " + "be None or registered." + ) + + if serialized_type_name is None: + serialized_type_name = NO_SERIALIZED_TYPE_NAME_FOUND + + serialize_node_def = _SerializeNodeDef( + cls, + serialized_type_name, + to_dumpable_context, + from_dumpable_context, + ) + SUPPORTED_SERIALIZED_TYPES[cls] = serialize_node_def + SERIALIZED_TYPE_TO_PYTHON_TYPE[serialized_type_name] = cls + + +@dataclasses.dataclass(frozen=True) +class SequenceKey(Generic[T]): + idx: int + + def __str__(self) -> str: + return f"[{self.idx!r}]" + + def get(self, sequence: Sequence[T]) -> T: + return sequence[self.idx] + + +K = TypeVar("K", bound=Hashable) + + +@dataclasses.dataclass(frozen=True) +class MappingKey(Generic[K, T]): + key: K + + def __str__(self) -> str: + return f"[{self.key!r}]" + + def get(self, mapping: Mapping[K, T]) -> T: + return mapping[self.key] + + +@dataclasses.dataclass(frozen=True) +class GetAttrKey: + name: str + + def __str__(self) -> str: + return f".{self.name}" + + def get(self, obj: Any) -> Any: + return getattr(obj, self.name) + + +def _tuple_flatten(d: Tuple[Any, ...]) -> Tuple[List[Any], Context]: + return list(d), None + + +def _tuple_flatten_with_keys( + d: Tuple[Any, ...] +) -> Tuple[List[Tuple[KeyEntry, Any]], Context]: + values, context = _tuple_flatten(d) + return [(SequenceKey(i), v) for i, v in enumerate(values)], context + + +def _tuple_unflatten(values: Iterable[Any], context: Context) -> Tuple[Any, ...]: + return tuple(values) + + +def _list_flatten(d: List[Any]) -> Tuple[List[Any], Context]: + return d, None + + +def _list_flatten_with_keys(d: List[Any]) -> Tuple[List[Tuple[KeyEntry, Any]], Context]: + values, context = _list_flatten(d) + return [(SequenceKey(i), v) for i, v in enumerate(values)], context + + +def _list_unflatten(values: Iterable[Any], context: Context) -> List[Any]: + return list(values) + + +def _dict_flatten(d: Dict[Any, Any]) -> Tuple[List[Any], Context]: + return list(d.values()), list(d.keys()) + + +def _dict_flatten_with_keys( + d: Dict[Any, Any] +) -> Tuple[List[Tuple[KeyEntry, Any]], Context]: + values, context = _dict_flatten(d) + return [(MappingKey(k), v) for k, v in zip(context, values)], context + + +def _dict_unflatten(values: Iterable[Any], context: Context) -> Dict[Any, Any]: + return dict(zip(context, values)) + + +def _namedtuple_flatten(d: NamedTuple) -> Tuple[List[Any], Context]: + return list(d), type(d) + + +def _namedtuple_flatten_with_keys( + d: NamedTuple, +) -> Tuple[List[Tuple[KeyEntry, Any]], Context]: + values, context = _namedtuple_flatten(d) + return ( + [(GetAttrKey(field), v) for field, v in zip(context._fields, values)], + context, + ) + + +def _namedtuple_unflatten(values: Iterable[Any], context: Context) -> NamedTuple: + return cast(NamedTuple, context(*values)) + + +def _namedtuple_serialize(context: Context) -> DumpableContext: + json_namedtuple = { + "class_name": context.__name__, + "fields": context._fields, + } + return json_namedtuple + + +def _namedtuple_deserialize(dumpable_context: DumpableContext) -> Context: + class_name = dumpable_context["class_name"] + assert isinstance(class_name, str) + context = namedtuple(class_name, dumpable_context["fields"]) # type: ignore[misc] + return context + + +def 
_ordereddict_flatten(d: GenericOrderedDict[Any, Any]) -> Tuple[List[Any], Context]: + return list(d.values()), list(d.keys()) + + +def _ordereddict_flatten_with_keys( + d: GenericOrderedDict[Any, Any] +) -> Tuple[List[Tuple[KeyEntry, Any]], Context]: + values, context = _ordereddict_flatten(d) + return [(MappingKey(k), v) for k, v in zip(context, values)], context + + +def _ordereddict_unflatten( + values: Iterable[Any], + context: Context, +) -> GenericOrderedDict[Any, Any]: + return OrderedDict((key, value) for key, value in zip(context, values)) + + +_odict_flatten = _ordereddict_flatten +_odict_unflatten = _ordereddict_unflatten + + +def _defaultdict_flatten(d: DefaultDict[Any, Any]) -> Tuple[List[Any], Context]: + values, dict_context = _dict_flatten(d) + return values, [d.default_factory, dict_context] + + +def _defaultdict_flatten_with_keys( + d: DefaultDict[Any, Any] +) -> Tuple[List[Tuple[KeyEntry, Any]], Context]: + values, context = _defaultdict_flatten(d) + _, dict_context = context + return [(MappingKey(k), v) for k, v in zip(dict_context, values)], context + + +def _defaultdict_unflatten( + values: Iterable[Any], + context: Context, +) -> DefaultDict[Any, Any]: + default_factory, dict_context = context + return defaultdict(default_factory, _dict_unflatten(values, dict_context)) + + +def _defaultdict_serialize(context: Context) -> DumpableContext: + default_factory, dict_context = context + json_defaultdict = { + "default_factory_module": default_factory.__module__, + "default_factory_name": default_factory.__qualname__, + "dict_context": dict_context, + } + return json_defaultdict + + +def _defaultdict_deserialize(dumpable_context: DumpableContext) -> Context: + assert isinstance(dumpable_context, dict) + assert set(dumpable_context) == { + "default_factory_module", + "default_factory_name", + "dict_context", + } + + default_factory_module = dumpable_context["default_factory_module"] + default_factory_name = dumpable_context["default_factory_name"] + assert isinstance(default_factory_module, str) + assert isinstance(default_factory_name, str) + module = importlib.import_module(default_factory_module) + default_factory = getattr(module, default_factory_name) + + dict_context = dumpable_context["dict_context"] + return [default_factory, dict_context] + + +def _deque_flatten(d: Deque[Any]) -> Tuple[List[Any], Context]: + return list(d), d.maxlen + + +def _deque_flatten_with_keys( + d: Deque[Any], +) -> Tuple[List[Tuple[KeyEntry, Any]], Context]: + values, context = _deque_flatten(d) + return [(SequenceKey(i), v) for i, v in enumerate(values)], context + + +def _deque_unflatten(values: Iterable[Any], context: Context) -> Deque[Any]: + return deque(values, maxlen=context) + + +_private_register_pytree_node( + tuple, + _tuple_flatten, + _tuple_unflatten, + serialized_type_name="builtins.tuple", + flatten_with_keys_fn=_tuple_flatten_with_keys, +) +_private_register_pytree_node( + list, + _list_flatten, + _list_unflatten, + serialized_type_name="builtins.list", + flatten_with_keys_fn=_list_flatten_with_keys, +) +_private_register_pytree_node( + dict, + _dict_flatten, + _dict_unflatten, + serialized_type_name="builtins.dict", + flatten_with_keys_fn=_dict_flatten_with_keys, +) +_private_register_pytree_node( + namedtuple, # type: ignore[arg-type] + _namedtuple_flatten, + _namedtuple_unflatten, + serialized_type_name="collections.namedtuple", + to_dumpable_context=_namedtuple_serialize, + from_dumpable_context=_namedtuple_deserialize, + 
flatten_with_keys_fn=_namedtuple_flatten_with_keys, +) +_private_register_pytree_node( + OrderedDict, + _ordereddict_flatten, + _ordereddict_unflatten, + serialized_type_name="collections.OrderedDict", + flatten_with_keys_fn=_ordereddict_flatten_with_keys, +) +_private_register_pytree_node( + defaultdict, + _defaultdict_flatten, + _defaultdict_unflatten, + serialized_type_name="collections.defaultdict", + to_dumpable_context=_defaultdict_serialize, + from_dumpable_context=_defaultdict_deserialize, + flatten_with_keys_fn=_defaultdict_flatten_with_keys, +) +_private_register_pytree_node( + deque, + _deque_flatten, + _deque_unflatten, + serialized_type_name="collections.deque", + flatten_with_keys_fn=_deque_flatten_with_keys, +) + + +STANDARD_DICT_TYPES: FrozenSet[type] = frozenset( + {dict, OrderedDict, defaultdict}, +) +BUILTIN_TYPES: FrozenSet[type] = frozenset( + {tuple, list, dict, namedtuple, OrderedDict, defaultdict, deque}, # type: ignore[arg-type] +) + + +# h/t https://stackoverflow.com/questions/2166818/how-to-check-if-an-object-is-an-instance-of-a-namedtuple +def _is_namedtuple_instance(tree: Any) -> bool: + typ = type(tree) + bases = typ.__bases__ + if len(bases) != 1 or bases[0] != tuple: + return False + fields = getattr(typ, "_fields", None) + if not isinstance(fields, tuple): + return False + return all(type(entry) == str for entry in fields) + + +def _get_node_type(tree: Any) -> Any: + if _is_namedtuple_instance(tree): + return namedtuple + return type(tree) + + +# A leaf is defined as anything that is not a Node. +def _is_leaf(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]] = None) -> bool: + return (is_leaf is not None and is_leaf(tree)) or _get_node_type( + tree + ) not in SUPPORTED_NODES + + +# A TreeSpec represents the structure of a pytree. 
It holds: +# "type": the type of root Node of the pytree +# context: some context that is useful in unflattening the pytree +# children_specs: specs for each child of the root Node +# num_leaves: the number of leaves +@dataclasses.dataclass +class TreeSpec: + type: Any + context: Context + children_specs: List["TreeSpec"] + + num_nodes: int = dataclasses.field(init=False) + num_leaves: int = dataclasses.field(init=False) + num_children: int = dataclasses.field(init=False) + + def __post_init__(self) -> None: + self.num_nodes = 1 + sum(spec.num_nodes for spec in self.children_specs) + self.num_leaves = sum(spec.num_leaves for spec in self.children_specs) + self.num_children = len(self.children_specs) + + def __repr__(self, indent: int = 0) -> str: + repr_prefix: str = f"TreeSpec({self.type.__name__}, {self.context}, [" + children_specs_str: str = "" + if self.num_children > 0: + indent += 2 + children_specs_str += self.children_specs[0].__repr__(indent) + children_specs_str += "," if self.num_children > 1 else "" + children_specs_str += ",".join( + [ + "\n" + " " * indent + child.__repr__(indent) + for child in self.children_specs[1:] + ] + ) + repr_suffix: str = f"{children_specs_str}])" + return repr_prefix + repr_suffix + + def is_leaf(self) -> bool: + return self.num_nodes == 1 and self.num_leaves == 1 + + def _flatten_up_to_helper(self, tree: PyTree, subtrees: List[PyTree]) -> None: + if self.is_leaf(): + subtrees.append(tree) + return + + node_type = _get_node_type(tree) + if self.type not in BUILTIN_TYPES: + # Always require custom node types to match exactly + if node_type != self.type: + raise ValueError( + f"Type mismatch; " + f"expected {self.type!r}, but got {node_type!r}.", + ) + flatten_fn = SUPPORTED_NODES[node_type].flatten_fn + child_pytrees, context = flatten_fn(tree) + if len(child_pytrees) != self.num_children: + raise ValueError( + f"Node arity mismatch; " + f"expected {self.num_children}, but got {len(child_pytrees)}.", + ) + if context != self.context: + raise ValueError( + f"Node context mismatch for custom node type {self.type!r}.", + ) + else: + # For builtin dictionary types, we allow some flexibility + # Otherwise, we require exact matches + both_standard_dict = ( + self.type in STANDARD_DICT_TYPES and node_type in STANDARD_DICT_TYPES + ) + if node_type != self.type and not both_standard_dict: + raise ValueError( + f"Node type mismatch; " + f"expected {self.type!r}, but got {node_type!r}.", + ) + if len(tree) != self.num_children: + raise ValueError( + f"Node arity mismatch; " + f"expected {self.num_children}, but got {len(tree)}.", + ) + + if both_standard_dict: # dictionary types are compatible with each other + dict_context = ( + self.context + if self.type is not defaultdict + # ignore mismatch of `default_factory` for defaultdict + else self.context[1] + ) + expected_keys = dict_context + got_key_set = set(tree) + expected_key_set = set(expected_keys) + if got_key_set != expected_key_set: + missing_keys = expected_key_set.difference(got_key_set) + extra_keys = got_key_set.difference(expected_key_set) + message = "" + if missing_keys: + message += f"; missing key(s): {missing_keys}" + if extra_keys: + message += f"; extra key(s): {extra_keys}" + raise ValueError(f"Node keys mismatch{message}.") + child_pytrees = [tree[key] for key in expected_keys] + else: + flatten_fn = SUPPORTED_NODES[node_type].flatten_fn + child_pytrees, context = flatten_fn(tree) + if ( + context != self.context + and self.type is not deque # ignore mismatch of `maxlen` for deque + ): + 
raise ValueError( + f"Node context mismatch for node type {self.type!r}; " + f"expected {self.context!r}, but got {context!r}.", # namedtuple type mismatch + ) + + for child_pytree, child_spec in zip(child_pytrees, self.children_specs): + child_spec._flatten_up_to_helper(child_pytree, subtrees) + + def flatten_up_to(self, tree: PyTree) -> List[PyTree]: + subtrees: List[PyTree] = [] + self._flatten_up_to_helper(tree, subtrees) + return subtrees + + def unflatten(self, leaves: Iterable[Any]) -> PyTree: + if not isinstance(leaves, (list, tuple)): + leaves = list(leaves) + if len(leaves) != self.num_leaves: + raise ValueError( + f"treespec.unflatten(leaves): `leaves` has length {len(leaves)} " + f"but the spec refers to a pytree that holds {self.num_leaves} " + f"items ({self}).", + ) + if self.is_leaf(): + return leaves[0] + + unflatten_fn = SUPPORTED_NODES[self.type].unflatten_fn + + # Recursively unflatten the children + start = 0 + end = 0 + child_pytrees = [] + for child_spec in self.children_specs: + end += child_spec.num_leaves + child_pytrees.append(child_spec.unflatten(leaves[start:end])) + start = end + + return unflatten_fn(child_pytrees, self.context) + + +class LeafSpec(TreeSpec): + def __init__(self) -> None: + super().__init__(None, None, []) + + def __post_init__(self) -> None: + self.num_nodes = 1 + self.num_leaves = 1 + self.num_children = 0 + + def __repr__(self, indent: int = 0) -> str: + return "*" + + +# All leaves are equivalent, so represent with a single object to save on +# object construction time +_LEAF_SPEC = LeafSpec() + + +def _tree_flatten_helper( + tree: PyTree, + leaves: List[Any], + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> TreeSpec: + if _is_leaf(tree, is_leaf=is_leaf): + leaves.append(tree) + return _LEAF_SPEC + + node_type = _get_node_type(tree) + flatten_fn = SUPPORTED_NODES[node_type].flatten_fn + child_pytrees, context = flatten_fn(tree) + + # Recursively flatten the children + children_specs = [ + _tree_flatten_helper(child, leaves, is_leaf=is_leaf) for child in child_pytrees + ] + + return TreeSpec(node_type, context, children_specs) + + +def tree_flatten( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> Tuple[List[Any], TreeSpec]: + """Flattens a pytree into a list of values and a TreeSpec that can be used + to reconstruct the pytree. + """ + leaves: List[Any] = [] + spec = _tree_flatten_helper(tree, leaves, is_leaf=is_leaf) + return leaves, spec + + +def tree_unflatten(leaves: Iterable[Any], treespec: TreeSpec) -> PyTree: + """Given a list of values and a TreeSpec, builds a pytree. + This is the inverse operation of `tree_flatten`. 
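+
+    A small round-trip example:
+
+    >>> leaves, spec = tree_flatten({'x': 1, 'y': (2, 3)})
+    >>> leaves
+    [1, 2, 3]
+    >>> tree_unflatten(leaves, spec)
+    {'x': 1, 'y': (2, 3)}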
+ """ + if not isinstance(treespec, TreeSpec): + raise TypeError( + f"tree_unflatten(leaves, treespec): Expected `treespec` to be " + f"instance of TreeSpec but got item of type {type(treespec)}.", + ) + return treespec.unflatten(leaves) + + +def _tree_leaves_helper( + tree: PyTree, + leaves: List[Any], + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> None: + if _is_leaf(tree, is_leaf=is_leaf): + leaves.append(tree) + return + + node_type = _get_node_type(tree) + flatten_fn = SUPPORTED_NODES[node_type].flatten_fn + child_pytrees, _ = flatten_fn(tree) + + # Recursively flatten the children + for child in child_pytrees: + _tree_leaves_helper(child, leaves, is_leaf=is_leaf) + + +def tree_leaves( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> List[Any]: + """Get a list of leaves of a pytree.""" + leaves: List[Any] = [] + _tree_leaves_helper(tree, leaves, is_leaf=is_leaf) + return leaves + + +def tree_structure( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> TreeSpec: + """Get the TreeSpec for a pytree.""" + return tree_flatten(tree, is_leaf=is_leaf)[1] + + +def tree_map( + func: Callable[..., Any], + tree: PyTree, + *rests: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + """Map a multi-input function over pytree args to produce a new pytree. + + See also :func:`tree_map_`. + + >>> tree_map(lambda x: x + 1, {'x': 7, 'y': (42, 64)}) + {'x': 8, 'y': (43, 65)} + >>> tree_map(lambda x: x is None, {'x': 7, 'y': (42, 64), 'z': None}) + {'x': False, 'y': (False, False), 'z': True} + + If multiple inputs are given, the structure of the tree is taken from the first input; + subsequent inputs need only have ``tree`` as a prefix: + + >>> tree_map(lambda x, y: [x] + y, [5, 6], [[7, 9], [1, 2]]) + [[5, 7, 9], [6, 1, 2]] + + Args: + func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the + corresponding leaves of the pytrees. + tree (pytree): A pytree to be mapped over, with each leaf providing the first positional + argument to function ``func``. + rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as + ``tree`` or has ``tree`` as a prefix. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + A new pytree with the same structure as ``tree`` but with the value at each leaf given by + ``func(x, *xs)`` where ``x`` is the value at the corresponding leaf in ``tree`` and ``xs`` + is the tuple of values at corresponding nodes in ``rests``. + """ + leaves, treespec = tree_flatten(tree, is_leaf=is_leaf) + flat_args = [leaves] + [treespec.flatten_up_to(r) for r in rests] + return treespec.unflatten(map(func, *flat_args)) + + +def tree_map_( + func: Callable[..., Any], + tree: PyTree, + *rests: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + """Like :func:`tree_map`, but do an inplace call on each leaf and return the original tree. + + See also :func:`tree_map`. + + Args: + func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the + corresponding leaves of the pytrees. 
+ tree (pytree): A pytree to be mapped over, with each leaf providing the first positional + argument to function ``func``. + rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as + ``tree`` or has ``tree`` as a prefix. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + The original ``tree`` with the value at each leaf is given by the side-effect of function + ``func(x, *xs)`` (not the return value) where ``x`` is the value at the corresponding leaf + in ``tree`` and ``xs`` is the tuple of values at values at corresponding nodes in ``rests``. + """ + leaves, treespec = tree_flatten(tree, is_leaf=is_leaf) + flat_args = [leaves] + [treespec.flatten_up_to(r) for r in rests] + tuple(map(func, *flat_args)) # consume and exhaust the iterable + return tree + + +Type2 = Tuple[Type[T], Type[S]] +Type3 = Tuple[Type[T], Type[S], Type[U]] +if sys.version_info >= (3, 10): + TypeAny = Union[Type[Any], Tuple[Type[Any], ...], types.UnionType] +else: + TypeAny = Union[Type[Any], Tuple[Type[Any], ...]] + +Fn2 = Callable[[Union[T, S]], R] +Fn3 = Callable[[Union[T, S, U]], R] +Fn = Callable[[T], R] +FnAny = Callable[[Any], R] + +MapOnlyFn = Callable[[T], Callable[[Any], Any]] + + +# These specializations help with type inference on the lambda passed to this +# function +@overload +def map_only(__type_or_types_or_pred: Type2[T, S]) -> MapOnlyFn[Fn2[T, S, Any]]: + ... + + +@overload +def map_only(__type_or_types_or_pred: Type3[T, S, U]) -> MapOnlyFn[Fn3[T, S, U, Any]]: + ... + + +@overload +def map_only(__type_or_types_or_pred: Type[T]) -> MapOnlyFn[Fn[T, Any]]: + ... + + +# This specialization is needed for the implementations below that call +@overload +def map_only(__type_or_types_or_pred: TypeAny) -> MapOnlyFn[FnAny[Any]]: + ... + + +@overload +def map_only(__type_or_types_or_pred: Callable[[Any], bool]) -> MapOnlyFn[FnAny[Any]]: + ... + + +def map_only( + __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]] +) -> MapOnlyFn[FnAny[Any]]: + """ + Suppose you are writing a tree_map over tensors, leaving everything + else unchanged. Ordinarily you would have to write: + + def go(t): + if isinstance(t, Tensor): + return ... + else: + return t + + With this function, you only need to write: + + @map_only(Tensor) + def go(t): + return ... 
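+
+    and then apply it across a whole pytree with ``tree_map(go, tree)``; leaves
+    that are not Tensors are returned unchanged.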
+ + You can also directly use 'tree_map_only' + """ + if isinstance(__type_or_types_or_pred, (type, tuple)) or ( + sys.version_info >= (3, 10) + and isinstance(__type_or_types_or_pred, types.UnionType) + ): + + def pred(x: Any) -> bool: + return isinstance(x, __type_or_types_or_pred) # type: ignore[arg-type] + + elif callable(__type_or_types_or_pred): + pred = __type_or_types_or_pred # type: ignore[assignment] + else: + raise TypeError("Argument must be a type, a tuple of types, or a callable.") + + def wrapper(func: Callable[[T], Any]) -> Callable[[Any], Any]: + # @functools.wraps(func) # torch dynamo doesn't support this yet + def wrapped(x: T) -> Any: + if pred(x): + return func(x) + return x + + return wrapped + + return wrapper + + +@overload +def tree_map_only( + __type_or_types_or_pred: Type[T], + func: Fn[T, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only( + __type_or_types_or_pred: Type2[T, S], + func: Fn2[T, S, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only( + __type_or_types_or_pred: Type3[T, S, U], + func: Fn3[T, S, U, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only( + __type_or_types_or_pred: Callable[[Any], bool], + func: FnAny[Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +def tree_map_only( + __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]], + func: FnAny[Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + return tree_map(map_only(__type_or_types_or_pred)(func), tree, is_leaf=is_leaf) + + +@overload +def tree_map_only_( + __type_or_types_or_pred: Type[T], + func: Fn[T, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only_( + __type_or_types_or_pred: Type2[T, S], + func: Fn2[T, S, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only_( + __type_or_types_or_pred: Type3[T, S, U], + func: Fn3[T, S, U, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only_( + __type_or_types_or_pred: Callable[[Any], bool], + func: FnAny[Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +def tree_map_only_( + __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]], + func: FnAny[Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + return tree_map_(map_only(__type_or_types_or_pred)(func), tree, is_leaf=is_leaf) + + +def tree_all( + pred: Callable[[Any], bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + flat_args = tree_leaves(tree, is_leaf=is_leaf) + return all(map(pred, flat_args)) + + +def tree_any( + pred: Callable[[Any], bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + flat_args = tree_leaves(tree, is_leaf=is_leaf) + return any(map(pred, flat_args)) + + +@overload +def tree_all_only( + __type_or_types: Type[T], + pred: Fn[T, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... 
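+
+# Illustrative usage of the *_only helpers above and below:
+#   tree_map_only(int, lambda x: x * 2, {"a": 1, "b": "s"})     -> {"a": 2, "b": "s"}
+#   tree_all_only(int, lambda x: x > 0, {"a": 1, "b": "s"})     -> True  (non-int leaves are ignored)
+#   tree_any_only(str, lambda x: x == "s", {"a": 1, "b": "s"})  -> True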
+ + +@overload +def tree_all_only( + __type_or_types: Type2[T, S], + pred: Fn2[T, S, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +@overload +def tree_all_only( + __type_or_types: Type3[T, S, U], + pred: Fn3[T, S, U, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +def tree_all_only( + __type_or_types: TypeAny, + pred: FnAny[bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + flat_args = tree_leaves(tree, is_leaf=is_leaf) + return all(pred(x) for x in flat_args if isinstance(x, __type_or_types)) + + +@overload +def tree_any_only( + __type_or_types: Type[T], + pred: Fn[T, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +@overload +def tree_any_only( + __type_or_types: Type2[T, S], + pred: Fn2[T, S, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +@overload +def tree_any_only( + __type_or_types: Type3[T, S, U], + pred: Fn3[T, S, U, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +def tree_any_only( + __type_or_types: TypeAny, + pred: FnAny[bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + flat_args = tree_leaves(tree, is_leaf=is_leaf) + return any(pred(x) for x in flat_args if isinstance(x, __type_or_types)) + + +# Broadcasts a pytree to the provided TreeSpec and returns the flattened +# values. If this is not possible, then this function returns None. +# +# For example, given pytree=0 and spec=TreeSpec(list, None, [LeafSpec(), LeafSpec()]), +# would return [0, 0]. This is useful for part of the vmap implementation: +# a user can pass in vmap(fn, in_dims)(*inputs). `in_dims` should be +# broadcastable to the tree structure of `inputs` and we use +# _broadcast_to_and_flatten to check this. +def _broadcast_to_and_flatten( + tree: PyTree, + treespec: TreeSpec, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> Optional[List[Any]]: + assert isinstance(treespec, TreeSpec) + + if _is_leaf(tree, is_leaf=is_leaf): + return [tree] * treespec.num_leaves + if treespec.is_leaf(): + return None + node_type = _get_node_type(tree) + if node_type != treespec.type: + return None + + flatten_fn = SUPPORTED_NODES[node_type].flatten_fn + child_pytrees, ctx = flatten_fn(tree) + + # Check if the Node is different from the spec + if len(child_pytrees) != treespec.num_children or ctx != treespec.context: + return None + + # Recursively flatten the children + result: List[Any] = [] + for child, child_spec in zip(child_pytrees, treespec.children_specs): + flat = _broadcast_to_and_flatten(child, child_spec, is_leaf=is_leaf) + if flat is not None: + result += flat + else: + return None + + return result + + +@dataclasses.dataclass +class _TreeSpecSchema: + """ + _TreeSpecSchema is the schema used to serialize the TreeSpec + It contains the following fields: + - type: A string name of the type. null for the case of a LeafSpec. + - context: Any format which is json dumpable + - children_spec: A list of children serialized specs. 
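+
+    For example (illustrative), the spec of ``[1, 2]`` maps to a schema of the rough
+    shape ``{"type": "builtins.list", "context": "null", "children_spec": [leaf, leaf]}``,
+    where each leaf is ``{"type": null, "context": null, "children_spec": []}``;
+    :func:`treespec_dumps` then json-dumps this together with the protocol number.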
+ """ + + type: Optional[str] + context: DumpableContext + children_spec: List["_TreeSpecSchema"] + + +class _ProtocolFn(NamedTuple): + treespec_to_json: Callable[[TreeSpec], DumpableContext] + json_to_treespec: Callable[[DumpableContext], TreeSpec] + + +_SUPPORTED_PROTOCOLS: Dict[int, _ProtocolFn] = {} + + +def _treespec_to_json(treespec: TreeSpec) -> _TreeSpecSchema: + if treespec.is_leaf(): + return _TreeSpecSchema(None, None, []) + + if treespec.type not in SUPPORTED_SERIALIZED_TYPES: + raise NotImplementedError( + f"Serializing {treespec.type} in pytree is not registered.", + ) + + serialize_node_def = SUPPORTED_SERIALIZED_TYPES[treespec.type] + + serialized_type_name = serialize_node_def.serialized_type_name + + if serialized_type_name == NO_SERIALIZED_TYPE_NAME_FOUND: + raise NotImplementedError( + f"No registered serialization name for {treespec.type} found. " + "Please update your _register_pytree_node call with a `serialized_type_name` kwarg." + ) + + if serialize_node_def.to_dumpable_context is None: + try: + serialized_context = json.dumps(treespec.context) + except TypeError as e: + raise TypeError( + "Unable to serialize context. " + "Please make the context json dump-able, or register a " + "custom serializer using _register_pytree_node." + ) from e + else: + serialized_context = serialize_node_def.to_dumpable_context(treespec.context) + + child_schemas = [_treespec_to_json(child) for child in treespec.children_specs] + + return _TreeSpecSchema(serialized_type_name, serialized_context, child_schemas) + + +def _json_to_treespec(json_schema: DumpableContext) -> TreeSpec: + if ( + json_schema["type"] is None + and json_schema["context"] is None + and len(json_schema["children_spec"]) == 0 + ): + return _LEAF_SPEC + + if json_schema["type"] not in SERIALIZED_TYPE_TO_PYTHON_TYPE: + raise NotImplementedError( + f'Deserializing {json_schema["type"]} in pytree is not registered.', + ) + + typ = SERIALIZED_TYPE_TO_PYTHON_TYPE[json_schema["type"]] + serialize_node_def = SUPPORTED_SERIALIZED_TYPES[typ] + + if serialize_node_def.from_dumpable_context is None: + try: + context = json.loads(json_schema["context"]) + except TypeError as ex: + raise TypeError( + "Unable to deserialize context. " + "Please make the context json load-able, or register a " + "custom serializer using _register_pytree_node.", + ) from ex + else: + context = serialize_node_def.from_dumpable_context(json_schema["context"]) + + children_specs = [] + for child_string in json_schema["children_spec"]: + children_specs.append(_json_to_treespec(child_string)) + + return TreeSpec(typ, context, children_specs) + + +_SUPPORTED_PROTOCOLS[1] = _ProtocolFn(_treespec_to_json, _json_to_treespec) + + +def treespec_dumps(treespec: TreeSpec, protocol: Optional[int] = None) -> str: + if not isinstance(treespec, TreeSpec): + raise TypeError( + f"treespec_dumps(treespec, protocol): Expected `treespec` to be instance of " + f"TreeSpec but got item of type {type(treespec)}.", + ) + + if protocol is None: + protocol = DEFAULT_TREESPEC_SERIALIZATION_PROTOCOL + + if protocol in _SUPPORTED_PROTOCOLS: + json_spec = _SUPPORTED_PROTOCOLS[protocol].treespec_to_json(treespec) + else: + raise ValueError( + f"Unknown protocol {protocol}. 
" + f"Available protocols: {list(_SUPPORTED_PROTOCOLS.keys())}", + ) + + str_spec = json.dumps((protocol, dataclasses.asdict(json_spec))) + return str_spec + + +def treespec_loads(serialized: str) -> TreeSpec: + protocol, json_schema = json.loads(serialized) + + if protocol in _SUPPORTED_PROTOCOLS: + return _SUPPORTED_PROTOCOLS[protocol].json_to_treespec(json_schema) + raise ValueError( + f"Unknown protocol {protocol}. " + f"Available protocols: {list(_SUPPORTED_PROTOCOLS.keys())}", + ) + + +class _DummyLeaf: + def __repr__(self) -> str: + return "*" + + +def treespec_pprint(treespec: TreeSpec) -> str: + dummy_tree = tree_unflatten( + [_DummyLeaf() for _ in range(treespec.num_leaves)], + treespec, + ) + return repr(dummy_tree) + + +# TODO(angelayi): remove this function after OSS/internal stabilize +def pytree_to_str(treespec: TreeSpec) -> str: + warnings.warn("pytree_to_str is deprecated. Please use treespec_dumps") + return treespec_dumps(treespec) + + +# TODO(angelayi): remove this function after OSS/internal stabilize +def str_to_pytree(json: str) -> TreeSpec: + warnings.warn("str_to_pytree is deprecated. Please use treespec_loads") + return treespec_loads(json) + + +def arg_tree_leaves(*args: PyTree, **kwargs: PyTree) -> List[Any]: + """Get a flat list of arguments to this function + + A slightly faster version of tree_leaves((args, kwargs)) + """ + leaves: List[Any] = [] + for a in args: + _tree_leaves_helper(a, leaves) + for a in kwargs.values(): + _tree_leaves_helper(a, leaves) + return leaves + + +def tree_flatten_with_path( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> Tuple[List[Tuple[KeyPath, Any]], TreeSpec]: + """Flattens a pytree like :func:`tree_flatten`, but also returns each leaf's key path. + + Args: + tree: a pytree to flatten. If it contains a custom type, that type must be + registered with an appropriate `tree_flatten_with_path_fn` when registered + with :func:`register_pytree_node`. + is_leaf: An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + Returns: + A tuple where the first element is a list of (key path, leaf) pairs, and the + second element is a :class:`TreeSpec` representing the structure of the flattened + tree. + """ + _, treespec = tree_flatten(tree, is_leaf) + return list(_generate_key_paths((), tree, is_leaf)), treespec + + +def tree_leaves_with_path( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> List[Tuple[KeyPath, Any]]: + """Gets the leaves of a pytree like ``tree_leaves`` and returns each leaf's key path. + + Args: + tree: a pytree. If it contains a custom type, that type must be + registered with an appropriate `tree_flatten_with_path_fn` when registered + with :func:`register_pytree_node`. + is_leaf: An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. 
+ Returns: + A list of (key path, leaf) pairs. + """ + return list(_generate_key_paths((), tree, is_leaf)) + + +def _generate_key_paths( + key_path: KeyPath, + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> Iterable[Tuple[KeyPath, Any]]: + if is_leaf and is_leaf(tree): + yield key_path, tree + return + + node_type = _get_node_type(tree) + handler = SUPPORTED_NODES.get(node_type) + if not handler: + # This is a leaf + yield key_path, tree + return + + flatten_with_keys = handler.flatten_with_keys_fn + if flatten_with_keys: + key_children, _ = flatten_with_keys(tree) + for k, c in key_children: + yield from _generate_key_paths((*key_path, k), c, is_leaf) + else: + # We registered this pytree but didn't add a flatten_with_keys_fn, complain. + raise ValueError( + f"Did not find a flatten_with_keys_fn for type: {node_type}. " + "Please pass a flatten_with_keys_fn argument to register_pytree_node." + ) + + +def tree_map_with_path( + func: Callable[..., Any], + tree: PyTree, + *rests: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + """Like :func:`tree_map`, but the provided callable takes an additional key path argument. + + Args: + func: A function that takes ``2 + len(rests)`` arguments, to be applied at the + corresponding leaves of the pytrees. The first positional argument + to ``func`` is the key path of the leaf in question. The second + positional argument is the value of the leaf. + tree: A pytree to be mapped over, with each leaf providing the first positional + argument to function ``func``. + rests: A tuple of pytrees, each of which has the same structure as + ``tree`` or has ``tree`` as a prefix. + is_leaf: An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns + A new pytree with the same structure as ``tree`` but with the value at each leaf given by + ``func(keypath, x, *xs)`` where ``keypath`` is the key path at the + corresponding leaf in ``tree``, ``x`` is the value at that leaf, and + ``xs`` is the tuple of values at corresponding nodes in ``rests``. + """ + keypath_leaves, treespec = tree_flatten_with_path(tree, is_leaf) + keypath_leaves = list(zip(*keypath_leaves)) + all_keypath_leaves = keypath_leaves + [treespec.flatten_up_to(r) for r in rests] + return treespec.unflatten(func(*xs) for xs in zip(*all_keypath_leaves)) + + +def keystr(kp: KeyPath) -> str: + """Given a key path, return a pretty-printed representation.""" + return "".join([str(k) for k in kp]) + + +def key_get(obj: Any, kp: KeyPath) -> Any: + """Given an object and a key path, return the value at the key path.""" + for k in kp: + obj = k.get(obj) + return obj diff --git a/venv/lib/python3.10/site-packages/torch/utils/_stats.py b/venv/lib/python3.10/site-packages/torch/utils/_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..5b33f7b8cb025cfc2d0f249fc45a6f25bb1eea26 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_stats.py @@ -0,0 +1,21 @@ +# NOTE! PLEASE KEEP THIS FILE *FREE* OF TORCH DEPS! IT SHOULD BE IMPORTABLE ANYWHERE. 
+# IF YOU FEEL AN OVERWHELMING URGE TO ADD A TORCH DEP, MAKE A TRAMPOLINE FILE A LA torch._dynamo.utils +# AND SCRUB AWAY TORCH NOTIONS THERE. +import collections +import functools +from typing import OrderedDict + +simple_call_counter: OrderedDict[str, int] = collections.OrderedDict() + +def count_label(label): + prev = simple_call_counter.setdefault(label, 0) + simple_call_counter[label] = prev + 1 + +def count(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if fn.__qualname__ not in simple_call_counter: + simple_call_counter[fn.__qualname__] = 0 + simple_call_counter[fn.__qualname__] = simple_call_counter[fn.__qualname__] + 1 + return fn(*args, **kwargs) + return wrapper diff --git a/venv/lib/python3.10/site-packages/torch/utils/_traceback.py b/venv/lib/python3.10/site-packages/torch/utils/_traceback.py new file mode 100644 index 0000000000000000000000000000000000000000..fa73b9f41cd66fb21bdb30bdcc100ca9cd648816 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_traceback.py @@ -0,0 +1,254 @@ +from types import TracebackType +from typing import List, Optional +import tempfile +import traceback +import contextlib +import inspect +import os.path + +# This file contains utilities for ensuring dynamically compile()'d +# code fragments display their line numbers in backtraces. +# +# The constraints: +# +# - We don't have control over the user exception printer (in particular, +# we cannot assume the linecache trick will work, c.f. +# https://stackoverflow.com/q/50515651/23845 ) +# +# - We don't want to create temporary files every time we compile() +# some code; file creation should happen lazily only at exception +# time. Arguably, you *should* be willing to write out your +# generated Python code to file system, but in some situations +# (esp. library code) it would violate user expectation to write +# to the file system, so we try to avoid it. In particular, we'd +# like to keep the files around, so users can open up the files +# mentioned in the trace; if the file is invisible, we want to +# avoid clogging up the filesystem. +# +# If this is not a constraint for you, there is a substantially simpler +# way to implement the functionality in this PR: instead of using +# eval/exec directly, just always write a Python file to filesystem +# and compile that. +# +# - You have control over a context where the compiled code will get +# executed, so that we can interpose while the stack is unwinding +# (otherwise, we have no way to interpose on the exception printing +# process.) +# +# There are two things you have to do to make use of the utilities here: +# +# - When you compile your source code, you must save its string source +# in its f_globals under the magic name "__compile_source__" +# +# - Before running the compiled code, enter the +# report_compile_source_on_error() context manager. + +@contextlib.contextmanager +def report_compile_source_on_error(): + try: + yield + except Exception as exc: + tb = exc.__traceback__ + + # Walk the traceback, looking for frames that have + # source attached + stack = [] + while tb is not None: + filename = tb.tb_frame.f_code.co_filename + source = tb.tb_frame.f_globals.get("__compile_source__") + + if filename == "" and source is not None: + # What black magic are we doing here? 
Intuitively, what + # we would like to do is overwrite the co_filename on any + # frames that were generated from exec/eval so that they + # point to a temporary file that has the actual line + # information, so Python's default error printer can print + # useful line information on it. + # + # Writing out the temporary file is easy. But overwriting + # co_filename is not! You can't modify the code object + # associated with a frame. You can, however, reconstruct + # a traceback with entirely new frames from scratch, so that's + # what we do. But there's another problem, which is how to + # make the frame? + # + # The black magic is we make a frankenstein frame and code + # object which resembles the original frame/code enough so + # that it will print properly under traceback and the default + # error printer, but IT IS NOT THE ORIGINAL FRAME (you + # couldn't, e.g., execute its code with different variables + # and expect it to work.) + + # Don't delete the temporary file so the user can inspect it + # TODO: This creates a temporary file for every frame, but we + # technically only need one per distinct __compile_source__ + with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=".py") as f: + f.write(source) + # Create a frame. Python doesn't let you construct + # FrameType directly, so just make one with compile + frame = tb.tb_frame + code = compile('__inspect_currentframe()', f.name, 'eval') + code = code.replace(co_name=frame.f_code.co_name) + # Python 3.11 only + if hasattr(frame.f_code, 'co_linetable'): + # We can't copy ALL of the metadata over, because you + # can cause Python to segfault this way. What exactly + # do we need? We need enough information for + # traceback to be able to print the exception + # correctly. Code reading Lib/traceback.py reveals + # that traceback calls code.co_positions() in order to + # get the augmented line/col numbers. Objects/codeobject.c, + # specifically _PyCode_InitAddressRange, reveals that + # this iterator is initialized from co_linetable and + # co_firstfileno. So copy these we must! + code = code.replace( # type: ignore[call-arg] + co_linetable=frame.f_code.co_linetable, # type: ignore[attr-defined] + co_firstlineno=frame.f_code.co_firstlineno, # type: ignore[attr-defined] + ) + fake_frame = eval( + code, + frame.f_globals, + { + **frame.f_locals, + '__inspect_currentframe': inspect.currentframe + } + ) + fake_tb = TracebackType( + None, fake_frame, tb.tb_lasti, tb.tb_lineno + ) + stack.append(fake_tb) + else: + stack.append(tb) + + tb = tb.tb_next + + # Reconstruct the linked list + tb_next = None + for tb in reversed(stack): + tb.tb_next = tb_next + tb_next = tb + + raise exc.with_traceback(tb_next) # noqa: TRY200 + +def shorten_filename(fn, *, base=None): + """Shorten a source filepath, with the assumption that torch/ subdirectories don't need to be shown to user.""" + if base is None: + base = os.path.dirname(os.path.dirname(__file__)) + # Truncate torch/foo.py to foo.py + try: + prefix = os.path.commonpath([fn, base]) + except ValueError: + return fn + else: + return fn[len(prefix) + 1:] + +def format_frame(frame, *, base=None, line=False): + """ + Format a FrameSummary in a short way, without printing full absolute path or code. + + The idea is the result fits on a single line. 
+ """ + extra_line = "" + if line: + extra_line = f"{frame.line} # " + return f"{extra_line}{shorten_filename(frame.filename, base=base)}:{frame.lineno} in {frame.name}" + +def format_traceback_short(tb): + """Format a TracebackType in a short way, printing only the inner-most frame.""" + return format_frame(traceback.extract_tb(tb)[-1]) + +class CapturedTraceback: + __slots__ = ['tb', 'skip'] + + def __init__(self, tb, skip=0): + self.tb = tb + self.skip = skip + + def cleanup(self): + self.tb = None + + def summary(self): + import torch._C._profiler + + if self.tb is None: + # TODO: Maybe indicate that the traceback was elided? + return traceback.StackSummary() + + return _extract_symbolized_tb( + torch._C._profiler.symbolize_tracebacks([self.tb])[0], + self.skip + ) + + def __getstate__(self): + return (None, { + 'tb': None, # TB is not pickleable + 'skip': self.skip, + }) + + @staticmethod + def extract(*, script=False, cpp=False, skip=0): + """ + Like traceback.extract_stack(), but faster (approximately 20x faster); it + is fast enough that you can unconditionally log stacks this way as part of + normal execution. It returns a torch._C._profiler.CapturedTraceback + object that must be formatted specially with format_captured_tb. + + By default, this only reports Python backtraces (like extract_stack). You + can set the script/cpp kwargs to also turn on TorchScript/C++ trace + reporting. + """ + import torch._C._profiler + + if script or cpp: + assert skip == 0, "skip with script/cpp NYI" + + return CapturedTraceback( + torch._C._profiler.gather_traceback(python=True, script=script, cpp=cpp), + # Elide extract() frame if we don't have script/cpp frames. If + # we do have those frames, it doesn't work so force zero. + 0 if script or cpp else skip + 1 + ) + + def format(self): + """ + Formats a single torch._C._profiler.CapturedTraceback into a list of + strings equivalent to the output of traceback.format_list. Note that if + pass it CapturedTraceback with C++ traces, it is better not to use this + function and use the batch formatting API format_captured_tbs to amortize + the cost of symbolization + """ + return traceback.format_list(self.summary()) + + @staticmethod + def format_all(tbs): + """ + Bulk version of CapturedTraceback.format. Returns a list of list of strings. + """ + import torch._C._profiler + + # Directly populate tracebacks that already have cached summaries + rs: List[Optional[List[str]]] = [] + delayed_idxs = [] + for i, tb in enumerate(tbs): + if tb.tb is None: + rs.append([]) + else: + rs.append(None) + delayed_idxs.append(i) + + stbs = torch._C._profiler.symbolize_tracebacks([tbs[i].tb for i in delayed_idxs]) + for i, stb in zip(delayed_idxs, stbs): + rs[i] = traceback.format_list(tbs[i].summary()) + + return rs + + +def _extract_symbolized_tb(tb, skip): + """ + Given a symbolized traceback from symbolize_tracebacks, return a StackSummary object of + pre-processed stack trace entries. 
+ """ + stack = traceback.StackSummary() + for f in reversed(tb[skip:]): + stack.append(traceback.FrameSummary(f['filename'], f['line'], f['name'])) + return stack diff --git a/venv/lib/python3.10/site-packages/torch/utils/_triton.py b/venv/lib/python3.10/site-packages/torch/utils/_triton.py new file mode 100644 index 0000000000000000000000000000000000000000..865b34c28b3377266567b25bc44ae53f9927b7d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_triton.py @@ -0,0 +1,103 @@ +import functools +import hashlib +import os + +from torch._dynamo.device_interface import get_interface_for_device + + +@functools.lru_cache(None) +def has_triton_package() -> bool: + try: + import triton + + return triton is not None + except ImportError: + return False + + +@functools.lru_cache(None) +def has_triton() -> bool: + def cuda_extra_check(device_interface): + return device_interface.Worker.get_device_properties().major >= 7 + + triton_supported_devices = {"cuda": cuda_extra_check} + + def is_device_compatible_with_triton(): + for device, extra_check in triton_supported_devices.items(): + device_interface = get_interface_for_device(device) + if device_interface.is_available() and extra_check(device_interface): + return True + return False + + return is_device_compatible_with_triton() and has_triton_package() + + +@functools.lru_cache(None) +def triton_backend_hash(): + from triton.common.backend import get_backend, get_cuda_version_key + + import torch + + if torch.version.hip: + # Does not work with ROCm + return None + + if not torch.cuda.is_available(): + return None + + backend = get_backend("cuda") + if backend is None: + return get_cuda_version_key() + else: + return backend.get_version_key() + + +@functools.lru_cache +def triton_key(): + import pkgutil + + import triton + + TRITON_PATH = os.path.dirname(os.path.abspath(triton.__file__)) + contents = [] + # This is redundant. Doing it to be consistent with upstream. 
+ # frontend + with open(os.path.join(TRITON_PATH, "compiler", "compiler.py"), "rb") as f: + contents += [hashlib.sha256(f.read()).hexdigest()] + + # compiler + compiler_path = os.path.join(TRITON_PATH, "compiler") + backends_path = os.path.join(TRITON_PATH, "compiler", "backends") + for lib in pkgutil.iter_modules([compiler_path, backends_path]): + with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f: # type: ignore[call-arg, union-attr, arg-type] + contents += [hashlib.sha256(f.read()).hexdigest()] + # backend + libtriton_hash = hashlib.sha256() + with open(os.path.join(TRITON_PATH, "_C/libtriton.so"), "rb") as f: + while True: + chunk = f.read(1024**2) + if not chunk: + break + libtriton_hash.update(chunk) + contents.append(libtriton_hash.hexdigest()) + # language + language_path = os.path.join(TRITON_PATH, "language") + for lib in pkgutil.iter_modules([language_path]): + with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f: # type: ignore[call-arg, union-attr, arg-type] + contents += [hashlib.sha256(f.read()).hexdigest()] + from triton import __version__ + + return f"{__version__}" + "-".join(contents) + + +@functools.lru_cache(None) +def triton_hash_with_backend(): + import torch + + if torch.version.hip: + # Does not work with ROCm + return None + + backend_hash = triton_backend_hash() + key = f"{triton_key()}-{backend_hash}" + return hashlib.sha256(key.encode("utf-8")).hexdigest() diff --git a/venv/lib/python3.10/site-packages/torch/utils/_typing_utils.py b/venv/lib/python3.10/site-packages/torch/utils/_typing_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fd1b6ca5785ff5d90dfed7cf3c152dfb17c616f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_typing_utils.py @@ -0,0 +1,13 @@ +"""Miscellaneous utilities to aid with typing.""" + +from typing import Optional, TypeVar + +# Helper to turn Optional[T] into T when we know None either isn't +# possible or should trigger an exception. +T = TypeVar("T") + + +def not_none(obj: Optional[T]) -> T: + if obj is None: + raise TypeError("Invariant encountered: value was None when it should not be") + return obj diff --git a/venv/lib/python3.10/site-packages/torch/utils/_zip.py b/venv/lib/python3.10/site-packages/torch/utils/_zip.py new file mode 100644 index 0000000000000000000000000000000000000000..f37ddb44987889fdf6730b800592e13652e46aed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_zip.py @@ -0,0 +1,85 @@ +import argparse +import glob +import os +from pathlib import Path +from zipfile import ZipFile + +# Exclude some standard library modules to: +# 1. Slim down the final zipped file size +# 2. Remove functionality we don't want to support. +DENY_LIST = [ + # Interface to unix databases + "dbm", + # ncurses bindings (terminal interfaces) + "curses", + # Tcl/Tk GUI + "tkinter", + "tkinter", + # Tests for the standard library + "test", + "tests", + "idle_test", + "__phello__.foo.py", + # importlib frozen modules. These are already baked into CPython. 
+ "_bootstrap.py", + "_bootstrap_external.py", +] + +strip_file_dir = "" + + +def remove_prefix(text, prefix): + if text.startswith(prefix): + return text[len(prefix) :] + return text + + +def write_to_zip(file_path, strip_file_path, zf, prepend_str=""): + stripped_file_path = prepend_str + remove_prefix(file_path, strip_file_dir + "/") + path = Path(stripped_file_path) + if path.name in DENY_LIST: + return + zf.write(file_path, stripped_file_path) + + +def main() -> None: + global strip_file_dir + parser = argparse.ArgumentParser(description="Zip py source") + parser.add_argument("paths", nargs="*", help="Paths to zip.") + parser.add_argument( + "--install-dir", "--install_dir", help="Root directory for all output files" + ) + parser.add_argument( + "--strip-dir", + "--strip_dir", + help="The absolute directory we want to remove from zip", + ) + parser.add_argument( + "--prepend-str", + "--prepend_str", + help="A string to prepend onto all paths of a file in the zip", + default="", + ) + parser.add_argument("--zip-name", "--zip_name", help="Output zip name") + + args = parser.parse_args() + + zip_file_name = args.install_dir + "/" + args.zip_name + strip_file_dir = args.strip_dir + prepend_str = args.prepend_str + zf = ZipFile(zip_file_name, mode="w") + + for p in sorted(args.paths): + if os.path.isdir(p): + files = glob.glob(p + "/**/*.py", recursive=True) + for file_path in sorted(files): + # strip the absolute path + write_to_zip( + file_path, strip_file_dir + "/", zf, prepend_str=prepend_str + ) + else: + write_to_zip(p, strip_file_dir + "/", zf, prepend_str=prepend_str) + + +if __name__ == "__main__": + main() # pragma: no cover diff --git a/venv/lib/python3.10/site-packages/torch/utils/backend_registration.py b/venv/lib/python3.10/site-packages/torch/utils/backend_registration.py new file mode 100644 index 0000000000000000000000000000000000000000..aee7964c42589acd01ade950caede06671df5861 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/backend_registration.py @@ -0,0 +1,339 @@ +import torch +from torch._C import _rename_privateuse1_backend, _get_privateuse1_backend_name +from typing import List, Optional, Union + +__all__ = ["rename_privateuse1_backend", "generate_methods_for_privateuse1_backend"] + +# TODO: Should use `torch._C._get_privateuse1_backend_name()` to get +# renamed-backend name for `privateuse1`, but the func will cause an +# error with torch.jit.script, so we use the global variable named +# `_privateuse1_backend_name`. +_privateuse1_backend_name = "privateuseone" + +def rename_privateuse1_backend(backend_name: str) -> None: + r""" + Rename the privateuse1 backend device to make it more convenient to use as a device name within PyTorch APIs. + + The steps are: + + (1) (In C++) implement kernels for various torch operations, and register them + to the PrivateUse1 dispatch key. + (2) (In python) call torch.utils.rename_privateuse1_backend("foo") + + You can now use "foo" as an ordinary device string in python. + + Note: this API can only be called once per process. Attempting to change + the external backend after it's already been set will result in an error. + + Note(AMP): If you want to support AMP on your device, you can register a custom backend module. + The backend must register a custom backend module with ``torch._register_device_module("foo", BackendModule)``. 
+ BackendModule needs to have the following API's: + + (1) ``get_amp_supported_dtype() -> List[torch.dtype]`` + get the supported dtypes on your "foo" device in AMP, maybe the "foo" device supports one more dtype. + + (2) ``is_autocast_enabled() -> bool`` + check the AMP is enabled or not on your "foo" device. + + (3) ``get_autocast_dtype() -> torch.dtype`` + get the supported dtype on your "foo" device in AMP, which is set by ``set_autocast_dtype`` or the + default dtype, and the default dtype is ``torch.float16``. + + (4) ``set_autocast_enabled(bool) -> None`` + enable the AMP or not on your "foo" device. + + (5) ``set_autocast_dtype(dtype) -> None`` + set the supported dtype on your "foo" device in AMP, and the dtype be contained in the dtypes got + from ``get_amp_supported_dtype``. + + Note(random): If you want to support to set seed for your device, BackendModule needs to have the following API's: + + (1) ``_is_in_bad_fork() -> bool`` + Return ``True`` if now it is in bad_fork, else return ``False``. + + (2) ``manual_seed_all(seed int) -> None`` + Sets the seed for generating random numbers for your devices. + + (3) ``device_count() -> int`` + Returns the number of "foo"s available. + + (4) ``get_rng_state(device: Union[int, str, torch.device] = 'foo') -> Tensor`` + Returns a list of ByteTensor representing the random number states of all devices. + + (5) ``set_rng_state(new_state: Tensor, device: Union[int, str, torch.device] = 'foo') -> None`` + Sets the random number generator state of the specified "foo" device. + + And there are some common funcs: + + (1) ``is_available() -> bool`` + Returns a bool indicating if "foo" is currently available. + + (2) ``current_device() -> int`` + Returns the index of a currently selected device. + + For more details, see https://pytorch.org/tutorials/advanced/extend_dispatcher.html#get-a-dispatch-key-for-your-backend + For an existing example, see https://github.com/bdhirsh/pytorch_open_registration_example + + Example:: + + >>> # xdoctest: +SKIP("failing") + >>> torch.utils.rename_privateuse1_backend("foo") + # This will work, assuming that you've implemented the right C++ kernels + # to implement torch.ones. + >>> a = torch.ones(2, device="foo") + + """ + _rename_privateuse1_backend(backend_name) + global _privateuse1_backend_name + _privateuse1_backend_name = backend_name + +def _check_register_once(module, attr): + if hasattr(module, attr): + raise RuntimeError(f"The custom device module of {module} has already been registered with {attr}") + + +def _normalization_device(custom_backend_name: str, device: Optional[Union[int, str, torch.device]] = None) -> int: + def _get_current_device_index(): + _get_device_index = "current_device" + if hasattr(torch, custom_backend_name) and \ + hasattr(getattr(torch, custom_backend_name), _get_device_index): + return getattr(getattr(torch, custom_backend_name), _get_device_index)() + else: + # The default device index is 0. 
+ return 0 + + if device is None: + return _get_current_device_index() + # if isinstance(device, str), this means that the parameter passed in is in the string format "foo:0" + # convert str object to torch.device object, and then process it uniformly + elif isinstance(device, str): + device = torch.device(device) + + # variable devcie can only be torch.device type or int type + if isinstance(device, torch.device): + if device.type != custom_backend_name: + raise RuntimeError(f"Invalid device, must be {custom_backend_name} device") + elif device.index is None: + device_idx = _get_current_device_index() + else: + device_idx = device.index + # if isinstance(device, int), we can take the index number directly + else: + device_idx = device + return device_idx + + +def _generate_tensor_methods_for_privateuse1_backend(custom_backend_name: str) -> None: + @property # type: ignore[misc] + def wrap_tensor_backend(self: torch.Tensor) -> bool: + return self.device.type == custom_backend_name + + _check_register_once(torch.Tensor, f'is_{custom_backend_name}') + setattr(torch.Tensor, f'is_{custom_backend_name}', wrap_tensor_backend) + + def wrap_tensor_to(self: torch.Tensor, device: Optional[Union[int, torch.device]] = None, non_blocking=False, + **kwargs) -> torch.Tensor: + r"""Perform Tensor device conversion. Call the to operator implementation. + + .. note:: + If the ``self`` Tensor already + has the correct :class:`torch.device`, then ``self`` is returned. + Otherwise, the returned tensor is a copy of ``self`` with the desired :class:`torch.device`. + + Args: + device (int, optional): if specified, all parameters will be copied to that device + non_blocking (bool): If ``True`` and the source is in pinned memory, + the copy will be asynchronous with respect to the host. Otherwise, + the argument has no effect. + **kwargs (dict): For compatibility, may contain the key ``memory_format`` argument. + """ + device_idx = _normalization_device(custom_backend_name, device) + return self.to(device=torch.device(f'{custom_backend_name}:{device_idx}'), non_blocking=non_blocking, **kwargs) + + _check_register_once(torch.Tensor, custom_backend_name) + setattr(torch.Tensor, custom_backend_name, wrap_tensor_to) + + +def _generate_module_methods_for_privateuse1_backend(custom_backend_name: str) -> None: + # Generate Module attributes and methods depends on Tensor methods, + # so we need to check whether Tensor methods is already registered. + if not hasattr(torch.Tensor, custom_backend_name): + raise RuntimeError( + f"Can not automatically generate {custom_backend_name}() method for torch.nn.Module." + f"Because torch.Tensor doesn't has the method {custom_backend_name}()." + f"For this error, you can try setting for_tensor=True.") + + def wrap_module_to(self: torch.nn.modules.module.T, + device: Optional[Union[int, torch.device]] = None) -> torch.nn.modules.module.T: + r"""Move all model parameters and buffers to the custom device. + + This also makes associated parameters and buffers different objects. So + it should be called before constructing optimizer if the module will + live on device while being optimized. + + .. note:: + This method modifies the module in-place. 
+ + Args: + device (int, optional): if specified, all parameters will be copied to that device + """ + return self._apply(lambda t: getattr(t, custom_backend_name)(device)) + + _check_register_once(torch.nn.Module, custom_backend_name) + setattr(torch.nn.Module, custom_backend_name, wrap_module_to) + + +def _generate_storage_methods_for_privateuse1_backend(custom_backend_name: str, + unsupported_dtype: Optional[List[torch.dtype]] = None) -> None: + # Attribute is registered in the _StorageBase class + # and UntypedStorage obtains through inheritance. + @property # type: ignore[misc] + def wrap_storage_backend(self: torch.storage._StorageBase) -> bool: + r"""Return the internal :class:`torch.UntypedStorage`.""" + return self.device.type == custom_backend_name + + _check_register_once(torch.storage._StorageBase, f'is_{custom_backend_name}') + setattr(torch.storage._StorageBase, f'is_{custom_backend_name}', wrap_storage_backend) + + def wrap_storage_to(self, device=None, non_blocking=False): + r"""Return a copy of this object in custom device memory. + + If this object is already in device memory and on the correct device, then + no copy is performed and the original object is returned. + + Args: + device (int): The destination device id. Defaults to the current device. + non_blocking (bool): If ``True`` and the source is in pinned memory, + the copy will be asynchronous with respect to the host. Otherwise, + the argument has no effect. + """ + # There should be a judgment related to storage device and a judgment related to storage type, + # but it depends on the extended function, so this part is temporarily omitted in the automatic generation. + device_idx = _normalization_device(custom_backend_name, device) + + if getattr(self, f'is_{custom_backend_name}'): + # storage has already on expected device. + if self.get_device() == device_idx: + return self + # For sparse storage, custom need to extend the implementation by themselves. + if self.is_sparse: + raise RuntimeError(f"Can not support a sparse storage move to {custom_backend_name} backend") + # create untyped_storage and copy data + untyped_storage = torch.UntypedStorage( + self.size(), device=torch.device(f'{custom_backend_name}:{device_idx}') + ) + untyped_storage.copy_(self, non_blocking) + return untyped_storage + + _check_register_once(torch.storage._StorageBase, custom_backend_name) + setattr(torch.storage._StorageBase, custom_backend_name, wrap_storage_to) + + # Register the corresponding attribute for the TypedStorage class. + # When the TypedStorage class is removed, the registration is also removed. 
+ + @property # type: ignore[misc] + def wrap_typed_storage_backend(self: torch.storage.TypedStorage) -> bool: + torch.storage._warn_typed_storage_removal() + return self._untyped_storage.device.type == custom_backend_name + + _check_register_once(torch.TypedStorage, f'is_{custom_backend_name}') + setattr(torch.storage.TypedStorage, f'is_{custom_backend_name}', wrap_typed_storage_backend) + + def wrap_typed_storage_to(self: torch.storage.TypedStorage, + device=None, non_blocking=False, **kwargs) -> torch.storage.TypedStorage: + torch.storage._warn_typed_storage_removal() + if unsupported_dtype and self.dtype in unsupported_dtype: + raise RuntimeError(f"Cannot create {custom_backend_name} storage " + f"as {self.dtype} dtype is not supported by this backend") + custom_backend_storage: torch.UntypedStorage = getattr( + self._untyped_storage, custom_backend_name)(device, non_blocking, **kwargs) + return self._new_wrapped_storage(custom_backend_storage) + + _check_register_once(torch.TypedStorage, custom_backend_name) + setattr(torch.TypedStorage, custom_backend_name, wrap_typed_storage_to) + + +def generate_methods_for_privateuse1_backend(for_tensor: bool = True, for_module: bool = True, + for_storage: bool = False, + unsupported_dtype: Optional[List[torch.dtype]] = None) -> None: + r""" + Automatically generate attributes and methods for the custom backend after rename privateuse1 backend. + + In the default scenario, storage-related methods will not be generated automatically. + + When you implement kernels for various torch operations, and register them to the PrivateUse1 dispatch key. + And call the function torch.rename_privateuse1_backend("foo") to rename your backend name. + At this point, you can easily register specific methods and attributes by calling this function. + Just like torch.Tensor.foo(), torch.Tensor.is_foo, torch.Storage.foo(), torch.Storage.is_foo. + + Note: We recommend you use generic functions (check devices are equal or to(device=)). + We provide these methods for convenience only and they will be "monkey patched" onto the objects + and so will not be properly typed. For Storage methods generate, if you need to support sparse data storage, + you need to extend the implementation yourself. + + Args: + for_tensor (bool): whether register related methods for torch.Tensor class. + for_module (bool): whether register related methods for torch.nn.Module class. + for_storage (bool): whether register related methods for torch.Storage class. + unsupported_dtype (List[torch.dtype]): takes effect only when the storage method needs to be generated, + indicating that the storage does not support the torch.dtype type. + + Example:: + + >>> # xdoctest: +SKIP("failing") + >>> torch.utils.rename_privateuse1_backend("foo") + >>> torch.utils.generate_methods_for_privateuse1_backend() + # Then automatically generate backend-related attributes and methods. + >>> a = torch.tensor(2).foo() + >>> a.is_foo + >>> hasattr(torch.nn.Module, 'foo') + """ + custom_backend_name = _get_privateuse1_backend_name() + + if for_tensor: + _generate_tensor_methods_for_privateuse1_backend(custom_backend_name) + + if for_module: + _generate_module_methods_for_privateuse1_backend(custom_backend_name) + + if for_storage: + _generate_storage_methods_for_privateuse1_backend(custom_backend_name, unsupported_dtype) + +def _get_custom_mod_func(func_name: str): + r""" + Return the func named `func_name` defined in custom device module. If not defined, + return `None`. 
And the func is registered with `torch.utils.rename_privateuse1_backend('foo')` + and `torch._register_device_module('foo', BackendModule)`. + If the custom device module or the func is not defined, it will give warning or error message. + Args: + func_name (str): return the callable func named func_name defined in custom device module. + Example:: + class DummyfooModule: + @staticmethod + def is_available(): + return True + @staticmethod + def func_name(*args, **kwargs): + .... + torch.utils.rename_privateuse1_backend("foo") + torch._register_device_module("foo", DummyfooModule) + foo_is_available_func = torch.utils.backend_registration._get_custom_mod_func("is_available") + if foo_is_available_func: + foo_is_available = foo_is_available_func() + func_ = torch.utils.backend_registration._get_custom_mod_func("func_name") + if func_: + result = func_(*args, **kwargs) + Attention: This function is not meant to be used directly by users, which is why + it is marked as private. It is a convenience function for backend implementers to + more easily call the hooks into their backend extensions. + """ + assert isinstance(func_name, str), f"func_name must be `str`, but got `{type(func_name)}`." + backend_name = _get_privateuse1_backend_name() + custom_device_mod = getattr(torch, backend_name, None) # type: ignore[arg-type] + function = getattr(custom_device_mod, func_name, None) # type: ignore[arg-type] + if custom_device_mod is None or function is None: + message = f'Try to call torch.{backend_name}.{func_name}. The backend must register a custom backend ' + message += f"module with `torch._register_device_module('{backend_name}', BackendModule)`. And " + message += f"BackendModule needs to have the following API's:\n `{func_name}(*args, **kwargs)`. \n" + raise RuntimeError(message) + return function diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9e814aaf4671ca35484c43bc38677849d02a81ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/__init__.py @@ -0,0 +1,6 @@ +from torch.utils.benchmark.utils.common import * # noqa: F403 +from torch.utils.benchmark.utils.timer import * # noqa: F403 +from torch.utils.benchmark.utils.compare import * # noqa: F403 +from torch.utils.benchmark.utils.fuzzer import * # noqa: F403 +from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import * # noqa: F403 +from torch.utils.benchmark.utils.sparse_fuzzer import * # noqa: F403 diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/common.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/common.py new file mode 100644 index 0000000000000000000000000000000000000000..1849931ee55c692ef7608d606d5293ae9d96de13 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/common.py @@ -0,0 +1,355 @@ +"""Base shared classes and utilities.""" + +import collections +import contextlib +import dataclasses +import os +import shutil +import tempfile +import textwrap +import time +from typing import cast, Any, DefaultDict, Dict, Iterable, Iterator, List, Optional, Tuple +import uuid + +import torch + + 
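# A minimal usage sketch (kept as a comment; the statement and numbers are
# illustrative assumptions, not part of this module) showing how the helpers
# defined further down in this file are typically combined to format a timing
# value:
#
#     t = 3.2e-6                            # seconds
#     unit, scale = select_unit(t)          # -> ("us", 1e-6)
#     print(f"{trim_sigfig(t / scale, 2):.1f} {unit}")   # -> "3.2 us"
#     with set_torch_threads(1):
#         pass  # run a measurement with a pinned thread count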
+__all__ = ["TaskSpec", "Measurement", "select_unit", "unit_to_english", "trim_sigfig", "ordered_unique", "set_torch_threads"] + + +_MAX_SIGNIFICANT_FIGURES = 4 +_MIN_CONFIDENCE_INTERVAL = 25e-9 # 25 ns + +# Measurement will include a warning if the distribution is suspect. All +# runs are expected to have some variation; these parameters set the +# thresholds. +_IQR_WARN_THRESHOLD = 0.1 +_IQR_GROSS_WARN_THRESHOLD = 0.25 + + +@dataclasses.dataclass(init=True, repr=False, eq=True, frozen=True) +class TaskSpec: + """Container for information used to define a Timer. (except globals)""" + stmt: str + setup: str + global_setup: str = "" + label: Optional[str] = None + sub_label: Optional[str] = None + description: Optional[str] = None + env: Optional[str] = None + num_threads: int = 1 + + @property + def title(self) -> str: + """Best effort attempt at a string label for the measurement.""" + if self.label is not None: + return self.label + (f": {self.sub_label}" if self.sub_label else "") + elif "\n" not in self.stmt: + return self.stmt + (f": {self.sub_label}" if self.sub_label else "") + return ( + f"stmt:{f' ({self.sub_label})' if self.sub_label else ''}\n" + f"{textwrap.indent(self.stmt, ' ')}" + ) + + def setup_str(self) -> str: + return ( + "" if (self.setup == "pass" or not self.setup) + else f"setup:\n{textwrap.indent(self.setup, ' ')}" if "\n" in self.setup + else f"setup: {self.setup}" + ) + + def summarize(self) -> str: + """Build TaskSpec portion of repr string for other containers.""" + sections = [ + self.title, + self.description or "", + self.setup_str(), + ] + return "\n".join([f"{i}\n" if "\n" in i else i for i in sections if i]) + +_TASKSPEC_FIELDS = tuple(i.name for i in dataclasses.fields(TaskSpec)) + + +@dataclasses.dataclass(init=True, repr=False) +class Measurement: + """The result of a Timer measurement. + + This class stores one or more measurements of a given statement. It is + serializable and provides several convenience methods + (including a detailed __repr__) for downstream consumers. + """ + number_per_run: int + raw_times: List[float] + task_spec: TaskSpec + metadata: Optional[Dict[Any, Any]] = None # Reserved for user payloads. + + def __post_init__(self) -> None: + self._sorted_times: Tuple[float, ...] = () + self._warnings: Tuple[str, ...] = () + self._median: float = -1.0 + self._mean: float = -1.0 + self._p25: float = -1.0 + self._p75: float = -1.0 + + def __getattr__(self, name: str) -> Any: + # Forward TaskSpec fields for convenience. + if name in _TASKSPEC_FIELDS: + return getattr(self.task_spec, name) + return super().__getattribute__(name) + + # ========================================================================= + # == Convenience methods for statistics =================================== + # ========================================================================= + # + # These methods use raw time divided by number_per_run; this is an + # extrapolation and hides the fact that different number_per_run will + # result in different amortization of overheads, however if Timer has + # selected an appropriate number_per_run then this is a non-issue, and + # forcing users to handle that division would result in a poor experience. 
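    # Worked example of the extrapolation described above (illustrative
    # numbers, not measured results): with number_per_run=100 and
    # raw_times=[0.50, 0.55] seconds,
    #
    #     m = Measurement(number_per_run=100, raw_times=[0.50, 0.55],
    #                     task_spec=TaskSpec(stmt="pass", setup="pass"))
    #     m.times   # -> [0.0050, 0.0055]  (seconds per single run)
    #     m.median  # -> ~0.00525
    #
    # i.e. the statistics below are always reported per single execution of
    # `stmt`, regardless of how many runs Timer packed into each measurement.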
+ @property + def times(self) -> List[float]: + return [t / self.number_per_run for t in self.raw_times] + + @property + def median(self) -> float: + self._lazy_init() + return self._median + + @property + def mean(self) -> float: + self._lazy_init() + return self._mean + + @property + def iqr(self) -> float: + self._lazy_init() + return self._p75 - self._p25 + + @property + def significant_figures(self) -> int: + """Approximate significant figure estimate. + + This property is intended to give a convenient way to estimate the + precision of a measurement. It only uses the interquartile region to + estimate statistics to try to mitigate skew from the tails, and + uses a static z value of 1.645 since it is not expected to be used + for small values of `n`, so z can approximate `t`. + + The significant figure estimation used in conjunction with the + `trim_sigfig` method to provide a more human interpretable data + summary. __repr__ does not use this method; it simply displays raw + values. Significant figure estimation is intended for `Compare`. + """ + self._lazy_init() + n_total = len(self._sorted_times) + lower_bound = int(n_total // 4) + upper_bound = int(torch.tensor(3 * n_total / 4).ceil()) + interquartile_points: Tuple[float, ...] = self._sorted_times[lower_bound:upper_bound] + std = torch.tensor(interquartile_points).std(unbiased=False).item() + sqrt_n = torch.tensor(len(interquartile_points)).sqrt().item() + + # Rough estimates. These are by no means statistically rigorous. + confidence_interval = max(1.645 * std / sqrt_n, _MIN_CONFIDENCE_INTERVAL) + relative_ci = torch.tensor(self._median / confidence_interval).log10().item() + num_significant_figures = int(torch.tensor(relative_ci).floor()) + return min(max(num_significant_figures, 1), _MAX_SIGNIFICANT_FIGURES) + + @property + def has_warnings(self) -> bool: + self._lazy_init() + return bool(self._warnings) + + def _lazy_init(self) -> None: + if self.raw_times and not self._sorted_times: + self._sorted_times = tuple(sorted(self.times)) + _sorted_times = torch.tensor(self._sorted_times, dtype=torch.float64) + self._median = _sorted_times.quantile(.5).item() + self._mean = _sorted_times.mean().item() + self._p25 = _sorted_times.quantile(.25).item() + self._p75 = _sorted_times.quantile(.75).item() + + def add_warning(msg: str) -> None: + rel_iqr = self.iqr / self.median * 100 + self._warnings += ( + f" WARNING: Interquartile range is {rel_iqr:.1f}% " + f"of the median measurement.\n {msg}", + ) + + if not self.meets_confidence(_IQR_GROSS_WARN_THRESHOLD): + add_warning("This suggests significant environmental influence.") + elif not self.meets_confidence(_IQR_WARN_THRESHOLD): + add_warning("This could indicate system fluctuation.") + + + def meets_confidence(self, threshold: float = _IQR_WARN_THRESHOLD) -> bool: + return self.iqr / self.median < threshold + + @property + def title(self) -> str: + return self.task_spec.title + + @property + def env(self) -> str: + return ( + "Unspecified env" if self.taskspec.env is None + else cast(str, self.taskspec.env) + ) + + @property + def as_row_name(self) -> str: + return self.sub_label or self.stmt or "[Unknown]" + + def __repr__(self) -> str: + """ + Example repr: + + Broadcasting add (4x8) + Median: 5.73 us + IQR: 2.25 us (4.01 to 6.26) + 372 measurements, 100 runs per measurement, 1 thread + WARNING: Interquartile range is 39.4% of the median measurement. + This suggests significant environmental influence. 
+ """ + self._lazy_init() + skip_line, newline = "MEASUREMENT_REPR_SKIP_LINE", "\n" + n = len(self._sorted_times) + time_unit, time_scale = select_unit(self._median) + iqr_filter = '' if n >= 4 else skip_line + + repr_str = f""" +{super().__repr__()} +{self.task_spec.summarize()} + {'Median: ' if n > 1 else ''}{self._median / time_scale:.2f} {time_unit} + {iqr_filter}IQR: {self.iqr / time_scale:.2f} {time_unit} ({self._p25 / time_scale:.2f} to {self._p75 / time_scale:.2f}) + {n} measurement{'s' if n > 1 else ''}, {self.number_per_run} runs {'per measurement,' if n > 1 else ','} {self.num_threads} thread{'s' if self.num_threads > 1 else ''} +{newline.join(self._warnings)}""".strip() # noqa: B950 + + return "\n".join(l for l in repr_str.splitlines(keepends=False) if skip_line not in l) + + @staticmethod + def merge(measurements: Iterable["Measurement"]) -> List["Measurement"]: + """Convenience method for merging replicates. + + Merge will extrapolate times to `number_per_run=1` and will not + transfer any metadata. (Since it might differ between replicates) + """ + grouped_measurements: DefaultDict[TaskSpec, List[Measurement]] = collections.defaultdict(list) + for m in measurements: + grouped_measurements[m.task_spec].append(m) + + def merge_group(task_spec: TaskSpec, group: List["Measurement"]) -> "Measurement": + times: List[float] = [] + for m in group: + # Different measurements could have different `number_per_run`, + # so we call `.times` which normalizes the results. + times.extend(m.times) + + return Measurement( + number_per_run=1, + raw_times=times, + task_spec=task_spec, + metadata=None, + ) + + return [merge_group(t, g) for t, g in grouped_measurements.items()] + + +def select_unit(t: float) -> Tuple[str, float]: + """Determine how to scale times for O(1) magnitude. + + This utility is used to format numbers for human consumption. + """ + time_unit = {-3: "ns", -2: "us", -1: "ms"}.get(int(torch.tensor(t).log10().item() // 3), "s") + time_scale = {"ns": 1e-9, "us": 1e-6, "ms": 1e-3, "s": 1}[time_unit] + return time_unit, time_scale + + +def unit_to_english(u: str) -> str: + return { + "ns": "nanosecond", + "us": "microsecond", + "ms": "millisecond", + "s": "second", + }[u] + + +def trim_sigfig(x: float, n: int) -> float: + """Trim `x` to `n` significant figures. (e.g. 3.14159, 2 -> 3.10000)""" + assert n == int(n) + magnitude = int(torch.tensor(x).abs().log10().ceil().item()) + scale = 10 ** (magnitude - n) + return float(torch.tensor(x / scale).round() * scale) + + +def ordered_unique(elements: Iterable[Any]) -> List[Any]: + return list(collections.OrderedDict(dict.fromkeys(elements)).keys()) + + +@contextlib.contextmanager +def set_torch_threads(n: int) -> Iterator[None]: + prior_num_threads = torch.get_num_threads() + try: + torch.set_num_threads(n) + yield + finally: + torch.set_num_threads(prior_num_threads) + + +def _make_temp_dir(prefix: Optional[str] = None, gc_dev_shm: bool = False) -> str: + """Create a temporary directory. The caller is responsible for cleanup. + + This function is conceptually similar to `tempfile.mkdtemp`, but with + the key additional feature that it will use shared memory if the + `BENCHMARK_USE_DEV_SHM` environment variable is set. This is an + implementation detail, but an important one for cases where many Callgrind + measurements are collected at once. (Such as when collecting + microbenchmarks.) + + This is an internal utility, and is exported solely so that microbenchmarks + can reuse the util. 
+ """ + use_dev_shm: bool = (os.getenv("BENCHMARK_USE_DEV_SHM") or "").lower() in ("1", "true") + if use_dev_shm: + root = "/dev/shm/pytorch_benchmark_utils" + assert os.name == "posix", f"tmpfs (/dev/shm) is POSIX only, current platform is {os.name}" + assert os.path.exists("/dev/shm"), "This system does not appear to support tmpfs (/dev/shm)." + os.makedirs(root, exist_ok=True) + + # Because we're working in shared memory, it is more important than + # usual to clean up ALL intermediate files. However we don't want every + # worker to walk over all outstanding directories, so instead we only + # check when we are sure that it won't lead to contention. + if gc_dev_shm: + for i in os.listdir(root): + owner_file = os.path.join(root, i, "owner.pid") + if not os.path.exists(owner_file): + continue + + with open(owner_file) as f: + owner_pid = int(f.read()) + + if owner_pid == os.getpid(): + continue + + try: + # https://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid-in-python + os.kill(owner_pid, 0) + + except OSError: + print(f"Detected that {os.path.join(root, i)} was orphaned in shared memory. Cleaning up.") + shutil.rmtree(os.path.join(root, i)) + + else: + root = tempfile.gettempdir() + + # We include the time so names sort by creation time, and add a UUID + # to ensure we don't collide. + name = f"{prefix or tempfile.gettempprefix()}__{int(time.time())}__{uuid.uuid4()}" + path = os.path.join(root, name) + os.makedirs(path, exist_ok=False) + + if use_dev_shm: + with open(os.path.join(path, "owner.pid"), "w") as f: + f.write(str(os.getpid())) + + return path diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/compare.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/compare.py new file mode 100644 index 0000000000000000000000000000000000000000..9c7863e6a740e09692c6e28e3fffeb41f02ab36e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/compare.py @@ -0,0 +1,320 @@ +"""Display class to aggregate and print the results of many measurements.""" +import collections +import enum +import itertools as it +from typing import DefaultDict, List, Optional, Tuple + +from torch.utils.benchmark.utils import common +from torch import tensor as _tensor + +__all__ = ["Colorize", "Compare"] + +BEST = "\033[92m" +GOOD = "\033[34m" +BAD = "\033[2m\033[91m" +VERY_BAD = "\033[31m" +BOLD = "\033[1m" +TERMINATE = "\033[0m" + + +class Colorize(enum.Enum): + NONE = "none" + COLUMNWISE = "columnwise" + ROWWISE = "rowwise" + + +# Classes to separate internal bookkeeping from what is rendered. 
+class _Column: + def __init__( + self, + grouped_results: List[Tuple[Optional[common.Measurement], ...]], + time_scale: float, + time_unit: str, + trim_significant_figures: bool, + highlight_warnings: bool, + ): + self._grouped_results = grouped_results + self._flat_results = list(it.chain(*grouped_results)) + self._time_scale = time_scale + self._time_unit = time_unit + self._trim_significant_figures = trim_significant_figures + self._highlight_warnings = ( + highlight_warnings + and any(r.has_warnings for r in self._flat_results if r) + ) + leading_digits = [ + int(_tensor(r.median / self._time_scale).log10().ceil()) if r else None + for r in self._flat_results + ] + unit_digits = max(d for d in leading_digits if d is not None) + decimal_digits = min( + max(m.significant_figures - digits, 0) + for digits, m in zip(leading_digits, self._flat_results) + if (m is not None) and (digits is not None) + ) if self._trim_significant_figures else 1 + length = unit_digits + decimal_digits + (1 if decimal_digits else 0) + self._template = f"{{:>{length}.{decimal_digits}f}}{{:>{7 if self._highlight_warnings else 0}}}" + + def get_results_for(self, group): + return self._grouped_results[group] + + def num_to_str(self, value: Optional[float], estimated_sigfigs: int, spread: Optional[float]): + if value is None: + return " " * len(self.num_to_str(1, estimated_sigfigs, None)) + + if self._trim_significant_figures: + value = common.trim_sigfig(value, estimated_sigfigs) + + return self._template.format( + value, + f" (! {spread * 100:.0f}%)" if self._highlight_warnings and spread is not None else "") + + +def optional_min(seq): + l = list(seq) + return None if len(l) == 0 else min(l) + + +class _Row: + def __init__(self, results, row_group, render_env, env_str_len, + row_name_str_len, time_scale, colorize, num_threads=None): + super().__init__() + self._results = results + self._row_group = row_group + self._render_env = render_env + self._env_str_len = env_str_len + self._row_name_str_len = row_name_str_len + self._time_scale = time_scale + self._colorize = colorize + self._columns: Tuple[_Column, ...] 
= () + self._num_threads = num_threads + + def register_columns(self, columns: Tuple[_Column, ...]): + self._columns = columns + + def as_column_strings(self): + concrete_results = [r for r in self._results if r is not None] + env = f"({concrete_results[0].env})" if self._render_env else "" + env = env.ljust(self._env_str_len + 4) + output = [" " + env + concrete_results[0].as_row_name] + for m, col in zip(self._results, self._columns or ()): + if m is None: + output.append(col.num_to_str(None, 1, None)) + else: + output.append(col.num_to_str( + m.median / self._time_scale, + m.significant_figures, + m.iqr / m.median if m.has_warnings else None + )) + return output + + @staticmethod + def color_segment(segment, value, best_value): + if value <= best_value * 1.01 or value <= best_value + 100e-9: + return BEST + BOLD + segment + TERMINATE * 2 + if value <= best_value * 1.1: + return GOOD + BOLD + segment + TERMINATE * 2 + if value >= best_value * 5: + return VERY_BAD + BOLD + segment + TERMINATE * 2 + if value >= best_value * 2: + return BAD + segment + TERMINATE * 2 + + return segment + + def row_separator(self, overall_width): + return ( + [f"{self._num_threads} threads: ".ljust(overall_width, "-")] + if self._num_threads is not None else [] + ) + + def finalize_column_strings(self, column_strings, col_widths): + best_values = [-1 for _ in column_strings] + if self._colorize == Colorize.ROWWISE: + row_min = min(r.median for r in self._results if r is not None) + best_values = [row_min for _ in column_strings] + elif self._colorize == Colorize.COLUMNWISE: + best_values = [ + optional_min(r.median for r in column.get_results_for(self._row_group) if r is not None) + for column in (self._columns or ()) + ] + + row_contents = [column_strings[0].ljust(col_widths[0])] + for col_str, width, result, best_value in zip(column_strings[1:], col_widths[1:], self._results, best_values): + col_str = col_str.center(width) + if self._colorize != Colorize.NONE and result is not None and best_value is not None: + col_str = self.color_segment(col_str, result.median, best_value) + row_contents.append(col_str) + return row_contents + + +class Table: + def __init__( + self, + results: List[common.Measurement], + colorize: Colorize, + trim_significant_figures: bool, + highlight_warnings: bool + ): + assert len({r.label for r in results}) == 1 + + self.results = results + self._colorize = colorize + self._trim_significant_figures = trim_significant_figures + self._highlight_warnings = highlight_warnings + self.label = results[0].label + self.time_unit, self.time_scale = common.select_unit( + min(r.median for r in results) + ) + + self.row_keys = common.ordered_unique([self.row_fn(i) for i in results]) + self.row_keys.sort(key=lambda args: args[:2]) # preserve stmt order + self.column_keys = common.ordered_unique([self.col_fn(i) for i in results]) + self.rows, self.columns = self.populate_rows_and_columns() + + @staticmethod + def row_fn(m: common.Measurement) -> Tuple[int, Optional[str], str]: + return m.num_threads, m.env, m.as_row_name + + @staticmethod + def col_fn(m: common.Measurement) -> Optional[str]: + return m.description + + def populate_rows_and_columns(self) -> Tuple[Tuple[_Row, ...], Tuple[_Column, ...]]: + rows: List[_Row] = [] + columns: List[_Column] = [] + ordered_results: List[List[Optional[common.Measurement]]] = [ + [None for _ in self.column_keys] + for _ in self.row_keys + ] + row_position = {key: i for i, key in enumerate(self.row_keys)} + col_position = {key: i for i, key in 
enumerate(self.column_keys)} + for r in self.results: + i = row_position[self.row_fn(r)] + j = col_position[self.col_fn(r)] + ordered_results[i][j] = r + + unique_envs = {r.env for r in self.results} + render_env = len(unique_envs) > 1 + env_str_len = max(len(i) for i in unique_envs) if render_env else 0 + + row_name_str_len = max(len(r.as_row_name) for r in self.results) + + prior_num_threads = -1 + prior_env = "" + row_group = -1 + rows_by_group: List[List[List[Optional[common.Measurement]]]] = [] + for (num_threads, env, _), row in zip(self.row_keys, ordered_results): + thread_transition = (num_threads != prior_num_threads) + if thread_transition: + prior_num_threads = num_threads + prior_env = "" + row_group += 1 + rows_by_group.append([]) + rows.append( + _Row( + results=row, + row_group=row_group, + render_env=(render_env and env != prior_env), + env_str_len=env_str_len, + row_name_str_len=row_name_str_len, + time_scale=self.time_scale, + colorize=self._colorize, + num_threads=num_threads if thread_transition else None, + ) + ) + rows_by_group[-1].append(row) + prior_env = env + + for i in range(len(self.column_keys)): + grouped_results = [tuple(row[i] for row in g) for g in rows_by_group] + column = _Column( + grouped_results=grouped_results, + time_scale=self.time_scale, + time_unit=self.time_unit, + trim_significant_figures=self._trim_significant_figures, + highlight_warnings=self._highlight_warnings,) + columns.append(column) + + rows_tuple, columns_tuple = tuple(rows), tuple(columns) + for ri in rows_tuple: + ri.register_columns(columns_tuple) + return rows_tuple, columns_tuple + + def render(self) -> str: + string_rows = [[""] + self.column_keys] + for r in self.rows: + string_rows.append(r.as_column_strings()) + num_cols = max(len(i) for i in string_rows) + for sr in string_rows: + sr.extend(["" for _ in range(num_cols - len(sr))]) + + col_widths = [max(len(j) for j in i) for i in zip(*string_rows)] + finalized_columns = [" | ".join(i.center(w) for i, w in zip(string_rows[0], col_widths))] + overall_width = len(finalized_columns[0]) + for string_row, row in zip(string_rows[1:], self.rows): + finalized_columns.extend(row.row_separator(overall_width)) + finalized_columns.append(" | ".join(row.finalize_column_strings(string_row, col_widths))) + + newline = "\n" + has_warnings = self._highlight_warnings and any(ri.has_warnings for ri in self.results) + return f""" +[{(' ' + (self.label or '') + ' ').center(overall_width - 2, '-')}] +{newline.join(finalized_columns)} + +Times are in {common.unit_to_english(self.time_unit)}s ({self.time_unit}). +{'(! XX%) Measurement has high variance, where XX is the IQR / median * 100.' + newline if has_warnings else ""}"""[1:] + + +class Compare: + def __init__(self, results: List[common.Measurement]): + self._results: List[common.Measurement] = [] + self.extend_results(results) + self._trim_significant_figures = False + self._colorize = Colorize.NONE + self._highlight_warnings = False + + def __str__(self): + return "\n".join(self._render()) + + def extend_results(self, results): + for r in results: + if not isinstance(r, common.Measurement): + raise ValueError( + "Expected an instance of `Measurement`, " f"got {type(r)} instead." 
+ ) + self._results.extend(results) + + def trim_significant_figures(self): + self._trim_significant_figures = True + + def colorize(self, rowwise=False): + self._colorize = Colorize.ROWWISE if rowwise else Colorize.COLUMNWISE + + def highlight_warnings(self): + self._highlight_warnings = True + + def print(self): + print(str(self)) + + def _render(self): + results = common.Measurement.merge(self._results) + grouped_results = self._group_by_label(results) + output = [] + for group in grouped_results.values(): + output.append(self._layout(group)) + return output + + def _group_by_label(self, results: List[common.Measurement]): + grouped_results: DefaultDict[str, List[common.Measurement]] = collections.defaultdict(list) + for r in results: + grouped_results[r.label].append(r) + return grouped_results + + def _layout(self, results: List[common.Measurement]): + table = Table( + results, + self._colorize, + self._trim_significant_figures, + self._highlight_warnings + ) + return table.render() diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/compile.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/compile.py new file mode 100644 index 0000000000000000000000000000000000000000..dcee32ace4031a6401e6c117fb072552af889cb7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/compile.py @@ -0,0 +1,187 @@ +import torch + +__all__ = ["bench_all", "benchmark_compile"] + +import torch._dynamo +from torch._dynamo.testing import CompileCounterWithBackend +from torch.utils.benchmark import Timer + +from typing import Optional, List, Callable, Union, Any, cast + +_warned_tensor_cores = False +_default_float_32_precision = torch.get_float32_matmul_precision() + +try: + from tabulate import tabulate + HAS_TABULATE = True +except ImportError: + HAS_TABULATE = False + print("tabulate is not installed, please pip install tabulate to use this utility") + +if HAS_TABULATE: + def _enable_tensor_cores(): + global _warned_tensor_cores + + if torch.cuda.is_available(): + if torch.backends.cuda.matmul.allow_tf32 is False and torch.cuda.get_device_capability() >= (8, 0): + torch.set_float32_matmul_precision("high") + if not _warned_tensor_cores: + print("Your GPU supports tensor cores") + print("we will enable it automatically by setting `torch.set_float32_matmul_precision('high')`") + _warned_tensor_cores = True + + def _disable_tensor_cores(): + torch.set_float32_matmul_precision(_default_float_32_precision) + + def bench_loop( + model: Union[torch.nn.Module, Callable], + sample_input: Union[torch.Tensor, Any], + num_iters: int = 5, + optimizer: Optional[torch.optim.Optimizer] = None, + loss_fn: Optional[Callable] = None, + ): + # Define the statement and setup for the benchmark + if optimizer and loss_fn: + # Training mode + stmt = """ + output = model(sample_input) + loss = loss_fn(output) if loss_fn else output.sum() + loss.backward() + optimizer.step() + optimizer.zero_grad() + """ + else: + # Inference mode + stmt = "model(sample_input)" + + # Create the Timer object + timer = Timer( + stmt=stmt, + globals={"model": model, "sample_input": sample_input, "optimizer": optimizer, "loss_fn": loss_fn}, + ) + + + result = timer.timeit(number=num_iters) + + # Get the average time per iteration in milliseconds + avg_time = result.mean * 1000 + return round(avg_time, 2) + + def benchmark_compile( + model: Union[torch.nn.Module, Callable], + sample_input: Union[torch.Tensor, Any], + num_iters: int = 5, + backend: Optional[str] = None, + mode: Optional[str] 
= "default", + optimizer: Optional[torch.optim.Optimizer] = None, + loss_fn : Union[torch.nn.Module, Callable, None] = None, + ): + """ + Use this utility to benchmark torch.compile + """ + if backend: + try: + torch._dynamo.reset() + compile_counter_with_backend = CompileCounterWithBackend(backend) + opt_model = torch.compile(model, backend=compile_counter_with_backend, mode=mode) + + # Compilation only happens after the first inference + compilation_time = bench_loop(opt_model, sample_input, 1, optimizer, loss_fn) + + running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn) + + if compile_counter_with_backend.frame_count == 0: + raise RuntimeError("No compilation occurred during benchmarking.") + + if compile_counter_with_backend.frame_count > 1: + raise RuntimeError("Recompilation occurred during benchmarking.") + + except Exception as e: + print(e) + print(f"Failed to compile {backend} with mode {mode}") + return None, None + else: + opt_model = model + compilation_time = None + running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn) + + compilation_time = round(compilation_time, 2) if compilation_time else None + running_time = round(running_time, 2) if running_time else None + + + return compilation_time, running_time + + + def bench_all( + model : Union[torch.nn.Module, Callable], + sample_input: Union[torch.Tensor, Any], + num_iters : int = 5, + optimizer: Optional[torch.optim.Optimizer] = None, + loss_fn : Union[torch.nn.Module, Callable, None] = None, + ): + """ + This is a simple utility that can be used to benchmark torch.compile + In particular it ensures that your GPU is setup to use tensor cores if it supports its + It also tries out all the main backends and prints a table of results so you can easily compare them all + Many of the backendds have their own optional dependencies so please pip install them seperately + + You will get one table for inference and another for training + If you'd like to leverage this utility for training make sure to pass in a torch.optim.Optimizer + + The important warnings are + Your GPU supports tensor cores + we will enable it automatically by setting `torch.set_float32_matmul_precision('high')` + + If a compilation fails for any reason including the dependency not being included + then we will print Failed to compile {backend} with mode {mode} + """ + field_names = ["Train/Inference", "Backend", "Mode", "Compilation Time", "Average Running Time"] + table = [] + + + eager_time = None + torch._dynamo.reset() + _, eager_time = benchmark_compile(model, sample_input, num_iters, None, None, optimizer) + table.append( + [("Training" if optimizer else "Inference"), "Eager", "-", "-", f"{eager_time} ms"] + ) + + for backend in torch._dynamo.list_backends(): + + if backend == "inductor": + mode_options = cast(List[Optional[str]], list(torch._inductor.list_mode_options().keys())) + [None] + for mode in mode_options: + if mode == "default": + continue + torch._dynamo.reset() + try: + if torch.cuda.is_available(): + _enable_tensor_cores() + compilation_time, running_time = benchmark_compile( + model, sample_input, num_iters, backend, mode, optimizer, loss_fn) + finally: + if torch.cuda.is_available(): + _disable_tensor_cores() + table.append([ + ("Training" if optimizer else "Inference"), + backend if backend else "-", + mode if mode is not None else "-", + f"{compilation_time} ms " if compilation_time else "-", + f"{running_time} ms " if running_time else "-", + ]) + + else: + torch._dynamo.reset() + 
compilation_time, running_time = benchmark_compile( + model, sample_input, num_iters, backend, None, optimizer, loss_fn) + + if running_time is not None: + table.append([ + ("Training" if optimizer else "Inference"), + backend, "-", + f"{compilation_time} ms " or "-", + f"{running_time} ms ", + ]) + + + return tabulate(table, headers=field_names, tablefmt="github") diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/cpp_jit.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/cpp_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..a09f1a00aace6f1cf1a8a2f9d3480fdc8f07d4f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/cpp_jit.py @@ -0,0 +1,172 @@ +"""JIT C++ strings into executables.""" +import atexit +import os +import re +import shutil +import textwrap +import threading +from typing import Any, List, Optional + +import torch +from torch.utils.benchmark.utils._stubs import CallgrindModuleType, TimeitModuleType +from torch.utils.benchmark.utils.common import _make_temp_dir +from torch.utils import cpp_extension + + +LOCK = threading.Lock() +SOURCE_ROOT = os.path.split(os.path.abspath(__file__))[0] + +# We calculate uuid once at import time so that separate processes will have +# separate build roots, but threads will share the same build root. +# `cpp_extension` uses build root as part of the cache key, so per-invocation +# uuid's (e.g. different build root per _compile_template call) would lead to +# a 0% cache hit rate and spurious recompilation. Consider the following: +# ``` +# setup = "auto x = torch::ones({1024, 1024});" +# stmt = "torch::mm(x, x);" +# for num_threads in [1, 2, 4, 8]: +# print(Timer(stmt, setup, num_threads=num_threads, language="c++").blocked_autorange()) +# ```` +# `setup` and `stmt` do not change, so we can reuse the executable from the +# first pass through the loop. +_BUILD_ROOT: Optional[str] = None + +def _get_build_root() -> str: + global _BUILD_ROOT + if _BUILD_ROOT is None: + _BUILD_ROOT = _make_temp_dir(prefix="benchmark_utils_jit_build") + atexit.register(shutil.rmtree, _BUILD_ROOT) + return _BUILD_ROOT + + +# BACK_TESTING_NOTE: +# There are two workflows where this code could be used. One is the obvious +# case where someone simply builds or installs PyTorch and uses Timer. +# The other is that the entire `torch/utils/benchmark` folder from a CURRENT +# PyTorch checkout is copy-pasted into a much OLDER version of the PyTorch +# source code. This is what we refer to here as "back testing". The rationale +# is that we might want to use current tooling to study some aspect of an +# earlier version of PyTorch. (e.g. a regression.) +# +# The problem is that Timer relies on several aspects of core PyTorch, namely +# some binding functions for Valgrind symbols in `torch._C` and the +# `torch.__config__._cxx_flags()` method. If we were to naively copy code +# around this wouldn't work as the symbols of interest aren't present in +# earlier versions of PyTorch. In order to work around this, we must add back +# testing shims. These shims will never activate during normal use, but will +# allow Timer to function outside of the "correct" version of PyTorch by +# emulating functionality that was added later. +# +# These shims are temporary, and as Timer becomes more integrated with +# PyTorch the cost and complexity of such shims will increase. 
Once back +# testing is no longer required (which is to say we have done enough historic +# analysis and the shims no longer justify their maintenance and code +# complexity costs) back testing paths will be removed. + +CXX_FLAGS: Optional[List[str]] +if hasattr(torch.__config__, "_cxx_flags"): + try: + CXX_FLAGS = torch.__config__._cxx_flags().strip().split() + if CXX_FLAGS is not None and "-g" not in CXX_FLAGS: + CXX_FLAGS.append("-g") + # remove "-W" flags to allow build benchmarks + # with a relaxed constraint of compiler versions + if CXX_FLAGS is not None: + CXX_FLAGS = list(filter(lambda x: not x.startswith("-W"), CXX_FLAGS)) + + except RuntimeError: + # We are in FBCode. + CXX_FLAGS = None +else: + # FIXME: Remove when back testing is no longer required. + CXX_FLAGS = ["-O2", "-fPIC", "-g"] + +EXTRA_INCLUDE_PATHS: List[str] = [os.path.join(SOURCE_ROOT, "valgrind_wrapper")] +CONDA_PREFIX = os.getenv("CONDA_PREFIX") +if CONDA_PREFIX is not None: + # Load will automatically search /usr/include, but not conda include. + EXTRA_INCLUDE_PATHS.append(os.path.join(CONDA_PREFIX, "include")) + + +COMPAT_CALLGRIND_BINDINGS: Optional[CallgrindModuleType] = None +def get_compat_bindings() -> CallgrindModuleType: + with LOCK: + global COMPAT_CALLGRIND_BINDINGS + if COMPAT_CALLGRIND_BINDINGS is None: + COMPAT_CALLGRIND_BINDINGS = cpp_extension.load( + name="callgrind_bindings", + sources=[os.path.join( + SOURCE_ROOT, + "valgrind_wrapper", + "compat_bindings.cpp" + )], + extra_cflags=CXX_FLAGS, + extra_include_paths=EXTRA_INCLUDE_PATHS, + ) + return COMPAT_CALLGRIND_BINDINGS + + +def _compile_template( + *, + stmt: str, + setup: str, + global_setup: str, + src: str, + is_standalone: bool +) -> Any: + for before, after, indentation in ( + ("// GLOBAL_SETUP_TEMPLATE_LOCATION", global_setup, 0), + ("// SETUP_TEMPLATE_LOCATION", setup, 4), + ("// STMT_TEMPLATE_LOCATION", stmt, 8) + ): + # C++ doesn't care about indentation so this code isn't load + # bearing the way it is with Python, but this makes the source + # look nicer if a human has to look at it. + src = re.sub( + before, + textwrap.indent(after, " " * indentation)[indentation:], + src + ) + + # We want to isolate different Timers. However `cpp_extension` will + # cache builds which will significantly reduce the cost of repeated + # invocations. + with LOCK: + name = f"timer_cpp_{abs(hash(src))}" + build_dir = os.path.join(_get_build_root(), name) + os.makedirs(build_dir, exist_ok=True) + + src_path = os.path.join(build_dir, "timer_src.cpp") + with open(src_path, "w") as f: + f.write(src) + + # `cpp_extension` has its own locking scheme, so we don't need our lock. 
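+    # Note: within a single process the extension name below is derived from a hash of the
+    # generated source, and the build directory is keyed by that name, so repeated Timer
+    # invocations with an identical stmt/setup/global_setup reuse the cached build rather
+    # than triggering a fresh compilation.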
+ return cpp_extension.load( + name=name, + sources=[src_path], + build_directory=build_dir, + extra_cflags=CXX_FLAGS, + extra_include_paths=EXTRA_INCLUDE_PATHS, + is_python_module=not is_standalone, + is_standalone=is_standalone, + ) + + +def compile_timeit_template(*, stmt: str, setup: str, global_setup: str) -> TimeitModuleType: + template_path: str = os.path.join(SOURCE_ROOT, "timeit_template.cpp") + with open(template_path) as f: + src: str = f.read() + + module = _compile_template(stmt=stmt, setup=setup, global_setup=global_setup, src=src, is_standalone=False) + assert isinstance(module, TimeitModuleType) + return module + + +def compile_callgrind_template(*, stmt: str, setup: str, global_setup: str) -> str: + template_path: str = os.path.join(SOURCE_ROOT, "valgrind_wrapper", "timer_callgrind_template.cpp") + with open(template_path) as f: + src: str = f.read() + + target = _compile_template(stmt=stmt, setup=setup, global_setup=global_setup, src=src, is_standalone=True) + assert isinstance(target, str) + return target diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/fuzzer.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/fuzzer.py new file mode 100644 index 0000000000000000000000000000000000000000..7d1ee8ebb8f8b596f0f6fa53b862e29a07c6d179 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/fuzzer.py @@ -0,0 +1,457 @@ +import functools +import itertools as it +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch + + +__all__ = [ + "Fuzzer", + "FuzzedParameter", "ParameterAlias", + "FuzzedTensor", +] + + +_DISTRIBUTIONS = ( + "loguniform", + "uniform", +) + + +class FuzzedParameter: + """Specification for a parameter to be generated during fuzzing.""" + def __init__( + self, + name: str, + minval: Optional[Union[int, float]] = None, + maxval: Optional[Union[int, float]] = None, + distribution: Optional[Union[str, Dict[Any, float]]] = None, + strict: bool = False, + ): + """ + Args: + name: + A string name with which to identify the parameter. + FuzzedTensors can reference this string in their + specifications. + minval: + The lower bound for the generated value. See the description + of `distribution` for type behavior. + maxval: + The upper bound for the generated value. Type behavior is + identical to `minval`. + distribution: + Specifies the distribution from which this parameter should + be drawn. There are three possibilities: + - "loguniform" + Samples between `minval` and `maxval` (inclusive) such + that the probabilities are uniform in log space. As a + concrete example, if minval=1 and maxval=100, a sample + is as likely to fall in [1, 10) as it is [10, 100]. + - "uniform" + Samples are chosen with uniform probability between + `minval` and `maxval` (inclusive). If either `minval` + or `maxval` is a float then the distribution is the + continuous uniform distribution; otherwise samples + are constrained to the integers. + - dict: + If a dict is passed, the keys are taken to be choices + for the variables and the values are interpreted as + probabilities. (And must sum to one.) + If a dict is passed, `minval` and `maxval` must not be set. + Otherwise, they must be set. + strict: + If a parameter is strict, it will not be included in the + iterative resampling process which Fuzzer uses to find a + valid parameter configuration. 
This allows an author to + prevent skew from resampling for a given parameter (for + instance, a low size limit could inadvertently bias towards + Tensors with fewer dimensions) at the cost of more iterations + when generating parameters. + """ + self._name = name + self._minval = minval + self._maxval = maxval + self._distribution = self._check_distribution(distribution) + self.strict = strict + + @property + def name(self): + return self._name + + def sample(self, state): + if self._distribution == "loguniform": + return self._loguniform(state) + + if self._distribution == "uniform": + return self._uniform(state) + + if isinstance(self._distribution, dict): + return self._custom_distribution(state) + + def _check_distribution(self, distribution): + if not isinstance(distribution, dict): + assert distribution in _DISTRIBUTIONS + else: + assert not any(i < 0 for i in distribution.values()), "Probabilities cannot be negative" + assert abs(sum(distribution.values()) - 1) <= 1e-5, "Distribution is not normalized" + assert self._minval is None + assert self._maxval is None + + return distribution + + def _loguniform(self, state): + output = int(2 ** state.uniform( + low=np.log2(self._minval) if self._minval is not None else None, + high=np.log2(self._maxval) if self._maxval is not None else None, + )) + if self._minval is not None and output < self._minval: + return self._minval + if self._maxval is not None and output > self._maxval: + return self._maxval + return output + + def _uniform(self, state): + if isinstance(self._minval, int) and isinstance(self._maxval, int): + return int(state.randint(low=self._minval, high=self._maxval + 1)) + return state.uniform(low=self._minval, high=self._maxval) + + def _custom_distribution(self, state): + # If we directly pass the keys to `choice`, numpy will convert + # them to numpy dtypes. + index = state.choice( + np.arange(len(self._distribution)), + p=tuple(self._distribution.values())) + return list(self._distribution.keys())[index] + + +class ParameterAlias: + """Indicates that a parameter should alias the value of another parameter. + + When used in conjunction with a custom distribution, this allows fuzzed + tensors to represent a broader range of behaviors. For example, the + following sometimes produces Tensors which broadcast: + + Fuzzer( + parameters=[ + FuzzedParameter("x_len", 4, 1024, distribution="uniform"), + + # `y` will either be size one, or match the size of `x`. + FuzzedParameter("y_len", distribution={ + 0.5: 1, + 0.5: ParameterAlias("x_len") + }), + ], + tensors=[ + FuzzedTensor("x", size=("x_len",)), + FuzzedTensor("y", size=("y_len",)), + ], + ) + + Chains of alias' are allowed, but may not contain cycles. + """ + def __init__(self, alias_to): + self.alias_to = alias_to + + def __repr__(self): + return f"ParameterAlias[alias_to: {self.alias_to}]" + + +def dtype_size(dtype): + if dtype == torch.bool: + return 1 + if dtype.is_floating_point or dtype.is_complex: + return int(torch.finfo(dtype).bits / 8) + return int(torch.iinfo(dtype).bits / 8) + + +def prod(values, base=1): + """np.prod can overflow, so for sizes the product should be done in Python. + + Even though np.prod type promotes to int64, it can still overflow in which + case the negative value will pass the size check and OOM when attempting to + actually allocate the Tensor. 
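+
+    For example, on a typical 64-bit NumPy build `np.prod([2**32, 2**31])` silently
+    wraps to a negative int64, while `prod([2**32, 2**31])` returns the exact value 2**63.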
+ """ + return functools.reduce(lambda x, y: int(x) * int(y), values, base) + + +class FuzzedTensor: + def __init__( + self, + name: str, + size: Tuple[Union[str, int], ...], + steps: Optional[Tuple[Union[str, int], ...]] = None, + probability_contiguous: float = 0.5, + min_elements: Optional[int] = None, + max_elements: Optional[int] = None, + max_allocation_bytes: Optional[int] = None, + dim_parameter: Optional[str] = None, + roll_parameter: Optional[str] = None, + dtype=torch.float32, + cuda=False, + tensor_constructor: Optional[Callable] = None + ): + """ + Args: + name: + A string identifier for the generated Tensor. + size: + A tuple of integers or strings specifying the size of the generated + Tensor. String values will replaced with a concrete int during the + generation process, while ints are simply passed as literals. + steps: + An optional tuple with the same length as `size`. This indicates + that a larger Tensor should be allocated, and then sliced to + produce the generated Tensor. For instance, if size is (4, 8) + and steps is (1, 4), then a tensor `t` of size (4, 32) will be + created and then `t[:, ::4]` will be used. (Allowing one to test + Tensors with strided memory.) + probability_contiguous: + A number between zero and one representing the chance that the + generated Tensor has a contiguous memory layout. This is achieved by + randomly permuting the shape of a Tensor, calling `.contiguous()`, + and then permuting back. This is applied before `steps`, which can + also cause a Tensor to be non-contiguous. + min_elements: + The minimum number of parameters that this Tensor must have for a + set of parameters to be valid. (Otherwise they are resampled.) + max_elements: + Like `min_elements`, but setting an upper bound. + max_allocation_bytes: + Like `max_elements`, but for the size of Tensor that must be + allocated prior to slicing for `steps` (if applicable). For + example, a FloatTensor with size (1024, 1024) and steps (4, 4) + would have 1M elements, but would require a 64 MB allocation. + dim_parameter: + The length of `size` and `steps` will be truncated to this value. + This allows Tensors of varying dimensions to be generated by the + Fuzzer. + dtype: + The PyTorch dtype of the generated Tensor. + cuda: + Whether to place the Tensor on a GPU. + tensor_constructor: + Callable which will be used instead of the default Tensor + construction method. This allows the author to enforce properties + of the Tensor (e.g. it can only have certain values). The dtype and + concrete shape of the Tensor to be created will be passed, and + concrete values of all parameters will be passed as kwargs. Note + that transformations to the result (permuting, slicing) will be + performed by the Fuzzer; the tensor_constructor is only responsible + for creating an appropriately sized Tensor. 
+ """ + self._name = name + self._size = size + self._steps = steps + self._probability_contiguous = probability_contiguous + self._min_elements = min_elements + self._max_elements = max_elements + self._max_allocation_bytes = max_allocation_bytes + self._dim_parameter = dim_parameter + self._dtype = dtype + self._cuda = cuda + self._tensor_constructor = tensor_constructor + + @property + def name(self): + return self._name + + @staticmethod + def default_tensor_constructor(size, dtype, **kwargs): + if dtype.is_floating_point or dtype.is_complex: + return torch.rand(size=size, dtype=dtype, device="cpu") + else: + return torch.randint(1, 127, size=size, dtype=dtype, device="cpu") + + def _make_tensor(self, params, state): + size, steps, allocation_size = self._get_size_and_steps(params) + constructor = ( + self._tensor_constructor or + self.default_tensor_constructor + ) + + raw_tensor = constructor(size=allocation_size, dtype=self._dtype, **params) + if self._cuda: + raw_tensor = raw_tensor.cuda() + + # Randomly permute the Tensor and call `.contiguous()` to force re-ordering + # of the memory, and then permute it back to the original shape. + dim = len(size) + order = np.arange(dim) + if state.rand() > self._probability_contiguous: + while dim > 1 and np.all(order == np.arange(dim)): + order = state.permutation(raw_tensor.dim()) + + raw_tensor = raw_tensor.permute(tuple(order)).contiguous() + raw_tensor = raw_tensor.permute(tuple(np.argsort(order))) + + slices = [slice(0, size * step, step) for size, step in zip(size, steps)] + tensor = raw_tensor[slices] + + properties = { + "numel": int(tensor.numel()), + "order": order, + "steps": steps, + "is_contiguous": tensor.is_contiguous(), + "dtype": str(self._dtype), + } + + return tensor, properties + + def _get_size_and_steps(self, params): + dim = ( + params[self._dim_parameter] + if self._dim_parameter is not None + else len(self._size) + ) + + def resolve(values, dim): + """Resolve values into concrete integers.""" + values = tuple(params.get(i, i) for i in values) + if len(values) > dim: + values = values[:dim] + if len(values) < dim: + values = values + tuple(1 for _ in range(dim - len(values))) + return values + + size = resolve(self._size, dim) + steps = resolve(self._steps or (), dim) + allocation_size = tuple(size_i * step_i for size_i, step_i in zip(size, steps)) + return size, steps, allocation_size + + def satisfies_constraints(self, params): + size, _, allocation_size = self._get_size_and_steps(params) + # Product is computed in Python to avoid integer overflow. + num_elements = prod(size) + assert num_elements >= 0 + + allocation_bytes = prod(allocation_size, base=dtype_size(self._dtype)) + + def nullable_greater(left, right): + if left is None or right is None: + return False + return left > right + + return not any(( + nullable_greater(num_elements, self._max_elements), + nullable_greater(self._min_elements, num_elements), + nullable_greater(allocation_bytes, self._max_allocation_bytes), + )) + + +class Fuzzer: + def __init__( + self, + parameters: List[Union[FuzzedParameter, List[FuzzedParameter]]], + tensors: List[Union[FuzzedTensor, List[FuzzedTensor]]], + constraints: Optional[List[Callable]] = None, + seed: Optional[int] = None + ): + """ + Args: + parameters: + List of FuzzedParameters which provide specifications + for generated parameters. Iterable elements will be + unpacked, though arbitrary nested structures will not. 
+ tensors: + List of FuzzedTensors which define the Tensors which + will be created each step based on the parameters for + that step. Iterable elements will be unpacked, though + arbitrary nested structures will not. + constraints: + List of callables. They will be called with params + as kwargs, and if any of them return False the current + set of parameters will be rejected. + seed: + Seed for the RandomState used by the Fuzzer. This will + also be used to set the PyTorch random seed so that random + ops will create reproducible Tensors. + """ + if seed is None: + seed = np.random.RandomState().randint(0, 2 ** 32 - 1, dtype=np.int64) + self._seed = seed + self._parameters = Fuzzer._unpack(parameters, FuzzedParameter) + self._tensors = Fuzzer._unpack(tensors, FuzzedTensor) + self._constraints = constraints or () + + p_names = {p.name for p in self._parameters} + t_names = {t.name for t in self._tensors} + name_overlap = p_names.intersection(t_names) + if name_overlap: + raise ValueError(f"Duplicate names in parameters and tensors: {name_overlap}") + + self._rejections = 0 + self._total_generated = 0 + + @staticmethod + def _unpack(values, cls): + return tuple(it.chain( + *[[i] if isinstance(i, cls) else i for i in values] + )) + + def take(self, n): + state = np.random.RandomState(self._seed) + torch.manual_seed(state.randint(low=0, high=2 ** 63, dtype=np.int64)) + for _ in range(n): + params = self._generate(state) + tensors = {} + tensor_properties = {} + for t in self._tensors: + tensor, properties = t._make_tensor(params, state) + tensors[t.name] = tensor + tensor_properties[t.name] = properties + yield tensors, tensor_properties, params + + @property + def rejection_rate(self): + if not self._total_generated: + return 0. + return self._rejections / self._total_generated + + def _generate(self, state): + strict_params: Dict[str, Union[float, int, ParameterAlias]] = {} + for _ in range(1000): + candidate_params: Dict[str, Union[float, int, ParameterAlias]] = {} + for p in self._parameters: + if p.strict: + if p.name in strict_params: + candidate_params[p.name] = strict_params[p.name] + else: + candidate_params[p.name] = p.sample(state) + strict_params[p.name] = candidate_params[p.name] + else: + candidate_params[p.name] = p.sample(state) + + candidate_params = self._resolve_aliases(candidate_params) + + self._total_generated += 1 + if not all(f(candidate_params) for f in self._constraints): + self._rejections += 1 + continue + + if not all(t.satisfies_constraints(candidate_params) for t in self._tensors): + self._rejections += 1 + continue + + return candidate_params + raise ValueError("Failed to generate a set of valid parameters.") + + @staticmethod + def _resolve_aliases(params): + params = dict(params) + alias_count = sum(isinstance(v, ParameterAlias) for v in params.values()) + + keys = list(params.keys()) + while alias_count: + for k in keys: + v = params[k] + if isinstance(v, ParameterAlias): + params[k] = params[v.alias_to] + alias_count_new = sum(isinstance(v, ParameterAlias) for v in params.values()) + if alias_count == alias_count_new: + raise ValueError(f"ParameterAlias cycle detected\n{params}") + + alias_count = alias_count_new + + return params diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/sparse_fuzzer.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/sparse_fuzzer.py new file mode 100644 index 0000000000000000000000000000000000000000..eac6a6baf910d3dfb671e6015cff6059743be65b --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/sparse_fuzzer.py @@ -0,0 +1,120 @@ +from typing import Optional, Tuple, Union +from numbers import Number +import torch +from torch.utils.benchmark import FuzzedTensor +import math + +class FuzzedSparseTensor(FuzzedTensor): + def __init__( + self, + name: str, + size: Tuple[Union[str, int], ...], + min_elements: Optional[int] = None, + max_elements: Optional[int] = None, + dim_parameter: Optional[str] = None, + sparse_dim: Optional[str] = None, + nnz: Optional[str] = None, + density: Optional[str] = None, + coalesced: Optional[str] = None, + dtype=torch.float32, + cuda=False + ): + """ + Args: + name: + A string identifier for the generated Tensor. + size: + A tuple of integers or strings specifying the size of the generated + Tensor. String values will replaced with a concrete int during the + generation process, while ints are simply passed as literals. + min_elements: + The minimum number of parameters that this Tensor must have for a + set of parameters to be valid. (Otherwise they are resampled.) + max_elements: + Like `min_elements`, but setting an upper bound. + dim_parameter: + The length of `size` will be truncated to this value. + This allows Tensors of varying dimensions to be generated by the + Fuzzer. + sparse_dim: + The number of sparse dimensions in a sparse tensor. + density: + This value allows tensors of varying sparsities to be generated by the Fuzzer. + coalesced: + The sparse tensor format permits uncoalesced sparse tensors, + where there may be duplicate coordinates in the indices. + dtype: + The PyTorch dtype of the generated Tensor. + cuda: + Whether to place the Tensor on a GPU. + """ + super().__init__(name=name, size=size, min_elements=min_elements, + max_elements=max_elements, dim_parameter=dim_parameter, dtype=dtype, cuda=cuda) + self._density = density + self._coalesced = coalesced + self._sparse_dim = sparse_dim + + @staticmethod + def sparse_tensor_constructor(size, dtype, sparse_dim, nnz, is_coalesced): + """sparse_tensor_constructor creates a sparse tensor with coo format. + + Note that when `is_coalesced` is False, the number of elements is doubled but the number of indices + represents the same amount of number of non zeros `nnz`, i.e, this is virtually the same tensor + with the same sparsity pattern. Moreover, most of the sparse operation will use coalesce() method + and what we want here is to get a sparse tensor with the same `nnz` even if this is coalesced or not. + + In the other hand when `is_coalesced` is True the number of elements is reduced in the coalescing process + by an unclear amount however the probability to generate duplicates indices are low for most of the cases. + This decision was taken on purpose to maintain the construction cost as low as possible. 
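+
+        For illustration (the sizes below are arbitrary), the following builds an
+        uncoalesced 2-D sparse tensor whose 8 stored entries describe 4 requested
+        non-zero coordinates::
+
+            x = FuzzedSparseTensor.sparse_tensor_constructor(
+                size=(16, 16), dtype=torch.float32, sparse_dim=2, nnz=4, is_coalesced=False)
+            assert x._nnz() == 8  # duplicated values/indices; collapses back to (at most) 4 after coalesce()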
+ """ + if isinstance(size, Number): + size = [size] * sparse_dim + assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments' + v_size = [nnz] + list(size[sparse_dim:]) + if dtype.is_floating_point: + v = torch.rand(size=v_size, dtype=dtype, device="cpu") + else: + v = torch.randint(1, 127, size=v_size, dtype=dtype, device="cpu") + + i = torch.rand(sparse_dim, nnz, device="cpu") + i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i)) + i = i.to(torch.long) + + if not is_coalesced: + v = torch.cat([v, torch.randn_like(v)], 0) + i = torch.cat([i, i], 1) + + x = torch.sparse_coo_tensor(i, v, torch.Size(size)) + if is_coalesced: + x = x.coalesce() + return x + + def _make_tensor(self, params, state): + size, _, _ = self._get_size_and_steps(params) + density = params['density'] + nnz = math.ceil(sum(size) * density) + assert nnz <= sum(size) + + is_coalesced = params['coalesced'] + sparse_dim = params['sparse_dim'] if self._sparse_dim else len(size) + sparse_dim = min(sparse_dim, len(size)) + tensor = self.sparse_tensor_constructor(size, self._dtype, sparse_dim, nnz, is_coalesced) + + if self._cuda: + tensor = tensor.cuda() + sparse_dim = tensor.sparse_dim() + dense_dim = tensor.dense_dim() + is_hybrid = len(size[sparse_dim:]) > 0 + + properties = { + "numel": int(tensor.numel()), + "shape": tensor.size(), + "is_coalesced": tensor.is_coalesced(), + "density": density, + "sparsity": 1.0 - density, + "sparse_dim": sparse_dim, + "dense_dim": dense_dim, + "is_hybrid": is_hybrid, + "dtype": str(self._dtype), + } + return tensor, properties diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/timeit_template.cpp b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/timeit_template.cpp new file mode 100644 index 0000000000000000000000000000000000000000..30b6f79c0b5aebca676035ac0b7c08cfce639b23 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/timeit_template.cpp @@ -0,0 +1,43 @@ +/* C++ template for Timer.timeit + +This template will be consumed by `cpp_jit.py`, and will replace: + `GLOBAL_SETUP_TEMPLATE_LOCATION`, + `SETUP_TEMPLATE_LOCATION` + and + `STMT_TEMPLATE_LOCATION` +sections with user provided statements. +*/ +#include + +#include +#include +#include +#include + +// Global setup. (e.g. 
#includes) +// GLOBAL_SETUP_TEMPLATE_LOCATION + +double timeit(int n) { + pybind11::gil_scoped_release no_gil; + + // Setup + // SETUP_TEMPLATE_LOCATION + + { + // Warmup + // STMT_TEMPLATE_LOCATION + } + + // Main loop + auto start_time = std::chrono::high_resolution_clock::now(); + for (const auto loop_idx : c10::irange(n)) { + (void)loop_idx; + // STMT_TEMPLATE_LOCATION + } + auto end_time = std::chrono::high_resolution_clock::now(); + return std::chrono::duration(end_time - start_time).count(); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("timeit", &timeit); +} diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/timer.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/timer.py new file mode 100644 index 0000000000000000000000000000000000000000..f860d36ce0d0be435dea1da8c6886606d181b7fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/timer.py @@ -0,0 +1,537 @@ +"""Timer class based on the timeit.Timer class, but torch aware.""" +import enum +import timeit +import textwrap +from typing import overload, Any, Callable, Dict, List, NoReturn, Optional, Tuple, Type, Union + +import torch +from torch.utils.benchmark.utils import common, cpp_jit +from torch.utils.benchmark.utils._stubs import TimerClass, TimeitModuleType +from torch.utils.benchmark.utils.valgrind_wrapper import timer_interface as valgrind_timer_interface + + +__all__ = ["Timer", "timer", "Language"] + + +if torch.backends.cuda.is_built() and torch.cuda.is_available(): # type: ignore[no-untyped-call] + def timer() -> float: + torch.cuda.synchronize() + return timeit.default_timer() +elif torch._C._get_privateuse1_backend_name() != "privateuseone": + privateuse1_device_handler = getattr(torch, torch._C._get_privateuse1_backend_name(), None) \ + if torch._C._get_privateuse1_backend_name() != "cpu" else None + + def timer() -> float: + if privateuse1_device_handler: + privateuse1_device_handler.synchronize() + return timeit.default_timer() +else: + timer = timeit.default_timer + + +class Language(enum.Enum): + PYTHON = 0 + CPP = 1 + + +class CPPTimer: + def __init__( + self, + stmt: str, + setup: str, + global_setup: str, + timer: Callable[[], float], + globals: Dict[str, Any], + ) -> None: + if timer is not timeit.default_timer: + raise NotImplementedError( + "PyTorch was built with CUDA and a GPU is present; however " + "Timer does not yet support GPU measurements. If your " + "code is CPU only, pass `timer=timeit.default_timer` to the " + "Timer's constructor to indicate this. (Note that this will " + "produce incorrect results if the GPU is in fact used, as " + "Timer will not synchronize CUDA.)" + ) + + if globals: + raise ValueError("C++ timing does not support globals.") + + self._stmt: str = textwrap.dedent(stmt) + self._setup: str = textwrap.dedent(setup) + self._global_setup: str = textwrap.dedent(global_setup) + self._timeit_module: Optional[TimeitModuleType] = None + + def timeit(self, number: int) -> float: + if self._timeit_module is None: + self._timeit_module = cpp_jit.compile_timeit_template( + stmt=self._stmt, + setup=self._setup, + global_setup=self._global_setup, + ) + + return self._timeit_module.timeit(number) + + +class Timer: + """Helper class for measuring execution time of PyTorch statements. 
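+
+    A minimal usage sketch (the statement, setup, and sizes are arbitrary examples)::
+
+        from torch.utils.benchmark import Timer
+
+        t = Timer(
+            stmt="torch.mm(x, x)",
+            setup="x = torch.ones((64, 64))",
+        )
+        print(t.blocked_autorange())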
+ + For a full tutorial on how to use this class, see: + https://pytorch.org/tutorials/recipes/recipes/benchmark.html + + The PyTorch Timer is based on `timeit.Timer` (and in fact uses + `timeit.Timer` internally), but with several key differences: + + 1) Runtime aware: + Timer will perform warmups (important as some elements of PyTorch are + lazily initialized), set threadpool size so that comparisons are + apples-to-apples, and synchronize asynchronous CUDA functions when + necessary. + + 2) Focus on replicates: + When measuring code, and particularly complex kernels / models, + run-to-run variation is a significant confounding factor. It is + expected that all measurements should include replicates to quantify + noise and allow median computation, which is more robust than mean. + To that effect, this class deviates from the `timeit` API by + conceptually merging `timeit.Timer.repeat` and `timeit.Timer.autorange`. + (Exact algorithms are discussed in method docstrings.) The `timeit` + method is replicated for cases where an adaptive strategy is not + desired. + + 3) Optional metadata: + When defining a Timer, one can optionally specify `label`, `sub_label`, + `description`, and `env`. (Defined later) These fields are included in + the representation of result object and by the `Compare` class to group + and display results for comparison. + + 4) Instruction counts + In addition to wall times, Timer can run a statement under Callgrind + and report instructions executed. + + Directly analogous to `timeit.Timer` constructor arguments: + + `stmt`, `setup`, `timer`, `globals` + + PyTorch Timer specific constructor arguments: + + `label`, `sub_label`, `description`, `env`, `num_threads` + + Args: + stmt: Code snippet to be run in a loop and timed. + + setup: Optional setup code. Used to define variables used in `stmt` + + global_setup: (C++ only) + Code which is placed at the top level of the file for things like + `#include` statements. + + timer: + Callable which returns the current time. If PyTorch was built + without CUDA or there is no GPU present, this defaults to + `timeit.default_timer`; otherwise it will synchronize CUDA before + measuring the time. + + globals: + A dict which defines the global variables when `stmt` is being + executed. This is the other method for providing variables which + `stmt` needs. + + label: + String which summarizes `stmt`. For instance, if `stmt` is + "torch.nn.functional.relu(torch.add(x, 1, out=out))" + one might set label to "ReLU(x + 1)" to improve readability. + + sub_label: + Provide supplemental information to disambiguate measurements + with identical stmt or label. For instance, in our example + above sub_label might be "float" or "int", so that it is easy + to differentiate: + "ReLU(x + 1): (float)" + + "ReLU(x + 1): (int)" + when printing Measurements or summarizing using `Compare`. + + description: + String to distinguish measurements with identical label and + sub_label. The principal use of `description` is to signal to + `Compare` the columns of data. For instance one might set it + based on the input size to create a table of the form: :: + + | n=1 | n=4 | ... + ------------- ... + ReLU(x + 1): (float) | ... | ... | ... + ReLU(x + 1): (int) | ... | ... | ... + + + using `Compare`. It is also included when printing a Measurement. + + env: + This tag indicates that otherwise identical tasks were run in + different environments, and are therefore not equivalent, for + instance when A/B testing a change to a kernel. 
`Compare` will + treat Measurements with different `env` specification as distinct + when merging replicate runs. + + num_threads: + The size of the PyTorch threadpool when executing `stmt`. Single + threaded performance is important as both a key inference workload + and a good indicator of intrinsic algorithmic efficiency, so the + default is set to one. This is in contrast to the default PyTorch + threadpool size which tries to utilize all cores. + """ + + _timer_cls: Type[TimerClass] = timeit.Timer + + def __init__( + self, + stmt: str = "pass", + setup: str = "pass", + global_setup: str = "", + timer: Callable[[], float] = timer, + globals: Optional[Dict[str, Any]] = None, + label: Optional[str] = None, + sub_label: Optional[str] = None, + description: Optional[str] = None, + env: Optional[str] = None, + num_threads: int = 1, + language: Union[Language, str] = Language.PYTHON, + ): + if not isinstance(stmt, str): + raise ValueError("Currently only a `str` stmt is supported.") + + # We copy `globals` to prevent mutations from leaking. + # (For instance, `eval` adds the `__builtins__` key) + self._globals = dict(globals or {}) + + timer_kwargs = {} + if language in (Language.PYTHON, "py", "python"): + # Include `torch` if not specified as a convenience feature. + self._globals.setdefault("torch", torch) + self._language: Language = Language.PYTHON + if global_setup: + raise ValueError( + f"global_setup is C++ only, got `{global_setup}`. Most " + "likely this code can simply be moved to `setup`." + ) + + elif language in (Language.CPP, "cpp", "c++"): + assert self._timer_cls is timeit.Timer, "_timer_cls has already been swapped." + self._timer_cls = CPPTimer + setup = ("" if setup == "pass" else setup) + self._language = Language.CPP + timer_kwargs["global_setup"] = global_setup + + else: + raise ValueError(f"Invalid language `{language}`.") + + # Convenience adjustment so that multi-line code snippets defined in + # functions do not IndentationError (Python) or look odd (C++). The + # leading newline removal is for the initial newline that appears when + # defining block strings. For instance: + # textwrap.dedent(""" + # print("This is a stmt") + # """) + # produces '\nprint("This is a stmt")\n'. + # + # Stripping this down to 'print("This is a stmt")' doesn't change + # what gets executed, but it makes __repr__'s nicer. + stmt = textwrap.dedent(stmt) + stmt = (stmt[1:] if stmt and stmt[0] == "\n" else stmt).rstrip() + setup = textwrap.dedent(setup) + setup = (setup[1:] if setup and setup[0] == "\n" else setup).rstrip() + + self._timer = self._timer_cls( + stmt=stmt, + setup=setup, + timer=timer, + globals=valgrind_timer_interface.CopyIfCallgrind.unwrap_all(self._globals), + **timer_kwargs, + ) + self._task_spec = common.TaskSpec( + stmt=stmt, + setup=setup, + global_setup=global_setup, + label=label, + sub_label=sub_label, + description=description, + env=env, + num_threads=num_threads, + ) + + def _timeit(self, number: int) -> float: + # Even calling a timer in C++ takes ~50 ns, so no real operation should + # take less than 1 ns. (And this prevents divide by zero errors.) + return max(self._timer.timeit(number), 1e-9) + + def timeit(self, number: int = 1000000) -> common.Measurement: + """Mirrors the semantics of timeit.Timer.timeit(). + + Execute the main statement (`stmt`) `number` times. 
+ https://docs.python.org/3/library/timeit.html#timeit.Timer.timeit + """ + with common.set_torch_threads(self._task_spec.num_threads): + # Warmup + self._timeit(number=max(int(number // 100), 2)) + + return common.Measurement( + number_per_run=number, + raw_times=[self._timeit(number=number)], + task_spec=self._task_spec + ) + + def repeat(self, repeat: int = -1, number: int = -1) -> None: + raise NotImplementedError("See `Timer.blocked_autorange.`") + + def autorange(self, callback: Optional[Callable[[int, float], NoReturn]] = None) -> None: + raise NotImplementedError("See `Timer.blocked_autorange.`") + + def _threaded_measurement_loop( + self, + number: int, + time_hook: Callable[[], float], + stop_hook: Callable[[List[float]], bool], + min_run_time: float, + max_run_time: Optional[float] = None, + callback: Optional[Callable[[int, float], NoReturn]] = None + ) -> List[float]: + total_time = 0.0 + can_stop = False + times: List[float] = [] + with common.set_torch_threads(self._task_spec.num_threads): + while (total_time < min_run_time) or (not can_stop): + time_spent = time_hook() + times.append(time_spent) + total_time += time_spent + if callback: + callback(number, time_spent) + can_stop = stop_hook(times) + if max_run_time and total_time > max_run_time: + break + return times + + def _estimate_block_size(self, min_run_time: float) -> int: + with common.set_torch_threads(self._task_spec.num_threads): + # Estimate the block size needed for measurement to be negligible + # compared to the inner loop. This also serves as a warmup. + overhead = torch.tensor([self._timeit(0) for _ in range(5)]).median().item() + number = 1 + while True: + time_taken = self._timeit(number) + relative_overhead = overhead / time_taken + if relative_overhead <= 1e-4 and time_taken >= min_run_time / 1000: + break + if time_taken > min_run_time: + break + # Avoid overflow in C++ pybind11 interface + if number * 10 > 2147483647: + break + number *= 10 + return number + + def blocked_autorange( + self, + callback: Optional[Callable[[int, float], NoReturn]] = None, + min_run_time: float = 0.2, + ) -> common.Measurement: + """Measure many replicates while keeping timer overhead to a minimum. + + At a high level, blocked_autorange executes the following pseudo-code:: + + `setup` + + total_time = 0 + while total_time < min_run_time + start = timer() + for _ in range(block_size): + `stmt` + total_time += (timer() - start) + + Note the variable `block_size` in the inner loop. The choice of block + size is important to measurement quality, and must balance two + competing objectives: + + 1) A small block size results in more replicates and generally + better statistics. + + 2) A large block size better amortizes the cost of `timer` + invocation, and results in a less biased measurement. This is + important because CUDA synchronization time is non-trivial + (order single to low double digit microseconds) and would + otherwise bias the measurement. + + blocked_autorange sets block_size by running a warmup period, + increasing block size until timer overhead is less than 0.1% of + the overall computation. This value is then used for the main + measurement loop. + + Returns: + A `Measurement` object that contains measured runtimes and + repetition counts, and can be used to compute statistics. + (mean, median, etc.) 
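+
+        For instance (an illustrative sketch; the statement and sizes are arbitrary)::
+
+            m = Timer(
+                "x * y", setup="x = torch.ones((1024,)); y = torch.zeros((1024,))"
+            ).blocked_autorange(min_run_time=0.5)
+            print(m.median, m.iqr)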
+ """ + number = self._estimate_block_size(min_run_time) + + def time_hook() -> float: + return self._timeit(number) + + def stop_hook(times: List[float]) -> bool: + return True + + times = self._threaded_measurement_loop( + number, time_hook, stop_hook, + min_run_time=min_run_time, + callback=callback) + + return common.Measurement( + number_per_run=number, + raw_times=times, + task_spec=self._task_spec + ) + + def adaptive_autorange( + self, + threshold: float = 0.1, + *, + min_run_time: float = 0.01, + max_run_time: float = 10.0, + callback: Optional[Callable[[int, float], NoReturn]] = None, + ) -> common.Measurement: + """Similar to `blocked_autorange` but also checks for variablility in measurements + and repeats until iqr/median is smaller than `threshold` or `max_run_time` is reached. + + + At a high level, adaptive_autorange executes the following pseudo-code:: + + `setup` + + times = [] + while times.sum < max_run_time + start = timer() + for _ in range(block_size): + `stmt` + times.append(timer() - start) + + enough_data = len(times)>3 and times.sum > min_run_time + small_iqr=times.iqr/times.mean float: + return self._timeit(number) + + def stop_hook(times: List[float]) -> bool: + if len(times) > 3: + return common.Measurement( + number_per_run=number, + raw_times=times, + task_spec=self._task_spec + ).meets_confidence(threshold=threshold) + return False + times = self._threaded_measurement_loop( + number, time_hook, stop_hook, min_run_time, max_run_time, callback=callback) + + return common.Measurement( + number_per_run=number, + raw_times=times, + task_spec=self._task_spec + ) + + @overload + def collect_callgrind( + self, + number: int, + *, + repeats: None, + collect_baseline: bool, + retain_out_file: bool, + ) -> valgrind_timer_interface.CallgrindStats: + ... + + @overload + def collect_callgrind( + self, + number: int, + *, + repeats: int, + collect_baseline: bool, + retain_out_file: bool, + ) -> Tuple[valgrind_timer_interface.CallgrindStats, ...]: + ... + + def collect_callgrind( + self, + number: int = 100, + *, + repeats: Optional[int] = None, + collect_baseline: bool = True, + retain_out_file: bool = False, + ) -> Any: + """Collect instruction counts using Callgrind. + + Unlike wall times, instruction counts are deterministic + (modulo non-determinism in the program itself and small amounts of + jitter from the Python interpreter.) This makes them ideal for detailed + performance analysis. This method runs `stmt` in a separate process + so that Valgrind can instrument the program. Performance is severely + degraded due to the instrumentation, however this is ameliorated by + the fact that a small number of iterations is generally sufficient to + obtain good measurements. + + In order to to use this method `valgrind`, `callgrind_control`, and + `callgrind_annotate` must be installed. + + Because there is a process boundary between the caller (this process) + and the `stmt` execution, `globals` cannot contain arbitrary in-memory + data structures. (Unlike timing methods) Instead, globals are + restricted to builtins, `nn.Modules`'s, and TorchScripted functions/modules + to reduce the surprise factor from serialization and subsequent + deserialization. The `GlobalsBridge` class provides more detail on this + subject. Take particular care with nn.Modules: they rely on pickle and + you may need to add an import to `setup` for them to transfer properly. 
+ + By default, a profile for an empty statement will be collected and + cached to indicate how many instructions are from the Python loop which + drives `stmt`. + + Returns: + A `CallgrindStats` object which provides instruction counts and + some basic facilities for analyzing and manipulating results. + """ + if not isinstance(self._task_spec.stmt, str): + raise ValueError("`collect_callgrind` currently only supports string `stmt`") + + if repeats is not None and repeats < 1: + raise ValueError("If specified, `repeats` must be >= 1") + + # Check that the statement is valid. It doesn't guarantee success, but it's much + # simpler and quicker to raise an exception for a faulty `stmt` or `setup` in + # the parent process rather than the valgrind subprocess. + self._timeit(1) + is_python = (self._language == Language.PYTHON) + assert is_python or not self._globals + result = valgrind_timer_interface.wrapper_singleton().collect_callgrind( + task_spec=self._task_spec, + globals=self._globals, + number=number, + repeats=repeats or 1, + collect_baseline=collect_baseline and is_python, + is_python=is_python, + retain_out_file=retain_out_file, + ) + + return (result[0] if repeats is None else result) diff --git a/venv/lib/python3.10/site-packages/torch/utils/bundled_inputs.py b/venv/lib/python3.10/site-packages/torch/utils/bundled_inputs.py new file mode 100644 index 0000000000000000000000000000000000000000..2a95c7828843cde842e59929edcdb602bf85ccfc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/bundled_inputs.py @@ -0,0 +1,471 @@ +#!/usr/bin/env python3 +from typing import Any, TypeVar, Optional, Tuple, List, NamedTuple, Union, Sequence, Dict, Callable +import textwrap +import torch +from torch._C import TupleType, ListType +from torch.jit._recursive import wrap_cpp_module + + +T = TypeVar("T") + +MAX_RAW_TENSOR_SIZE = 16 + +class InflatableArg(NamedTuple): + """Helper type for bundled inputs. + + 'value' is the compressed/deflated input that is stored in the model. Value + must be of the same type as the argument to the function that it is a deflated + input for. + + 'fmt' is a formatable code string that is executed to inflate the compressed data into + the appropriate input. It can use 'value' as an input to the format str. It must result + in a value of the same type as 'value'. + + 'fmt_fn' is a formatable function code string that is executed to inflate the compressed + data into the appropriate input. It must result in a value of the same type as 'value'. + The function name should be the formatable part of the string. + + Note: Only top level InflatableArgs can be inflated. i.e. you cannot place + an inflatable arg inside of some other structure. You should instead create + an inflatable arg such that the fmt code string returns the full structure + of your input. + """ + + value: Any + fmt: str = "{}" + fmt_fn: str = "" + + +def bundle_inputs( + model: torch.jit.ScriptModule, + inputs: Union[Optional[Sequence[Tuple[Any, ...]]], Dict[Callable, Optional[Sequence[Tuple[Any, ...]]]]], + info: Optional[Union[List[str], Dict[Callable, List[str]]]] = None, + *, + _receive_inflate_expr: Optional[List[str]] = None, +) -> torch.jit.ScriptModule: + """Create and return a copy of the specified model with inputs attached. + + The original model is not mutated or changed in any way. + + Models with bundled inputs can be invoked in a uniform manner by + benchmarking and code coverage tools. + + If inputs is passed in as a list then the inputs will be bundled for 'forward'. 
+ If inputs is instead passed in as a map then all the methods specified in the map + will have their corresponding inputs bundled. Info should match watchever type is + chosen for the inputs. + + The returned model will support the following methods: + + `get_all_bundled_inputs_for_() -> List[Tuple[Any, ...]]` + Returns a list of tuples suitable for passing to the model like + `for inp in model.get_all_bundled_inputs_for_foo(): model.foo(*inp)` + + `get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]` + Returns a dictionary mapping function names to a metadata dictionary. + This nested dictionary maps preset strings like: + 'get_inputs_function_name' -> the name of a function attribute in this model that can be + run to get back a list of inputs corresponding to that function. + 'info' -> the user provided extra information about the bundled inputs + + If forward has bundled inputs then these following functions will also be defined on the returned module: + + `get_all_bundled_inputs() -> List[Tuple[Any, ...]]` + Returns a list of tuples suitable for passing to the model like + `for inp in model.get_all_bundled_inputs(): model(*inp)` + + `get_num_bundled_inputs() -> int` + Equivalent to `len(model.get_all_bundled_inputs())`, + but slightly easier to call from C++. + + Inputs can be specified in one of two ways: + + - The model can define `_generate_bundled_inputs_for_`. + If the user chooses this method inputs[] should map to None + + - The `inputs` argument to this function can be a dictionary mapping functions to a + list of inputs, of the same form that will be returned by get_all_bundled_inputs_for_. + Alternatively if only bundling inputs for forward the map can be omitted and a singular list of inputs + can be provided instead. + + The type of the inputs is List[Tuple[Any, ...]]. The outer list corresponds with a + list of inputs, the inner tuple is the list of args that together make up one input. + For inputs of functions that take one arg, this will be a tuple of length one. The Any, ... + is the actual data that makes up the args, e.g. a tensor. + + Info is an optional parameter that maps functions to a list of strings providing extra information about that + function's bundled inputs. Alternatively if only bundling inputs for forward the map can be omitted and + a singular list of information can be provided instead. This could be descriptions, expected outputs, etc. + - Ex: info={model.forward : ['man eating icecream', 'an airplane', 'a dog']} + + This function will attempt to optimize arguments so that (e.g.) + arguments like `torch.zeros(1000)` will be represented compactly. + Only top-level arguments will be optimized. + Tensors in lists or tuples will not. + """ + if not isinstance(model, torch.jit.ScriptModule): + raise Exception("Only ScriptModule is supported.") + + ignored_methods, ignored_attrs = _get_bundled_inputs_attributes_and_methods(model) + clone = torch._C._hack_do_not_use_clone_module_with_class( # type: ignore[attr-defined] + model._c, + ignored_methods, + ignored_attrs, + ) + + # The above cloning function returns a torch._C.scriptmodule and we need a torch.jit.scriptmodule. + # Fortunately theres a function in _recursive that does exactly that conversion. 
+ cloned_module = wrap_cpp_module(clone) + if isinstance(inputs, dict): + assert isinstance(info, dict) or info is None + augment_many_model_functions_with_bundled_inputs(cloned_module, inputs, _receive_inflate_expr, info) + else: + assert isinstance(info, list) or info is None + augment_model_with_bundled_inputs(cloned_module, inputs, _receive_inflate_expr, info) + return cloned_module + +def augment_model_with_bundled_inputs( + model: torch.jit.ScriptModule, + inputs: Optional[Sequence[Tuple[Any, ...]]] = None, + _receive_inflate_expr: Optional[List[str]] = None, # For debugging. + info: Optional[List[str]] = None, # Optional argument to provide info about forward or its inputs + skip_size_check=False, +) -> None: + """Add bundled sample inputs to a model for the forward function. + + Models with bundled inputs can be invoked in a uniform manner by + benchmarking and code coverage tools. + + Augmented models will support the following methods: + + `get_all_bundled_inputs() -> List[Tuple[Any, ...]]` + Returns a list of tuples suitable for passing to the model like + `for inp in model.get_all_bundled_inputs(): model(*inp)` + + `get_num_bundled_inputs() -> int` + Equivalent to `len(model.get_all_bundled_inputs())`, + but slightly easier to call from C++. + + `get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]` + Returns a dictionary mapping function names to a metadata dictionary. + This nested dictionary maps preset strings like: + 'get_inputs_function_name' -> the name of a function attribute in this model that can be + run to get back a list of inputs corresponding to that function. + 'info' -> the user provided extra information about the bundled inputs + + Inputs can be specified in one of two ways: + + - The model can define `_generate_bundled_inputs_for_forward`. + If the user chooses this method inputs should be None + + - `inputs` is a list of inputs of form List[Tuple[Any, ...]]. A list of tuples where the elements + of each tuple are the args that make up one input. + """ + if not isinstance(model, torch.jit.ScriptModule): + raise Exception("Only ScriptModule is supported.") + + forward: Callable = model.forward + + # Sometimes forward won't have a name attached so just in case + if not hasattr(forward, "__name__"): + forward.__name__ = 'forward' + augment_many_model_functions_with_bundled_inputs( + model, + inputs={forward : inputs}, + _receive_inflate_expr=_receive_inflate_expr, + info={forward : info} if info else None, + skip_size_check=skip_size_check, + ) + + +def augment_many_model_functions_with_bundled_inputs( + model: torch.jit.ScriptModule, + inputs: Dict[Callable, Optional[Sequence[Tuple[Any, ...]]]], + _receive_inflate_expr: Optional[List[str]] = None, # For debugging. + info: Optional[Dict[Callable, List[str]]] = None, # Optional argument to provide info about the function or its inputs + skip_size_check=False, +) -> None: + """Add bundled sample inputs to a model for an arbitrary list of public functions. + + Models with bundled inputs can be invoked in a uniform manner by + benchmarking and code coverage tools. + + Augmented models will support the following methods: + + `get_all_bundled_inputs_for_() -> List[Tuple[Any, ...]]` + Returns a list of tuples suitable for passing to the model like + `for inp in model.get_all_bundled_inputs_for_foo(): model.foo(*inp)` + + `get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]` + Returns a dictionary mapping function names to a metadata dictionary. 
+ This nested dictionary maps preset strings like: + 'get_inputs_function_name' -> the name of a function attribute in this model that can be + run to get back a list of inputs corresponding to that function. + 'info' -> the user provided extra information about the bundled inputs + + If forward has bundled inputs then these following functions are also defined: + + `get_all_bundled_inputs() -> List[Tuple[Any, ...]]` + Returns a list of tuples suitable for passing to the model like + `for inp in model.get_all_bundled_inputs(): model(*inp)` + + `get_num_bundled_inputs() -> int` + Equivalent to `len(model.get_all_bundled_inputs())`, + but slightly easier to call from C++. + + Inputs can be specified in one of two ways: + + - The model can define `_generate_bundled_inputs_for_`. + If the user chooses this method inputs[] should map to None + + - The `inputs` argument to this function can be a dictionary mapping functions to a + list of inputs, of the same form that will be returned by get_all_bundled_inputs_for_. + The type of the inputs is List[Tuple[Any, ...]]. The outer list corresponds with a + list of inputs, the inner tuple is the list of args that together make up one input. + For inputs of functions that take one arg, this will be a tuple of length one. The Any, ... + is the actual data that makes up the args, e.g. a tensor. + + Info is an optional parameter that maps functions to a list of strings providing extra information about that + function's bundled inputs. This could be descriptions, expected outputs, etc. + - Ex: info={model.forward : ['man eating icecream', 'an airplane', 'a dog']} + + This function will attempt to optimize arguments so that (e.g.) + arguments like `torch.zeros(1000)` will be represented compactly. + Only top-level arguments will be optimized. + Tensors in lists or tuples will not. + """ + if not isinstance(model, torch.jit.ScriptModule): + raise Exception("Only ScriptModule is supported.") + + if not inputs: + raise Exception("Please provide inputs for at least 1 function") + + if hasattr(model, "get_all_bundled_inputs") or hasattr(model, "get_bundled_inputs_functions_and_info"): + raise Exception( + "Models can only be augmented with bundled inputs once. " + "This Model seems to have already been augmented with " + "bundled inputs. Please start afresh with one that " + "doesn't have bundled inputs.", + ) + + get_bundled_inputs_functions_and_info_template = "" + + for function, input_list in inputs.items(): + if hasattr(function, "__name__"): + function_name = function.__name__ + else: + if hasattr(function, "name"): + function_name = function.name # type: ignore[attr-defined] + else: + raise Exception( + 'At least one of your functions has no attribute name please ensure all have one. m.foo.name = "foo"') + + + if input_list is not None and not isinstance(input_list, Sequence): + raise TypeError(f"Error inputs for function {function_name} is not a Sequence") + + function_arg_types = [arg.type for arg in function.schema.arguments[1:]] # type: ignore[attr-defined] + deflated_inputs_type: ListType = ListType(TupleType(function_arg_types)) + model._c._register_attribute(f"_bundled_inputs_deflated_{function_name}", deflated_inputs_type, []) + + if hasattr(model, "_generate_bundled_inputs_for_" + function_name): + if input_list is not None: + raise Exception( + "inputs[{name}] is not None, but _generate_bundled_inputs_for_{name} is already defined".format( + name=function_name + ) + ) + # Model author already defined _generate_bundled_inputs_for_. 
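+            # A hypothetical sketch of such a user-provided definition on the module:
+            #
+            #     @torch.jit.export
+            #     def _generate_bundled_inputs_for_forward(self):
+            #         return [(torch.zeros(1, 3, 224, 224),)]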
+ elif input_list is None or len(input_list) == 0: + raise Exception( + "inputs for {name} must be specified if _generate_bundled_inputs_for_{name} is not already defined".format( + name=function_name, + ) + ) + else: + # Iterate over the inputs and args in each input. + # Accumulate `deflated_inputs` as (possibly) compressed values + # and `parts` to be joined into the expression that unpacks them. + deflated_inputs = [] + parts = [] + for inp_idx, args in enumerate(input_list): + if not isinstance(args, Tuple) and not isinstance(args, List): # type: ignore[arg-type] + raise TypeError( + f"Error bundled input for function {function_name} idx: {inp_idx} is not a Tuple or a List" + ) + deflated_args = [] + parts.append("(") + for arg_idx, arg in enumerate(args): + inflate_helper_fn_name = _get_inflate_helper_fn_name(arg_idx, inp_idx, function_name) + deflated, inflater, helper_definition = _inflate_expr( + arg, + f"deflated[{inp_idx}][{arg_idx}]", + inflate_helper_fn_name, + skip_size_check=skip_size_check, + ) + deflated_args.append(deflated) + parts.append(f" {inflater},") + if helper_definition: + model.define(textwrap.dedent(helper_definition)) + deflated_inputs.append(tuple(deflated_args)) + parts.append("),") + parts.append("") + expr = "\n".join(parts) + + # Back-channel return this expr for debugging. + if _receive_inflate_expr is not None: + _receive_inflate_expr.append(expr) + setattr(model, f"_bundled_inputs_deflated_{function_name}", deflated_inputs) + definition = textwrap.dedent(""" + def _generate_bundled_inputs_for_{name}(self): + deflated = self._bundled_inputs_deflated_{name} + return [ + {expr} + ] + """).format(expr=expr, name=function_name) + model.define(definition) + + # Define get_all_bundled_inputs_for_ that caches the generated inputs. + model.define(textwrap.dedent(""" + def get_all_bundled_inputs_for_{name}(self): + all_inputs = self._generate_bundled_inputs_for_{name}() + assert all_inputs is not None + return all_inputs + """).format(name=function_name)) + + # Add to the high level helper methods + inputs_info = repr(info[function]) if info and function in info else '[]' + get_bundled_inputs_functions_and_info_template += f""" + temp_dict : Dict[str,List[str]] = {{}} + info: List[str] = {inputs_info} + + temp_dict['info'] = info + temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{function_name}'] + all_inputs['{function_name}'] = temp_dict + """ + + # To ensure backwards compatibility and a streamlined api for forward these wrappers are provided + if function_name == 'forward': + model.define(textwrap.dedent(""" + def get_all_bundled_inputs(self): + return self.get_all_bundled_inputs_for_forward() + """)) + model.define(textwrap.dedent(""" + def get_num_bundled_inputs(self): + return len(self.get_all_bundled_inputs_for_forward()) + """)) + + # Define some high level helper methods that act on all bundled inputs + model.define(textwrap.dedent(f""" + def get_bundled_inputs_functions_and_info(self): + all_inputs : Dict[str, Dict[str,List[str]]] = {{}} + {get_bundled_inputs_functions_and_info_template} + return all_inputs + """)) + +def _inflate_expr( + arg: T, ref: str, inflate_helper_fn_name: str, skip_size_check: bool = False +) -> Tuple[Union[T, torch.Tensor], str, Optional[str]]: + # Allow custom inflation expressions any object. + # For example, calling custom image-decoding ops. + # Or just use "{}" as the format string to ignore size limits. 
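+    # A hypothetical sketch of such a custom expression, mirroring `bundle_randn`
+    # below: store a tiny stub and inflate it to a full random tensor on load.
+    #
+    #     InflatableArg(value=torch.zeros(1).expand(16, 3, 224, 224),
+    #                   fmt="torch.randn_like({})")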
+ if isinstance(arg, InflatableArg): + if arg.fmt_fn: + if arg.fmt not in ["{}", ""]: + raise Exception( + f"Bundled input argument at position '{ref}' has " + f"both arg.fmt_fn => \n{arg.fmt_fn} " + f"\n and arg.fmt => {arg.fmt}. " + "Please choose `arg.fmt` if the deflater is straightforward or " + "`arg.fmt_fn` if you need a function." + ) + + helper_definition = arg.fmt_fn.format(inflate_helper_fn_name) + expr = f"self.{inflate_helper_fn_name}({ref})" + + return arg.value, expr, helper_definition + else: + return arg.value, arg.fmt.format(ref), None + + if isinstance(arg, torch.Tensor): + # Small-storage tensors can just be saved directly. + if arg._typed_storage().size() <= MAX_RAW_TENSOR_SIZE or skip_size_check: + return arg, ref, None + # Small contiguous tensors can be cloned to have small storage. + # TODO: Should we do this even for non-contiguous tensors? + if arg.is_contiguous() and arg.numel() <= MAX_RAW_TENSOR_SIZE: + return arg.clone(), ref, None + # Example inputs commonly come from torch.zeros, torch.ones, or torch.full. + # These can be represented compactly. + for fmt in [torch.contiguous_format, torch.channels_last]: + if arg.is_contiguous(memory_format=fmt) and (arg == arg.flatten()[0]).all().item(): + return (arg.flatten()[0].clone().expand(*arg.size()), + f"{ref}.contiguous(memory_format={fmt})", None) + # Prevent big tensors from being bundled by default. + # TODO: Provide more useful diagnostics. + raise Exception( + f"Bundled input argument at position '{ref}' is " + f"a tensor with storage size {arg._typed_storage().size()}. " + f"You probably don't want to bundle this as an input. " + ) + else: + return arg, ref, None + +def _get_bundled_inputs_attributes_and_methods(script_module: torch.jit.ScriptModule) -> Tuple[List[str], List[str]]: + methods: List[str] = [] + attributes: List[str] = [] + + # Has bundled inputs for forward + if hasattr(script_module, 'get_all_bundled_inputs'): + methods.append('get_all_bundled_inputs') + methods.append('get_num_bundled_inputs') + methods.append('run_on_bundled_input') + + if hasattr(script_module, 'get_bundled_inputs_functions_and_info'): + methods.append('get_bundled_inputs_functions_and_info') + all_info = script_module.get_bundled_inputs_functions_and_info() + for function_name in all_info: + methods.append("get_all_bundled_inputs_for_" + function_name) + methods.append("_generate_bundled_inputs_for_" + function_name) + attributes.append("_bundled_inputs_deflated_" + function_name) + + bundled_inputs_fn = getattr( + script_module, + f"get_all_bundled_inputs_for_{function_name}" + ) + num_bundled_inputs: int = len(bundled_inputs_fn()) + + # Check inflate helper functions for each function, argument and bundled input + func = getattr(script_module, function_name) + for arg_idx in range(len(func.schema.arguments) - 1): + for input_idx in range(num_bundled_inputs): + helper_fn_name = _get_inflate_helper_fn_name( + arg_idx=arg_idx, + input_idx=input_idx, + function_name=function_name + ) + # if the arg has an InflatableArg with fmt_fn, add the helper function name + if hasattr(script_module, helper_fn_name): + methods.append(helper_fn_name) + + return (methods, attributes) + + +def _get_inflate_helper_fn_name( + arg_idx: int, + input_idx: int, + function_name: str, +) -> str: + return f"_inflate_helper_for_{function_name}_input_{input_idx}_arg_{arg_idx}" + + + +def bundle_randn(*size, dtype=None): + """Generate a tensor that will be inflated with torch.randn.""" + stub = torch.zeros(1, dtype=dtype).expand(*size) + return 
InflatableArg(value=stub, fmt="torch.randn_like({})") + + +def bundle_large_tensor(t): + """Wrap a tensor to allow bundling regardless of size.""" + return InflatableArg(value=t, fmt="{}") diff --git a/venv/lib/python3.10/site-packages/torch/utils/checkpoint.py b/venv/lib/python3.10/site-packages/torch/utils/checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..aa2c2c513c24e979a09a86772a9d16e3e07d3565 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/checkpoint.py @@ -0,0 +1,1439 @@ +import contextlib +import platform +import uuid +import warnings +import weakref +from collections import defaultdict +from itertools import count +from typing import ( + Any, + Callable, + ContextManager, + DefaultDict, + Dict, + Iterable, + List, + Optional, + Tuple, +) +from weakref import ReferenceType + +import torch +import torch.fx.traceback as fx_traceback +from torch._functorch._aot_autograd.functional_utils import is_fun +from torch.utils._pytree import tree_map +from torch.testing._internal.logging_tensor import capture_logs, LoggingTensorMode +from torch.utils._python_dispatch import TorchDispatchMode + +__all__ = [ + "checkpoint", + "checkpoint_sequential", + "CheckpointError", + "CheckpointFunction", + "check_backward_validity", + "detach_variable", + "get_device_states", + "set_device_states", + "noop_context_fn", + "set_checkpoint_early_stop", + "DefaultDeviceType", + "set_checkpoint_debug_enabled", +] + +_DEFAULT_DETERMINISM_MODE = "default" + +_checkpoint_debug_enabled: Optional[bool] = None + + +@contextlib.contextmanager +def set_checkpoint_debug_enabled(enabled: Optional[bool]): + """ + Context manager that sets whether checkpoint should print additional debug + information when running. See the ``debug`` flag for + :func:`~torch.utils.checkpoint.checkpoint` for more information. Note that + when set, this context manager overrides the value of ``debug`` passed to + checkpoint. To defer to the local setting, pass ``None`` to this context. + + Args: + enabled (bool): Whether checkpoint should print debug information. + Default is 'None'. + """ + global _checkpoint_debug_enabled + try: + prev = _checkpoint_debug_enabled + _checkpoint_debug_enabled = enabled + yield + finally: + _checkpoint_debug_enabled = prev + + +def detach_variable(inputs: Tuple[Any, ...]) -> Tuple[torch.Tensor, ...]: + if isinstance(inputs, tuple): + out = [] + for inp in inputs: + if not isinstance(inp, torch.Tensor): + out.append(inp) + continue + + x = inp.detach() + x.requires_grad = inp.requires_grad + out.append(x) + return tuple(out) + else: + raise RuntimeError( + "Only tuple of tensors is supported. Got Unsupported input type: ", + type(inputs).__name__, + ) + + +def check_backward_validity(inputs: Iterable[Any]) -> None: + if not any(inp.requires_grad for inp in inputs if isinstance(inp, torch.Tensor)): + warnings.warn( + "None of the inputs have requires_grad=True. Gradients will be None" + ) + + +def _get_device_module(device="cuda"): + device_module = getattr(torch, device) + return device_module + + +class DefaultDeviceType: + r""" + A class that manages the default device type for checkpointing. + + If no non-CPU tensors are present, the default device type will + be used. The default value is 'cuda'. The device type is used in + the checkpointing process when determining which device states + to save and restore for recomputation. 
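+
+    Example (a minimal sketch)::
+
+        >>> # xdoctest: +SKIP("illustrative only")
+        >>> torch.utils.checkpoint.DefaultDeviceType.set_device_type("cuda")
+        >>> torch.utils.checkpoint.DefaultDeviceType.get_device_type()
+        'cuda'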
+ """ + + _default_device_type = "cuda" + + @staticmethod + def set_device_type(device: str = "cuda"): + """ + Set the default device type for checkpointing. + + Args: + device (str): The device type to be set as default. Default is 'cuda'. + """ + DefaultDeviceType._default_device_type = device + + @staticmethod + def get_device_type() -> str: + """ + Get the current default device type for checkpointing. + + Returns: + str: The current default device type. + """ + return DefaultDeviceType._default_device_type + + +def _infer_device_type(*args): + device_types = list( + { + arg.device.type + for arg in args + if isinstance(arg, torch.Tensor) and not arg.device.type == "cpu" + } + ) + if len(device_types) > 1: + warnings.warn( + "Tensor arguments, excluding CPU tensors, are detected on at least two types of devices. " + "Device state will only be saved for devices of a single device type, and the remaining " + "devices will be ignored. Consequently, if any checkpointed functions involve randomness, " + "this may result in incorrect gradients. (Note that if CUDA devices are among the devices " + "detected, it will be prioritized; otherwise, the first device encountered will be selected.)" + ) + if len(device_types) == 0: + return DefaultDeviceType.get_device_type() + elif "cuda" in device_types: + return "cuda" + else: + return device_types[0] + + +# We can't know if the run_fn will internally move some args to different devices, +# which would require logic to preserve rng states for those devices as well. +# We could paranoically stash and restore ALL the rng states for all visible devices, +# but that seems very wasteful for most cases. Compromise: Stash the RNG state for +# the device of all Tensor args. +# +# To consider: maybe get_device_states and set_device_states should reside in torch/random.py? +def get_device_states(*args) -> Tuple[List[int], List[torch.Tensor]]: + # This will not error out if "arg" is a CPU tensor or a non-tensor type because + # the conditionals short-circuit. 
+ fwd_device_ids = list( + { + arg.get_device() + for arg in args + if isinstance(arg, torch.Tensor) and not arg.device.type == "cpu" + } + ) + + fwd_device_states = [] + device_module = _get_device_module(_infer_device_type(*args)) + + for device_id in fwd_device_ids: + with device_module.device(device_id): + fwd_device_states.append(device_module.get_rng_state()) + + return fwd_device_ids, fwd_device_states + + +def set_device_states(devices, states) -> None: + device_module = _get_device_module(_infer_device_type(*states)) + for device, state in zip(devices, states): + with device_module.device(device): + device_module.set_rng_state(state) + + +def _get_autocast_kwargs(device="cuda"): + if device == "cuda": + device_autocast_kwargs = { + "enabled": torch.is_autocast_enabled(), + "dtype": torch.get_autocast_gpu_dtype(), + "cache_enabled": torch.is_autocast_cache_enabled(), + } + elif _supports_autocast(device): + device_module = _get_device_module(device) + device_autocast_kwargs = { + "enabled": device_module.is_autocast_enabled(), + "dtype": device_module.get_autocast_dtype(), + "cache_enabled": torch.is_autocast_cache_enabled(), + } + else: + device_autocast_kwargs = None + + cpu_autocast_kwargs = { + "enabled": torch.is_autocast_cpu_enabled(), + "dtype": torch.get_autocast_cpu_dtype(), + "cache_enabled": torch.is_autocast_cache_enabled(), + } + + return device_autocast_kwargs, cpu_autocast_kwargs + +def _supports_autocast(device): + device_module = _get_device_module(device) + return device == "cuda" or (hasattr(device_module, "is_autocast_enabled") + and hasattr(device_module, "get_autocast_dtype")) + +class CheckpointFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, run_function, preserve_rng_state, *args): + check_backward_validity(args) + ctx.run_function = run_function + ctx.preserve_rng_state = preserve_rng_state + # Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu. + ctx.device = _infer_device_type(*args) + ctx.device_autocast_kwargs, ctx.cpu_autocast_kwargs = _get_autocast_kwargs( + ctx.device + ) + if preserve_rng_state: + ctx.fwd_cpu_state = torch.get_rng_state() + # Don't eagerly initialize the cuda context by accident. + # (If the user intends that the context is initialized later, within their + # run_function, we SHOULD actually stash the cuda state here. Unfortunately, + # we have no way to anticipate this will happen before we run the function.) + ctx.had_device_in_fwd = False + device_module = _get_device_module(ctx.device) + if getattr(device_module, "_initialized", False): + ctx.had_device_in_fwd = True + ctx.fwd_devices, ctx.fwd_device_states = get_device_states(*args) + + # Save non-tensor inputs in ctx, keep a placeholder None for tensors + # to be filled out during the backward. + ctx.inputs = [] + ctx.tensor_indices = [] + tensor_inputs = [] + for i, arg in enumerate(args): + if torch.is_tensor(arg): + tensor_inputs.append(arg) + ctx.tensor_indices.append(i) + ctx.inputs.append(None) + else: + ctx.inputs.append(arg) + + ctx.save_for_backward(*tensor_inputs) + + with torch.no_grad(): + outputs = run_function(*args) + return outputs + + @staticmethod + def backward(ctx, *args): + if not torch.autograd._is_checkpoint_valid(): + raise RuntimeError( + "Checkpointing is not compatible with .grad() or when an `inputs` parameter" + " is passed to .backward(). Please use .backward() and do not pass its `inputs`" + " argument." + ) + # Copy the list to avoid modifying original list. 
+ inputs = list(ctx.inputs) + tensor_indices = ctx.tensor_indices + tensors = ctx.saved_tensors + device_module = _get_device_module(ctx.device) + + # Fill in inputs with appropriate saved tensors. + for i, idx in enumerate(tensor_indices): + inputs[idx] = tensors[i] + + # Stash the surrounding rng state, and mimic the state that was + # present at this time during forward. Restore the surrounding state + # when we're done. + rng_devices = [] + if ctx.preserve_rng_state and ctx.had_device_in_fwd: + rng_devices = ctx.fwd_devices + with torch.random.fork_rng( + devices=rng_devices, enabled=ctx.preserve_rng_state, device_type=ctx.device + ): + if ctx.preserve_rng_state: + torch.set_rng_state(ctx.fwd_cpu_state) + if ctx.had_device_in_fwd: + set_device_states(ctx.fwd_devices, ctx.fwd_device_states) + detached_inputs = detach_variable(tuple(inputs)) + + device_autocast_ctx = device_module.amp.autocast( + **ctx.device_autocast_kwargs + ) if _supports_autocast(ctx.device) else contextlib.nullcontext() + with torch.enable_grad(), device_autocast_ctx, \ + torch.cpu.amp.autocast(**ctx.cpu_autocast_kwargs): + outputs = ctx.run_function(*detached_inputs) + + if isinstance(outputs, torch.Tensor): + outputs = (outputs,) + + # run backward() with only tensor that requires grad + outputs_with_grad = [] + args_with_grad = [] + for i in range(len(outputs)): + if torch.is_tensor(outputs[i]) and outputs[i].requires_grad: + outputs_with_grad.append(outputs[i]) + args_with_grad.append(args[i]) + if len(outputs_with_grad) == 0: + raise RuntimeError( + "none of output has requires_grad=True," + " this checkpoint() is not necessary" + ) + torch.autograd.backward(outputs_with_grad, args_with_grad) + grads = tuple( + inp.grad if isinstance(inp, torch.Tensor) else None + for inp in detached_inputs + ) + + return (None, None) + grads + + +def noop_context_fn(): + return contextlib.nullcontext(), contextlib.nullcontext() + +# TorchDynamo does not step inside utils.checkpoint function. The flow +# looks likes this +# 1) TorchDynamo tries to wrap utils.checkpoint in a HigherOrderOp by +# speculatively checking if the forward function is safe to trace. +# 2) If yes, then Dynamo-generated Fx graph has the wrapped higher +# order op. As a result, TorchDynamo does not look inside utils.checkpoint. +# 3) If not, then TorchDynamo falls back to eager by performing a graph +# break. And here, the following disable wrapper ensures that +# TorchDynamo does not trigger again on the frames created by +# utils.checkpoint innards. +@torch._disable_dynamo +def checkpoint( + function, + *args, + use_reentrant: Optional[bool] = None, + context_fn: Callable[[], Tuple[ContextManager, ContextManager]] = noop_context_fn, + determinism_check: str = _DEFAULT_DETERMINISM_MODE, + debug: bool = False, + **kwargs +): + r"""Checkpoint a model or part of the model. + + Activation checkpointing is a technique that trades compute for memory. + Instead of keeping tensors needed for backward alive until they are used in + gradient computation during backward, forward computation in checkpointed + regions omits saving tensors for backward and recomputes them during the + backward pass. Activation checkpointing can be applied to any part of a + model. + + There are currently two checkpointing implementations available, determined + by the :attr:`use_reentrant` parameter. It is recommended that you use + ``use_reentrant=False``. Please refer the note below for a discussion of + their differences. + + .. 
warning:: + + If the :attr:`function` invocation during the backward pass differs + from the forward pass, e.g., due to a global variable, the checkpointed + version may not be equivalent, potentially causing an + error being raised or leading to silently incorrect gradients. + + .. warning:: + + The ``use_reentrant`` parameter should be passed explicitly. In version + 2.4 we will raise an exception if ``use_reentrant`` is not passed. + If you are using the ``use_reentrant=True`` variant, please refer to the + note below for important considerations and potential limitations. + + .. note:: + + The reentrant variant of checkpoint (``use_reentrant=True``) and + the non-reentrant variant of checkpoint (``use_reentrant=False``) + differ in the following ways: + + * Non-reentrant checkpoint stops recomputation as soon as all needed + intermediate activations have been recomputed. This feature is enabled + by default, but can be disabled with :func:`set_checkpoint_early_stop`. + Reentrant checkpoint always recomputes :attr:`function` in its + entirety during the backward pass. + + * The reentrant variant does not record the autograd graph during the + forward pass, as it runs with the forward pass under + :func:`torch.no_grad`. The non-reentrant version does record the + autograd graph, allowing one to perform backward on the graph within + checkpointed regions. + + * The reentrant checkpoint only supports the + :func:`torch.autograd.backward` API for the backward pass without its + `inputs` argument, while the non-reentrant version supports all ways + of performing the backward pass. + + * At least one input and output must have ``requires_grad=True`` for the + reentrant variant. If this condition is unmet, the checkpointed part + of the model will not have gradients. The non-reentrant version does + not have this requirement. + + * The reentrant version does not consider tensors in nested structures + (e.g., custom objects, lists, dicts, etc) as participating in + autograd, while the non-reentrant version does. + + * The reentrant checkpoint does not support checkpointed regions with + detached tensors from the computational graph, whereas the + non-reentrant version does. For the reentrant variant, if the + checkpointed segment contains tensors detached using ``detach()`` or + with :func:`torch.no_grad`, the backward pass will raise an error. + This is because ``checkpoint`` makes all the outputs require gradients + and this causes issues when a tensor is defined to have no gradient in + the model. To avoid this, detach the tensors outside of the + ``checkpoint`` function. + + Args: + function: describes what to run in the forward pass of the model or + part of the model. It should also know how to handle the inputs + passed as the tuple. For example, in LSTM, if user passes + ``(activation, hidden)``, :attr:`function` should correctly use the + first input as ``activation`` and the second input as ``hidden`` + preserve_rng_state(bool, optional): Omit stashing and restoring + the RNG state during each checkpoint. Note that under torch.compile, + this flag doesn't take effect and we always preserve RNG state. + Default: ``True`` + use_reentrant(bool): + specify whether to use the activation checkpoint variant that + requires reentrant autograd. This parameter should be passed + explicitly. In version 2.4 we will raise an exception if + ``use_reentrant`` is not passed. If ``use_reentrant=False``, + ``checkpoint`` will use an implementation that does not require + reentrant autograd. 
This allows ``checkpoint`` to support additional + functionality, such as working as expected with + ``torch.autograd.grad`` and support for keyword arguments input into + the checkpointed function. + context_fn(Callable, optional): A callable returning a tuple of two + context managers. The function and its recomputation will be run + under the first and second context managers respectively. + This argument is only supported if ``use_reentrant=False``. + determinism_check(str, optional): A string specifying the determinism + check to perform. By default it is set to ``"default"`` which + compares the shapes, dtypes, and devices of the recomputed tensors + against those the saved tensors. To turn off this check, specify + ``"none"``. Currently these are the only two supported values. + Please open an issue if you would like to see more determinism + checks. This argument is only supported if ``use_reentrant=False``, + if ``use_reentrant=True``, the determinism check is always disabled. + debug(bool, optional): If ``True``, error messages will also include + a trace of the operators ran during the original forward computation + as well as the recomputation. This argument is only supported if + ``use_reentrant=False``. + args: tuple containing inputs to the :attr:`function` + + Returns: + Output of running :attr:`function` on :attr:`*args` + """ + if use_reentrant is None: + warnings.warn( + "torch.utils.checkpoint: the use_reentrant parameter should be " + "passed explicitly. In version 2.4 we will raise an exception " + "if use_reentrant is not passed. use_reentrant=False is " + "recommended, but if you need to preserve the current default " + "behavior, you can pass use_reentrant=True. Refer to docs for more " + "details on the differences between the two variants." + ) + use_reentrant = True + + # Hack to mix *args with **kwargs in a python 2.7-compliant way + preserve = kwargs.pop("preserve_rng_state", True) + if kwargs and use_reentrant: + raise ValueError( + "Unexpected keyword arguments: " + ",".join(arg for arg in kwargs) + ) + + if use_reentrant: + if context_fn is not noop_context_fn or debug is not False: + raise ValueError( + "Passing `context_fn` or `debug` is only supported when " + "use_reentrant=False." + ) + return CheckpointFunction.apply(function, preserve, *args) + else: + gen = _checkpoint_without_reentrant_generator( + function, preserve, context_fn, determinism_check, debug, *args, **kwargs + ) + # Runs pre-forward logic + next(gen) + ret = function(*args, **kwargs) + # Runs post-forward logic + try: + next(gen) + except StopIteration: + return ret + + +def checkpoint_sequential(functions, segments, input, use_reentrant=None, **kwargs): + r"""Checkpoint a sequential model to save memory. + + Sequential models execute a list of modules/functions in order + (sequentially). Therefore, we can divide such a model in various segments + and checkpoint each segment. All segments except the last will not store + the intermediate activations. The inputs of each checkpointed segment will + be saved for re-running the segment in the backward pass. + + .. warning:: + The ``use_reentrant`` parameter should be passed explicitly. In version + 2.4 we will raise an exception if ``use_reentrant`` is not passed. + If you are using the ``use_reentrant=True` variant, please see + :func:`~torch.utils.checkpoint.checkpoint` for + the important considerations and limitations of this variant. It is + recommended that you use ``use_reentrant=False``. + + .. 
warning: + Since PyTorch 1.4, it allows only one Tensor as the input and + intermediate outputs, just like :class:`torch.nn.Sequential`. + + Args: + functions: A :class:`torch.nn.Sequential` or the list of modules or + functions (comprising the model) to run sequentially. + segments: Number of chunks to create in the model + input: A Tensor that is input to :attr:`functions` + preserve_rng_state(bool, optional): Omit stashing and restoring + the RNG state during each checkpoint. + Default: ``True`` + use_reentrant(bool): + specify whether to use the activation checkpoint variant that + requires reentrant autograd. This parameter should be passed + explicitly. In version 2.4 we will raise an exception if + ``use_reentrant`` is not passed. If ``use_reentrant=False``, + ``checkpoint`` will use an implementation that does not require + reentrant autograd. This allows ``checkpoint`` to support additional + functionality, such as working as expected with + ``torch.autograd.grad`` and support for keyword arguments input into + the checkpointed function. + + Returns: + Output of running :attr:`functions` sequentially on :attr:`*inputs` + + Example: + >>> # xdoctest: +SKIP("stub") + >>> model = nn.Sequential(...) + >>> input_var = checkpoint_sequential(model, chunks, input_var) + """ + if use_reentrant is None: + warnings.warn( + "torch.utils.checkpoint.checkpoint_sequential: the use_reentrant " + "parameter should be passed explicitly. " + "In version 2.4 we will raise an exception if use_reentrant " + "is not passed. use_reentrant=False is " + "recommended, but if you need to preserve the current default " + "behavior, you can pass use_reentrant=True. Refer to docs for more " + "details on the differences between the two variants." + ) + use_reentrant = True + + # Hack for keyword-only parameter in a python 2.7-compliant way + preserve = kwargs.pop("preserve_rng_state", True) + if kwargs: + raise ValueError( + "Unexpected keyword arguments: " + ",".join(arg for arg in kwargs) + ) + + def run_function(start, end, functions): + def forward(input): + for j in range(start, end + 1): + input = functions[j](input) + return input + + return forward + + if isinstance(functions, torch.nn.Sequential): + functions = list(functions.children()) + + segment_size = len(functions) // segments + # the last chunk has to be non-volatile + end = -1 + for start in range(0, segment_size * (segments - 1), segment_size): + end = start + segment_size - 1 + input = checkpoint( + run_function(start, end, functions), + input, + use_reentrant=use_reentrant, + preserve_rng_state=preserve, + ) + return run_function(end + 1, len(functions) - 1, functions)(input) + + +def _internal_assert(cond): + if not cond: + raise AssertionError( + "Something went unexpectedly wrong in activation checkpoint. " + "Please report this bug by filing an issue to PyTorch." + ) + + +# NOTE [ Nestable Checkpoint ] +# +# The semantics of nested checkpoint can be defined by two basic rules. +# Following the two rules leads to an important implication that is central +# to motivating the design. +# +# Rule 1. Saved tensors are managed by inner-most checkpoint only and hidden +# from any outer layers of checkpoint. +# +# Rule 2. The inputs of inner checkpoints are treated as tensors saved to its +# parent checkpoint. +# +# Implication: To recompute any given saved tensor, we need to recompute all of +# the checkpoints wrapping it. +# +# Why is this implied? 
To unpack a saved tensor X during backward we need to +# recompute the inner-most checkpoint (#1), and in order to recompute that +# checkpoint I need to have its inputs, which are managed by that checkpoint's +# parent (#2), which thus also needs to be recomputed first. Continue this line +# of reasoning and we realize that in order to unpack X, all checkpoints that +# were active at the time X was saved need to be recomputed. (unless we have +# already done so in that backward for some other saved tensor). +# +# In practice, we use a noop autograd Function to save inputs as saved tensors. +# During unpack calling ctx.saved_tensor triggers the parent checkpoint to +# recompute. +# +# Rule 3. We should start recomputation as if there are no checkpoints currently +# active. Checkpoints encountered during recomputation are still +# respected. +# +# When we start recomputation, we push the saved variable hook meant for +# recomputation on the stack. See examples in Rule 6 for more context. +# +# * * * * +# +# Beyond the basic semantics specific to nested checkpoint, we impose several +# more constraints that may apply to checkpointing in general. +# +# Rule 4. Lifetime of recomputed tensors +# +# Recomputed tensors are considered specific to particular invocations +# of backward and are always cleared immediately as they are unpacked +# Particularly, we require this to happen even if retain_graph=True. +# +# [ Implementation details of Rule 4 ] +# +# If we were okay with recomputed tensors staying alive after backward is run +# with retain_graph=True, we would store recomputed variables as the values of a +# WeakKeyDictionary and pack strong references to the keys, so that as we +# backward, those packed keys would be cleared as long as retain_graph=False. +# Clearing the packed key clears the corresponding entry in the WKD. +# +# If we wish recomputed variables to be immediately cleared as we unpack them in +# the retain_graph=True case, we cannot rely on the packed keys to be cleared by +# backward automatically. Instead of packing the strong reference to the key +# directly, we pack a container object, which we manually clear as we unpack. +# +# An important detail is that if a second backward happens, the second +# recomputation needs to reset the container with a newly created key. +# +# Rule 5. Stop recomputation as soon as we've recomputed the saved tensors we +# know we need. +# +# [ Implementation details of Rule 5 ] +# +# During recomputation, raise an exception if the number of recomputed tensors +# matches the number of tensors that we expected to recompute. We wrap the +# recomputation call with a try-catch to catch this specific exception. See +# Rule #6 below for some examples. +# +# Rule 6. We support doing backward inside checkpoint context +# +# [ retain_graph is True] +# +# def fn(x): +# y = x.sin() +# z = y.cos() +# gx, = torch.autograd.grad(z, x, retains_grad=True) +# return gx, z +# +# out = checkpoint(fn)(inp) +# out.backward() +# +# Because z is saved by cos while checkpoint is enabled, it would not be +# actually saved, and so the .grad() call inside must trigger a recomputation. +# +# During recomputation the "inner pack hook" has two responsibilities: +# +# 1) As usual, populating the WeakKeyDictionary storing recomputed tensors +# 2) Pack the actual tensor (detached) so that one may perform backward on the +# recomputed graph. 
The tensors saved to this graph will live until the end +# of recomputation, or die earlier if someone performs backward with +# retain_graph=False. +# +# More generally performing backward on the recomputed graph occurs in the +# following cases: +# - If backward is performed inside forward, +# - During the original forward IF early-stop is disabled +# - During the original backward +# - If there are multiple .grad()/.backward() calls, we would perform backward +# on the recomputed graph even if early-stop is enabled (see the example below) +# +# [ retain_graph is False ] +# +# The example below shows what happens if during recomputation we find that some +# of the tensors we are trying to recompute have already been cleared. +# +# Spoiler: we don't do anything special, we just skip over them! +# +# def fn(x): +# y = x.sin() # (1) +# z = y.cos() # (2) +# gx, = torch.autograd.grad(z, x) # (3) +# return x.cos() * gx # (4) +# +# out = checkpoint(fn)(inp) +# out.backward() # (5) +# +# 1, 2. Don't save x and y since we are inside a checkpoint. +# 3. Trigger a recompute of fn since x and y weren't saved. +# And depending on whether early stop is enabled, either stop at (2) or +# continue running the function. +# Because we are running backward with retain_graph=False, we clear x and y's +# holders. +# 4. Don't save x since we are inside a checkpoint. +# 5. Calling backward triggers another recompute of fn. During recompute, we see +# that x and y have already been cleared in the original graph as indicated +# by holder=None. We skip over them. We still save x at (4) (since its holder +# is still alive.) + +_enable_checkpoint_early_stop = True + + +@contextlib.contextmanager +def set_checkpoint_early_stop(enable: bool): + """Context manager that sets whether checkpoint should stop recomputation early. + + By default, non-reentrant checkpoint stops recomputation as soon as it + has computed all needed Tensors. This context manager can be used to disable + that feature if it is problematic for your specific application. + + This context manager only needs to be active when forward is run. It does + not need to be active during backward. + + Example:: + + >>> # xdoctest: +SKIP(failing) + >>> message = "saved tensors default hooks are disabled" + >>> with set_checkpoint_early_stop(False): + ... # Any checkpoint under this context manager will respect this + ... # context manager, even if its backward is performed outside. + ... out = checkpoint(fn, inputs) + ... 
+ >>> out.backward() + """ + global _enable_checkpoint_early_stop + try: + prev = _enable_checkpoint_early_stop + _enable_checkpoint_early_stop = enable + yield + finally: + _enable_checkpoint_early_stop = prev + + +class _Handle: + pass + + +class _Holder: + def __init__(self): + self.handles: Dict[int, Optional[_Handle]] = dict() + + +class _NoopSaveInputs(torch.autograd.Function): + @staticmethod + def forward(*args): + return torch.empty((0,)) + + @staticmethod + def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: + # Only tensors can be saved with ctx.save_for_backward, everything else + # is captured by get_args, which is saved directly on ctx + tensor_indices, tensors = zip( + *[(i, o) for i, o in enumerate(inputs) if isinstance(o, torch.Tensor)] + ) + idx2saved_idx = {b: a for a, b in enumerate(tensor_indices)} + # args but with tensors replaced with None as placeholders + args = [None if isinstance(o, torch.Tensor) else o for o in inputs] + + def get_args(saved_tensors): + # restore the placeholders with the original tensors grabbed from + # ctx.saved_tensors (which may be saved on a parent checkpoint if + # this checkpoint is nested, and that would trigger a recursive + # unpack!) + ret = [ + saved_tensors[idx2saved_idx[i]] if i in tensor_indices else o + for i, o in enumerate(args) + ] + # grab the tail since we also saved the dummy to avoid having to explicitly + # handle the case where there are no tensor inputs + return ret[1:] + + ctx.get_args = get_args + ctx.save_for_backward(*tensors) + + @staticmethod + def backward(ctx, *grad_outputs): + raise AssertionError("Did not expect to backward on this graph") + + +class _CheckpointFrame: + def __init__(self, recompute_fn, early_stop, unpack_error_cb, metadata_fn): + self.recompute_fn = recompute_fn + self.input_saver = None + self.weak_holders: List[ReferenceType] = [] + # We store this as a weakkeydictionary so that in the case of a partial + # backward, the entries in the dict are cleared alongside the Holder + # which will be removed when the SavedVariable is cleared. + self.recomputed: DefaultDict[ + int, weakref.WeakKeyDictionary[_Handle, torch.Tensor] + ] = defaultdict(weakref.WeakKeyDictionary) + # We need both recomp_counter and recomputed since they can diverge + # https://github.com/pytorch/pytorch/pull/90105#discussion_r1135889885 + self.recomp_counter: DefaultDict[int, int] = defaultdict(int) + self.is_recomputed: DefaultDict[int, bool] = defaultdict(bool) + + # See Rule 5 + self.early_stop = early_stop + + # Debugging + self.metadata_fn = metadata_fn + self.unpack_error_cb = unpack_error_cb + self.x_metadatas = [] + self.forward_completed = False + self.ignore_saved_mismatch = False + + def check_recomputed_tensors_match(self, gid): + if self.ignore_saved_mismatch: + # TODO: we can probably make this check stricter by checking that + # the metadata of the first tensors still match. + return + # NOTE [ Error handling for checkpoint ] + # + # At a high level, we need to check that the tensors saved + # during original forward matches tensors saved during recompute + # This means handling 3 cases: + # + # 1. During recompute, more tensors were saved. + # + # Usually this is hidden due to the StopRecomputationError + # but if early stop is not enabled, or we would have errored + # anyway because there aren't enough weak_holders. But we + # do want to have a nice error. See the _recomputation_hook + # for details. + if not len(self.weak_holders) == self.recomp_counter[gid]: + # 2. 
During recompute, fewer tensors were saved + # + # We know that everytime we save something do original forward + # we append to weak_holder, and every time we save a tensor + # during recompute we increment recompute_counter. + raise CheckpointError( + "torch.utils.checkpoint: A different number of tensors was saved " + "during the original forward and recomputation.\n" + f"Number of tensors saved during forward: {len(self.weak_holders)}\n" + f"Number of tensors saved during recomputation: {self.recomp_counter[gid]}" + ) + + # 3. During recompute, the same tensors were saved, but they + # have different metadata + nb_meta_different = [] + for idx, weak_holder in enumerate(self.weak_holders): + holder = weak_holder() + if holder is None: + continue + # We've seen all holders since we iterate over them in order + # For every holder that is still alive now, it must've been + # alive when we saw it during recompute, therefore, the + # gid must be set. + _internal_assert(gid in holder.handles) + # We know this is the first unpack, so it couldn't have been set + # to None yet. + _internal_assert(holder.handles[gid] is not None) + # We always set these together in the recomputation hook + _internal_assert(holder.handles[gid] in self.recomputed[gid]) + # see pack hook, x_metadata is 1:1 with weak_holders. + x_meta = self.x_metadatas[idx] + recomputed_x = self.recomputed[gid][holder.handles[gid]] + if x_meta != self.metadata_fn(recomputed_x): + nb_meta_different.append((idx, x_meta, self.metadata_fn(recomputed_x))) + + if len(nb_meta_different) > 0: + mismatched_tensors = "" + for idx, x_meta, recomputed_meta in nb_meta_different: + mismatched_tensors += ( + f"tensor at position {idx}:\n" + f"saved metadata: {x_meta}\n" + f"recomputed metadata: {recomputed_meta}\n" + ) + raise CheckpointError( + "torch.utils.checkpoint: Recomputed values for the following tensors " + "have different metadata than during the forward pass.\n" + f"{mismatched_tensors}" + ) + + +_checkpoint_error_template = """ \ +An error happened while unpacking tensors; dumping logs of latest computation +because you passed `debug=True` to `torch.utils.checkpoint.checkpoint()`. +Scroll all the way down for guidance on how to navigate these logs. + ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ +| 1. Stack traces of the operators that ran in the original forward | ++------------------------------------------------------------------------------+ + +{forward_traces} ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ +| 2. Stack traces of the operators that ran during recomputation | ++------------------------------------------------------------------------------+ + +{recompute_traces} ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ +| 3. Log of operators in the original forward and recomputation | ++------------------------------------------------------------------------------+ +(Scroll up to correlate stack traces with each operation listed below. This + helps identify their source in the code.) + +IMPORTANT: Differences in "detach" calls between the original forward and the + recomputation are expected. They are introduced by the checkpointing + mechanism and can be ignored. 
+ +Operations executed during the original forward: + +{forward_ops} + +Operations executed during recomputation: + +{recompute_ops} + ++------------------------------------------------------------------------------+ + ERROR: Detected non-determinism while running activation checkpointing + + You are seeing this error because you passed `debug=True` to checkpoint and + tensors to be saved during the original forward and differ between those saved + during recomputation. This can happen if different operators were ran in the + original forward and in the recomputation. + + To identify where the mismatch may be coming from, you can do the following: + + 1) Compare the operators ran during original forward and recomputation to + see where they differ. These operators are printed above in the order they + were executed. + + 2) Review the stack trace for each operator to locate its invocation source. + Each operator's stack trace is printed in their execution order. + + Note that the logs can be quite long. Here's how they are structured: + (Tip: you can Ctrl-f for these headers) + + 1. Stack traces of the operators that ran in the original forward + 2. Stack traces of the operators that ran during recomputation + 3. Log of operators in the original forward and recomputation + 4. Error message <--- You are here +-------------------------------------------------------------------------------- +""" + +class CheckpointError(RuntimeError): + pass + + +def _get_debug_context_and_cb() -> Tuple[Callable[[], Any], Callable[[CheckpointError], None]]: + # This function returns the context_fn and error_cb to be used by the + # checkpointing mechanism. error_cb is invoked when an error is detected + # during unpack. + + # record_context_cpp is not support on non-linux non-x86_64 platforms + cpp_tb = platform.machine() == 'x86_64' and platform.system() == 'Linux' + + class CaptureLogs: + def __init__(self): + self.logs = None + self.tbs = None + + def get_context_manager(self): + @contextlib.contextmanager + def logging_mode(): + with LoggingTensorMode(), \ + capture_logs(True, python_tb=True, script_tb=True, cpp_tb=cpp_tb) as logs_and_tb: + self.logs, self.tbs = logs_and_tb + yield logs_and_tb + return logging_mode() + + capture_logs_fwd = CaptureLogs() + capture_logs_recompute = CaptureLogs() + + def unpack_error_cb(e: CheckpointError): + def get_str_tb(label, capture_logs): + out = "" + total_len = len(capture_logs.logs) + for i, (log, tb) in enumerate(zip(capture_logs.logs, capture_logs.tbs)): + out += f"{log} ({i + 1} of {total_len} in {label})\n\n" + found_torch_dispatch = False + for line in tb: + # Start printing stack trace only after __torch_dispatch__ is found + is_torch_dispatch = line['name'] == '__torch_dispatch__' + if not found_torch_dispatch and not is_torch_dispatch: + continue + elif is_torch_dispatch: + found_torch_dispatch = True + continue + out += f"{line['filename']}:{line['line']}:{line['name']}\n" + out += "\n\n" + return out + assert capture_logs_fwd.logs is not None + assert capture_logs_recompute.logs is not None + raise CheckpointError( + _checkpoint_error_template.format( + forward_traces=get_str_tb("original", capture_logs_fwd), + recompute_traces=get_str_tb("recompute", capture_logs_recompute), + forward_ops="\n".join(capture_logs_fwd.logs), + recompute_ops="\n".join(capture_logs_recompute.logs) + ) + ) from e + + def context_fn(): + return capture_logs_fwd.get_context_manager(), capture_logs_recompute.get_context_manager() + + return context_fn, unpack_error_cb + +def 
_default_meta_extractor(x: torch.Tensor) -> Dict[str, Any]: + # These properties are fast to check, easy to understand + return { + "shape": x.shape, + "dtype": x.dtype, + "device": x.device + } + +_allowed_determinism_checks_to_fns: Dict[str, Callable[[torch.Tensor], Any]] = { + _DEFAULT_DETERMINISM_MODE: _default_meta_extractor, + "none": lambda _: None, +} + +# See Rule 5 +class _StopRecomputationError(Exception): + pass + + +class _recomputation_hook(torch.autograd.graph.saved_tensors_hooks): + def __init__(self, target_frame_ref: ReferenceType, gid: int): + def pack_hook(x): + target_frame = target_frame_ref() + assert target_frame is not None # appease mypy + recomp_idx = target_frame.recomp_counter[gid] + target_frame.recomp_counter[gid] += 1 + + if recomp_idx >= len(target_frame.weak_holders): + assert not target_frame.early_stop + if not target_frame.forward_completed: + # We run into this case when early stop is not enabled and do + # grad within checkpoint. + # We need to set this flag, so we don't error out later when + # we check if the number of tensors saved during forward and + # recomputation match. + target_frame.ignore_saved_mismatch = True + return x.detach() + raise CheckpointError( + "torch.utils.checkpoint: trying to save more tensors during " + "recomputation than during the original forward pass." + ) + + holder = target_frame.weak_holders[recomp_idx]() + + # This holder may have been cleared because someone may have called + # backward within forward. If so, we don't need to save. + if holder is not None: + _internal_assert(holder.handles.get(gid, None) is None) + holder.handles[gid] = _Handle() + target_frame.recomputed[gid][holder.handles[gid]] = x.detach() + + if target_frame.early_stop and target_frame.recomp_counter[gid] == len( + target_frame.weak_holders + ): + raise _StopRecomputationError() + # See Rule 6: [ retain_graph is True ] above + return x.detach() + + def unpack_hook(x): + # See Rule 6: [ retain_graph is True ] above for an example of when + # the graph created during recomputation could be backwarded. + return x + + super().__init__(pack_hook, unpack_hook) + + +class _checkpoint_hook(torch.autograd.graph.saved_tensors_hooks): + def __init__(self, frame): + def pack_hook(x): + # See Rule 4 above + holder = _Holder() + frame.weak_holders.append(weakref.ref(holder)) + # Save metadata to detect non-determinism + if frame.metadata_fn is not None: + with torch.no_grad(): + frame.x_metadatas.append(frame.metadata_fn(x)) + return holder + + def unpack_hook(holder): + gid = torch._C._current_graph_task_id() + if gid == -1: + # generate a temporary id if we trigger unpack outside of a backward call + gid = int(uuid.uuid4()) + + if not frame.is_recomputed[gid]: + ctx = frame.input_saver.grad_fn + args = ctx.get_args(ctx.saved_tensors) + + try: + with _recomputation_hook( + weakref.ref(frame), gid + ), torch.autograd.enable_grad(): + frame.recompute_fn(*args) + except _StopRecomputationError: + pass + frame.is_recomputed[gid] = True + frame.check_recomputed_tensors_match(gid) + + _internal_assert(gid in holder.handles) + + if holder.handles[gid] is None: + raise CheckpointError( + "torch.utils.checkpoint: Unpack is being triggered for a tensor that was already " + "unpacked once. If you are calling ctx.saved_tensors in backward, make sure to do " + "so only once. Otherwise please open an issue with details on your use case." 
+ ) + _internal_assert(holder.handles[gid] in frame.recomputed[gid]) + ret = frame.recomputed[gid][holder.handles[gid]] + holder.handles[gid] = None + return ret + + if frame.unpack_error_cb is not None: + def unpack_hook_with_error_cb(holder): + try: + return unpack_hook(holder) + except CheckpointError as e: + frame.unpack_error_cb(e) + super().__init__(pack_hook, unpack_hook_with_error_cb) + else: + super().__init__(pack_hook, unpack_hook) + + +def _is_compiling(func, args, kwargs): + # Check if we are under AOTAutograd tracing + # There should probably be a better way to do this... + # TODO: unify _is_compiling across all compile stacks + for arg in args: + if isinstance(arg, torch.Tensor) and is_fun(arg): + return True + return False + + +def _detach(x): + if isinstance(x, torch.Tensor): + return x.detach() + return x + + +uid = count(1) + + +# NOTE: torch.utils.checkpoint internal logic will call these two functions unknown number of times +# (i.e. there could be _CachedTorchDispatchMode calls that doesn't map to a _CachingTorchDispatchMode call), +# so we ignore these ops and just always recompute them. +_ignored_ops = { + torch.ops.prim.device.default, + torch.ops.aten.detach.default, +} | set(torch._subclasses.functional_tensor.FunctionalTensor.metadata_fns) + + +class _CachingTorchDispatchMode(TorchDispatchMode): + r""" + A :class:`TorchDispatchMode` to implement selective activation checkpointing + that's compatible with torch.compile. Used together with _CachedTorchDispatchMode. + """ + def __init__(self, policy_fn, storage): + self.policy_fn = policy_fn + self.storage = storage + + def push_into_storage(self, out, func, args, kwargs): + out_detached = tree_map(_detach, out) + self.storage[func].append(out_detached) + + def _handle_compile_in_forward_ctx(self, should_not_recompute, func, args, kwargs): + if func in _ignored_ops: + return func(*args, **kwargs) + if should_not_recompute: + fx_traceback.current_meta["recompute"] = 0 + # NOTE: Here we just store and reuse output of all ops, since in torch.compile mode + # we decide and handle recomputation in the partitioner. + out = func(*args, **kwargs) + self.push_into_storage(out, func, args, kwargs) + return out + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + should_not_recompute = self.policy_fn("forward", func, *args, **kwargs) + if _is_compiling(func, args, kwargs): + return self._handle_compile_in_forward_ctx(should_not_recompute, func, args, kwargs) + else: + if should_not_recompute: + out = func(*args, **kwargs) + self.push_into_storage(out, func, args, kwargs) + else: + out = func(*args, **kwargs) + return out + + +class _CachedTorchDispatchMode(TorchDispatchMode): + r""" + A :class:`TorchDispatchMode` to implement selective activation checkpointing + that's compatible with torch.compile. Used together with _CachingTorchDispatchMode. 
+ """ + def __init__(self, policy_fn, storage): + self.policy_fn = policy_fn + self.storage = storage + + def pop_from_storage(self, func, args, kwargs): + assert func in self.storage + out = self.storage[func].pop(0) + return out + + def _handle_compile_in_recompute_ctx(self, should_not_recompute, func, args, kwargs): + if func in _ignored_ops: + return func(*args, **kwargs) + out = self.pop_from_storage(func, args, kwargs) + return out + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + should_not_recompute = self.policy_fn("recompute", func, *args, **kwargs) + if _is_compiling(func, args, kwargs): + return self._handle_compile_in_recompute_ctx(should_not_recompute, func, args, kwargs) + else: + if should_not_recompute: + out = self.pop_from_storage(func, args, kwargs) + else: + out = func(*args, **kwargs) + return out + + +def _pt2_selective_checkpoint_context_fn_gen(policy_fn): + """ + A helper function that generates a pair of contexts to be later passed into + `torch.utils.checkpoint` API to implment selective checkpointing. + + .. warning:: + This is context_fn is intended for use with torch.compile only. + + Args: + policy_fn (Callable[[Callable, List[Any], Dict[str, Any]], bool]): Policy function + to decide whether a particular op should be recomputed in backward pass or not. + In eager mode: + If policy_fn(...) returns True, the op is guaranteed to NOT be recomputed. + If policy_fn(...) returns False, the op is guaranteed to be recomputed. + In torch.compile mode: + If policy_fn(...) returns True, the op is guaranteed to NOT be recomputed. + If policy_fn(...) returns False, the op may or may not be recomputed + (it's up to the partitioner to decide). + + Returns: + A pair of generated contexts. + + Example: + >>> # xdoctest: +REQUIRES(LINUX) + >>> + >>> def get_custom_policy(): + >>> no_recompute_list = [ + >>> torch.ops.aten.mm.default, + >>> ] + >>> def custom_policy(mode, func, *args, **kwargs): + >>> return func in no_recompute_list + >>> return custom_policy + >>> + >>> def selective_checkpointing_context_fn(): + >>> return _pt2_selective_checkpoint_context_fn_gen(get_custom_policy()) + >>> + >>> def gn(x, y): + >>> return torch.sigmoid(torch.matmul(torch.matmul(x, y), y)) * y + >>> + >>> def fn(x, y): + >>> return torch.utils.checkpoint.checkpoint( + >>> gn, x, y, + >>> use_reentrant=False, + >>> context_fn=selective_checkpointing_context_fn, + >>> ) + >>> + >>> x = torch.randn(4, 4, requires_grad=True) + >>> y = torch.randn(4, 4, requires_grad=True) + >>> + >>> compiled_fn = torch.compile(fn) + """ + storage: Dict[Any, List[Any]] = defaultdict(list) + return _CachingTorchDispatchMode(policy_fn, storage), _CachedTorchDispatchMode(policy_fn, storage) + + +# NB: this helper wraps fn before calling checkpoint_impl. kwargs and +# saving/restoring of global state is handled here. + +def _checkpoint_without_reentrant_generator( + fn, + preserve_rng_state=True, + context_fn: Callable[[], Tuple[ContextManager, ContextManager]] = noop_context_fn, + determinism_check: str = _DEFAULT_DETERMINISM_MODE, + debug: bool = False, + *args, + **kwargs +): + """Checkpointing without reentrant autograd. + + Args: + function: describes what to run in the forward pass of the model or + part of the model. It should also know how to handle the inputs + passed as the tuple. 
For example, in LSTM, if the user passes + ``(activation, hidden)``, :attr:`function` should correctly use the + first input as ``activation`` and the second input as ``hidden`` + preserve_rng_state(bool, optional): If ``False``, skip stashing and restoring + the RNG state during each checkpoint. + Default: ``True`` + context_fn(Callable, optional): A callable returning a tuple of two + context managers. The function and its recomputation will be run + under the first and second context managers respectively. + determinism_check(str, optional): A string specifying the determinism + check to perform. By default it is set to ``"default"`` which + compares the shapes, dtypes, and devices of the recomputed tensors + against those of the saved tensors. To turn off this check, specify + ``"none"``. Currently these are the only two supported values. + Please open an issue if you would like to see more determinism + checks. + debug(bool, optional): If ``True``, error messages will also include + a trace of the operators run during the original forward computation + as well as the recomputation. + *args: Arguments to pass in to the given ``function``. + **kwargs: Keyword arguments to pass into the given ``function``. + """ + unpack_error_cb = None + + if _checkpoint_debug_enabled if _checkpoint_debug_enabled is not None else debug: + if context_fn != noop_context_fn: + raise ValueError( + "debug=True is incompatible with non-default context_fn" + ) + context_fn, unpack_error_cb = _get_debug_context_and_cb() + + if determinism_check in _allowed_determinism_checks_to_fns: + metadata_fn = _allowed_determinism_checks_to_fns[determinism_check] + else: + raise ValueError( + f"determinism_check should be one of {list(_allowed_determinism_checks_to_fns.keys())}, " + f"but got {determinism_check}" + ) + + device = _infer_device_type(*args) + device_module = _get_device_module(device) + forward_context, recompute_context = context_fn() + if _is_compiling(fn, args, kwargs) and context_fn != noop_context_fn: + assert ( + isinstance(forward_context, TorchDispatchMode) and + isinstance(recompute_context, TorchDispatchMode) + ), \ + "In torch.compile mode, `context_fn` arg passed to `torch.utils.checkpoint` " + \ + "must generate a tuple of two `TorchDispatchMode`s." + # Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu. + device_autocast_kwargs, cpu_autocast_kwargs = _get_autocast_kwargs(device=device) + + if preserve_rng_state: + fwd_cpu_state = torch.get_rng_state() + # Don't eagerly initialize the cuda context by accident. + # (If the user intends that the context is initialized later, within their + # run_function, we SHOULD actually stash the cuda state here. Unfortunately, + # we have no way to anticipate this will happen before we run the function. + # If they do so, we raise an error.) + had_device_in_fwd = False + if getattr(device_module, "_initialized", False): + had_device_in_fwd = True + fwd_devices, fwd_device_states = get_device_states(*args) + + def recompute_fn(*inputs): + kwargs, *args = inputs + # This will be called later during recomputation. This wrapping enables + # the necessary global state to be captured. 
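+ # Concretely, the wrapper below restores the stashed CPU (and, if captured,
+ # device) RNG state, re-enters the autocast settings recorded during the
+ # original forward, and then reruns fn under recompute_context.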
+ rng_devices = [] + if preserve_rng_state and had_device_in_fwd: + rng_devices = fwd_devices + with torch.random.fork_rng( + devices=rng_devices, enabled=preserve_rng_state, device_type=device + ): + if preserve_rng_state: + torch.set_rng_state(fwd_cpu_state) + if had_device_in_fwd: + set_device_states(fwd_devices, fwd_device_states) + + device_autocast_ctx = device_module.amp.autocast( + **device_autocast_kwargs + ) if _supports_autocast(device) else contextlib.nullcontext() + with device_autocast_ctx, torch.cpu.amp.autocast(**cpu_autocast_kwargs), \ + recompute_context: + fn(*args, **kwargs) + + new_frame = _CheckpointFrame( + recompute_fn, + _enable_checkpoint_early_stop, + unpack_error_cb, + metadata_fn + ) + dummy = torch.empty((0,), requires_grad=True) + new_frame.input_saver = _NoopSaveInputs.apply(dummy, kwargs, *args) + + # When ambient grad_mode is False + if new_frame.input_saver.grad_fn is None: + yield + return + + with _checkpoint_hook(new_frame), forward_context: + yield + new_frame.forward_completed = True + + if getattr(device_module, "_initialized", False) and \ + preserve_rng_state and not had_device_in_fwd: # type: ignore[possibly-undefined] + # Device was not initialized before running the forward, so we didn't + # stash the device state. + raise RuntimeError( + "PyTorch's device state was initialized in the forward pass " + "of a Checkpoint, which is not allowed. Please open an issue " + "if you need this feature." + ) + + return diff --git a/venv/lib/python3.10/site-packages/torch/utils/collect_env.py b/venv/lib/python3.10/site-packages/torch/utils/collect_env.py new file mode 100644 index 0000000000000000000000000000000000000000..6cbf598156b031c1deef8f3d7fed84961e46b4e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/collect_env.py @@ -0,0 +1,624 @@ + +# Unlike the rest of the PyTorch this file must be python2 compliant. 
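+# (In practice that means avoiding f-strings and other Python-3-only syntax in this file.)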
+# This script outputs relevant system environment info +# Run it with `python collect_env.py` or `python -m torch.utils.collect_env` +import datetime +import locale +import re +import subprocess +import sys +import os +from collections import namedtuple + + +try: + import torch + TORCH_AVAILABLE = True +except (ImportError, NameError, AttributeError, OSError): + TORCH_AVAILABLE = False + +# System Environment Information +SystemEnv = namedtuple('SystemEnv', [ + 'torch_version', + 'is_debug_build', + 'cuda_compiled_version', + 'gcc_version', + 'clang_version', + 'cmake_version', + 'os', + 'libc_version', + 'python_version', + 'python_platform', + 'is_cuda_available', + 'cuda_runtime_version', + 'cuda_module_loading', + 'nvidia_driver_version', + 'nvidia_gpu_models', + 'cudnn_version', + 'pip_version', # 'pip' or 'pip3' + 'pip_packages', + 'conda_packages', + 'hip_compiled_version', + 'hip_runtime_version', + 'miopen_runtime_version', + 'caching_allocator_config', + 'is_xnnpack_available', + 'cpu_info', +]) + +DEFAULT_CONDA_PATTERNS = { + "torch", + "numpy", + "cudatoolkit", + "soumith", + "mkl", + "magma", + "triton", + "optree", +} + +DEFAULT_PIP_PATTERNS = { + "torch", + "numpy", + "mypy", + "flake8", + "triton", + "optree", + "onnx", +} + + +def run(command): + """Return (return-code, stdout, stderr).""" + shell = True if type(command) is str else False + p = subprocess.Popen(command, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, shell=shell) + raw_output, raw_err = p.communicate() + rc = p.returncode + if get_platform() == 'win32': + enc = 'oem' + else: + enc = locale.getpreferredencoding() + output = raw_output.decode(enc) + err = raw_err.decode(enc) + return rc, output.strip(), err.strip() + + +def run_and_read_all(run_lambda, command): + """Run command using run_lambda; reads and returns entire output if rc is 0.""" + rc, out, _ = run_lambda(command) + if rc != 0: + return None + return out + + +def run_and_parse_first_match(run_lambda, command, regex): + """Run command using run_lambda, returns the first regex match if it exists.""" + rc, out, _ = run_lambda(command) + if rc != 0: + return None + match = re.search(regex, out) + if match is None: + return None + return match.group(1) + +def run_and_return_first_line(run_lambda, command): + """Run command using run_lambda and returns first line if output is not empty.""" + rc, out, _ = run_lambda(command) + if rc != 0: + return None + return out.split('\n')[0] + + +def get_conda_packages(run_lambda, patterns=None): + if patterns is None: + patterns = DEFAULT_CONDA_PATTERNS + conda = os.environ.get('CONDA_EXE', 'conda') + out = run_and_read_all(run_lambda, "{} list".format(conda)) + if out is None: + return out + + return "\n".join( + line + for line in out.splitlines() + if not line.startswith("#") + and any(name in line for name in patterns) + ) + +def get_gcc_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'gcc --version', r'gcc (.*)') + +def get_clang_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'clang --version', r'clang version (.*)') + + +def get_cmake_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'cmake --version', r'cmake (.*)') + + +def get_nvidia_driver_version(run_lambda): + if get_platform() == 'darwin': + cmd = 'kextstat | grep -i cuda' + return run_and_parse_first_match(run_lambda, cmd, + r'com[.]nvidia[.]CUDA [(](.*?)[)]') + smi = get_nvidia_smi() + return run_and_parse_first_match(run_lambda, smi, r'Driver Version: (.*?) 
') + + +def get_gpu_info(run_lambda): + if get_platform() == 'darwin' or (TORCH_AVAILABLE and hasattr(torch.version, 'hip') and torch.version.hip is not None): + if TORCH_AVAILABLE and torch.cuda.is_available(): + if torch.version.hip is not None: + prop = torch.cuda.get_device_properties(0) + if hasattr(prop, "gcnArchName"): + gcnArch = " ({})".format(prop.gcnArchName) + else: + gcnArch = "NoGCNArchNameOnOldPyTorch" + else: + gcnArch = "" + return torch.cuda.get_device_name(None) + gcnArch + return None + smi = get_nvidia_smi() + uuid_regex = re.compile(r' \(UUID: .+?\)') + rc, out, _ = run_lambda(smi + ' -L') + if rc != 0: + return None + # Anonymize GPUs by removing their UUID + return re.sub(uuid_regex, '', out) + + +def get_running_cuda_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'nvcc --version', r'release .+ V(.*)') + + +def get_cudnn_version(run_lambda): + """Return a list of libcudnn.so; it's hard to tell which one is being used.""" + if get_platform() == 'win32': + system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows') + cuda_path = os.environ.get('CUDA_PATH', "%CUDA_PATH%") + where_cmd = os.path.join(system_root, 'System32', 'where') + cudnn_cmd = '{} /R "{}\\bin" cudnn*.dll'.format(where_cmd, cuda_path) + elif get_platform() == 'darwin': + # CUDA libraries and drivers can be found in /usr/local/cuda/. See + # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install + # https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac + # Use CUDNN_LIBRARY when cudnn library is installed elsewhere. + cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*' + else: + cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev' + rc, out, _ = run_lambda(cudnn_cmd) + # find will return 1 if there are permission errors or if not found + if len(out) == 0 or (rc != 1 and rc != 0): + l = os.environ.get('CUDNN_LIBRARY') + if l is not None and os.path.isfile(l): + return os.path.realpath(l) + return None + files_set = set() + for fn in out.split('\n'): + fn = os.path.realpath(fn) # eliminate symbolic links + if os.path.isfile(fn): + files_set.add(fn) + if not files_set: + return None + # Alphabetize the result because the order is non-deterministic otherwise + files = sorted(files_set) + if len(files) == 1: + return files[0] + result = '\n'.join(files) + return 'Probably one of the following:\n{}'.format(result) + + +def get_nvidia_smi(): + # Note: nvidia-smi is currently available only on Windows and Linux + smi = 'nvidia-smi' + if get_platform() == 'win32': + system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows') + program_files_root = os.environ.get('PROGRAMFILES', 'C:\\Program Files') + legacy_path = os.path.join(program_files_root, 'NVIDIA Corporation', 'NVSMI', smi) + new_path = os.path.join(system_root, 'System32', smi) + smis = [new_path, legacy_path] + for candidate_smi in smis: + if os.path.exists(candidate_smi): + smi = '"{}"'.format(candidate_smi) + break + return smi + + +# example outputs of CPU infos +# * linux +# Architecture: x86_64 +# CPU op-mode(s): 32-bit, 64-bit +# Address sizes: 46 bits physical, 48 bits virtual +# Byte Order: Little Endian +# CPU(s): 128 +# On-line CPU(s) list: 0-127 +# Vendor ID: GenuineIntel +# Model name: Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz +# CPU family: 6 +# Model: 106 +# Thread(s) per core: 2 +# Core(s) per socket: 32 +# Socket(s): 2 +# Stepping: 6 +# BogoMIPS: 5799.78 +# Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr +# 
sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl +# xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq monitor ssse3 fma cx16 +# pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand +# hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced +# fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap +# avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 +# xsaves wbnoinvd ida arat avx512vbmi pku ospke avx512_vbmi2 gfni vaes vpclmulqdq +# avx512_vnni avx512_bitalg tme avx512_vpopcntdq rdpid md_clear flush_l1d arch_capabilities +# Virtualization features: +# Hypervisor vendor: KVM +# Virtualization type: full +# Caches (sum of all): +# L1d: 3 MiB (64 instances) +# L1i: 2 MiB (64 instances) +# L2: 80 MiB (64 instances) +# L3: 108 MiB (2 instances) +# NUMA: +# NUMA node(s): 2 +# NUMA node0 CPU(s): 0-31,64-95 +# NUMA node1 CPU(s): 32-63,96-127 +# Vulnerabilities: +# Itlb multihit: Not affected +# L1tf: Not affected +# Mds: Not affected +# Meltdown: Not affected +# Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown +# Retbleed: Not affected +# Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp +# Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization +# Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence +# Srbds: Not affected +# Tsx async abort: Not affected +# * win32 +# Architecture=9 +# CurrentClockSpeed=2900 +# DeviceID=CPU0 +# Family=179 +# L2CacheSize=40960 +# L2CacheSpeed= +# Manufacturer=GenuineIntel +# MaxClockSpeed=2900 +# Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz +# ProcessorType=3 +# Revision=27142 +# +# Architecture=9 +# CurrentClockSpeed=2900 +# DeviceID=CPU1 +# Family=179 +# L2CacheSize=40960 +# L2CacheSpeed= +# Manufacturer=GenuineIntel +# MaxClockSpeed=2900 +# Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz +# ProcessorType=3 +# Revision=27142 + +def get_cpu_info(run_lambda): + rc, out, err = 0, '', '' + if get_platform() == 'linux': + rc, out, err = run_lambda('lscpu') + elif get_platform() == 'win32': + rc, out, err = run_lambda('wmic cpu get Name,Manufacturer,Family,Architecture,ProcessorType,DeviceID, \ + CurrentClockSpeed,MaxClockSpeed,L2CacheSize,L2CacheSpeed,Revision /VALUE') + elif get_platform() == 'darwin': + rc, out, err = run_lambda("sysctl -n machdep.cpu.brand_string") + cpu_info = 'None' + if rc == 0: + cpu_info = out + else: + cpu_info = err + return cpu_info + + +def get_platform(): + if sys.platform.startswith('linux'): + return 'linux' + elif sys.platform.startswith('win32'): + return 'win32' + elif sys.platform.startswith('cygwin'): + return 'cygwin' + elif sys.platform.startswith('darwin'): + return 'darwin' + else: + return sys.platform + + +def get_mac_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'sw_vers -productVersion', r'(.*)') + + +def get_windows_version(run_lambda): + system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows') + wmic_cmd = os.path.join(system_root, 'System32', 'Wbem', 'wmic') + findstr_cmd = os.path.join(system_root, 'System32', 'findstr') + return run_and_read_all(run_lambda, '{} os get Caption | {} /v Caption'.format(wmic_cmd, findstr_cmd)) + + +def get_lsb_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'lsb_release -a', r'Description:\t(.*)') + + +def 
check_release_file(run_lambda): + return run_and_parse_first_match(run_lambda, 'cat /etc/*-release', + r'PRETTY_NAME="(.*)"') + + +def get_os(run_lambda): + from platform import machine + platform = get_platform() + + if platform == 'win32' or platform == 'cygwin': + return get_windows_version(run_lambda) + + if platform == 'darwin': + version = get_mac_version(run_lambda) + if version is None: + return None + return 'macOS {} ({})'.format(version, machine()) + + if platform == 'linux': + # Ubuntu/Debian based + desc = get_lsb_version(run_lambda) + if desc is not None: + return '{} ({})'.format(desc, machine()) + + # Try reading /etc/*-release + desc = check_release_file(run_lambda) + if desc is not None: + return '{} ({})'.format(desc, machine()) + + return '{} ({})'.format(platform, machine()) + + # Unknown platform + return platform + + +def get_python_platform(): + import platform + return platform.platform() + + +def get_libc_version(): + import platform + if get_platform() != 'linux': + return 'N/A' + return '-'.join(platform.libc_ver()) + + +def get_pip_packages(run_lambda, patterns=None): + """Return `pip list` output. Note: will also find conda-installed pytorch and numpy packages.""" + if patterns is None: + patterns = DEFAULT_PIP_PATTERNS + + # People generally have `pip` as `pip` or `pip3` + # But here it is invoked as `python -mpip` + def run_with_pip(pip): + out = run_and_read_all(run_lambda, pip + ["list", "--format=freeze"]) + return "\n".join( + line + for line in out.splitlines() + if any(name in line for name in patterns) + ) + + pip_version = 'pip3' if sys.version[0] == '3' else 'pip' + out = run_with_pip([sys.executable, '-mpip']) + + return pip_version, out + + +def get_cachingallocator_config(): + ca_config = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', '') + return ca_config + + +def get_cuda_module_loading_config(): + if TORCH_AVAILABLE and torch.cuda.is_available(): + torch.cuda.init() + config = os.environ.get('CUDA_MODULE_LOADING', '') + return config + else: + return "N/A" + + +def is_xnnpack_available(): + if TORCH_AVAILABLE: + import torch.backends.xnnpack + return str(torch.backends.xnnpack.enabled) # type: ignore[attr-defined] + else: + return "N/A" + +def get_env_info(): + run_lambda = run + pip_version, pip_list_output = get_pip_packages(run_lambda) + + if TORCH_AVAILABLE: + version_str = torch.__version__ + debug_mode_str = str(torch.version.debug) + cuda_available_str = str(torch.cuda.is_available()) + cuda_version_str = torch.version.cuda + if not hasattr(torch.version, 'hip') or torch.version.hip is None: # cuda version + hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A' + else: # HIP version + def get_version_or_na(cfg, prefix): + _lst = [s.rsplit(None, 1)[-1] for s in cfg if prefix in s] + return _lst[0] if _lst else 'N/A' + + cfg = torch._C._show_config().split('\n') + hip_runtime_version = get_version_or_na(cfg, 'HIP Runtime') + miopen_runtime_version = get_version_or_na(cfg, 'MIOpen') + cuda_version_str = 'N/A' + hip_compiled_version = torch.version.hip + else: + version_str = debug_mode_str = cuda_available_str = cuda_version_str = 'N/A' + hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A' + + sys_version = sys.version.replace("\n", " ") + + conda_packages = get_conda_packages(run_lambda) + + return SystemEnv( + torch_version=version_str, + is_debug_build=debug_mode_str, + python_version='{} ({}-bit runtime)'.format(sys_version, sys.maxsize.bit_length() + 1), + 
python_platform=get_python_platform(), + is_cuda_available=cuda_available_str, + cuda_compiled_version=cuda_version_str, + cuda_runtime_version=get_running_cuda_version(run_lambda), + cuda_module_loading=get_cuda_module_loading_config(), + nvidia_gpu_models=get_gpu_info(run_lambda), + nvidia_driver_version=get_nvidia_driver_version(run_lambda), + cudnn_version=get_cudnn_version(run_lambda), + hip_compiled_version=hip_compiled_version, + hip_runtime_version=hip_runtime_version, + miopen_runtime_version=miopen_runtime_version, + pip_version=pip_version, + pip_packages=pip_list_output, + conda_packages=conda_packages, + os=get_os(run_lambda), + libc_version=get_libc_version(), + gcc_version=get_gcc_version(run_lambda), + clang_version=get_clang_version(run_lambda), + cmake_version=get_cmake_version(run_lambda), + caching_allocator_config=get_cachingallocator_config(), + is_xnnpack_available=is_xnnpack_available(), + cpu_info=get_cpu_info(run_lambda), + ) + +env_info_fmt = """ +PyTorch version: {torch_version} +Is debug build: {is_debug_build} +CUDA used to build PyTorch: {cuda_compiled_version} +ROCM used to build PyTorch: {hip_compiled_version} + +OS: {os} +GCC version: {gcc_version} +Clang version: {clang_version} +CMake version: {cmake_version} +Libc version: {libc_version} + +Python version: {python_version} +Python platform: {python_platform} +Is CUDA available: {is_cuda_available} +CUDA runtime version: {cuda_runtime_version} +CUDA_MODULE_LOADING set to: {cuda_module_loading} +GPU models and configuration: {nvidia_gpu_models} +Nvidia driver version: {nvidia_driver_version} +cuDNN version: {cudnn_version} +HIP runtime version: {hip_runtime_version} +MIOpen runtime version: {miopen_runtime_version} +Is XNNPACK available: {is_xnnpack_available} + +CPU: +{cpu_info} + +Versions of relevant libraries: +{pip_packages} +{conda_packages} +""".strip() + + +def pretty_str(envinfo): + def replace_nones(dct, replacement='Could not collect'): + for key in dct.keys(): + if dct[key] is not None: + continue + dct[key] = replacement + return dct + + def replace_bools(dct, true='Yes', false='No'): + for key in dct.keys(): + if dct[key] is True: + dct[key] = true + elif dct[key] is False: + dct[key] = false + return dct + + def prepend(text, tag='[prepend]'): + lines = text.split('\n') + updated_lines = [tag + line for line in lines] + return '\n'.join(updated_lines) + + def replace_if_empty(text, replacement='No relevant packages'): + if text is not None and len(text) == 0: + return replacement + return text + + def maybe_start_on_next_line(string): + # If `string` is multiline, prepend a \n to it. 
+ if string is not None and len(string.split('\n')) > 1: + return '\n{}\n'.format(string) + return string + + mutable_dict = envinfo._asdict() + + # If nvidia_gpu_models is multiline, start on the next line + mutable_dict['nvidia_gpu_models'] = \ + maybe_start_on_next_line(envinfo.nvidia_gpu_models) + + # If the machine doesn't have CUDA, report some fields as 'No CUDA' + dynamic_cuda_fields = [ + 'cuda_runtime_version', + 'nvidia_gpu_models', + 'nvidia_driver_version', + ] + all_cuda_fields = dynamic_cuda_fields + ['cudnn_version'] + all_dynamic_cuda_fields_missing = all( + mutable_dict[field] is None for field in dynamic_cuda_fields) + if TORCH_AVAILABLE and not torch.cuda.is_available() and all_dynamic_cuda_fields_missing: + for field in all_cuda_fields: + mutable_dict[field] = 'No CUDA' + if envinfo.cuda_compiled_version is None: + mutable_dict['cuda_compiled_version'] = 'None' + + # Replace True with Yes, False with No + mutable_dict = replace_bools(mutable_dict) + + # Replace all None objects with 'Could not collect' + mutable_dict = replace_nones(mutable_dict) + + # If either of these are '', replace with 'No relevant packages' + mutable_dict['pip_packages'] = replace_if_empty(mutable_dict['pip_packages']) + mutable_dict['conda_packages'] = replace_if_empty(mutable_dict['conda_packages']) + + # Tag conda and pip packages with a prefix + # If they were previously None, they'll show up as ie '[conda] Could not collect' + if mutable_dict['pip_packages']: + mutable_dict['pip_packages'] = prepend(mutable_dict['pip_packages'], + '[{}] '.format(envinfo.pip_version)) + if mutable_dict['conda_packages']: + mutable_dict['conda_packages'] = prepend(mutable_dict['conda_packages'], + '[conda] ') + mutable_dict['cpu_info'] = envinfo.cpu_info + return env_info_fmt.format(**mutable_dict) + + +def get_pretty_env_info(): + return pretty_str(get_env_info()) + + +def main(): + print("Collecting environment information...") + output = get_pretty_env_info() + print(output) + + if TORCH_AVAILABLE and hasattr(torch, 'utils') and hasattr(torch.utils, '_crash_handler'): + minidump_dir = torch.utils._crash_handler.DEFAULT_MINIDUMP_DIR + if sys.platform == "linux" and os.path.exists(minidump_dir): + dumps = [os.path.join(minidump_dir, dump) for dump in os.listdir(minidump_dir)] + latest = max(dumps, key=os.path.getctime) + ctime = os.path.getctime(latest) + creation_time = datetime.datetime.fromtimestamp(ctime).strftime('%Y-%m-%d %H:%M:%S') + msg = "\n*** Detected a minidump at {} created on {}, ".format(latest, creation_time) + \ + "if this is related to your bug please include it when you file a report ***" + print(msg, file=sys.stderr) + + + +if __name__ == '__main__': + main() diff --git a/venv/lib/python3.10/site-packages/torch/utils/cpp_backtrace.py b/venv/lib/python3.10/site-packages/torch/utils/cpp_backtrace.py new file mode 100644 index 0000000000000000000000000000000000000000..40dbbb5b913af0ca725ef5c6cab9fee1a3ffec70 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/cpp_backtrace.py @@ -0,0 +1,11 @@ +from torch._C import _get_cpp_backtrace + +def get_cpp_backtrace(frames_to_skip=0, maximum_number_of_frames=64) -> str: + r""" + Return a string containing the C++ stack trace of the current thread. 
+ + Args: + frames_to_skip (int): the number of frames to skip from the top of the stack + maximum_number_of_frames (int): the maximum number of frames to return + """ + return _get_cpp_backtrace(frames_to_skip, maximum_number_of_frames) diff --git a/venv/lib/python3.10/site-packages/torch/utils/cpp_extension.py b/venv/lib/python3.10/site-packages/torch/utils/cpp_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..d3d264d61729586aa421a18388a684eb118a0df2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/cpp_extension.py @@ -0,0 +1,2428 @@ +import copy +import glob +import importlib +import importlib.abc +import os +import re +import shlex +import shutil +import setuptools +import subprocess +import sys +import sysconfig +import warnings +import collections +from pathlib import Path +import errno + +import torch +import torch._appdirs +from .file_baton import FileBaton +from ._cpp_extension_versioner import ExtensionVersioner +from .hipify import hipify_python +from .hipify.hipify_python import GeneratedFileCleaner +from typing import Dict, List, Optional, Union, Tuple +from torch.torch_version import TorchVersion, Version + +from setuptools.command.build_ext import build_ext + +IS_WINDOWS = sys.platform == 'win32' +IS_MACOS = sys.platform.startswith('darwin') +IS_LINUX = sys.platform.startswith('linux') +LIB_EXT = '.pyd' if IS_WINDOWS else '.so' +EXEC_EXT = '.exe' if IS_WINDOWS else '' +CLIB_PREFIX = '' if IS_WINDOWS else 'lib' +CLIB_EXT = '.dll' if IS_WINDOWS else '.so' +SHARED_FLAG = '/DLL' if IS_WINDOWS else '-shared' + +_HERE = os.path.abspath(__file__) +_TORCH_PATH = os.path.dirname(os.path.dirname(_HERE)) +TORCH_LIB_PATH = os.path.join(_TORCH_PATH, 'lib') + + +SUBPROCESS_DECODE_ARGS = ('oem',) if IS_WINDOWS else () +MINIMUM_GCC_VERSION = (5, 0, 0) +MINIMUM_MSVC_VERSION = (19, 0, 24215) + +VersionRange = Tuple[Tuple[int, ...], Tuple[int, ...]] +VersionMap = Dict[str, VersionRange] +# The following values were taken from the following GitHub gist that +# summarizes the minimum valid major versions of g++/clang++ for each supported +# CUDA version: https://gist.github.com/ax3l/9489132 +# Or from include/crt/host_config.h in the CUDA SDK +# The second value is the exclusive(!) upper bound, i.e. 
min <= version < max +CUDA_GCC_VERSIONS: VersionMap = { + '11.0': (MINIMUM_GCC_VERSION, (10, 0)), + '11.1': (MINIMUM_GCC_VERSION, (11, 0)), + '11.2': (MINIMUM_GCC_VERSION, (11, 0)), + '11.3': (MINIMUM_GCC_VERSION, (11, 0)), + '11.4': ((6, 0, 0), (12, 0)), + '11.5': ((6, 0, 0), (12, 0)), + '11.6': ((6, 0, 0), (12, 0)), + '11.7': ((6, 0, 0), (12, 0)), +} + +MINIMUM_CLANG_VERSION = (3, 3, 0) +CUDA_CLANG_VERSIONS: VersionMap = { + '11.1': (MINIMUM_CLANG_VERSION, (11, 0)), + '11.2': (MINIMUM_CLANG_VERSION, (12, 0)), + '11.3': (MINIMUM_CLANG_VERSION, (12, 0)), + '11.4': (MINIMUM_CLANG_VERSION, (13, 0)), + '11.5': (MINIMUM_CLANG_VERSION, (13, 0)), + '11.6': (MINIMUM_CLANG_VERSION, (14, 0)), + '11.7': (MINIMUM_CLANG_VERSION, (14, 0)), +} + +__all__ = ["get_default_build_root", "check_compiler_ok_for_platform", "get_compiler_abi_compatibility_and_version", "BuildExtension", + "CppExtension", "CUDAExtension", "include_paths", "library_paths", "load", "load_inline", "is_ninja_available", + "verify_ninja_availability", "remove_extension_h_precompiler_headers", "get_cxx_compiler", "check_compiler_is_gcc"] +# Taken directly from python stdlib < 3.9 +# See https://github.com/pytorch/pytorch/issues/48617 +def _nt_quote_args(args: Optional[List[str]]) -> List[str]: + """Quote command-line arguments for DOS/Windows conventions. + + Just wraps every argument which contains blanks in double quotes, and + returns a new argument list. + """ + # Cover None-type + if not args: + return [] + return [f'"{arg}"' if ' ' in arg else arg for arg in args] + +def _find_cuda_home() -> Optional[str]: + """Find the CUDA install path.""" + # Guess #1 + cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH') + if cuda_home is None: + # Guess #2 + try: + which = 'where' if IS_WINDOWS else 'which' + with open(os.devnull, 'w') as devnull: + nvcc = subprocess.check_output([which, 'nvcc'], + stderr=devnull).decode(*SUBPROCESS_DECODE_ARGS).rstrip('\r\n') + cuda_home = os.path.dirname(os.path.dirname(nvcc)) + except Exception: + # Guess #3 + if IS_WINDOWS: + cuda_homes = glob.glob( + 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*') + if len(cuda_homes) == 0: + cuda_home = '' + else: + cuda_home = cuda_homes[0] + else: + cuda_home = '/usr/local/cuda' + if not os.path.exists(cuda_home): + cuda_home = None + if cuda_home and not torch.cuda.is_available(): + print(f"No CUDA runtime is found, using CUDA_HOME='{cuda_home}'", + file=sys.stderr) + return cuda_home + +def _find_rocm_home() -> Optional[str]: + """Find the ROCm install path.""" + # Guess #1 + rocm_home = os.environ.get('ROCM_HOME') or os.environ.get('ROCM_PATH') + if rocm_home is None: + # Guess #2 + hipcc_path = shutil.which('hipcc') + if hipcc_path is not None: + rocm_home = os.path.dirname(os.path.dirname( + os.path.realpath(hipcc_path))) + # can be either /hip/bin/hipcc or /bin/hipcc + if os.path.basename(rocm_home) == 'hip': + rocm_home = os.path.dirname(rocm_home) + else: + # Guess #3 + fallback_path = '/opt/rocm' + if os.path.exists(fallback_path): + rocm_home = fallback_path + if rocm_home and torch.version.hip is None: + print(f"No ROCm runtime is found, using ROCM_HOME='{rocm_home}'", + file=sys.stderr) + return rocm_home + + +def _join_rocm_home(*paths) -> str: + """ + Join paths with ROCM_HOME, or raises an error if it ROCM_HOME is not set. + + This is basically a lazy way of raising an error for missing $ROCM_HOME + only once we need to get any ROCm-specific path. 
+ """ + if ROCM_HOME is None: + raise OSError('ROCM_HOME environment variable is not set. ' + 'Please set it to your ROCm install root.') + elif IS_WINDOWS: + raise OSError('Building PyTorch extensions using ' + 'ROCm and Windows is not supported.') + return os.path.join(ROCM_HOME, *paths) + + +ABI_INCOMPATIBILITY_WARNING = ''' + + !! WARNING !! + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +Your compiler ({}) may be ABI-incompatible with PyTorch! +Please use a compiler that is ABI-compatible with GCC 5.0 and above. +See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html. + +See https://gist.github.com/goldsborough/d466f43e8ffc948ff92de7486c5216d6 +for instructions on how to install GCC 5 or higher. +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + !! WARNING !! +''' +WRONG_COMPILER_WARNING = ''' + + !! WARNING !! + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +Your compiler ({user_compiler}) is not compatible with the compiler Pytorch was +built with for this platform, which is {pytorch_compiler} on {platform}. Please +use {pytorch_compiler} to to compile your extension. Alternatively, you may +compile PyTorch from source using {user_compiler}, and then you can also use +{user_compiler} to compile your extension. + +See https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md for help +with compiling PyTorch from source. +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + !! WARNING !! +''' +CUDA_MISMATCH_MESSAGE = ''' +The detected CUDA version ({0}) mismatches the version that was used to compile +PyTorch ({1}). Please make sure to use the same CUDA versions. +''' +CUDA_MISMATCH_WARN = "The detected CUDA version ({0}) has a minor version mismatch with the version that was used to compile PyTorch ({1}). Most likely this shouldn't be a problem." +CUDA_NOT_FOUND_MESSAGE = ''' +CUDA was not found on the system, please set the CUDA_HOME or the CUDA_PATH +environment variable or add NVCC to your system PATH. The extension compilation will fail. +''' +ROCM_HOME = _find_rocm_home() +HIP_HOME = _join_rocm_home('hip') if ROCM_HOME else None +IS_HIP_EXTENSION = True if ((ROCM_HOME is not None) and (torch.version.hip is not None)) else False +ROCM_VERSION = None +if torch.version.hip is not None: + ROCM_VERSION = tuple(int(v) for v in torch.version.hip.split('.')[:2]) + +CUDA_HOME = _find_cuda_home() if torch.cuda._is_compiled() else None +CUDNN_HOME = os.environ.get('CUDNN_HOME') or os.environ.get('CUDNN_PATH') +# PyTorch releases have the version pattern major.minor.patch, whereas when +# PyTorch is built from source, we append the git commit hash, which gives +# it the below pattern. 
+BUILT_FROM_SOURCE_VERSION_PATTERN = re.compile(r'\d+\.\d+\.\d+\w+\+\w+') + +COMMON_MSVC_FLAGS = ['/MD', '/wd4819', '/wd4251', '/wd4244', '/wd4267', '/wd4275', '/wd4018', '/wd4190', '/wd4624', '/wd4067', '/wd4068', '/EHsc'] + +MSVC_IGNORE_CUDAFE_WARNINGS = [ + 'base_class_has_different_dll_interface', + 'field_without_dll_interface', + 'dll_interface_conflict_none_assumed', + 'dll_interface_conflict_dllexport_assumed' +] + +COMMON_NVCC_FLAGS = [ + '-D__CUDA_NO_HALF_OPERATORS__', + '-D__CUDA_NO_HALF_CONVERSIONS__', + '-D__CUDA_NO_BFLOAT16_CONVERSIONS__', + '-D__CUDA_NO_HALF2_OPERATORS__', + '--expt-relaxed-constexpr' +] + +COMMON_HIP_FLAGS = [ + '-fPIC', + '-D__HIP_PLATFORM_AMD__=1', + '-DUSE_ROCM=1', +] + +if ROCM_VERSION is not None and ROCM_VERSION >= (6, 0): + COMMON_HIP_FLAGS.append('-DHIPBLAS_V2') + +COMMON_HIPCC_FLAGS = [ + '-DCUDA_HAS_FP16=1', + '-D__HIP_NO_HALF_OPERATORS__=1', + '-D__HIP_NO_HALF_CONVERSIONS__=1', +] + +JIT_EXTENSION_VERSIONER = ExtensionVersioner() + +PLAT_TO_VCVARS = { + 'win32' : 'x86', + 'win-amd64' : 'x86_amd64', +} + +def get_cxx_compiler(): + if IS_WINDOWS: + compiler = os.environ.get('CXX', 'cl') + else: + compiler = os.environ.get('CXX', 'c++') + return compiler + +def _is_binary_build() -> bool: + return not BUILT_FROM_SOURCE_VERSION_PATTERN.match(torch.version.__version__) + + +def _accepted_compilers_for_platform() -> List[str]: + # gnu-c++ and gnu-cc are the conda gcc compilers + return ['clang++', 'clang'] if IS_MACOS else ['g++', 'gcc', 'gnu-c++', 'gnu-cc', 'clang++', 'clang'] + +def _maybe_write(filename, new_content): + r''' + Equivalent to writing the content into the file but will not touch the file + if it already had the right content (to avoid triggering recompile). + ''' + if os.path.exists(filename): + with open(filename) as f: + content = f.read() + + if content == new_content: + # The file already contains the right thing! + return + + with open(filename, 'w') as source_file: + source_file.write(new_content) + +def get_default_build_root() -> str: + """ + Return the path to the root folder under which extensions will built. + + For each extension module built, there will be one folder underneath the + folder returned by this function. For example, if ``p`` is the path + returned by this function and ``ext`` the name of an extension, the build + folder for the extension will be ``p/ext``. + + This directory is **user-specific** so that multiple users on the same + machine won't meet permission issues. + """ + return os.path.realpath(torch._appdirs.user_cache_dir(appname='torch_extensions')) + + +def check_compiler_ok_for_platform(compiler: str) -> bool: + """ + Verify that the compiler is the expected one for the current platform. + + Args: + compiler (str): The compiler executable to check. + + Returns: + True if the compiler is gcc/g++ on Linux or clang/clang++ on macOS, + and always True for Windows. + """ + if IS_WINDOWS: + return True + which = subprocess.check_output(['which', compiler], stderr=subprocess.STDOUT) + # Use os.path.realpath to resolve any symlinks, in particular from 'c++' to e.g. 'g++'. 
+ compiler_path = os.path.realpath(which.decode(*SUBPROCESS_DECODE_ARGS).strip()) + # Check the compiler name + if any(name in compiler_path for name in _accepted_compilers_for_platform()): + return True + # If compiler wrapper is used try to infer the actual compiler by invoking it with -v flag + env = os.environ.copy() + env['LC_ALL'] = 'C' # Don't localize output + version_string = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT, env=env).decode(*SUBPROCESS_DECODE_ARGS) + if IS_LINUX: + # Check for 'gcc' or 'g++' for sccache wrapper + pattern = re.compile("^COLLECT_GCC=(.*)$", re.MULTILINE) + results = re.findall(pattern, version_string) + if len(results) != 1: + # Clang is also a supported compiler on Linux + # Though on Ubuntu it's sometimes called "Ubuntu clang version" + return 'clang version' in version_string + compiler_path = os.path.realpath(results[0].strip()) + # On RHEL/CentOS c++ is a gcc compiler wrapper + if os.path.basename(compiler_path) == 'c++' and 'gcc version' in version_string: + return True + return any(name in compiler_path for name in _accepted_compilers_for_platform()) + if IS_MACOS: + # Check for 'clang' or 'clang++' + return version_string.startswith("Apple clang") + return False + + +def get_compiler_abi_compatibility_and_version(compiler) -> Tuple[bool, TorchVersion]: + """ + Determine if the given compiler is ABI-compatible with PyTorch alongside its version. + + Args: + compiler (str): The compiler executable name to check (e.g. ``g++``). + Must be executable in a shell process. + + Returns: + A tuple that contains a boolean that defines if the compiler is (likely) ABI-incompatible with PyTorch, + followed by a `TorchVersion` string that contains the compiler version separated by dots. + """ + if not _is_binary_build(): + return (True, TorchVersion('0.0.0')) + if os.environ.get('TORCH_DONT_CHECK_COMPILER_ABI') in ['ON', '1', 'YES', 'TRUE', 'Y']: + return (True, TorchVersion('0.0.0')) + + # First check if the compiler is one of the expected ones for the particular platform. + if not check_compiler_ok_for_platform(compiler): + warnings.warn(WRONG_COMPILER_WARNING.format( + user_compiler=compiler, + pytorch_compiler=_accepted_compilers_for_platform()[0], + platform=sys.platform)) + return (False, TorchVersion('0.0.0')) + + if IS_MACOS: + # There is no particular minimum version we need for clang, so we're good here. 
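+ # ('0.0.0' is a placeholder version returned whenever the real check is skipped.)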
+ return (True, TorchVersion('0.0.0')) + try: + if IS_LINUX: + minimum_required_version = MINIMUM_GCC_VERSION + versionstr = subprocess.check_output([compiler, '-dumpfullversion', '-dumpversion']) + version = versionstr.decode(*SUBPROCESS_DECODE_ARGS).strip().split('.') + else: + minimum_required_version = MINIMUM_MSVC_VERSION + compiler_info = subprocess.check_output(compiler, stderr=subprocess.STDOUT) + match = re.search(r'(\d+)\.(\d+)\.(\d+)', compiler_info.decode(*SUBPROCESS_DECODE_ARGS).strip()) + version = ['0', '0', '0'] if match is None else list(match.groups()) + except Exception: + _, error, _ = sys.exc_info() + warnings.warn(f'Error checking compiler version for {compiler}: {error}') + return (False, TorchVersion('0.0.0')) + + if tuple(map(int, version)) >= minimum_required_version: + return (True, TorchVersion('.'.join(version))) + + compiler = f'{compiler} {".".join(version)}' + warnings.warn(ABI_INCOMPATIBILITY_WARNING.format(compiler)) + + return (False, TorchVersion('.'.join(version))) + + +def _check_cuda_version(compiler_name: str, compiler_version: TorchVersion) -> None: + if not CUDA_HOME: + raise RuntimeError(CUDA_NOT_FOUND_MESSAGE) + + nvcc = os.path.join(CUDA_HOME, 'bin', 'nvcc') + cuda_version_str = subprocess.check_output([nvcc, '--version']).strip().decode(*SUBPROCESS_DECODE_ARGS) + cuda_version = re.search(r'release (\d+[.]\d+)', cuda_version_str) + if cuda_version is None: + return + + cuda_str_version = cuda_version.group(1) + cuda_ver = Version(cuda_str_version) + if torch.version.cuda is None: + return + + torch_cuda_version = Version(torch.version.cuda) + if cuda_ver != torch_cuda_version: + # major/minor attributes are only available in setuptools>=49.4.0 + if getattr(cuda_ver, "major", None) is None: + raise ValueError("setuptools>=49.4.0 is required") + if cuda_ver.major != torch_cuda_version.major: + raise RuntimeError(CUDA_MISMATCH_MESSAGE.format(cuda_str_version, torch.version.cuda)) + warnings.warn(CUDA_MISMATCH_WARN.format(cuda_str_version, torch.version.cuda)) + + if not (sys.platform.startswith('linux') and + os.environ.get('TORCH_DONT_CHECK_COMPILER_ABI') not in ['ON', '1', 'YES', 'TRUE', 'Y'] and + _is_binary_build()): + return + + cuda_compiler_bounds: VersionMap = CUDA_CLANG_VERSIONS if compiler_name.startswith('clang') else CUDA_GCC_VERSIONS + + if cuda_str_version not in cuda_compiler_bounds: + warnings.warn(f'There are no {compiler_name} version bounds defined for CUDA version {cuda_str_version}') + else: + min_compiler_version, max_excl_compiler_version = cuda_compiler_bounds[cuda_str_version] + # Special case for 11.4.0, which has lower compiler bounds than 11.4.1 + if "V11.4.48" in cuda_version_str and cuda_compiler_bounds == CUDA_GCC_VERSIONS: + max_excl_compiler_version = (11, 0) + min_compiler_version_str = '.'.join(map(str, min_compiler_version)) + max_excl_compiler_version_str = '.'.join(map(str, max_excl_compiler_version)) + + version_bound_str = f'>={min_compiler_version_str}, <{max_excl_compiler_version_str}' + + if compiler_version < TorchVersion(min_compiler_version_str): + raise RuntimeError( + f'The current installed version of {compiler_name} ({compiler_version}) is less ' + f'than the minimum required version by CUDA {cuda_str_version} ({min_compiler_version_str}). ' + f'Please make sure to use an adequate version of {compiler_name} ({version_bound_str}).' 
+ ) + if compiler_version >= TorchVersion(max_excl_compiler_version_str): + raise RuntimeError( + f'The current installed version of {compiler_name} ({compiler_version}) is greater ' + f'than the maximum required version by CUDA {cuda_str_version}. ' + f'Please make sure to use an adequate version of {compiler_name} ({version_bound_str}).' + ) + + +class BuildExtension(build_ext): + """ + A custom :mod:`setuptools` build extension . + + This :class:`setuptools.build_ext` subclass takes care of passing the + minimum required compiler flags (e.g. ``-std=c++17``) as well as mixed + C++/CUDA compilation (and support for CUDA files in general). + + When using :class:`BuildExtension`, it is allowed to supply a dictionary + for ``extra_compile_args`` (rather than the usual list) that maps from + languages (``cxx`` or ``nvcc``) to a list of additional compiler flags to + supply to the compiler. This makes it possible to supply different flags to + the C++ and CUDA compiler during mixed compilation. + + ``use_ninja`` (bool): If ``use_ninja`` is ``True`` (default), then we + attempt to build using the Ninja backend. Ninja greatly speeds up + compilation compared to the standard ``setuptools.build_ext``. + Fallbacks to the standard distutils backend if Ninja is not available. + + .. note:: + By default, the Ninja backend uses #CPUS + 2 workers to build the + extension. This may use up too many resources on some systems. One + can control the number of workers by setting the `MAX_JOBS` environment + variable to a non-negative number. + """ + + @classmethod + def with_options(cls, **options): + """Return a subclass with alternative constructor that extends any original keyword arguments to the original constructor with the given options.""" + class cls_with_options(cls): # type: ignore[misc, valid-type] + def __init__(self, *args, **kwargs): + kwargs.update(options) + super().__init__(*args, **kwargs) + + return cls_with_options + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.no_python_abi_suffix = kwargs.get("no_python_abi_suffix", False) + + self.use_ninja = kwargs.get('use_ninja', True) + if self.use_ninja: + # Test if we can use ninja. Fallback otherwise. + msg = ('Attempted to use ninja as the BuildExtension backend but ' + '{}. Falling back to using the slow distutils backend.') + if not is_ninja_available(): + warnings.warn(msg.format('we could not find ninja.')) + self.use_ninja = False + + def finalize_options(self) -> None: + super().finalize_options() + if self.use_ninja: + self.force = True + + def build_extensions(self) -> None: + compiler_name, compiler_version = self._check_abi() + + cuda_ext = False + extension_iter = iter(self.extensions) + extension = next(extension_iter, None) + while not cuda_ext and extension: + for source in extension.sources: + _, ext = os.path.splitext(source) + if ext == '.cu': + cuda_ext = True + break + extension = next(extension_iter, None) + + if cuda_ext and not IS_HIP_EXTENSION: + _check_cuda_version(compiler_name, compiler_version) + + for extension in self.extensions: + # Ensure at least an empty list of flags for 'cxx' and 'nvcc' when + # extra_compile_args is a dict. Otherwise, default torch flags do + # not get passed. Necessary when only one of 'cxx' and 'nvcc' is + # passed to extra_compile_args in CUDAExtension, i.e. 
+ # CUDAExtension(..., extra_compile_args={'cxx': [...]}) + # or + # CUDAExtension(..., extra_compile_args={'nvcc': [...]}) + if isinstance(extension.extra_compile_args, dict): + for ext in ['cxx', 'nvcc']: + if ext not in extension.extra_compile_args: + extension.extra_compile_args[ext] = [] + + self._add_compile_flag(extension, '-DTORCH_API_INCLUDE_EXTENSION_H') + # See note [Pybind11 ABI constants] + for name in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]: + val = getattr(torch._C, f"_PYBIND11_{name}") + if val is not None and not IS_WINDOWS: + self._add_compile_flag(extension, f'-DPYBIND11_{name}="{val}"') + self._define_torch_extension_name(extension) + self._add_gnu_cpp_abi_flag(extension) + + if 'nvcc_dlink' in extension.extra_compile_args: + assert self.use_ninja, f"With dlink=True, ninja is required to build cuda extension {extension.name}." + + # Register .cu, .cuh, .hip, and .mm as valid source extensions. + self.compiler.src_extensions += ['.cu', '.cuh', '.hip'] + if torch.backends.mps.is_built(): + self.compiler.src_extensions += ['.mm'] + # Save the original _compile method for later. + if self.compiler.compiler_type == 'msvc': + self.compiler._cpp_extensions += ['.cu', '.cuh'] + original_compile = self.compiler.compile + original_spawn = self.compiler.spawn + else: + original_compile = self.compiler._compile + + def append_std17_if_no_std_present(cflags) -> None: + # NVCC does not allow multiple -std to be passed, so we avoid + # overriding the option if the user explicitly passed it. + cpp_format_prefix = '/{}:' if self.compiler.compiler_type == 'msvc' else '-{}=' + cpp_flag_prefix = cpp_format_prefix.format('std') + cpp_flag = cpp_flag_prefix + 'c++17' + if not any(flag.startswith(cpp_flag_prefix) for flag in cflags): + cflags.append(cpp_flag) + + def unix_cuda_flags(cflags): + cflags = (COMMON_NVCC_FLAGS + + ['--compiler-options', "'-fPIC'"] + + cflags + _get_cuda_arch_flags(cflags)) + + # NVCC does not allow multiple -ccbin/--compiler-bindir to be passed, so we avoid + # overriding the option if the user explicitly passed it. + _ccbin = os.getenv("CC") + if ( + _ccbin is not None + and not any(flag.startswith(('-ccbin', '--compiler-bindir')) for flag in cflags) + ): + cflags.extend(['-ccbin', _ccbin]) + + return cflags + + def convert_to_absolute_paths_inplace(paths): + # Helper function. See Note [Absolute include_dirs] + if paths is not None: + for i in range(len(paths)): + if not os.path.isabs(paths[i]): + paths[i] = os.path.abspath(paths[i]) + + def unix_wrap_single_compile(obj, src, ext, cc_args, extra_postargs, pp_opts) -> None: + # Copy before we make any modifications. + cflags = copy.deepcopy(extra_postargs) + try: + original_compiler = self.compiler.compiler_so + if _is_cuda_file(src): + nvcc = [_join_rocm_home('bin', 'hipcc') if IS_HIP_EXTENSION else _join_cuda_home('bin', 'nvcc')] + self.compiler.set_executable('compiler_so', nvcc) + if isinstance(cflags, dict): + cflags = cflags['nvcc'] + if IS_HIP_EXTENSION: + cflags = COMMON_HIPCC_FLAGS + cflags + _get_rocm_arch_flags(cflags) + else: + cflags = unix_cuda_flags(cflags) + elif isinstance(cflags, dict): + cflags = cflags['cxx'] + if IS_HIP_EXTENSION: + cflags = COMMON_HIP_FLAGS + cflags + append_std17_if_no_std_present(cflags) + + original_compile(obj, src, ext, cc_args, cflags, pp_opts) + finally: + # Put the original compiler back in place. 
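+ # (compiler_so was temporarily swapped to nvcc or hipcc above for CUDA/HIP sources.)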
+ self.compiler.set_executable('compiler_so', original_compiler) + + def unix_wrap_ninja_compile(sources, + output_dir=None, + macros=None, + include_dirs=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + depends=None): + r"""Compiles sources by outputting a ninja file and running it.""" + # NB: I copied some lines from self.compiler (which is an instance + # of distutils.UnixCCompiler). See the following link. + # https://github.com/python/cpython/blob/f03a8f8d5001963ad5b5b28dbd95497e9cc15596/Lib/distutils/ccompiler.py#L564-L567 + # This can be fragile, but a lot of other repos also do this + # (see https://github.com/search?q=_setup_compile&type=Code) + # so it is probably OK; we'll also get CI signal if/when + # we update our python version (which is when distutils can be + # upgraded) + + # Use absolute path for output_dir so that the object file paths + # (`objects`) get generated with absolute paths. + output_dir = os.path.abspath(output_dir) + + # See Note [Absolute include_dirs] + convert_to_absolute_paths_inplace(self.compiler.include_dirs) + + _, objects, extra_postargs, pp_opts, _ = \ + self.compiler._setup_compile(output_dir, macros, + include_dirs, sources, + depends, extra_postargs) + common_cflags = self.compiler._get_cc_args(pp_opts, debug, extra_preargs) + extra_cc_cflags = self.compiler.compiler_so[1:] + with_cuda = any(map(_is_cuda_file, sources)) + + # extra_postargs can be either: + # - a dict mapping cxx/nvcc to extra flags + # - a list of extra flags. + if isinstance(extra_postargs, dict): + post_cflags = extra_postargs['cxx'] + else: + post_cflags = list(extra_postargs) + if IS_HIP_EXTENSION: + post_cflags = COMMON_HIP_FLAGS + post_cflags + append_std17_if_no_std_present(post_cflags) + + cuda_post_cflags = None + cuda_cflags = None + if with_cuda: + cuda_cflags = common_cflags + if isinstance(extra_postargs, dict): + cuda_post_cflags = extra_postargs['nvcc'] + else: + cuda_post_cflags = list(extra_postargs) + if IS_HIP_EXTENSION: + cuda_post_cflags = cuda_post_cflags + _get_rocm_arch_flags(cuda_post_cflags) + cuda_post_cflags = COMMON_HIP_FLAGS + COMMON_HIPCC_FLAGS + cuda_post_cflags + else: + cuda_post_cflags = unix_cuda_flags(cuda_post_cflags) + append_std17_if_no_std_present(cuda_post_cflags) + cuda_cflags = [shlex.quote(f) for f in cuda_cflags] + cuda_post_cflags = [shlex.quote(f) for f in cuda_post_cflags] + + if isinstance(extra_postargs, dict) and 'nvcc_dlink' in extra_postargs: + cuda_dlink_post_cflags = unix_cuda_flags(extra_postargs['nvcc_dlink']) + else: + cuda_dlink_post_cflags = None + _write_ninja_file_and_compile_objects( + sources=sources, + objects=objects, + cflags=[shlex.quote(f) for f in extra_cc_cflags + common_cflags], + post_cflags=[shlex.quote(f) for f in post_cflags], + cuda_cflags=cuda_cflags, + cuda_post_cflags=cuda_post_cflags, + cuda_dlink_post_cflags=cuda_dlink_post_cflags, + build_directory=output_dir, + verbose=True, + with_cuda=with_cuda) + + # Return *all* object filenames, not just the ones we just built. 
+ return objects + + def win_cuda_flags(cflags): + return (COMMON_NVCC_FLAGS + + cflags + _get_cuda_arch_flags(cflags)) + + def win_wrap_single_compile(sources, + output_dir=None, + macros=None, + include_dirs=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + depends=None): + + self.cflags = copy.deepcopy(extra_postargs) + extra_postargs = None + + def spawn(cmd): + # Using regex to match src, obj and include files + src_regex = re.compile('/T(p|c)(.*)') + src_list = [ + m.group(2) for m in (src_regex.match(elem) for elem in cmd) + if m + ] + + obj_regex = re.compile('/Fo(.*)') + obj_list = [ + m.group(1) for m in (obj_regex.match(elem) for elem in cmd) + if m + ] + + include_regex = re.compile(r'((\-|\/)I.*)') + include_list = [ + m.group(1) + for m in (include_regex.match(elem) for elem in cmd) if m + ] + + if len(src_list) >= 1 and len(obj_list) >= 1: + src = src_list[0] + obj = obj_list[0] + if _is_cuda_file(src): + nvcc = _join_cuda_home('bin', 'nvcc') + if isinstance(self.cflags, dict): + cflags = self.cflags['nvcc'] + elif isinstance(self.cflags, list): + cflags = self.cflags + else: + cflags = [] + + cflags = win_cuda_flags(cflags) + ['-std=c++17', '--use-local-env'] + for flag in COMMON_MSVC_FLAGS: + cflags = ['-Xcompiler', flag] + cflags + for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS: + cflags = ['-Xcudafe', '--diag_suppress=' + ignore_warning] + cflags + cmd = [nvcc, '-c', src, '-o', obj] + include_list + cflags + elif isinstance(self.cflags, dict): + cflags = COMMON_MSVC_FLAGS + self.cflags['cxx'] + append_std17_if_no_std_present(cflags) + cmd += cflags + elif isinstance(self.cflags, list): + cflags = COMMON_MSVC_FLAGS + self.cflags + append_std17_if_no_std_present(cflags) + cmd += cflags + + return original_spawn(cmd) + + try: + self.compiler.spawn = spawn + return original_compile(sources, output_dir, macros, + include_dirs, debug, extra_preargs, + extra_postargs, depends) + finally: + self.compiler.spawn = original_spawn + + def win_wrap_ninja_compile(sources, + output_dir=None, + macros=None, + include_dirs=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + depends=None): + + if not self.compiler.initialized: + self.compiler.initialize() + output_dir = os.path.abspath(output_dir) + + # Note [Absolute include_dirs] + # Convert relative path in self.compiler.include_dirs to absolute path if any, + # For ninja build, the build location is not local, the build happens + # in a in script created build folder, relative path lost their correctness. + # To be consistent with jit extension, we allow user to enter relative include_dirs + # in setuptools.setup, and we convert the relative path to absolute path here + convert_to_absolute_paths_inplace(self.compiler.include_dirs) + + _, objects, extra_postargs, pp_opts, _ = \ + self.compiler._setup_compile(output_dir, macros, + include_dirs, sources, + depends, extra_postargs) + common_cflags = extra_preargs or [] + cflags = [] + if debug: + cflags.extend(self.compiler.compile_options_debug) + else: + cflags.extend(self.compiler.compile_options) + common_cflags.extend(COMMON_MSVC_FLAGS) + cflags = cflags + common_cflags + pp_opts + with_cuda = any(map(_is_cuda_file, sources)) + + # extra_postargs can be either: + # - a dict mapping cxx/nvcc to extra flags + # - a list of extra flags. 
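+            # e.g. extra_compile_args={'cxx': ['-O3'], 'nvcc': ['-O3']} (illustrative flags)
+            # arrives here as that dict, while extra_compile_args=['-O3'] arrives as a plain
+            # list that is applied to both the C++ and the CUDA compile steps.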
+ if isinstance(extra_postargs, dict): + post_cflags = extra_postargs['cxx'] + else: + post_cflags = list(extra_postargs) + append_std17_if_no_std_present(post_cflags) + + cuda_post_cflags = None + cuda_cflags = None + if with_cuda: + cuda_cflags = ['-std=c++17', '--use-local-env'] + for common_cflag in common_cflags: + cuda_cflags.append('-Xcompiler') + cuda_cflags.append(common_cflag) + for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS: + cuda_cflags.append('-Xcudafe') + cuda_cflags.append('--diag_suppress=' + ignore_warning) + cuda_cflags.extend(pp_opts) + if isinstance(extra_postargs, dict): + cuda_post_cflags = extra_postargs['nvcc'] + else: + cuda_post_cflags = list(extra_postargs) + cuda_post_cflags = win_cuda_flags(cuda_post_cflags) + + cflags = _nt_quote_args(cflags) + post_cflags = _nt_quote_args(post_cflags) + if with_cuda: + cuda_cflags = _nt_quote_args(cuda_cflags) + cuda_post_cflags = _nt_quote_args(cuda_post_cflags) + if isinstance(extra_postargs, dict) and 'nvcc_dlink' in extra_postargs: + cuda_dlink_post_cflags = win_cuda_flags(extra_postargs['nvcc_dlink']) + else: + cuda_dlink_post_cflags = None + + _write_ninja_file_and_compile_objects( + sources=sources, + objects=objects, + cflags=cflags, + post_cflags=post_cflags, + cuda_cflags=cuda_cflags, + cuda_post_cflags=cuda_post_cflags, + cuda_dlink_post_cflags=cuda_dlink_post_cflags, + build_directory=output_dir, + verbose=True, + with_cuda=with_cuda) + + # Return *all* object filenames, not just the ones we just built. + return objects + + # Monkey-patch the _compile or compile method. + # https://github.com/python/cpython/blob/dc0284ee8f7a270b6005467f26d8e5773d76e959/Lib/distutils/ccompiler.py#L511 + if self.compiler.compiler_type == 'msvc': + if self.use_ninja: + self.compiler.compile = win_wrap_ninja_compile + else: + self.compiler.compile = win_wrap_single_compile + else: + if self.use_ninja: + self.compiler.compile = unix_wrap_ninja_compile + else: + self.compiler._compile = unix_wrap_single_compile + + build_ext.build_extensions(self) + + def get_ext_filename(self, ext_name): + # Get the original shared library name. For Python 3, this name will be + # suffixed with ".so", where will be something like + # cpython-37m-x86_64-linux-gnu. + ext_filename = super().get_ext_filename(ext_name) + # If `no_python_abi_suffix` is `True`, we omit the Python 3 ABI + # component. This makes building shared libraries with setuptools that + # aren't Python modules nicer. + if self.no_python_abi_suffix: + # The parts will be e.g. ["my_extension", "cpython-37m-x86_64-linux-gnu", "so"]. + ext_filename_parts = ext_filename.split('.') + # Omit the second to last element. + without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:] + ext_filename = '.'.join(without_abi) + return ext_filename + + def _check_abi(self) -> Tuple[str, TorchVersion]: + # On some platforms, like Windows, compiler_cxx is not available. + if hasattr(self.compiler, 'compiler_cxx'): + compiler = self.compiler.compiler_cxx[0] + else: + compiler = get_cxx_compiler() + _, version = get_compiler_abi_compatibility_and_version(compiler) + # Warn user if VC env is activated but `DISTUILS_USE_SDK` is not set. + if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' in os.environ and 'DISTUTILS_USE_SDK' not in os.environ: + msg = ('It seems that the VC environment is activated but DISTUTILS_USE_SDK is not set.' + 'This may lead to multiple activations of the VC env.' 
+ 'Please set `DISTUTILS_USE_SDK=1` and try again.') + raise UserWarning(msg) + return compiler, version + + def _add_compile_flag(self, extension, flag): + extension.extra_compile_args = copy.deepcopy(extension.extra_compile_args) + if isinstance(extension.extra_compile_args, dict): + for args in extension.extra_compile_args.values(): + args.append(flag) + else: + extension.extra_compile_args.append(flag) + + def _define_torch_extension_name(self, extension): + # pybind11 doesn't support dots in the names + # so in order to support extensions in the packages + # like torch._C, we take the last part of the string + # as the library name + names = extension.name.split('.') + name = names[-1] + define = f'-DTORCH_EXTENSION_NAME={name}' + self._add_compile_flag(extension, define) + + def _add_gnu_cpp_abi_flag(self, extension): + # use the same CXX ABI as what PyTorch was compiled with + self._add_compile_flag(extension, '-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))) + + +def CppExtension(name, sources, *args, **kwargs): + """ + Create a :class:`setuptools.Extension` for C++. + + Convenience method that creates a :class:`setuptools.Extension` with the + bare minimum (but often sufficient) arguments to build a C++ extension. + + All arguments are forwarded to the :class:`setuptools.Extension` + constructor. Full list arguments can be found at + https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#extension-api-reference + + Example: + >>> # xdoctest: +SKIP + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT) + >>> from setuptools import setup + >>> from torch.utils.cpp_extension import BuildExtension, CppExtension + >>> setup( + ... name='extension', + ... ext_modules=[ + ... CppExtension( + ... name='extension', + ... sources=['extension.cpp'], + ... extra_compile_args=['-g'], + ... extra_link_flags=['-Wl,--no-as-needed', '-lm']) + ... ], + ... cmdclass={ + ... 'build_ext': BuildExtension + ... }) + """ + include_dirs = kwargs.get('include_dirs', []) + include_dirs += include_paths() + kwargs['include_dirs'] = include_dirs + + library_dirs = kwargs.get('library_dirs', []) + library_dirs += library_paths() + kwargs['library_dirs'] = library_dirs + + libraries = kwargs.get('libraries', []) + libraries.append('c10') + libraries.append('torch') + libraries.append('torch_cpu') + libraries.append('torch_python') + kwargs['libraries'] = libraries + + kwargs['language'] = 'c++' + return setuptools.Extension(name, sources, *args, **kwargs) + + +def CUDAExtension(name, sources, *args, **kwargs): + """ + Create a :class:`setuptools.Extension` for CUDA/C++. + + Convenience method that creates a :class:`setuptools.Extension` with the + bare minimum (but often sufficient) arguments to build a CUDA/C++ + extension. This includes the CUDA include path, library path and runtime + library. + + All arguments are forwarded to the :class:`setuptools.Extension` + constructor. Full list arguments can be found at + https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#extension-api-reference + + Example: + >>> # xdoctest: +SKIP + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT) + >>> from setuptools import setup + >>> from torch.utils.cpp_extension import BuildExtension, CUDAExtension + >>> setup( + ... name='cuda_extension', + ... ext_modules=[ + ... CUDAExtension( + ... name='cuda_extension', + ... sources=['extension.cpp', 'extension_kernel.cu'], + ... extra_compile_args={'cxx': ['-g'], + ... 'nvcc': ['-O2']}, + ... 
extra_link_flags=['-Wl,--no-as-needed', '-lcuda'])
+        ...     ],
+        ...     cmdclass={
+        ...         'build_ext': BuildExtension
+        ...     })
+
+    Compute capabilities:
+
+    By default the extension will be compiled to run on all archs of the cards visible during the
+    building process of the extension, plus PTX. If down the road a new card is installed the
+    extension may need to be recompiled. If a visible card has a compute capability (CC) that's
+    newer than the newest version for which your nvcc can build fully-compiled binaries, PyTorch
+    will make nvcc fall back to building kernels with the newest version of PTX your nvcc does
+    support (see below for details on PTX).
+
+    You can override the default behavior using `TORCH_CUDA_ARCH_LIST` to explicitly specify which
+    CCs you want the extension to support:
+
+    ``TORCH_CUDA_ARCH_LIST="6.1 8.6" python build_my_extension.py``
+    ``TORCH_CUDA_ARCH_LIST="5.2 6.0 6.1 7.0 7.5 8.0 8.6+PTX" python build_my_extension.py``
+
+    The +PTX option causes extension kernel binaries to include PTX instructions for the specified
+    CC. PTX is an intermediate representation that allows kernels to runtime-compile for any CC >=
+    the specified CC (for example, 8.6+PTX generates PTX that can runtime-compile for any GPU with
+    CC >= 8.6). This improves your binary's forward compatibility. However, relying on older PTX to
+    provide forward compatibility by runtime-compiling for newer CCs can modestly reduce performance
+    on those newer CCs. If you know the exact CC(s) of the GPUs you want to target, you're always
+    better off specifying them individually. For example, if you want your extension to run on 8.0
+    and 8.6, "8.0+PTX" would work functionally because it includes PTX that can runtime-compile for
+    8.6, but "8.0 8.6" would be better.
+
+    Note that while it's possible to include all supported archs, the more archs get included the
+    slower the building process will be, as it will build a separate kernel image for each arch.
+
+    Note that CUDA-11.5 nvcc will hit an internal compiler error while parsing torch/extension.h on Windows.
+    To work around the issue, move the Python binding logic to a pure C++ file.
+
+    Example use:
+        #include <ATen/ATen.h>
+        at::Tensor SigmoidAlphaBlendForwardCuda(....)
+
+    Instead of:
+        #include <torch/extension.h>
+        torch::Tensor SigmoidAlphaBlendForwardCuda(...)
+
+    Currently open issue for nvcc bug: https://github.com/pytorch/pytorch/issues/69460
+    Complete workaround code example: https://github.com/facebookresearch/pytorch3d/commit/cb170ac024a949f1f9614ffe6af1c38d972f7d48
+
+    Relocatable device code linking:
+
+    If you want to reference device symbols across compilation units (across object files),
+    the object files need to be built with `relocatable device code` (-rdc=true or -dc).
+    An exception to this rule is "dynamic parallelism" (nested kernel launches), which is not used a lot anymore.
+    `Relocatable device code` is less optimized, so it should be used only on the object files that need it.
+    Using `-dlto` (Device Link Time Optimization) at both the device code compilation step and the `dlink` step
+    helps reduce the potential perf degradation of `-rdc`.
+    Note that it needs to be used at both steps to be useful.
+
+    If you have `rdc` objects you need to have an extra `-dlink` (device linking) step before the CPU symbol linking step.
+    There is also a case where `-dlink` is used without `-rdc`:
+    when an extension is linked against a static lib containing rdc-compiled objects
+    like the [NVSHMEM library](https://developer.nvidia.com/nvshmem).
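+
+    For illustration (the library name and paths below are examples, not values computed
+    here), passing ``dlink=True, dlink_libraries=["dlink_lib"]`` makes this helper add a
+    separate ``nvcc_dlink`` argument set to ``extra_compile_args`` that looks roughly like::
+
+        ['-dlink', '-L<torch lib dir>', '-ldlink_lib', '-dlto']  # '-dlto' only on CUDA >= 11.2
+
+    and that set drives the extra device-link step described above.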
+ + Note: Ninja is required to build a CUDA Extension with RDC linking. + + Example: + >>> # xdoctest: +SKIP + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT) + >>> CUDAExtension( + ... name='cuda_extension', + ... sources=['extension.cpp', 'extension_kernel.cu'], + ... dlink=True, + ... dlink_libraries=["dlink_lib"], + ... extra_compile_args={'cxx': ['-g'], + ... 'nvcc': ['-O2', '-rdc=true']}) + """ + library_dirs = kwargs.get('library_dirs', []) + library_dirs += library_paths(cuda=True) + kwargs['library_dirs'] = library_dirs + + libraries = kwargs.get('libraries', []) + libraries.append('c10') + libraries.append('torch') + libraries.append('torch_cpu') + libraries.append('torch_python') + if IS_HIP_EXTENSION: + assert ROCM_VERSION is not None + libraries.append('amdhip64' if ROCM_VERSION >= (3, 5) else 'hip_hcc') + libraries.append('c10_hip') + libraries.append('torch_hip') + else: + libraries.append('cudart') + libraries.append('c10_cuda') + libraries.append('torch_cuda') + kwargs['libraries'] = libraries + + include_dirs = kwargs.get('include_dirs', []) + + if IS_HIP_EXTENSION: + build_dir = os.getcwd() + hipify_result = hipify_python.hipify( + project_directory=build_dir, + output_directory=build_dir, + header_include_dirs=include_dirs, + includes=[os.path.join(build_dir, '*')], # limit scope to build_dir only + extra_files=[os.path.abspath(s) for s in sources], + show_detailed=True, + is_pytorch_extension=True, + hipify_extra_files_only=True, # don't hipify everything in includes path + ) + + hipified_sources = set() + for source in sources: + s_abs = os.path.abspath(source) + hipified_s_abs = (hipify_result[s_abs].hipified_path if (s_abs in hipify_result and + hipify_result[s_abs].hipified_path is not None) else s_abs) + # setup() arguments must *always* be /-separated paths relative to the setup.py directory, + # *never* absolute paths + hipified_sources.add(os.path.relpath(hipified_s_abs, build_dir)) + + sources = list(hipified_sources) + + include_dirs += include_paths(cuda=True) + kwargs['include_dirs'] = include_dirs + + kwargs['language'] = 'c++' + + dlink_libraries = kwargs.get('dlink_libraries', []) + dlink = kwargs.get('dlink', False) or dlink_libraries + if dlink: + extra_compile_args = kwargs.get('extra_compile_args', {}) + + extra_compile_args_dlink = extra_compile_args.get('nvcc_dlink', []) + extra_compile_args_dlink += ['-dlink'] + extra_compile_args_dlink += [f'-L{x}' for x in library_dirs] + extra_compile_args_dlink += [f'-l{x}' for x in dlink_libraries] + + if (torch.version.cuda is not None) and TorchVersion(torch.version.cuda) >= '11.2': + extra_compile_args_dlink += ['-dlto'] # Device Link Time Optimization started from cuda 11.2 + + extra_compile_args['nvcc_dlink'] = extra_compile_args_dlink + + kwargs['extra_compile_args'] = extra_compile_args + + return setuptools.Extension(name, sources, *args, **kwargs) + + +def include_paths(cuda: bool = False) -> List[str]: + """ + Get the include paths required to build a C++ or CUDA extension. + + Args: + cuda: If `True`, includes CUDA-specific include paths. + + Returns: + A list of include path strings. + """ + lib_include = os.path.join(_TORCH_PATH, 'include') + paths = [ + lib_include, + # Remove this once torch/torch.h is officially no longer supported for C++ extensions. + os.path.join(lib_include, 'torch', 'csrc', 'api', 'include'), + # Some internal (old) Torch headers don't properly prefix their includes, + # so we need to pass -Itorch/lib/include/TH as well. 
+ os.path.join(lib_include, 'TH'), + os.path.join(lib_include, 'THC') + ] + if cuda and IS_HIP_EXTENSION: + paths.append(os.path.join(lib_include, 'THH')) + paths.append(_join_rocm_home('include')) + elif cuda: + cuda_home_include = _join_cuda_home('include') + # if we have the Debian/Ubuntu packages for cuda, we get /usr as cuda home. + # but gcc doesn't like having /usr/include passed explicitly + if cuda_home_include != '/usr/include': + paths.append(cuda_home_include) + if CUDNN_HOME is not None: + paths.append(os.path.join(CUDNN_HOME, 'include')) + return paths + + +def library_paths(cuda: bool = False) -> List[str]: + """ + Get the library paths required to build a C++ or CUDA extension. + + Args: + cuda: If `True`, includes CUDA-specific library paths. + + Returns: + A list of library path strings. + """ + # We need to link against libtorch.so + paths = [TORCH_LIB_PATH] + + if cuda and IS_HIP_EXTENSION: + lib_dir = 'lib' + paths.append(_join_rocm_home(lib_dir)) + if HIP_HOME is not None: + paths.append(os.path.join(HIP_HOME, 'lib')) + elif cuda: + if IS_WINDOWS: + lib_dir = os.path.join('lib', 'x64') + else: + lib_dir = 'lib64' + if (not os.path.exists(_join_cuda_home(lib_dir)) and + os.path.exists(_join_cuda_home('lib'))): + # 64-bit CUDA may be installed in 'lib' (see e.g. gh-16955) + # Note that it's also possible both don't exist (see + # _find_cuda_home) - in that case we stay with 'lib64'. + lib_dir = 'lib' + + paths.append(_join_cuda_home(lib_dir)) + if CUDNN_HOME is not None: + paths.append(os.path.join(CUDNN_HOME, lib_dir)) + return paths + + +def load(name, + sources: Union[str, List[str]], + extra_cflags=None, + extra_cuda_cflags=None, + extra_ldflags=None, + extra_include_paths=None, + build_directory=None, + verbose=False, + with_cuda: Optional[bool] = None, + is_python_module=True, + is_standalone=False, + keep_intermediates=True): + """ + Load a PyTorch C++ extension just-in-time (JIT). + + To load an extension, a Ninja build file is emitted, which is used to + compile the given sources into a dynamic library. This library is + subsequently loaded into the current Python process as a module and + returned from this function, ready for use. + + By default, the directory to which the build file is emitted and the + resulting library compiled to is ``/torch_extensions/``, where + ```` is the temporary folder on the current platform and ```` + the name of the extension. This location can be overridden in two ways. + First, if the ``TORCH_EXTENSIONS_DIR`` environment variable is set, it + replaces ``/torch_extensions`` and all extensions will be compiled + into subfolders of this directory. Second, if the ``build_directory`` + argument to this function is supplied, it overrides the entire path, i.e. + the library will be compiled into that folder directly. + + To compile the sources, the default system compiler (``c++``) is used, + which can be overridden by setting the ``CXX`` environment variable. To pass + additional arguments to the compilation process, ``extra_cflags`` or + ``extra_ldflags`` can be provided. For example, to compile your extension + with optimizations, pass ``extra_cflags=['-O3']``. You can also use + ``extra_cflags`` to pass further include directories. + + CUDA support with mixed compilation is provided. Simply pass CUDA source + files (``.cu`` or ``.cuh``) along with other sources. Such files will be + detected and compiled with nvcc rather than the C++ compiler. 
This includes + passing the CUDA lib64 directory as a library directory, and linking + ``cudart``. You can pass additional flags to nvcc via + ``extra_cuda_cflags``, just like with ``extra_cflags`` for C++. Various + heuristics for finding the CUDA install directory are used, which usually + work fine. If not, setting the ``CUDA_HOME`` environment variable is the + safest option. + + Args: + name: The name of the extension to build. This MUST be the same as the + name of the pybind11 module! + sources: A list of relative or absolute paths to C++ source files. + extra_cflags: optional list of compiler flags to forward to the build. + extra_cuda_cflags: optional list of compiler flags to forward to nvcc + when building CUDA sources. + extra_ldflags: optional list of linker flags to forward to the build. + extra_include_paths: optional list of include directories to forward + to the build. + build_directory: optional path to use as build workspace. + verbose: If ``True``, turns on verbose logging of load steps. + with_cuda: Determines whether CUDA headers and libraries are added to + the build. If set to ``None`` (default), this value is + automatically determined based on the existence of ``.cu`` or + ``.cuh`` in ``sources``. Set it to `True`` to force CUDA headers + and libraries to be included. + is_python_module: If ``True`` (default), imports the produced shared + library as a Python module. If ``False``, behavior depends on + ``is_standalone``. + is_standalone: If ``False`` (default) loads the constructed extension + into the process as a plain dynamic library. If ``True``, build a + standalone executable. + + Returns: + If ``is_python_module`` is ``True``: + Returns the loaded PyTorch extension as a Python module. + + If ``is_python_module`` is ``False`` and ``is_standalone`` is ``False``: + Returns nothing. (The shared library is loaded into the process as + a side effect.) + + If ``is_standalone`` is ``True``. + Return the path to the executable. (On Windows, TORCH_LIB_PATH is + added to the PATH environment variable as a side effect.) + + Example: + >>> # xdoctest: +SKIP + >>> from torch.utils.cpp_extension import load + >>> module = load( + ... name='extension', + ... sources=['extension.cpp', 'extension_kernel.cu'], + ... extra_cflags=['-O2'], + ... verbose=True) + """ + return _jit_compile( + name, + [sources] if isinstance(sources, str) else sources, + extra_cflags, + extra_cuda_cflags, + extra_ldflags, + extra_include_paths, + build_directory or _get_build_directory(name, verbose), + verbose, + with_cuda, + is_python_module, + is_standalone, + keep_intermediates=keep_intermediates) + +def _get_pybind11_abi_build_flags(): + # Note [Pybind11 ABI constants] + # + # Pybind11 before 2.4 used to build an ABI strings using the following pattern: + # f"__pybind11_internals_v{PYBIND11_INTERNALS_VERSION}{PYBIND11_INTERNALS_KIND}{PYBIND11_BUILD_TYPE}__" + # Since 2.4 compier type, stdlib and build abi parameters are also encoded like this: + # f"__pybind11_internals_v{PYBIND11_INTERNALS_VERSION}{PYBIND11_INTERNALS_KIND}{PYBIND11_COMPILER_TYPE}{PYBIND11_STDLIB}{PYBIND11_BUILD_ABI}{PYBIND11_BUILD_TYPE}__" + # + # This was done in order to further narrow down the chances of compiler ABI incompatibility + # that can cause a hard to debug segfaults. 
+ # For PyTorch extensions we want to relax those restrictions and pass compiler, stdlib and abi properties + # captured during PyTorch native library compilation in torch/csrc/Module.cpp + + abi_cflags = [] + for pname in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]: + pval = getattr(torch._C, f"_PYBIND11_{pname}") + if pval is not None and not IS_WINDOWS: + abi_cflags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"') + return abi_cflags + +def _get_glibcxx_abi_build_flags(): + glibcxx_abi_cflags = ['-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))] + return glibcxx_abi_cflags + +def check_compiler_is_gcc(compiler): + if not IS_LINUX: + return False + + env = os.environ.copy() + env['LC_ALL'] = 'C' # Don't localize output + try: + version_string = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT, env=env).decode(*SUBPROCESS_DECODE_ARGS) + except Exception as e: + try: + version_string = subprocess.check_output([compiler, '--version'], stderr=subprocess.STDOUT, env=env).decode(*SUBPROCESS_DECODE_ARGS) + except Exception as e: + return False + # Check for 'gcc' or 'g++' for sccache wrapper + pattern = re.compile("^COLLECT_GCC=(.*)$", re.MULTILINE) + results = re.findall(pattern, version_string) + if len(results) != 1: + return False + compiler_path = os.path.realpath(results[0].strip()) + # On RHEL/CentOS c++ is a gcc compiler wrapper + if os.path.basename(compiler_path) == 'c++' and 'gcc version' in version_string: + return True + return False + +def _check_and_build_extension_h_precompiler_headers( + extra_cflags, + extra_include_paths, + is_standalone=False): + r''' + Precompiled Headers(PCH) can pre-build the same headers and reduce build time for pytorch load_inline modules. + GCC offical manual: https://gcc.gnu.org/onlinedocs/gcc-4.0.4/gcc/Precompiled-Headers.html + PCH only works when built pch file(header.h.gch) and build target have the same build parameters. So, We need + add a signature file to record PCH file parameters. If the build parameters(signature) changed, it should rebuild + PCH file. + + Note: + 1. Windows and MacOS have different PCH mechanism. We only support Linux currently. + 2. It only works on GCC/G++. 
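+
+    For illustration only (paths are examples, not the exact values assembled below), the
+    precompile command built by this helper has roughly this shape:
+
+        g++ -x c++-header /path/to/torch/include/torch/extension.h \
+            -o /path/to/torch/include/torch/extension.h.gch \
+            -I /path/to/torch/include -I /path/to/python/include \
+            -DTORCH_API_INCLUDE_EXTENSION_H -std=c++17 -fPIC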
+ ''' + if not IS_LINUX: + return + + compiler = get_cxx_compiler() + + b_is_gcc = check_compiler_is_gcc(compiler) + if b_is_gcc is False: + return + + head_file = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h') + head_file_pch = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.gch') + head_file_signature = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.sign') + + def listToString(s): + # initialize an empty string + string = "" + if s is None: + return string + + # traverse in the string + for element in s: + string += (element + ' ') + # return string + return string + + def format_precompiler_header_cmd(compiler, head_file, head_file_pch, common_cflags, torch_include_dirs, extra_cflags, extra_include_paths): + return re.sub( + r"[ \n]+", + " ", + f""" + {compiler} -x c++-header {head_file} -o {head_file_pch} {torch_include_dirs} {extra_include_paths} {extra_cflags} {common_cflags} + """, + ).strip() + + def command_to_signature(cmd): + signature = cmd.replace(' ', '_') + return signature + + def check_pch_signature_in_file(file_path, signature): + b_exist = os.path.isfile(file_path) + if b_exist is False: + return False + + with open(file_path) as file: + # read all content of a file + content = file.read() + # check if string present in a file + if signature == content: + return True + else: + return False + + def _create_if_not_exist(path_dir): + if not os.path.exists(path_dir): + try: + Path(path_dir).mkdir(parents=True, exist_ok=True) + except OSError as exc: # Guard against race condition + if exc.errno != errno.EEXIST: + raise RuntimeError(f"Fail to create path {path_dir}") from exc + + def write_pch_signature_to_file(file_path, pch_sign): + _create_if_not_exist(os.path.dirname(file_path)) + with open(file_path, "w") as f: + f.write(pch_sign) + f.close() + + def build_precompile_header(pch_cmd): + try: + subprocess.check_output(pch_cmd, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + raise RuntimeError(f"Compile PreCompile Header fail, command: {pch_cmd}") from e + + extra_cflags_str = listToString(extra_cflags) + extra_include_paths_str = " ".join( + [f"-I{include}" for include in extra_include_paths] if extra_include_paths else [] + ) + + lib_include = os.path.join(_TORCH_PATH, 'include') + torch_include_dirs = [ + f"-I {lib_include}", + # Python.h + "-I {}".format(sysconfig.get_path("include")), + # torch/all.h + "-I {}".format(os.path.join(lib_include, 'torch', 'csrc', 'api', 'include')), + ] + + torch_include_dirs_str = listToString(torch_include_dirs) + + common_cflags = [] + if not is_standalone: + common_cflags += ['-DTORCH_API_INCLUDE_EXTENSION_H'] + + common_cflags += ['-std=c++17', '-fPIC'] + common_cflags += [f"{x}" for x in _get_pybind11_abi_build_flags()] + common_cflags += [f"{x}" for x in _get_glibcxx_abi_build_flags()] + common_cflags_str = listToString(common_cflags) + + pch_cmd = format_precompiler_header_cmd(compiler, head_file, head_file_pch, common_cflags_str, torch_include_dirs_str, extra_cflags_str, extra_include_paths_str) + pch_sign = command_to_signature(pch_cmd) + + if os.path.isfile(head_file_pch) is not True: + build_precompile_header(pch_cmd) + write_pch_signature_to_file(head_file_signature, pch_sign) + else: + b_same_sign = check_pch_signature_in_file(head_file_signature, pch_sign) + if b_same_sign is False: + build_precompile_header(pch_cmd) + write_pch_signature_to_file(head_file_signature, pch_sign) + +def remove_extension_h_precompiler_headers(): + def 
_remove_if_file_exists(path_file):
+        if os.path.exists(path_file):
+            os.remove(path_file)
+
+    head_file_pch = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.gch')
+    head_file_signature = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.sign')
+
+    _remove_if_file_exists(head_file_pch)
+    _remove_if_file_exists(head_file_signature)
+
+def load_inline(name,
+                cpp_sources,
+                cuda_sources=None,
+                functions=None,
+                extra_cflags=None,
+                extra_cuda_cflags=None,
+                extra_ldflags=None,
+                extra_include_paths=None,
+                build_directory=None,
+                verbose=False,
+                with_cuda=None,
+                is_python_module=True,
+                with_pytorch_error_handling=True,
+                keep_intermediates=True,
+                use_pch=False):
+    r'''
+    Load a PyTorch C++ extension just-in-time (JIT) from string sources.
+
+    This function behaves exactly like :func:`load`, but takes its sources as
+    strings rather than filenames. These strings are stored to files in the
+    build directory, after which the behavior of :func:`load_inline` is
+    identical to :func:`load`.
+
+    See the tests in ``test/test_cpp_extensions_jit.py`` in the PyTorch repository
+    for good examples of using this function.
+
+    Sources may omit two required parts of a typical non-inline C++ extension:
+    the necessary header includes, as well as the (pybind11) binding code. More
+    precisely, strings passed to ``cpp_sources`` are first concatenated into a
+    single ``.cpp`` file. This file is then prepended with ``#include
+    <torch/extension.h>``.
+
+    Furthermore, if the ``functions`` argument is supplied, bindings will be
+    automatically generated for each function specified. ``functions`` can
+    either be a list of function names, or a dictionary mapping from function
+    names to docstrings. If a list is given, the name of each function is used
+    as its docstring.
+
+    The sources in ``cuda_sources`` are concatenated into a separate ``.cu``
+    file and prepended with ``torch/types.h``, ``cuda.h`` and
+    ``cuda_runtime.h`` includes. The ``.cpp`` and ``.cu`` files are compiled
+    separately, but ultimately linked into a single library. Note that no
+    bindings are generated for functions in ``cuda_sources`` per se. To bind
+    to a CUDA kernel, you must create a C++ function that calls it, and either
+    declare or define this C++ function in one of the ``cpp_sources`` (and
+    include its name in ``functions``).
+
+    See :func:`load` for a description of arguments omitted below.
+
+    Args:
+        cpp_sources: A string, or list of strings, containing C++ source code.
+        cuda_sources: A string, or list of strings, containing CUDA source code.
+        functions: A list of function names for which to generate function
+            bindings. If a dictionary is given, it should map function names to
+            docstrings (which are otherwise just the function names).
+        with_cuda: Determines whether CUDA headers and libraries are added to
+            the build. If set to ``None`` (default), this value is
+            automatically determined based on whether ``cuda_sources`` is
+            provided. Set it to ``True`` to force CUDA headers
+            and libraries to be included.
+        with_pytorch_error_handling: Determines whether PyTorch error and
+            warning macros are handled by PyTorch instead of pybind11. To do
+            this, each function ``foo`` is called via an intermediary ``_safe_foo``
+            function. This redirection might cause issues in obscure cases
+            of C++. This flag should be set to ``False`` when this redirect
+            causes issues.
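+
+    As a minimal sketch of the ``cuda_sources`` pattern described above (the function and
+    module names are illustrative): the CUDA source defines the function, while
+    ``cpp_sources`` only declares it so that a binding can be generated for it:
+
+    >>> # xdoctest: +SKIP
+    >>> cpp_src = "at::Tensor relu_cuda(at::Tensor x);"  # declaration only
+    >>> cuda_src = "at::Tensor relu_cuda(at::Tensor x) { return x.clamp_min(0); }"
+    >>> mod = load_inline(name='relu_ext', cpp_sources=[cpp_src],
+    ...                   cuda_sources=[cuda_src], functions=['relu_cuda'])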
+
+    Example:
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT)
+        >>> from torch.utils.cpp_extension import load_inline
+        >>> source = """
+        at::Tensor sin_add(at::Tensor x, at::Tensor y) {
+          return x.sin() + y.sin();
+        }
+        """
+        >>> module = load_inline(name='inline_extension',
+        ...                      cpp_sources=[source],
+        ...                      functions=['sin_add'])
+
+    .. note::
+        By default, the Ninja backend uses #CPUS + 2 workers to build the
+        extension. This may use up too many resources on some systems. One
+        can control the number of workers by setting the `MAX_JOBS` environment
+        variable to a non-negative number.
+    '''
+    build_directory = build_directory or _get_build_directory(name, verbose)
+
+    if isinstance(cpp_sources, str):
+        cpp_sources = [cpp_sources]
+    cuda_sources = cuda_sources or []
+    if isinstance(cuda_sources, str):
+        cuda_sources = [cuda_sources]
+
+    cpp_sources.insert(0, '#include <torch/extension.h>')
+
+    if use_pch is True:
+        # Using PreCompile Header('torch/extension.h') to reduce compile time.
+        _check_and_build_extension_h_precompiler_headers(extra_cflags, extra_include_paths)
+    else:
+        remove_extension_h_precompiler_headers()
+
+    # If `functions` is supplied, we create the pybind11 bindings for the user.
+    # Here, `functions` is (or becomes, after some processing) a map from
+    # function names to function docstrings.
+    if functions is not None:
+        module_def = []
+        module_def.append('PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {')
+        if isinstance(functions, str):
+            functions = [functions]
+        if isinstance(functions, list):
+            # Make the function docstring the same as the function name.
+            functions = {f: f for f in functions}
+        elif not isinstance(functions, dict):
+            raise ValueError(f"Expected 'functions' to be a list or dict, but was {type(functions)}")
+        for function_name, docstring in functions.items():
+            if with_pytorch_error_handling:
+                module_def.append(f'm.def("{function_name}", torch::wrap_pybind_function({function_name}), "{docstring}");')
+            else:
+                module_def.append(f'm.def("{function_name}", {function_name}, "{docstring}");')
+        module_def.append('}')
+        cpp_sources += module_def
+
+    cpp_source_path = os.path.join(build_directory, 'main.cpp')
+    _maybe_write(cpp_source_path, "\n".join(cpp_sources))
+
+    sources = [cpp_source_path]
+
+    if cuda_sources:
+        cuda_sources.insert(0, '#include <torch/types.h>')
+        cuda_sources.insert(1, '#include <cuda.h>')
+        cuda_sources.insert(2, '#include <cuda_runtime.h>')
+
+        cuda_source_path = os.path.join(build_directory, 'cuda.cu')
+        _maybe_write(cuda_source_path, "\n".join(cuda_sources))
+
+        sources.append(cuda_source_path)
+
+    return _jit_compile(
+        name,
+        sources,
+        extra_cflags,
+        extra_cuda_cflags,
+        extra_ldflags,
+        extra_include_paths,
+        build_directory,
+        verbose,
+        with_cuda,
+        is_python_module,
+        is_standalone=False,
+        keep_intermediates=keep_intermediates)
+
+
+def _jit_compile(name,
+                 sources,
+                 extra_cflags,
+                 extra_cuda_cflags,
+                 extra_ldflags,
+                 extra_include_paths,
+                 build_directory: str,
+                 verbose: bool,
+                 with_cuda: Optional[bool],
+                 is_python_module,
+                 is_standalone,
+                 keep_intermediates=True) -> None:
+    if is_python_module and is_standalone:
+        raise ValueError("`is_python_module` and `is_standalone` are mutually exclusive.")
+
+    if with_cuda is None:
+        with_cuda = any(map(_is_cuda_file, sources))
+    with_cudnn = any('cudnn' in f for f in extra_ldflags or [])
+    old_version = JIT_EXTENSION_VERSIONER.get_version(name)
+    version = JIT_EXTENSION_VERSIONER.bump_version_if_changed(
+        name,
+        sources,
+        build_arguments=[extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths], +
build_directory=build_directory, + with_cuda=with_cuda, + is_python_module=is_python_module, + is_standalone=is_standalone, + ) + if version > 0: + if version != old_version and verbose: + print(f'The input conditions for extension module {name} have changed. ' + + f'Bumping to version {version} and re-building as {name}_v{version}...', + file=sys.stderr) + name = f'{name}_v{version}' + + if version != old_version: + baton = FileBaton(os.path.join(build_directory, 'lock')) + if baton.try_acquire(): + try: + with GeneratedFileCleaner(keep_intermediates=keep_intermediates) as clean_ctx: + if IS_HIP_EXTENSION and (with_cuda or with_cudnn): + hipify_result = hipify_python.hipify( + project_directory=build_directory, + output_directory=build_directory, + header_include_dirs=(extra_include_paths if extra_include_paths is not None else []), + extra_files=[os.path.abspath(s) for s in sources], + ignores=[_join_rocm_home('*'), os.path.join(_TORCH_PATH, '*')], # no need to hipify ROCm or PyTorch headers + show_detailed=verbose, + show_progress=verbose, + is_pytorch_extension=True, + clean_ctx=clean_ctx + ) + + hipified_sources = set() + for source in sources: + s_abs = os.path.abspath(source) + hipified_sources.add(hipify_result[s_abs].hipified_path if s_abs in hipify_result else s_abs) + + sources = list(hipified_sources) + + _write_ninja_file_and_build_library( + name=name, + sources=sources, + extra_cflags=extra_cflags or [], + extra_cuda_cflags=extra_cuda_cflags or [], + extra_ldflags=extra_ldflags or [], + extra_include_paths=extra_include_paths or [], + build_directory=build_directory, + verbose=verbose, + with_cuda=with_cuda, + is_standalone=is_standalone) + finally: + baton.release() + else: + baton.wait() + elif verbose: + print('No modifications detected for re-loaded extension ' + f'module {name}, skipping build step...', + file=sys.stderr) + + if verbose: + print(f'Loading extension module {name}...', file=sys.stderr) + + if is_standalone: + return _get_exec_path(name, build_directory) + + return _import_module_from_library(name, build_directory, is_python_module) + + +def _write_ninja_file_and_compile_objects( + sources: List[str], + objects, + cflags, + post_cflags, + cuda_cflags, + cuda_post_cflags, + cuda_dlink_post_cflags, + build_directory: str, + verbose: bool, + with_cuda: Optional[bool]) -> None: + verify_ninja_availability() + + compiler = get_cxx_compiler() + + get_compiler_abi_compatibility_and_version(compiler) + if with_cuda is None: + with_cuda = any(map(_is_cuda_file, sources)) + build_file_path = os.path.join(build_directory, 'build.ninja') + if verbose: + print(f'Emitting ninja build file {build_file_path}...', file=sys.stderr) + _write_ninja_file( + path=build_file_path, + cflags=cflags, + post_cflags=post_cflags, + cuda_cflags=cuda_cflags, + cuda_post_cflags=cuda_post_cflags, + cuda_dlink_post_cflags=cuda_dlink_post_cflags, + sources=sources, + objects=objects, + ldflags=None, + library_target=None, + with_cuda=with_cuda) + if verbose: + print('Compiling objects...', file=sys.stderr) + _run_ninja_build( + build_directory, + verbose, + # It would be better if we could tell users the name of the extension + # that failed to build but there isn't a good way to get it here. 
+ error_prefix='Error compiling objects for extension') + + +def _write_ninja_file_and_build_library( + name, + sources: List[str], + extra_cflags, + extra_cuda_cflags, + extra_ldflags, + extra_include_paths, + build_directory: str, + verbose: bool, + with_cuda: Optional[bool], + is_standalone: bool = False) -> None: + verify_ninja_availability() + + compiler = get_cxx_compiler() + + get_compiler_abi_compatibility_and_version(compiler) + if with_cuda is None: + with_cuda = any(map(_is_cuda_file, sources)) + extra_ldflags = _prepare_ldflags( + extra_ldflags or [], + with_cuda, + verbose, + is_standalone) + build_file_path = os.path.join(build_directory, 'build.ninja') + if verbose: + print(f'Emitting ninja build file {build_file_path}...', file=sys.stderr) + # NOTE: Emitting a new ninja build file does not cause re-compilation if + # the sources did not change, so it's ok to re-emit (and it's fast). + _write_ninja_file_to_build_library( + path=build_file_path, + name=name, + sources=sources, + extra_cflags=extra_cflags or [], + extra_cuda_cflags=extra_cuda_cflags or [], + extra_ldflags=extra_ldflags or [], + extra_include_paths=extra_include_paths or [], + with_cuda=with_cuda, + is_standalone=is_standalone) + + if verbose: + print(f'Building extension module {name}...', file=sys.stderr) + _run_ninja_build( + build_directory, + verbose, + error_prefix=f"Error building extension '{name}'") + + +def is_ninja_available(): + """Return ``True`` if the `ninja `_ build system is available on the system, ``False`` otherwise.""" + try: + subprocess.check_output('ninja --version'.split()) + except Exception: + return False + else: + return True + + +def verify_ninja_availability(): + """Raise ``RuntimeError`` if `ninja `_ build system is not available on the system, does nothing otherwise.""" + if not is_ninja_available(): + raise RuntimeError("Ninja is required to load C++ extensions") + + +def _prepare_ldflags(extra_ldflags, with_cuda, verbose, is_standalone): + if IS_WINDOWS: + python_lib_path = os.path.join(sys.base_exec_prefix, 'libs') + + extra_ldflags.append('c10.lib') + if with_cuda: + extra_ldflags.append('c10_cuda.lib') + extra_ldflags.append('torch_cpu.lib') + if with_cuda: + extra_ldflags.append('torch_cuda.lib') + # /INCLUDE is used to ensure torch_cuda is linked against in a project that relies on it. 
+ # Related issue: https://github.com/pytorch/pytorch/issues/31611 + extra_ldflags.append('-INCLUDE:?warp_size@cuda@at@@YAHXZ') + extra_ldflags.append('torch.lib') + extra_ldflags.append(f'/LIBPATH:{TORCH_LIB_PATH}') + if not is_standalone: + extra_ldflags.append('torch_python.lib') + extra_ldflags.append(f'/LIBPATH:{python_lib_path}') + + else: + extra_ldflags.append(f'-L{TORCH_LIB_PATH}') + extra_ldflags.append('-lc10') + if with_cuda: + extra_ldflags.append('-lc10_hip' if IS_HIP_EXTENSION else '-lc10_cuda') + extra_ldflags.append('-ltorch_cpu') + if with_cuda: + extra_ldflags.append('-ltorch_hip' if IS_HIP_EXTENSION else '-ltorch_cuda') + extra_ldflags.append('-ltorch') + if not is_standalone: + extra_ldflags.append('-ltorch_python') + + if is_standalone and "TBB" in torch.__config__.parallel_info(): + extra_ldflags.append('-ltbb') + + if is_standalone: + extra_ldflags.append(f"-Wl,-rpath,{TORCH_LIB_PATH}") + + if with_cuda: + if verbose: + print('Detected CUDA files, patching ldflags', file=sys.stderr) + if IS_WINDOWS: + extra_ldflags.append(f'/LIBPATH:{_join_cuda_home("lib", "x64")}') + extra_ldflags.append('cudart.lib') + if CUDNN_HOME is not None: + extra_ldflags.append(f'/LIBPATH:{os.path.join(CUDNN_HOME, "lib", "x64")}') + elif not IS_HIP_EXTENSION: + extra_lib_dir = "lib64" + if (not os.path.exists(_join_cuda_home(extra_lib_dir)) and + os.path.exists(_join_cuda_home("lib"))): + # 64-bit CUDA may be installed in "lib" + # Note that it's also possible both don't exist (see _find_cuda_home) - in that case we stay with "lib64" + extra_lib_dir = "lib" + extra_ldflags.append(f'-L{_join_cuda_home(extra_lib_dir)}') + extra_ldflags.append('-lcudart') + if CUDNN_HOME is not None: + extra_ldflags.append(f'-L{os.path.join(CUDNN_HOME, "lib64")}') + elif IS_HIP_EXTENSION: + assert ROCM_VERSION is not None + extra_ldflags.append(f'-L{_join_rocm_home("lib")}') + extra_ldflags.append('-lamdhip64' if ROCM_VERSION >= (3, 5) else '-lhip_hcc') + return extra_ldflags + + +def _get_cuda_arch_flags(cflags: Optional[List[str]] = None) -> List[str]: + """ + Determine CUDA arch flags to use. + + For an arch, say "6.1", the added compile flag will be + ``-gencode=arch=compute_61,code=sm_61``. + For an added "+PTX", an additional + ``-gencode=arch=compute_xx,code=compute_xx`` is added. + + See select_compute_arch.cmake for corresponding named and supported arches + when building with CMake. + """ + # If cflags is given, there may already be user-provided arch flags in it + # (from `extra_compile_args`) + if cflags is not None: + for flag in cflags: + if 'TORCH_EXTENSION_NAME' in flag: + continue + if 'arch' in flag: + return [] + + # Note: keep combined names ("arch1+arch2") above single names, otherwise + # string replacement may not do the right thing + named_arches = collections.OrderedDict([ + ('Kepler+Tesla', '3.7'), + ('Kepler', '3.5+PTX'), + ('Maxwell+Tegra', '5.3'), + ('Maxwell', '5.0;5.2+PTX'), + ('Pascal', '6.0;6.1+PTX'), + ('Volta+Tegra', '7.2'), + ('Volta', '7.0+PTX'), + ('Turing', '7.5+PTX'), + ('Ampere+Tegra', '8.7'), + ('Ampere', '8.0;8.6+PTX'), + ('Ada', '8.9+PTX'), + ('Hopper', '9.0+PTX'), + ]) + + supported_arches = ['3.5', '3.7', '5.0', '5.2', '5.3', '6.0', '6.1', '6.2', + '7.0', '7.2', '7.5', '8.0', '8.6', '8.7', '8.9', '9.0', '9.0a'] + valid_arch_strings = supported_arches + [s + "+PTX" for s in supported_arches] + + # The default is sm_30 for CUDA 9.x and 10.x + # First check for an env var (same as used by the main setup.py) + # Can be one or more architectures, e.g. 
"6.1" or "3.5;5.2;6.0;6.1;7.0+PTX" + # See cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake + _arch_list = os.environ.get('TORCH_CUDA_ARCH_LIST', None) + + # If not given, determine what's best for the GPU / CUDA version that can be found + if not _arch_list: + warnings.warn( + "TORCH_CUDA_ARCH_LIST is not set, all archs for visible cards are included for compilation. \n" + "If this is not desired, please set os.environ['TORCH_CUDA_ARCH_LIST'].") + arch_list = [] + # the assumption is that the extension should run on any of the currently visible cards, + # which could be of different types - therefore all archs for visible cards should be included + for i in range(torch.cuda.device_count()): + capability = torch.cuda.get_device_capability(i) + supported_sm = [int(arch.split('_')[1]) + for arch in torch.cuda.get_arch_list() if 'sm_' in arch] + max_supported_sm = max((sm // 10, sm % 10) for sm in supported_sm) + # Capability of the device may be higher than what's supported by the user's + # NVCC, causing compilation error. User's NVCC is expected to match the one + # used to build pytorch, so we use the maximum supported capability of pytorch + # to clamp the capability. + capability = min(max_supported_sm, capability) + arch = f'{capability[0]}.{capability[1]}' + if arch not in arch_list: + arch_list.append(arch) + arch_list = sorted(arch_list) + arch_list[-1] += '+PTX' + else: + # Deal with lists that are ' ' separated (only deal with ';' after) + _arch_list = _arch_list.replace(' ', ';') + # Expand named arches + for named_arch, archval in named_arches.items(): + _arch_list = _arch_list.replace(named_arch, archval) + + arch_list = _arch_list.split(';') + + flags = [] + for arch in arch_list: + if arch not in valid_arch_strings: + raise ValueError(f"Unknown CUDA arch ({arch}) or GPU not supported") + else: + num = arch[0] + arch[2:].split("+")[0] + flags.append(f'-gencode=arch=compute_{num},code=sm_{num}') + if arch.endswith('+PTX'): + flags.append(f'-gencode=arch=compute_{num},code=compute_{num}') + + return sorted(set(flags)) + + +def _get_rocm_arch_flags(cflags: Optional[List[str]] = None) -> List[str]: + # If cflags is given, there may already be user-provided arch flags in it + # (from `extra_compile_args`) + if cflags is not None: + for flag in cflags: + if 'amdgpu-target' in flag or 'offload-arch' in flag: + return ['-fno-gpu-rdc'] + # Use same defaults as used for building PyTorch + # Allow env var to override, just like during initial cmake build. 
+ _archs = os.environ.get('PYTORCH_ROCM_ARCH', None) + if not _archs: + archFlags = torch._C._cuda_getArchFlags() + if archFlags: + archs = archFlags.split() + else: + archs = [] + else: + archs = _archs.replace(' ', ';').split(';') + flags = [f'--offload-arch={arch}' for arch in archs] + flags += ['-fno-gpu-rdc'] + return flags + +def _get_build_directory(name: str, verbose: bool) -> str: + root_extensions_directory = os.environ.get('TORCH_EXTENSIONS_DIR') + if root_extensions_directory is None: + root_extensions_directory = get_default_build_root() + cu_str = ('cpu' if torch.version.cuda is None else + f'cu{torch.version.cuda.replace(".", "")}') # type: ignore[attr-defined] + python_version = f'py{sys.version_info.major}{sys.version_info.minor}' + build_folder = f'{python_version}_{cu_str}' + + root_extensions_directory = os.path.join( + root_extensions_directory, build_folder) + + if verbose: + print(f'Using {root_extensions_directory} as PyTorch extensions root...', file=sys.stderr) + + build_directory = os.path.join(root_extensions_directory, name) + if not os.path.exists(build_directory): + if verbose: + print(f'Creating extension directory {build_directory}...', file=sys.stderr) + # This is like mkdir -p, i.e. will also create parent directories. + os.makedirs(build_directory, exist_ok=True) + + return build_directory + + +def _get_num_workers(verbose: bool) -> Optional[int]: + max_jobs = os.environ.get('MAX_JOBS') + if max_jobs is not None and max_jobs.isdigit(): + if verbose: + print(f'Using envvar MAX_JOBS ({max_jobs}) as the number of workers...', + file=sys.stderr) + return int(max_jobs) + if verbose: + print('Allowing ninja to set a default number of workers... ' + '(overridable by setting the environment variable MAX_JOBS=N)', + file=sys.stderr) + return None + + +def _run_ninja_build(build_directory: str, verbose: bool, error_prefix: str) -> None: + command = ['ninja', '-v'] + num_workers = _get_num_workers(verbose) + if num_workers is not None: + command.extend(['-j', str(num_workers)]) + env = os.environ.copy() + # Try to activate the vc env for the users + if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' not in env: + from setuptools import distutils + + plat_name = distutils.util.get_platform() + plat_spec = PLAT_TO_VCVARS[plat_name] + + vc_env = distutils._msvccompiler._get_vc_env(plat_spec) + vc_env = {k.upper(): v for k, v in vc_env.items()} + for k, v in env.items(): + uk = k.upper() + if uk not in vc_env: + vc_env[uk] = v + env = vc_env + try: + sys.stdout.flush() + sys.stderr.flush() + # Warning: don't pass stdout=None to subprocess.run to get output. + # subprocess.run assumes that sys.__stdout__ has not been modified and + # attempts to write to it by default. However, when we call _run_ninja_build + # from ahead-of-time cpp extensions, the following happens: + # 1) If the stdout encoding is not utf-8, setuptools detachs __stdout__. + # https://github.com/pypa/setuptools/blob/7e97def47723303fafabe48b22168bbc11bb4821/setuptools/dist.py#L1110 + # (it probably shouldn't do this) + # 2) subprocess.run (on POSIX, with no stdout override) relies on + # __stdout__ not being detached: + # https://github.com/python/cpython/blob/c352e6c7446c894b13643f538db312092b351789/Lib/subprocess.py#L1214 + # To work around this, we pass in the fileno directly and hope that + # it is valid. 
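+        # File descriptor 1 is the process-level stdout, so ninja output still reaches
+        # the console even when sys.stdout/sys.__stdout__ have been replaced or detached.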
+ stdout_fileno = 1 + subprocess.run( + command, + stdout=stdout_fileno if verbose else subprocess.PIPE, + stderr=subprocess.STDOUT, + cwd=build_directory, + check=True, + env=env) + except subprocess.CalledProcessError as e: + # Python 2 and 3 compatible way of getting the error object. + _, error, _ = sys.exc_info() + # error.output contains the stdout and stderr of the build attempt. + message = error_prefix + # `error` is a CalledProcessError (which has an `output`) attribute, but + # mypy thinks it's Optional[BaseException] and doesn't narrow + if hasattr(error, 'output') and error.output: # type: ignore[union-attr] + message += f": {error.output.decode(*SUBPROCESS_DECODE_ARGS)}" # type: ignore[union-attr] + raise RuntimeError(message) from e + + +def _get_exec_path(module_name, path): + if IS_WINDOWS and TORCH_LIB_PATH not in os.getenv('PATH', '').split(';'): + torch_lib_in_path = any( + os.path.exists(p) and os.path.samefile(p, TORCH_LIB_PATH) + for p in os.getenv('PATH', '').split(';') + ) + if not torch_lib_in_path: + os.environ['PATH'] = f"{TORCH_LIB_PATH};{os.getenv('PATH', '')}" + return os.path.join(path, f'{module_name}{EXEC_EXT}') + + +def _import_module_from_library(module_name, path, is_python_module): + filepath = os.path.join(path, f"{module_name}{LIB_EXT}") + if is_python_module: + # https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path + spec = importlib.util.spec_from_file_location(module_name, filepath) + assert spec is not None + module = importlib.util.module_from_spec(spec) + assert isinstance(spec.loader, importlib.abc.Loader) + spec.loader.exec_module(module) + return module + else: + torch.ops.load_library(filepath) + + +def _write_ninja_file_to_build_library(path, + name, + sources, + extra_cflags, + extra_cuda_cflags, + extra_ldflags, + extra_include_paths, + with_cuda, + is_standalone) -> None: + extra_cflags = [flag.strip() for flag in extra_cflags] + extra_cuda_cflags = [flag.strip() for flag in extra_cuda_cflags] + extra_ldflags = [flag.strip() for flag in extra_ldflags] + extra_include_paths = [flag.strip() for flag in extra_include_paths] + + # Turn into absolute paths so we can emit them into the ninja build + # file wherever it is. + user_includes = [os.path.abspath(file) for file in extra_include_paths] + + # include_paths() gives us the location of torch/extension.h + system_includes = include_paths(with_cuda) + # sysconfig.get_path('include') gives us the location of Python.h + # Explicitly specify 'posix_prefix' scheme on non-Windows platforms to workaround error on some MacOS + # installations where default `get_path` points to non-existing `/Library/Python/M.m/include` folder + python_include_path = sysconfig.get_path('include', scheme='nt' if IS_WINDOWS else 'posix_prefix') + if python_include_path is not None: + system_includes.append(python_include_path) + + # Windows does not understand `-isystem`. 
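+    # Instead, on Windows the system includes are folded into `user_includes` below and
+    # emitted as plain -I flags.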
+ if IS_WINDOWS: + user_includes += system_includes + system_includes.clear() + + common_cflags = [] + if not is_standalone: + common_cflags.append(f'-DTORCH_EXTENSION_NAME={name}') + common_cflags.append('-DTORCH_API_INCLUDE_EXTENSION_H') + + common_cflags += [f"{x}" for x in _get_pybind11_abi_build_flags()] + + common_cflags += [f'-I{include}' for include in user_includes] + common_cflags += [f'-isystem {include}' for include in system_includes] + + common_cflags += [f"{x}" for x in _get_glibcxx_abi_build_flags()] + + if IS_WINDOWS: + cflags = common_cflags + COMMON_MSVC_FLAGS + ['/std:c++17'] + extra_cflags + cflags = _nt_quote_args(cflags) + else: + cflags = common_cflags + ['-fPIC', '-std=c++17'] + extra_cflags + + if with_cuda and IS_HIP_EXTENSION: + cuda_flags = ['-DWITH_HIP'] + cflags + COMMON_HIP_FLAGS + COMMON_HIPCC_FLAGS + cuda_flags += extra_cuda_cflags + cuda_flags += _get_rocm_arch_flags(cuda_flags) + elif with_cuda: + cuda_flags = common_cflags + COMMON_NVCC_FLAGS + _get_cuda_arch_flags() + if IS_WINDOWS: + for flag in COMMON_MSVC_FLAGS: + cuda_flags = ['-Xcompiler', flag] + cuda_flags + for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS: + cuda_flags = ['-Xcudafe', '--diag_suppress=' + ignore_warning] + cuda_flags + cuda_flags = cuda_flags + ['-std=c++17'] + cuda_flags = _nt_quote_args(cuda_flags) + cuda_flags += _nt_quote_args(extra_cuda_cflags) + else: + cuda_flags += ['--compiler-options', "'-fPIC'"] + cuda_flags += extra_cuda_cflags + if not any(flag.startswith('-std=') for flag in cuda_flags): + cuda_flags.append('-std=c++17') + cc_env = os.getenv("CC") + if cc_env is not None: + cuda_flags = ['-ccbin', cc_env] + cuda_flags + else: + cuda_flags = None + + def object_file_path(source_file: str) -> str: + # '/path/to/file.cpp' -> 'file' + file_name = os.path.splitext(os.path.basename(source_file))[0] + if _is_cuda_file(source_file) and with_cuda: + # Use a different object filename in case a C++ and CUDA file have + # the same filename but different extension (.cpp vs. .cu). + target = f'{file_name}.cuda.o' + else: + target = f'{file_name}.o' + return target + + objects = [object_file_path(src) for src in sources] + ldflags = ([] if is_standalone else [SHARED_FLAG]) + extra_ldflags + + # The darwin linker needs explicit consent to ignore unresolved symbols. + if IS_MACOS: + ldflags.append('-undefined dynamic_lookup') + elif IS_WINDOWS: + ldflags = _nt_quote_args(ldflags) + + ext = EXEC_EXT if is_standalone else LIB_EXT + library_target = f'{name}{ext}' + + _write_ninja_file( + path=path, + cflags=cflags, + post_cflags=None, + cuda_cflags=cuda_flags, + cuda_post_cflags=None, + cuda_dlink_post_cflags=None, + sources=sources, + objects=objects, + ldflags=ldflags, + library_target=library_target, + with_cuda=with_cuda) + + +def _write_ninja_file(path, + cflags, + post_cflags, + cuda_cflags, + cuda_post_cflags, + cuda_dlink_post_cflags, + sources, + objects, + ldflags, + library_target, + with_cuda) -> None: + r"""Write a ninja file that does the desired compiling and linking. + + `path`: Where to write this file + `cflags`: list of flags to pass to $cxx. Can be None. + `post_cflags`: list of flags to append to the $cxx invocation. Can be None. + `cuda_cflags`: list of flags to pass to $nvcc. Can be None. + `cuda_postflags`: list of flags to append to the $nvcc invocation. Can be None. + `sources`: list of paths to source files + `objects`: list of desired paths to objects, one per source. + `ldflags`: list of flags to pass to linker. Can be None. 
+ `library_target`: Name of the output library. Can be None; in that case, + we do no linking. + `with_cuda`: If we should be compiling with CUDA. + """ + def sanitize_flags(flags): + if flags is None: + return [] + else: + return [flag.strip() for flag in flags] + + cflags = sanitize_flags(cflags) + post_cflags = sanitize_flags(post_cflags) + cuda_cflags = sanitize_flags(cuda_cflags) + cuda_post_cflags = sanitize_flags(cuda_post_cflags) + cuda_dlink_post_cflags = sanitize_flags(cuda_dlink_post_cflags) + ldflags = sanitize_flags(ldflags) + + # Sanity checks... + assert len(sources) == len(objects) + assert len(sources) > 0 + + compiler = get_cxx_compiler() + + # Version 1.3 is required for the `deps` directive. + config = ['ninja_required_version = 1.3'] + config.append(f'cxx = {compiler}') + if with_cuda or cuda_dlink_post_cflags: + if "PYTORCH_NVCC" in os.environ: + nvcc = os.getenv("PYTORCH_NVCC") # user can set nvcc compiler with ccache using the environment variable here + else: + if IS_HIP_EXTENSION: + nvcc = _join_rocm_home('bin', 'hipcc') + else: + nvcc = _join_cuda_home('bin', 'nvcc') + config.append(f'nvcc = {nvcc}') + + if IS_HIP_EXTENSION: + post_cflags = COMMON_HIP_FLAGS + post_cflags + flags = [f'cflags = {" ".join(cflags)}'] + flags.append(f'post_cflags = {" ".join(post_cflags)}') + if with_cuda: + flags.append(f'cuda_cflags = {" ".join(cuda_cflags)}') + flags.append(f'cuda_post_cflags = {" ".join(cuda_post_cflags)}') + flags.append(f'cuda_dlink_post_cflags = {" ".join(cuda_dlink_post_cflags)}') + flags.append(f'ldflags = {" ".join(ldflags)}') + + # Turn into absolute paths so we can emit them into the ninja build + # file wherever it is. + sources = [os.path.abspath(file) for file in sources] + + # See https://ninja-build.org/build.ninja.html for reference. + compile_rule = ['rule compile'] + if IS_WINDOWS: + compile_rule.append( + ' command = cl /showIncludes $cflags -c $in /Fo$out $post_cflags') + compile_rule.append(' deps = msvc') + else: + compile_rule.append( + ' command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags') + compile_rule.append(' depfile = $out.d') + compile_rule.append(' deps = gcc') + + if with_cuda: + cuda_compile_rule = ['rule cuda_compile'] + nvcc_gendeps = '' + # --generate-dependencies-with-compile is not supported by ROCm + # Nvcc flag `--generate-dependencies-with-compile` is not supported by sccache, which may increase build time. + if torch.version.cuda is not None and os.getenv('TORCH_EXTENSION_SKIP_NVCC_GEN_DEPENDENCIES', '0') != '1': + cuda_compile_rule.append(' depfile = $out.d') + cuda_compile_rule.append(' deps = gcc') + # Note: non-system deps with nvcc are only supported + # on Linux so use --generate-dependencies-with-compile + # to make this work on Windows too. + nvcc_gendeps = '--generate-dependencies-with-compile --dependency-output $out.d' + cuda_compile_rule.append( + f' command = $nvcc {nvcc_gendeps} $cuda_cflags -c $in -o $out $cuda_post_cflags') + + # Emit one build rule per source to enable incremental build. 
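# For illustration only (hypothetical file names), the loop below emits statements such as
#   build my_kernel.cuda.o: cuda_compile /abs/path/my_kernel.cu
#   build bindings.o: compile /abs/path/bindings.cpp
# and the link rule further down then produces, e.g.,
#   build my_extension.so: link my_kernel.cuda.o bindings.o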
+ build = [] + for source_file, object_file in zip(sources, objects): + is_cuda_source = _is_cuda_file(source_file) and with_cuda + rule = 'cuda_compile' if is_cuda_source else 'compile' + if IS_WINDOWS: + source_file = source_file.replace(':', '$:') + object_file = object_file.replace(':', '$:') + source_file = source_file.replace(" ", "$ ") + object_file = object_file.replace(" ", "$ ") + build.append(f'build {object_file}: {rule} {source_file}') + + if cuda_dlink_post_cflags: + devlink_out = os.path.join(os.path.dirname(objects[0]), 'dlink.o') + devlink_rule = ['rule cuda_devlink'] + devlink_rule.append(' command = $nvcc $in -o $out $cuda_dlink_post_cflags') + devlink = [f'build {devlink_out}: cuda_devlink {" ".join(objects)}'] + objects += [devlink_out] + else: + devlink_rule, devlink = [], [] + + if library_target is not None: + link_rule = ['rule link'] + if IS_WINDOWS: + cl_paths = subprocess.check_output(['where', + 'cl']).decode(*SUBPROCESS_DECODE_ARGS).split('\r\n') + if len(cl_paths) >= 1: + cl_path = os.path.dirname(cl_paths[0]).replace(':', '$:') + else: + raise RuntimeError("MSVC is required to load C++ extensions") + link_rule.append(f' command = "{cl_path}/link.exe" $in /nologo $ldflags /out:$out') + else: + link_rule.append(' command = $cxx $in $ldflags -o $out') + + link = [f'build {library_target}: link {" ".join(objects)}'] + + default = [f'default {library_target}'] + else: + link_rule, link, default = [], [], [] + + # 'Blocks' should be separated by newlines, for visual benefit. + blocks = [config, flags, compile_rule] + if with_cuda: + blocks.append(cuda_compile_rule) # type: ignore[possibly-undefined] + blocks += [devlink_rule, link_rule, build, devlink, link, default] + content = "\n\n".join("\n".join(b) for b in blocks) + # Ninja requires a new lines at the end of the .ninja file + content += "\n" + _maybe_write(path, content) + +def _join_cuda_home(*paths) -> str: + """ + Join paths with CUDA_HOME, or raises an error if it CUDA_HOME is not set. + + This is basically a lazy way of raising an error for missing $CUDA_HOME + only once we need to get any CUDA-specific path. + """ + if CUDA_HOME is None: + raise OSError('CUDA_HOME environment variable is not set. ' + 'Please set it to your CUDA install root.') + return os.path.join(CUDA_HOME, *paths) + + +def _is_cuda_file(path: str) -> bool: + valid_ext = ['.cu', '.cuh'] + if IS_HIP_EXTENSION: + valid_ext.append('.hip') + return os.path.splitext(path)[1] in valid_ext diff --git a/venv/lib/python3.10/site-packages/torch/utils/deterministic.py b/venv/lib/python3.10/site-packages/torch/utils/deterministic.py new file mode 100644 index 0000000000000000000000000000000000000000..98a6d30b067bb496e2a1a77e142974a98494997a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/deterministic.py @@ -0,0 +1,21 @@ +import sys +import types + +import torch + + +class _Deterministic(types.ModuleType): + @property + def fill_uninitialized_memory(self): + """ + Whether to fill uninitialized memory with a known value when + :meth:`torch.use_deterministic_algorithms()` is set to ``True``. 
+ """ + return torch._C._get_deterministic_fill_uninitialized_memory() + + @fill_uninitialized_memory.setter + def fill_uninitialized_memory(self, mode): + return torch._C._set_deterministic_fill_uninitialized_memory(mode) + + +sys.modules[__name__].__class__ = _Deterministic diff --git a/venv/lib/python3.10/site-packages/torch/utils/dlpack.py b/venv/lib/python3.10/site-packages/torch/utils/dlpack.py new file mode 100644 index 0000000000000000000000000000000000000000..6bfa4b9f85bd6fc8bb8524926210b1e931e2bd50 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/dlpack.py @@ -0,0 +1,121 @@ +from typing import Any + +import torch +import enum + +from torch._C import _from_dlpack +from torch._C import _to_dlpack as to_dlpack + + +class DLDeviceType(enum.IntEnum): + # Enums as in DLPack specification (aten/src/ATen/dlpack.h) + kDLCPU = 1, + kDLGPU = 2, + kDLCPUPinned = 3, + kDLOpenCL = 4, + kDLVulkan = 7, + kDLMetal = 8, + kDLVPI = 9, + kDLROCM = 10, + kDLExtDev = 12, + kDLOneAPI = 14, + + +torch._C._add_docstr(to_dlpack, r"""to_dlpack(tensor) -> PyCapsule + +Returns an opaque object (a "DLPack capsule") representing the tensor. + +.. note:: + ``to_dlpack`` is a legacy DLPack interface. The capsule it returns + cannot be used for anything in Python other than use it as input to + ``from_dlpack``. The more idiomatic use of DLPack is to call + ``from_dlpack`` directly on the tensor object - this works when that + object has a ``__dlpack__`` method, which PyTorch and most other + libraries indeed have now. + +.. warning:: + Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``. + Behavior when a capsule is consumed multiple times is undefined. + +Args: + tensor: a tensor to be exported + +The DLPack capsule shares the tensor's memory. +""") + + +# TODO: add a typing.Protocol to be able to tell Mypy that only objects with +# __dlpack__ and __dlpack_device__ methods are accepted. +def from_dlpack(ext_tensor: Any) -> 'torch.Tensor': + """from_dlpack(ext_tensor) -> Tensor + + Converts a tensor from an external library into a ``torch.Tensor``. + + The returned PyTorch tensor will share the memory with the input tensor + (which may have come from another library). Note that in-place operations + will therefore also affect the data of the input tensor. This may lead to + unexpected issues (e.g., other libraries may have read-only flags or + immutable data structures), so the user should only do this if they know + for sure that this is fine. + + Args: + ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule): + The tensor or DLPack capsule to convert. + + If ``ext_tensor`` is a tensor (or ndarray) object, it must support + the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__`` + method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is + an opaque ``PyCapsule`` instance, typically produced by a + ``to_dlpack`` function or method. 
+ + Examples:: + + >>> import torch.utils.dlpack + >>> t = torch.arange(4) + + # Convert a tensor directly (supported in PyTorch >= 1.10) + >>> t2 = torch.from_dlpack(t) + >>> t2[:2] = -1 # show that memory is shared + >>> t2 + tensor([-1, -1, 2, 3]) + >>> t + tensor([-1, -1, 2, 3]) + + # The old-style DLPack usage, with an intermediate capsule object + >>> capsule = torch.utils.dlpack.to_dlpack(t) + >>> capsule + <capsule object "dltensor" at 0x...> + >>> t3 = torch.from_dlpack(capsule) + >>> t3 + tensor([-1, -1, 2, 3]) + >>> t3[0] = -9 # now we're sharing memory between 3 tensors + >>> t3 + tensor([-9, -1, 2, 3]) + >>> t2 + tensor([-9, -1, 2, 3]) + >>> t + tensor([-9, -1, 2, 3]) + + """ + if hasattr(ext_tensor, '__dlpack__'): + device = ext_tensor.__dlpack_device__() + # device is either CUDA or ROCm, we need to pass the current + # stream + if device[0] in (DLDeviceType.kDLGPU, DLDeviceType.kDLROCM): + stream = torch.cuda.current_stream(f'cuda:{device[1]}') + # cuda_stream is the pointer to the stream and it is a public + # attribute, but it is not documented + # The array API specifies that the default legacy stream must be passed + # with a value of 1 for CUDA + # https://data-apis.org/array-api/latest/API_specification/array_object.html?dlpack-self-stream-none#dlpack-self-stream-none + is_cuda = device[0] == DLDeviceType.kDLGPU + # Since pytorch is not using PTDS by default, let's directly pass + # the legacy stream + stream_ptr = 1 if is_cuda and stream.cuda_stream == 0 else stream.cuda_stream + dlpack = ext_tensor.__dlpack__(stream=stream_ptr) + else: + dlpack = ext_tensor.__dlpack__() + else: + # Old versions just call the converter + dlpack = ext_tensor + return _from_dlpack(dlpack) diff --git a/venv/lib/python3.10/site-packages/torch/utils/file_baton.py b/venv/lib/python3.10/site-packages/torch/utils/file_baton.py new file mode 100644 index 0000000000000000000000000000000000000000..b55db82b8532b7a7679e267773156b39ae08b5e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/file_baton.py @@ -0,0 +1,49 @@ +import os +import time + + +class FileBaton: + """A primitive, file-based synchronization utility.""" + + def __init__(self, lock_file_path, wait_seconds=0.1): + """ + Create a new :class:`FileBaton`. + + Args: + lock_file_path: The path to the file used for locking. + wait_seconds: The seconds to periodically sleep (spin) when + calling ``wait()``. + """ + self.lock_file_path = lock_file_path + self.wait_seconds = wait_seconds + self.fd = None + + def try_acquire(self): + """ + Try to atomically create a file under exclusive access. + + Returns: + True if the file could be created, else False. + """ + try: + self.fd = os.open(self.lock_file_path, os.O_CREAT | os.O_EXCL) + return True + except FileExistsError: + return False + + def wait(self): + """ + Periodically sleeps for a certain amount until the baton is released. + + The amount of time slept depends on the ``wait_seconds`` parameter + passed to the constructor.
+ """ + while os.path.exists(self.lock_file_path): + time.sleep(self.wait_seconds) + + def release(self): + """Release the baton and removes its file.""" + if self.fd is not None: + os.close(self.fd) + + os.remove(self.lock_file_path) diff --git a/venv/lib/python3.10/site-packages/torch/utils/flop_counter.py b/venv/lib/python3.10/site-packages/torch/utils/flop_counter.py new file mode 100644 index 0000000000000000000000000000000000000000..c76a9a2432a7853a873b2184dc0fc3d44d6f4034 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/flop_counter.py @@ -0,0 +1,559 @@ +import torch +import torch.nn as nn +from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten +from typing import List, Any, Dict, Optional, Union, NamedTuple +from collections import defaultdict +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils.hooks import RemovableHandle +from torch._decomp import register_decomposition +from math import prod +from functools import wraps + + + +__all__ = ["FlopCounterMode", "register_flop_formula"] + +aten = torch.ops.aten + +def get_shape(i): + if isinstance(i, torch.Tensor): + return i.shape + return i + +flop_registry: Dict[Any, Any] = {} + +def shape_wrapper(f): + @wraps(f) + def nf(*args, out=None, **kwargs): + args, kwargs, out_shape = tree_map(get_shape, (args, kwargs, out)) + return f(*args, out_shape=out_shape, **kwargs) + return nf + +def register_flop_formula(targets, get_raw=False): + def register_fun(flop_formula): + if not get_raw: + flop_formula = shape_wrapper(flop_formula) + register_decomposition(targets, registry=flop_registry, unsafe=True)(flop_formula) + return flop_formula + + return register_fun + +@register_flop_formula(aten.mm) +def mm_flop(a_shape, b_shape, *args, out_shape=None, **kwargs) -> int: + """Count flops for matmul.""" + # Inputs should be a list of length 2. + # Inputs contains the shapes of two matrices. + m, k = a_shape + k2, n = b_shape + assert k == k2 + # NB(chilli): Should be 2 * k - 1 technically for FLOPs. + return m * n * 2 * k + +@register_flop_formula(aten.addmm) +def addmm_flop(self_shape, a_shape, b_shape, out_shape=None, **kwargs) -> int: + """Count flops for addmm.""" + return mm_flop(a_shape, b_shape) + +@register_flop_formula(aten.bmm) +def bmm_flop(a_shape, b_shape, out_shape=None, **kwargs) -> int: + """Count flops for the bmm operation.""" + # Inputs should be a list of length 2. + # Inputs contains the shapes of two tensor. + b, m, k = a_shape + b2, k2, n = b_shape + assert b == b2 + assert k == k2 + # NB(chilli): Should be 2 * k - 1 technically for FLOPs. + flop = b * m * n * 2 * k + return flop + +@register_flop_formula(aten.baddbmm) +def baddbmm_flop(self_shape, a_shape, b_shape, out_shape=None, **kwargs) -> int: + """Count flops for the baddbmm operation.""" + # Inputs should be a list of length 3. + # Inputs contains the shapes of three tensors. + return bmm_flop(a_shape, b_shape) + + +def conv_flop_count( + x_shape: List[int], + w_shape: List[int], + out_shape: List[int], + transposed: bool = False, +) -> int: + """Count flops for convolution. + + Note only multiplication is + counted. Computation for bias are ignored. + Flops for a transposed convolution are calculated as + flops = (x_shape[2:] * prod(w_shape) * batch_size). + Args: + x_shape (list(int)): The input shape before convolution. + w_shape (list(int)): The filter shape. + out_shape (list(int)): The output shape after convolution. 
+ transposed (bool): is the convolution transposed + Returns: + int: the number of flops + """ + + batch_size = x_shape[0] + conv_shape = (x_shape if transposed else out_shape)[2:] + c_out, c_in, *filter_size = w_shape + + """ + General idea here is that for a regular conv, for each point in the output + spatial dimension we convolve the filter with something (hence + `prod(conv_shape) * prod(filter_size)` ops). Then, this gets multiplied by + 1. batch_size, 2. the cross product of input and weight channels. + + For the transpose, it's not each point in the *output* spatial dimension but + each point in the *input* spatial dimension. + """ + # NB(chilli): I don't think this properly accounts for padding :think: + # NB(chilli): Should be 2 * c_in - 1 technically for FLOPs. + flop = prod(conv_shape) * prod(filter_size) * batch_size * c_out * c_in * 2 + return flop + +@register_flop_formula([aten.convolution, aten._convolution]) +def conv_flop(x_shape, w_shape, _bias, _stride, _padding, _dilation, transposed, *args, out_shape=None, **kwargs) -> int: + """Count flops for convolution.""" + return conv_flop_count(x_shape, w_shape, out_shape, transposed=transposed) + + +@register_flop_formula(aten.convolution_backward) +def conv_backward_flop( + grad_out_shape, + x_shape, + w_shape, + _bias, + _stride, + _padding, + _dilation, + transposed, + _output_padding, + _groups, + output_mask, + out_shape) -> int: + + def t(shape): + return [shape[1], shape[0]] + list(shape[2:]) + flop_count = 0 + + """ + Let's say we have a regular 1D conv + {A, B, C} [inp] + {i, j} [weight] + => (conv) + {Ai + Bj, Bi + Cj} [out] + + And as a reminder, the transposed conv of the above is + => {Ai, Aj + Bi, Bj + Ci, Cj} [transposed conv out] + + For the backwards of conv, we now have + {D, E} [grad_out] + {A, B, C} [inp] + {i, j} [weight] + + # grad_inp as conv_transpose(grad_out, weight) + Let's first compute grad_inp. To do so, we can simply look at all the + multiplications that each element of inp is involved in. For example, A is + only involved in the first element of the output (and thus only depends upon + D in grad_out), and C is only involved in the last element of the output + (and thus only depends upon E in grad_out) + + {Di, Dj + Ei, Ej} [grad_inp] + + Note that this corresponds to the below conv_transpose. This gives us the + output_mask[0] branch, which is grad_inp. + + {D, E} [inp (grad_out)] + {i, j} [weight] + => (conv_transpose) + {Di, Dj + Ei, Ej} [out (grad_inp)] + + I leave the fact that grad_inp for a transposed conv is just conv(grad_out, + weight) as an exercise for the reader. 
+ + # grad_weight as conv(inp, grad_out) + To compute grad_weight, we again look at the terms in the output, which as + a reminder is: + => {Ai + Bj, Bi + Cj} [out] + => {D, E} [grad_out] + If we manually compute the gradient for the weights, we see it's + {AD + BE, BD + CE} [grad_weight] + + This corresponds to the below conv + {A, B, C} [inp] + {D, E} [weight (grad_out)] + => (conv) + {AD + BE, BD + CE} [out (grad_weight)] + + # grad_weight of transposed conv as conv(grad_out, inp) + As a reminder, the terms of the output of a transposed conv are: + => {Ai, Aj + Bi, Bj + Ci, Cj} [transposed conv out] + => {D, E, F, G} [grad_out] + + Manually computing the gradient for the weights, we see it's + {AD + BE + CF, AE + BF + CG} [grad_weight] + + This corresponds to the below conv + {D, E, F, G} [inp (grad_out)] + {A, B, C} [weight (inp)] + => (conv) + {AD + BE + CF, AE + BF + CG} [out (grad_weight)] + + For the full backwards formula, there are also some details involving + transpose of the batch/channel dimensions and groups, but I skip those for + the sake of brevity (and they're pretty similar to matmul backwards) + + Check [conv backwards decomposition as conv forwards] + """ + # grad_inp as conv_transpose(grad_out, weight) + if output_mask[0]: + grad_input_shape = get_shape(out_shape[0]) + flop_count += conv_flop_count(grad_out_shape, w_shape, grad_input_shape, not transposed) + + if output_mask[1]: + grad_weight_shape = get_shape(out_shape[1]) + if transposed: + # grad_weight of transposed conv as conv(grad_out, inp) + flop_count += conv_flop_count(t(grad_out_shape), t(x_shape), t(grad_weight_shape), transposed=False) + else: + # grad_weight as conv(inp, grad_out) + flop_count += conv_flop_count(t(x_shape), t(grad_out_shape), t(grad_weight_shape), transposed=False) + + return flop_count + +def sdpa_flop_count(query_shape, key_shape, value_shape): + """ + Count flops for self-attention. + + NB: We can assume that value_shape == key_shape + """ + b, h, s_q, d_q = query_shape + _b2, _h2, s_k, _d2 = key_shape + _b3, _h3, _s3, d_v = value_shape + assert b == _b2 == _b3 and h == _h2 == _h3 and d_q == _d2 and s_k == _s3 and d_q == _d2 + total_flops = 0 + # q: [b, h, s_q, d_q] @ k: [b, h, d_q, s_k] -> scores: [b, h, s_q, s_k] + total_flops += bmm_flop((b * h, s_q, d_q), (b * h, d_q, s_k)) + # scores: [b, h, s_q, s_k] @ v: [b, h, s_k, d_v] -> out: [b, h, s_q, d_v] + total_flops += bmm_flop((b * h, s_q, s_k), (b * h, s_k, d_v)) + return total_flops + + +@register_flop_formula([aten._scaled_dot_product_efficient_attention, aten._scaled_dot_product_flash_attention]) +def sdpa_flop(query_shape, key_shape, value_shape, *args, out_shape=None, **kwargs) -> int: + """Count flops for self-attention.""" + # NB: We aren't accounting for causal attention here + return sdpa_flop_count(query_shape, key_shape, value_shape) + + +def sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape): + total_flops = 0 + b, h, s_q, d_q = query_shape + _b2, _h2, s_k, _d2 = key_shape + _b3, _h3, _s3, d_v = value_shape + _b4, _h4, _s4, _d4 = grad_out_shape + assert b == _b2 == _b3 == _b4 and h == _h2 == _h3 == _h4 and d_q == _d2 + assert d_v == _d4 and s_k == _s3 and s_q == _s4 + total_flops = 0 + # Step 1: We recompute the scores matrix. + # q: [b, h, s_q, d_q] @ k: [b, h, d_q, s_k] -> scores: [b, h, s_q, s_k] + total_flops += bmm_flop((b * h, s_q, d_q), (b * h, d_q, s_k)) + + # Step 2: We propagate the gradients through the score @ v operation. 
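# (For orientation: steps 1-3 of this function amount to five batched matmuls in total, so
#  whenever d_q == d_v the backward pass counts exactly 2.5x the forward sdpa_flop_count.)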
+ # gradOut: [b, h, s_q, d_v] @ v: [b, h, d_v, s_k] -> gradScores: [b, h, s_q, s_k] + total_flops += bmm_flop((b * h, s_q, d_v), (b * h, d_v, s_k)) + # scores: [b, h, s_k, s_q] @ gradOut: [b, h, s_q, d_v] -> gradV: [b, h, s_k, d_v] + total_flops += bmm_flop((b * h, s_k, s_q), (b * h, s_q, d_v)) + + # Step 3: We propagate th gradients through the k @ v operation + # gradScores: [b, h, s_q, s_k] @ k: [b, h, s_k, d_q] -> gradQ: [b, h, s_q, d_q] + total_flops += bmm_flop((b * h, s_q, s_k), (b * h, s_k, d_q)) + # q: [b, h, d_q, s_q] @ gradScores: [b, h, s_q, s_k] -> gradK: [b, h, d_q, s_k] + total_flops += bmm_flop((b * h, d_q, s_q), (b * h, s_q, s_k)) + return total_flops + + +@register_flop_formula([aten._scaled_dot_product_efficient_attention_backward, aten._scaled_dot_product_flash_attention_backward]) +def sdpa_backward_flop(grad_out_shape, query_shape, key_shape, value_shape, *args, out_shape=None, **kwargs) -> int: + """Count flops for self-attention backward.""" + return sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape) + +flop_registry = { + aten.mm: mm_flop, + aten.addmm: addmm_flop, + aten.bmm: bmm_flop, + aten.baddbmm: baddbmm_flop, + aten.convolution: conv_flop, + aten._convolution: conv_flop, + aten.convolution_backward: conv_backward_flop, + aten._scaled_dot_product_efficient_attention: sdpa_flop, + aten._scaled_dot_product_flash_attention: sdpa_flop, + aten._scaled_dot_product_efficient_attention_backward: sdpa_backward_flop, + aten._scaled_dot_product_flash_attention_backward: sdpa_backward_flop, +} + +def normalize_tuple(x): + if not isinstance(x, tuple): + return (x,) + return x + + +# Define the suffixes for different orders of magnitude +suffixes = ["", "K", "M", "B", "T"] +# Thanks BingChat! +def get_suffix_str(number): + # Find the index of the appropriate suffix based on the number of digits + # with some additional overflow. + # i.e. 1.01B should be displayed as 1001M, not 1.001B + index = max(0, min(len(suffixes) - 1, (len(str(number)) - 2) // 3)) + return suffixes[index] + +def convert_num_with_suffix(number, suffix): + index = suffixes.index(suffix) + # Divide the number by 1000^index and format it to two decimal places + value = f"{number / 1000 ** index:.3f}" + # Return the value and the suffix as a string + return value + suffixes[index] + +def convert_to_percent_str(num, denom): + if denom == 0: + return "0%" + return f"{num / denom:.2%}" + +def _pytreeify_preserve_structure(f): + @wraps(f) + def nf(args): + flat_args, spec = tree_flatten(args) + out = f(*flat_args) + return tree_unflatten(out, spec) + + return nf + + +class FlopCounterMode(TorchDispatchMode): + """ + ``FlopCounterMode`` is a context manager that counts the number of flops within its context. + + It does this using a ``TorchDispatchMode``. + + It also supports hierarchical output by passing a module (or list of + modules) to FlopCounterMode on construction. If you do not need hierarchical + output, you do not need to use it with a module. + + Example usage + + .. code-block:: python + + mod = ... 
+ flop_counter = FlopCounterMode(mod) + with flop_counter: + mod.sum().backward() + + """ + + def __init__( + self, + mods: Optional[Union[torch.nn.Module, List[torch.nn.Module]]] = None, + depth: int = 2, + display: bool = True, + custom_mapping: Optional[Dict[Any, Any]] = None): + self.flop_counts: Dict[str, Dict[Any, int]] = defaultdict(lambda: defaultdict(int)) + self.depth = depth + self.parents = ["Global"] + self.in_backward = False + self.display = display + if custom_mapping is None: + custom_mapping = {} + if isinstance(mods, torch.nn.Module): + mods = [mods] + self.mods = mods + # Keys will include the modules in `mods` and their submodules + self._module_to_forward_hook_handles: Dict[nn.Module, _ForwardHookHandles] = {} + self.flop_registry = { + **flop_registry, + **{k: v if getattr(v, "_get_raw", False) else shape_wrapper(v) for k, v in custom_mapping.items()} + } + + def _register_forward_hooks(self): + if self.mods is None: + return + for mod in self.mods: + prefix = type(mod).__name__ + for name, module in dict(mod.named_modules()).items(): + if name == "": + name = prefix + else: + name = ".".join([prefix, name]) + + forward_pre_hook_handle = module.register_forward_pre_hook(self._enter_module(name)) + forward_hook_handle = module.register_forward_hook(self._exit_module(name)) + self._module_to_forward_hook_handles[module] = _ForwardHookHandles( + forward_pre_hook_handle, forward_hook_handle + ) + + def _deregister_forward_hooks(self): + for forward_hook_handles in self._module_to_forward_hook_handles.values(): + forward_hook_handles[0].remove() + forward_hook_handles[1].remove() + self._module_to_forward_hook_handles.clear() + + def _enter_module(self, name): + def f(module, inputs): + out = _pytreeify_preserve_structure(self._create_pre_module(name))(inputs) + return out + + return f + + def _exit_module(self, name): + def f(module, inputs, outputs): + outputs = _pytreeify_preserve_structure(self._create_post_module(name))(outputs) + return outputs + return f + + def _create_post_module(self, name): + class PushState(torch.autograd.Function): + @staticmethod + def forward(ctx, *args): + assert self.parents[-1] == name, f"{self.parents[-1]} is not {name}" + self.parents.pop() + args = tree_map(lambda x: x.clone() if isinstance(x, torch.Tensor) else x, args) + return args + + @staticmethod + def backward(ctx, *grad_outs): + self.in_backward = True + self.parents.append(name) + return grad_outs + + return PushState.apply + + def _create_pre_module(self, name): + class PopState(torch.autograd.Function): + @staticmethod + def forward(ctx, *args): + if self.in_backward: + self.parents = ["Global"] + self.in_backward = True + self.parents.append(name) + args = tree_map(lambda x: x.clone() if isinstance(x, torch.Tensor) else x, args) + return args + + @staticmethod + def backward(ctx, *grad_outs): + assert self.parents[-1] == name + self.parents.pop() + return grad_outs + + return PopState.apply + + def get_total_flops(self) -> int: + return sum(self.flop_counts['Global'].values()) + + def get_flop_counts(self) -> Dict[str, Dict[Any, int]]: + """Return the flop counts as a dictionary of dictionaries. + + The outer + dictionary is keyed by module name, and the inner dictionary is keyed by + operation name. + + Returns: + Dict[str, Dict[Any, int]]: The flop counts as a dictionary. 
+ """ + return {k: dict(v) for k, v in self.flop_counts.items()} + + def get_table(self, depth=None): + if depth is None: + depth = self.depth + if depth is None: + depth = 999999 + + import tabulate + tabulate.PRESERVE_WHITESPACE = True + header = ["Module", "FLOP", "% Total"] + values = [] + global_flops = self.get_total_flops() + global_suffix = get_suffix_str(global_flops) + is_global_subsumed = False + + def process_mod(mod_name, depth): + nonlocal is_global_subsumed + + total_flops = sum(self.flop_counts[mod_name].values()) + + is_global_subsumed |= total_flops >= global_flops + + padding = " " * depth + values = [] + values.append([ + padding + mod_name, + convert_num_with_suffix(total_flops, global_suffix), + convert_to_percent_str(total_flops, global_flops) + ]) + for k, v in self.flop_counts[mod_name].items(): + values.append([ + padding + " - " + str(k), + convert_num_with_suffix(v, global_suffix), + convert_to_percent_str(v, global_flops) + ]) + return values + + for mod in self.flop_counts.keys(): + if mod == 'Global': + continue + mod_depth = mod.count(".") + 1 + if mod_depth > depth: + continue + + cur_values = process_mod(mod, mod_depth - 1) + values.extend(cur_values) + + # We do a bit of messing around here to only output the "Global" value + # if there are any FLOPs in there that aren't already fully contained by + # a module. + if 'Global' in self.flop_counts and not is_global_subsumed: + for idx, value in enumerate(values): + values[idx][0] = " " + values[idx][0] + + values = process_mod('Global', 0) + values + + if len(values) == 0: + values = [["Global", "0", "0%"]] + + return tabulate.tabulate(values, headers=header, colalign=("left", "right", "right")) + + def __enter__(self): + self.flop_counts.clear() + self._register_forward_hooks() + super().__enter__() + return self + + def __exit__(self, *args): + if self.display: + print(self.get_table(self.depth)) + self._deregister_forward_hooks() + super().__exit__(*args) + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + kwargs = kwargs if kwargs else {} + out = func(*args, **kwargs) + func_packet = func._overloadpacket + if func_packet in self.flop_registry: + flop_count_func = self.flop_registry[func_packet] + flop_count = flop_count_func(*args, **kwargs, out=out) # type: ignore[operator] + if len(set(self.parents)) != len(self.parents): + print( + "The module hierarchy tracking seems to be messed up." + "Please file a bug or just run the flop counter without" + "tracking the module hierarchy (i.e. `with FlopCounterMode():`)" + ) + for par in set(self.parents): + self.flop_counts[par][func_packet] += flop_count + + return out + +class _ForwardHookHandles(NamedTuple): + forward_pre_hook_handle: RemovableHandle + forward_hook_handle: RemovableHandle diff --git a/venv/lib/python3.10/site-packages/torch/utils/hooks.py b/venv/lib/python3.10/site-packages/torch/utils/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..790bb498e5d83e05b2b59b58720e0d7ca1ef5e0f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/hooks.py @@ -0,0 +1,252 @@ +import torch +from collections import OrderedDict +import weakref +import warnings +from typing import Any, Tuple + +__all__ = ["RemovableHandle", "unserializable_hook", "warn_if_has_hooks", "BackwardHook"] + +class RemovableHandle: + r""" + A handle which provides the capability to remove a hook. + + Args: + hooks_dict (dict): A dictionary of hooks, indexed by hook ``id``. 
+ extra_dict (Union[dict, List[dict]]): An additional dictionary or list of + dictionaries whose keys will be deleted when the same keys are + removed from ``hooks_dict``. + """ + + id: int + next_id: int = 0 + + def __init__(self, hooks_dict: Any, *, extra_dict: Any = None) -> None: + self.hooks_dict_ref = weakref.ref(hooks_dict) + self.id = RemovableHandle.next_id + RemovableHandle.next_id += 1 + + self.extra_dict_ref: Tuple = () + if isinstance(extra_dict, dict): + self.extra_dict_ref = (weakref.ref(extra_dict),) + elif isinstance(extra_dict, list): + self.extra_dict_ref = tuple(weakref.ref(d) for d in extra_dict) + + def remove(self) -> None: + hooks_dict = self.hooks_dict_ref() + if hooks_dict is not None and self.id in hooks_dict: + del hooks_dict[self.id] + + for ref in self.extra_dict_ref: + extra_dict = ref() + if extra_dict is not None and self.id in extra_dict: + del extra_dict[self.id] + + def __getstate__(self): + if self.extra_dict_ref is None: + return (self.hooks_dict_ref(), self.id) + else: + return (self.hooks_dict_ref(), self.id, tuple(ref() for ref in self.extra_dict_ref)) + + def __setstate__(self, state) -> None: + if state[0] is None: + # create a dead reference + self.hooks_dict_ref = weakref.ref(OrderedDict()) + else: + self.hooks_dict_ref = weakref.ref(state[0]) + self.id = state[1] + RemovableHandle.next_id = max(RemovableHandle.next_id, self.id + 1) + + if len(state) < 3 or state[2] is None: + self.extra_dict_ref = () + else: + self.extra_dict_ref = tuple(weakref.ref(d) for d in state[2]) + + def __enter__(self) -> "RemovableHandle": + return self + + def __exit__(self, type: Any, value: Any, tb: Any) -> None: + self.remove() + + +def unserializable_hook(f): + """ + Mark a function as an unserializable hook with this decorator. + + This suppresses warnings that would otherwise arise if you attempt + to serialize a tensor that has a hook. + """ + f.__torch_unserializable__ = True + return f + + +def warn_if_has_hooks(tensor): + if tensor._backward_hooks: + for k in tensor._backward_hooks: + hook = tensor._backward_hooks[k] + if not hasattr(k, "__torch_unserializable__"): + warnings.warn(f"backward hook {repr(hook)} on tensor will not be " + "serialized. If this is expected, you can " + "decorate the function with @torch.utils.hooks.unserializable_hook " + "to suppress this warning") + +class BackwardHook: + """ + A wrapper class to implement nn.Module backward hooks. 
+ + It handles: + - Ignoring non-Tensor inputs and replacing them by None before calling the user hook + - Generating the proper Node to capture a set of Tensor's gradients + - Linking the gradients captures for the outputs with the gradients captured for the input + - Calling the user hook once both output and input gradients are available + """ + + def __init__(self, module, user_hooks, user_pre_hooks): + self.user_hooks = user_hooks + self.user_pre_hooks = user_pre_hooks + self.module = module + + self.grad_outputs = None + self.n_outputs = -1 + self.output_tensors_index = None + self.n_inputs = -1 + self.input_tensors_index = None + + def _pack_with_none(self, indices, values, size): + res = [None] * size + for idx, val in zip(indices, values): + res[idx] = val + + return tuple(res) + + def _unpack_none(self, indices, values): + res = [] + for idx in indices: + res.append(values[idx]) + + return tuple(res) + + def _set_user_hook(self, grad_fn): + def hook(grad_input, _): + if self.grad_outputs is None: + # This happens because the gradient in your nn.Module flows to + # the Module's input without " passing through the Module's + # output, e.g. when you're doing double backward. + return + res = self._pack_with_none(self.input_tensors_index, grad_input, self.n_inputs) + + for hook in self.user_hooks: + out = hook(self.module, res, self.grad_outputs) + + if out is None: + continue + + if len(out) != len(res): + raise RuntimeError("Backward hook returned an invalid number of grad_input, " + f"got {len(out)}, but expected {len(res)}") + + res = out + + self.grad_outputs = None + + return self._unpack_none(self.input_tensors_index, res) + + grad_fn.register_hook(hook) + + def _apply_on_tensors(self, fn, args): + # Can be used to apply the given function to the tensors contained in the + # args. Will return updated args and the tensors indices + tensors_idx = [] + tensors = [] + + requires_grad = False + for i, arg in enumerate(args): + if isinstance(arg, torch.Tensor): + tensors_idx.append(i) + tensors.append(arg) + requires_grad |= arg.requires_grad + + if not (requires_grad and torch.is_grad_enabled()): + return args, None + + new_tensors = torch.nn.modules._functions.BackwardHookFunction.apply(*tensors) + if len(new_tensors) == 0: + raise RuntimeError("Cannot set Module backward hook for a Module with no input Tensors.") + + grad_fns = [t.grad_fn for t in new_tensors if t.grad_fn is not None and t.grad_fn.name() == "BackwardHookFunctionBackward"] + if len(grad_fns) == 0: + raise RuntimeError("Error while setting up backward hooks. 
Please open " + "an issue with a code sample to reproduce this.") + + fn(grad_fns[0]) + + arg_list = list(args) + for idx, val in zip(tensors_idx, new_tensors): + arg_list[idx] = val + + if type(args) is tuple: + out = tuple(arg_list) + else: + out = type(args)(*arg_list) + return out, tensors_idx + + def setup_input_hook(self, args): + def fn(grad_fn): + self._set_user_hook(grad_fn) + + res, input_idx = self._apply_on_tensors(fn, args) + self.n_inputs = len(args) + self.input_tensors_index = input_idx + return res + + def setup_output_hook(self, args): + def fn(grad_fn): + def hook(_, grad_output): + self.grad_outputs = self._pack_with_none(self.output_tensors_index, + grad_output, + self.n_outputs) + + if self.user_pre_hooks: + expected_len = len(self.grad_outputs) + for user_pre_hook in self.user_pre_hooks: + hook_grad_outputs = user_pre_hook(self.module, self.grad_outputs) + if hook_grad_outputs is None: + continue + + actual_len = len(hook_grad_outputs) + if actual_len != expected_len: + raise RuntimeError("Backward pre hook returned an invalid number of grad_output, " + f"got {actual_len}, but expected {expected_len}") + self.grad_outputs = hook_grad_outputs + + # We need to be able to clear self.grad_outputs but also return it + local_grad_outputs = self.grad_outputs + + # Special case if no input required gradients, this hook should call the user + # hook directly + if self.input_tensors_index is None: + grad_inputs = self._pack_with_none([], [], self.n_inputs) + for user_hook in self.user_hooks: + res = user_hook(self.module, grad_inputs, self.grad_outputs) + if res is not None and not (isinstance(res, tuple) and all(el is None for el in res)): + raise RuntimeError("Backward hook for Modules where no input requires " + "gradient should always return None or None for all gradients.") + self.grad_outputs = None + + if local_grad_outputs is not None: + assert self.output_tensors_index is not None # mypy + return tuple(local_grad_outputs[i] for i in self.output_tensors_index) + + grad_fn.register_hook(hook) + + is_tuple = True + if not isinstance(args, tuple): + args = (args,) + is_tuple = False + + res, output_idx = self._apply_on_tensors(fn, args) + self.n_outputs = len(args) + self.output_tensors_index = output_idx + + if not is_tuple: + res = res[0] + return res diff --git a/venv/lib/python3.10/site-packages/torch/utils/mkldnn.py b/venv/lib/python3.10/site-packages/torch/utils/mkldnn.py new file mode 100644 index 0000000000000000000000000000000000000000..2d1d8cd89ff59d99c821ee3c8ec6b737e44a3b8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/mkldnn.py @@ -0,0 +1,233 @@ +import torch + + +class MkldnnLinear(torch.jit.ScriptModule): + def __init__(self, dense_module, dtype): + super().__init__() + self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype)) + if dense_module.bias is not None: + # Bias can be fp32 or bf16 for OneDNN bf16 path, but for good accuracy, + # we use fp32 dtype. 
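# (Note that the conversion below intentionally omits the `dtype` argument that the
#  weight buffer above was converted with.)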
+ self.register_buffer('bias', dense_module.bias.to_mkldnn()) + else: + # TODO: Remove this once ScriptModule supports registering None buffer + self.register_buffer( + 'bias', + torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn()) + + @torch.jit.script_method + def __getstate__(self): + return (self.weight.to_dense(), self.bias.to_dense(), self.training) + + @torch.jit.script_method + def __setstate__(self, state): + self.weight = state[0].to_mkldnn() + self.bias = state[1].to_mkldnn() + self.training = state[2] + + @torch.jit.script_method + def forward(self, x): + x_mkldnn = x if x.is_mkldnn else x.to_mkldnn() + y_mkldnn = torch._C._nn.mkldnn_linear(x_mkldnn, self.weight, self.bias) + y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense() + return y + + +class _MkldnnConvNd(torch.jit.ScriptModule): + """Common base of MkldnnConv1d and MkldnnConv2d.""" + + __constants__ = ['stride', 'padding', 'dilation', 'groups'] + + def __init__(self, dense_module): + super().__init__() + + self.stride = dense_module.stride + self.padding = dense_module.padding + self.dilation = dense_module.dilation + self.groups = dense_module.groups + + if dense_module.bias is not None: + self.register_buffer('bias', dense_module.bias.to_mkldnn()) + else: + # Bias can be fp32 or bf16 for OneDNN bf16 path, but for good accuracy, + # we use fp32 dtype. + # TODO: Remove this once ScriptModule supports registering None buffer + self.register_buffer( + 'bias', + torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn()) + + @torch.jit.script_method + def __getstate__(self): + return (self.weight.to_dense(), self.bias.to_dense(), self.training) + + @torch.jit.script_method + def forward(self, x): + return torch.mkldnn_convolution( + x, + self.weight, + self.bias, + self.padding, + self.stride, + self.dilation, + self.groups) + + +class MkldnnConv1d(_MkldnnConvNd): + def __init__(self, dense_module, dtype): + super().__init__(dense_module) + + self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype)) + + @torch.jit.script_method + def __setstate__(self, state): + self.weight = state[0].to_mkldnn() + self.bias = state[1].to_mkldnn() + self.training = state[2] + + +class MkldnnConv2d(_MkldnnConvNd): + def __init__(self, dense_module, dtype): + super().__init__(dense_module) + + self.register_buffer('weight', torch._C._nn.mkldnn_reorder_conv2d_weight( + dense_module.weight.to_mkldnn(dtype), + self.padding, + self.stride, + self.dilation, + self.groups)) + + @torch.jit.script_method + def __setstate__(self, state): + self.weight = torch._C._nn.mkldnn_reorder_conv2d_weight( + state[0].to_mkldnn(), + self.padding, + self.stride, + self.dilation, + self.groups) + self.bias = state[1].to_mkldnn() + self.training = state[2] + +class MkldnnConv3d(_MkldnnConvNd): + def __init__(self, dense_module, dtype): + super().__init__(dense_module) + + self.register_buffer('weight', torch._C._nn.mkldnn_reorder_conv3d_weight( + dense_module.weight.to_mkldnn(dtype), + self.padding, + self.stride, + self.dilation, + self.groups)) + + @torch.jit.script_method + def __setstate__(self, state): + self.weight = torch._C._nn.mkldnn_reorder_conv3d_weight( + state[0].to_mkldnn(), + self.padding, + self.stride, + self.dilation, + self.groups) + self.bias = state[1].to_mkldnn() + self.training = state[2] + + +class MkldnnBatchNorm(torch.jit.ScriptModule): + __constants__ = ['exponential_average_factor', 'eps'] + + def __init__(self, dense_module): + super().__init__() + + assert not dense_module.training + 
assert dense_module.track_running_stats + assert dense_module.affine + + if dense_module.momentum is None: + self.exponential_average_factor = 0.0 + else: + self.exponential_average_factor = dense_module.momentum + self.eps = dense_module.eps + + self.register_buffer('weight', dense_module.weight.to_mkldnn()) + self.register_buffer('bias', dense_module.bias.to_mkldnn()) + self.register_buffer('running_mean', dense_module.running_mean.to_mkldnn()) + self.register_buffer('running_var', dense_module.running_var.to_mkldnn()) + + @torch.jit.script_method + def __getstate__(self): + weight = self.weight.to_dense() + bias = self.bias.to_dense() + running_mean = self.running_mean.to_dense() + running_var = self.running_var.to_dense() + return (weight, bias, running_mean, running_var, self.training) + + @torch.jit.script_method + def __setstate__(self, state): + self.weight = state[0].to_mkldnn() + self.bias = state[1].to_mkldnn() + self.running_mean = state[2].to_mkldnn() + self.running_var = state[3].to_mkldnn() + self.training = state[4] + + @torch.jit.script_method + def forward(self, x): + return torch.batch_norm( + x, + self.weight, + self.bias, + self.running_mean, + self.running_var, + False, # training + self.exponential_average_factor, + self.eps, + False, # cuda_enabled + ) + +class MkldnnPrelu(torch.jit.ScriptModule): + def __init__(self, dense_module, dtype): + super().__init__() + self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype)) + + @torch.jit.script_method + def __getstate__(self): + return (self.weight.to_dense(), self.training) + + @torch.jit.script_method + def __setstate__(self, state): + self.weight = state[0].to_mkldnn() + self.training = state[1] + + @torch.jit.script_method + def forward(self, x): + x_mkldnn = x if x.is_mkldnn else x.to_mkldnn() + y_mkldnn = torch.prelu(x_mkldnn, self.weight) + y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense() + return y + +def to_mkldnn(module, dtype=torch.float): + assert dtype in [torch.float, torch.bfloat16, torch.half], \ + "MKLDNN only support float, bfloat16, and half path now" + + def m_fn(m, d): + if isinstance(m, torch.nn.Linear): + return MkldnnLinear(m, d) + elif isinstance(m, torch.nn.Conv1d): + return MkldnnConv1d(m, d) + elif isinstance(m, torch.nn.Conv2d): + return MkldnnConv2d(m, d) + elif isinstance(m, torch.nn.Conv3d): + return MkldnnConv3d(m, d) + elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)): + # For batchnorm bf16 path, OneDNN requires weight and bias need fp32 dtype. + # so it doesn't need dtype argument. 
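# (Unlike the conv/linear branches above, MkldnnBatchNorm is therefore constructed
#  without the dtype argument `d`.)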
+ return MkldnnBatchNorm(m) + elif isinstance(m, torch.nn.PReLU): + return MkldnnPrelu(m, d) + else: + return m + + def m_fn_rec(m, d): + new_m = m_fn(m, d) + for name, sub_m in m.named_children(): + setattr(new_m, name, m_fn_rec(sub_m, d)) + return new_m + + return m_fn_rec(module, dtype) diff --git a/venv/lib/python3.10/site-packages/torch/utils/mobile_optimizer.py b/venv/lib/python3.10/site-packages/torch/utils/mobile_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..e3a801850ef7c225993831c222b846cea3def9ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/mobile_optimizer.py @@ -0,0 +1,135 @@ +"""This module contains utility method for mobile model optimization and lint.""" + +import torch +from enum import Enum +from torch._C import _MobileOptimizerType as MobileOptimizerType +from typing import Optional, Set, List, AnyStr + +class LintCode(Enum): + BUNDLED_INPUT = 1 + REQUIRES_GRAD = 2 + DROPOUT = 3 + BATCHNORM = 4 + +def optimize_for_mobile( + script_module: torch.jit.ScriptModule, + optimization_blocklist: Optional[Set[MobileOptimizerType]] = None, + preserved_methods: Optional[List[AnyStr]] = None, + backend: str = 'CPU') -> torch.jit.RecursiveScriptModule: + """ + Optimize a torch script module for mobile deployment. + + Args: + script_module: An instance of torch script module with type of ScriptModule. + optimization_blocklist: A set with type of MobileOptimizerType. When set is not passed, + optimization method will run all the optimizer pass; otherwise, optimizer + method will run the optimization pass that is not included inside optimization_blocklist. + preserved_methods: A list of methods that needed to be preserved when freeze_module pass is invoked + backend: Device type to use for running the result model ('CPU'(default), 'Vulkan' or 'Metal'). + Returns: + A new optimized torch script module + """ + if not isinstance(script_module, torch.jit.ScriptModule): + raise TypeError( + f'Got {type(script_module)}, but ScriptModule is expected.') + + if optimization_blocklist is None: + optimization_blocklist = set() + + if preserved_methods is None: + preserved_methods = [] + + # Convert potential byte arrays into strings (if there is any) to pass type checking + # Here we use a new name as assigning it back to preserved_methods will invoke + # mypy errors (i.e. 
List[AnyStr] = List[str]) + preserved_methods_str: List[str] = [str(method) for method in preserved_methods] + + bundled_inputs_attributes = _get_bundled_inputs_preserved_attributes(script_module, preserved_methods_str) + if all(hasattr(script_module, method) for method in bundled_inputs_attributes): + preserved_methods_str = list(set(preserved_methods_str + bundled_inputs_attributes)) + + non_exist_methods = [] + for method in preserved_methods_str: + if not hasattr(script_module, method): + non_exist_methods.append(method) + if non_exist_methods: + raise AttributeError( + f"The following methods to preserve do not exist in script_module: {', '.join(non_exist_methods)}") + + backend = backend.lower() + if backend == 'cpu': + optimized_cpp_module = torch._C._jit_pass_optimize_for_mobile( + script_module._c, + optimization_blocklist, + preserved_methods_str) + elif backend == 'vulkan': + optimized_cpp_module = torch._C._jit_pass_vulkan_optimize_for_mobile( + script_module._c, + optimization_blocklist, + preserved_methods_str) + elif backend == 'metal': + optimized_cpp_module = torch._C._jit_pass_metal_optimize_for_mobile(script_module._c, preserved_methods_str) + else: + raise TypeError("Unknown backend, must be one of 'CPU', 'Vulkan' or 'Metal'") + + return torch.jit._recursive.wrap_cpp_module(optimized_cpp_module) + + +def generate_mobile_module_lints(script_module: torch.jit.ScriptModule): + """ + Generate a list of lints for a given torch script module. + + Args: + script_module: An instance of torch script module with type of ScriptModule. + + Returns: + lint_map: A list of dictionary that contains modules lints + """ + if not isinstance(script_module, torch.jit.ScriptModule): + raise TypeError( + f'Got {type(script_module)}, but ScriptModule is expected.') + + lint_list = [] + + if not hasattr(script_module, "_generate_bundled_inputs_for_forward"): + lint_list.append({"name": LintCode.BUNDLED_INPUT.name, "message": "No bundled input for forward, please add bundled inputs " + "before saving the module using torch.utils.bundled_inputs.augment_model_with_bundled_inputs."}) + + for name, param in script_module.named_parameters(): + if param.requires_grad: + lint_list.append({"name": LintCode.REQUIRES_GRAD.name, "message": f"Param {name} requires grad, " + "please set torch.no_grad() to reduce memory usage and improve computation speed during " + "inference phase."}) + + op_names = torch.jit.export_opnames(script_module) + for op_name in op_names: + if "dropout" in op_name: + lint_list.append({"name": LintCode.DROPOUT.name, "message": "Operator {} exists, remember to call eval() before " + "saving the module.and call torch.utils.mobile_optimizer.optimize_for_mobile to drop dropout " + "operator.".format(op_name)}) + if "batch_norm" in op_name: + lint_list.append({"name": LintCode.BATCHNORM.name, "message": "Operator {} exists, remember to call eval() before " + "saving the module and call torch.utils.mobile_optimizer.optimize_for_mobile to drop batch_norm " + "operator.".format(op_name)}) + + return lint_list + +def _get_bundled_inputs_preserved_attributes(script_module: torch.jit.ScriptModule, preserved_methods: List[str]) -> List[str]: + + bundled_inputs_attributes = [] + # Has bundled inputs for forward + if hasattr(script_module, 'get_all_bundled_inputs'): + bundled_inputs_attributes.append('get_all_bundled_inputs') + bundled_inputs_attributes.append('get_num_bundled_inputs') + + # Bundled inputs in module after the change that introduced bundled inputs for multiple functions + if 
hasattr(script_module, 'get_bundled_inputs_functions_and_info'): + bundled_inputs_attributes.append('get_bundled_inputs_functions_and_info') + all_info = script_module.get_bundled_inputs_functions_and_info() + for function_name in all_info: + if function_name not in preserved_methods: + bundled_inputs_attributes.append(function_name) + bundled_inputs_attributes.append("get_all_bundled_inputs_for_" + function_name) + bundled_inputs_attributes.append("_bundled_inputs_deflated_" + function_name) + + return bundled_inputs_attributes diff --git a/venv/lib/python3.10/site-packages/torch/utils/model_zoo.py b/venv/lib/python3.10/site-packages/torch/utils/model_zoo.py new file mode 100644 index 0000000000000000000000000000000000000000..e0c6004e23ea806a2c83e12cd2998e0279e0b16f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/model_zoo.py @@ -0,0 +1,2 @@ +# torchvision imports tqdm from here. +from torch.hub import tqdm, load_state_dict_from_url as load_url # noqa: F401 diff --git a/venv/lib/python3.10/site-packages/torch/utils/show_pickle.py b/venv/lib/python3.10/site-packages/torch/utils/show_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..e83bed48e66699cba9aa915417919ee5e568ddbf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/show_pickle.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python3 +import sys +import pickle +import struct +import pprint +import zipfile +import fnmatch +from typing import Any, IO, BinaryIO, Union + +__all__ = ["FakeObject", "FakeClass", "DumpUnpickler", "main"] + +class FakeObject: + def __init__(self, module, name, args): + self.module = module + self.name = name + self.args = args + # NOTE: We don't distinguish between state never set and state set to None. + self.state = None + + def __repr__(self): + state_str = "" if self.state is None else f"(state={self.state!r})" + return f"{self.module}.{self.name}{self.args!r}{state_str}" + + def __setstate__(self, state): + self.state = state + + @staticmethod + def pp_format(printer, obj, stream, indent, allowance, context, level): + if not obj.args and obj.state is None: + stream.write(repr(obj)) + return + if obj.state is None: + stream.write(f"{obj.module}.{obj.name}") + printer._format(obj.args, stream, indent + 1, allowance + 1, context, level) + return + if not obj.args: + stream.write(f"{obj.module}.{obj.name}()(state=\n") + indent += printer._indent_per_level + stream.write(" " * indent) + printer._format(obj.state, stream, indent, allowance + 1, context, level + 1) + stream.write(")") + return + raise Exception("Need to implement") + + +class FakeClass: + def __init__(self, module, name): + self.module = module + self.name = name + self.__new__ = self.fake_new # type: ignore[assignment] + + def __repr__(self): + return f"{self.module}.{self.name}" + + def __call__(self, *args): + return FakeObject(self.module, self.name, args) + + def fake_new(self, *args): + return FakeObject(self.module, self.name, args[1:]) + + +class DumpUnpickler(pickle._Unpickler): # type: ignore[name-defined] + def __init__( + self, + file, + *, + catch_invalid_utf8=False, + **kwargs): + super().__init__(file, **kwargs) + self.catch_invalid_utf8 = catch_invalid_utf8 + + def find_class(self, module, name): + return FakeClass(module, name) + + def persistent_load(self, pid): + return FakeObject("pers", "obj", (pid,)) + + dispatch = dict(pickle._Unpickler.dispatch) # type: ignore[attr-defined] + + # Custom objects in TorchScript are able to return invalid UTF-8 strings + # from their pickle 
+ (__getstate__) functions. Install a custom loader + for strings that catches the decode exception and replaces it with + a sentinel object. + def load_binunicode(self): + strlen, = struct.unpack("<I", self.read(4)) # type: ignore[attr-defined] + if strlen > sys.maxsize: + raise Exception("String too long.") + str_bytes = self.read(strlen) # type: ignore[attr-defined] + obj: Any + try: + obj = str(str_bytes, "utf-8", "surrogatepass") + except UnicodeDecodeError as exn: + if not self.catch_invalid_utf8: + raise + obj = FakeObject("builtin", "UnicodeDecodeError", (str(exn),)) + self.append(obj) # type: ignore[attr-defined] + dispatch[pickle.BINUNICODE[0]] = load_binunicode # type: ignore[assignment] + + @classmethod + def dump(cls, in_stream, out_stream): + value = cls(in_stream).load() + pprint.pprint(value, stream=out_stream) + return value + + +def main(argv, output_stream=None): + if len(argv) != 2: + # Don't spam stderr if not using stdout. + if output_stream is not None: + raise Exception("Pass argv of length 2.") + sys.stderr.write("usage: show_pickle PICKLE_FILE\n") + sys.stderr.write(" PICKLE_FILE can be any of:\n") + sys.stderr.write(" path to a pickle file\n") + sys.stderr.write(" file.zip@member.pkl\n") + sys.stderr.write(" file.zip@*/pattern.*\n") + sys.stderr.write(" (shell glob pattern for members)\n") + sys.stderr.write(" (only first match will be shown)\n") + return 2 + + fname = argv[1] + handle: Union[IO[bytes], BinaryIO] + if "@" not in fname: + with open(fname, "rb") as handle: + DumpUnpickler.dump(handle, output_stream) + else: + zfname, mname = fname.split("@", 1) + with zipfile.ZipFile(zfname) as zf: + if "*" not in mname: + with zf.open(mname) as handle: + DumpUnpickler.dump(handle, output_stream) + else: + found = False + for info in zf.infolist(): + if fnmatch.fnmatch(info.filename, mname): + with zf.open(info) as handle: + DumpUnpickler.dump(handle, output_stream) + found = True + break + if not found: + raise Exception(f"Could not find member matching {mname} in {zfname}") + + +if __name__ == "__main__": + # This hack works on every version of Python I've tested.
+ # I've tested on the following versions: + # 3.7.4 + if True: + pprint.PrettyPrinter._dispatch[FakeObject.__repr__] = FakeObject.pp_format # type: ignore[attr-defined] + + sys.exit(main(sys.argv)) diff --git a/venv/lib/python3.10/site-packages/torch/utils/throughput_benchmark.py b/venv/lib/python3.10/site-packages/torch/utils/throughput_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..5607fadee9e9c6d854491a9517fea4256ebe34f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/throughput_benchmark.py @@ -0,0 +1,159 @@ + +import torch._C + + +def format_time(time_us=None, time_ms=None, time_s=None): + """Define time formatting.""" + assert sum([time_us is not None, time_ms is not None, time_s is not None]) == 1 + + US_IN_SECOND = 1e6 + US_IN_MS = 1e3 + + if time_us is None: + if time_ms is not None: + time_us = time_ms * US_IN_MS + elif time_s is not None: + time_us = time_s * US_IN_SECOND + else: + raise AssertionError("Shouldn't reach here :)") + + if time_us >= US_IN_SECOND: + return f'{time_us / US_IN_SECOND:.3f}s' + if time_us >= US_IN_MS: + return f'{time_us / US_IN_MS:.3f}ms' + return f'{time_us:.3f}us' + + +class ExecutionStats: + def __init__(self, c_stats, benchmark_config): + self._c_stats = c_stats + self.benchmark_config = benchmark_config + + @property + def latency_avg_ms(self): + return self._c_stats.latency_avg_ms + + @property + def num_iters(self): + return self._c_stats.num_iters + + @property + def iters_per_second(self): + """Return total number of iterations per second across all calling threads.""" + return self.num_iters / self.total_time_seconds + + @property + def total_time_seconds(self): + return self.num_iters * ( + self.latency_avg_ms / 1000.0) / self.benchmark_config.num_calling_threads + + def __str__(self): + return '\n'.join([ + "Average latency per example: " + format_time(time_ms=self.latency_avg_ms), + f"Total number of iterations: {self.num_iters}", + f"Total number of iterations per second (across all threads): {self.iters_per_second:.2f}", + "Total time: " + format_time(time_s=self.total_time_seconds) + ]) + + +class ThroughputBenchmark: + """ + This class is a wrapper around a c++ component throughput_benchmark::ThroughputBenchmark. + + This wrapper on the throughput_benchmark::ThroughputBenchmark component is responsible + for executing a PyTorch module (nn.Module or ScriptModule) under an inference + server like load. It can emulate multiple calling threads to a single module + provided. In the future we plan to enhance this component to support inter and + intra-op parallelism as well as multiple models running in a single process. + + Please note that even though nn.Module is supported, it might incur an overhead + from the need to hold GIL every time we execute Python code or pass around + inputs as Python objects. As soon as you have a ScriptModule version of your + model for inference deployment it is better to switch to using it in this + benchmark. + + Example:: + + >>> # xdoctest: +SKIP("undefined vars") + >>> from torch.utils import ThroughputBenchmark + >>> bench = ThroughputBenchmark(my_module) + >>> # Pre-populate benchmark's data set with the inputs + >>> for input in inputs: + ... # Both args and kwargs work, same as any PyTorch Module / ScriptModule + ... bench.add_input(input[0], x2=input[1]) + >>> # Inputs supplied above are randomly used during the execution + >>> stats = bench.benchmark( + ... num_calling_threads=4, + ... num_warmup_iters = 100, + ... num_iters = 1000, + ... 
) + >>> print("Avg latency (ms): {}".format(stats.latency_avg_ms)) + >>> print("Number of iterations: {}".format(stats.num_iters)) + """ + + def __init__(self, module): + if isinstance(module, torch.jit.ScriptModule): + self._benchmark = torch._C.ThroughputBenchmark(module._c) + else: + self._benchmark = torch._C.ThroughputBenchmark(module) + + def run_once(self, *args, **kwargs): + """ + Given input id (input_idx) run benchmark once and return prediction. + + This is useful for testing that benchmark actually runs the module you + want it to run. input_idx here is an index into inputs array populated + by calling add_input() method. + """ + return self._benchmark.run_once(*args, **kwargs) + + def add_input(self, *args, **kwargs): + """ + Store a single input to a module into the benchmark memory and keep it there. + + During the benchmark execution every thread is going to pick up a + random input from the all the inputs ever supplied to the benchmark via + this function. + """ + self._benchmark.add_input(*args, **kwargs) + + def benchmark( + self, + num_calling_threads=1, + num_warmup_iters=10, + num_iters=100, + profiler_output_path=""): + """ + Run a benchmark on the module. + + Args: + num_warmup_iters (int): Warmup iters are used to make sure we run a module + a few times before actually measuring things. This way we avoid cold + caches and any other similar problems. This is the number of warmup + iterations for each of the thread in separate + + num_iters (int): Number of iterations the benchmark should run with. + This number is separate from the warmup iterations. Also the number is + shared across all the threads. Once the num_iters iterations across all + the threads is reached, we will stop execution. Though total number of + iterations might be slightly larger. Which is reported as + stats.num_iters where stats is the result of this function + + profiler_output_path (str): Location to save Autograd Profiler trace. + If not empty, Autograd Profiler will be enabled for the main benchmark + execution (but not the warmup phase). The full trace will be saved + into the file path provided by this argument + + + This function returns BenchmarkExecutionStats object which is defined via pybind11. 
+ It currently has two fields: + - num_iters - number of actual iterations the benchmark have made + - avg_latency_ms - average time it took to infer on one input example in milliseconds + """ + config = torch._C.BenchmarkConfig() + config.num_calling_threads = num_calling_threads + config.num_warmup_iters = num_warmup_iters + config.num_iters = num_iters + config.profiler_output_path = profiler_output_path + c_stats = self._benchmark.benchmark(config) + return ExecutionStats(c_stats, config) diff --git a/venv/lib/python3.10/site-packages/torch/utils/weak.py b/venv/lib/python3.10/site-packages/torch/utils/weak.py new file mode 100644 index 0000000000000000000000000000000000000000..a5e33a34d7aac3489480b3383e6c76dbedf47c19 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/weak.py @@ -0,0 +1,321 @@ +from __future__ import annotations + +import weakref +from weakref import ref +from _weakrefset import _IterationGuard # type: ignore[attr-defined] +from collections.abc import MutableMapping, Mapping +from torch import Tensor +import collections.abc as _collections_abc + + +WeakRef = ref + + +__all__ = ['TensorWeakRef', 'WeakIdRef', 'WeakIdKeyDictionary', 'WeakTensorKeyDictionary'] + + +# This file defines a variant of WeakKeyDictionary that overrides the hashing +# behavior of the key to use object identity, rather than the builtin +# __eq__/__hash__ functions. This is useful for Tensor weak keys, as their +# __eq__ implementation return a Tensor (elementwise equality), which means +# you can't use them directly with the WeakKeyDictionary in standard library. +# +# Our implementation strategy is to create a wrapper weak key object, which we +# use as a key in a stock Python dictionary. This is similar to how weakref +# implements WeakKeyDictionary, but instead of using weakref.ref as the +# wrapper, we use a custom wrapper that has different __eq__ and __hash__ +# behavior. Note that we subsequently store this weak key directly in an +# ORDINARY dictionary, since the newly constructed WeakIdKey's only use would +# be a dictionary so it would have no strong references. Ensuring that +# only live WeakIdKeys are in the map is handled by putting finalizers on the +# original key object. + + +# It is simpler to implement this with composition, but if we want to +# directly reuse the callback mechanism on weakref, we need the weakref +# and the key to be exactly the same object. Reusing the callback mechanism +# minimizes the divergence between our implementation and Lib/weakref.py +# +# NB: Prefer using this when working with weakrefs of Tensors; e.g., do +# WeakIdRef(tensor) rather than weakref.ref(tensor); it handles a number of +# easy to get wrong cases transparently for you. +class WeakIdRef(weakref.ref): + __slots__ = ['_id'] + + def __init__(self, key, callback=None): + # Unlike stock weakref, which preserves hash semantics of the + # original object but lazily defers hash calls until the first + # time the user attempts to hash the weakref, we can eagerly + # cache the id of the key as we know this is definitely the hash + # method + self._id = id(key) + super().__init__(key, callback) # type: ignore[call-arg] + + def __call__(self): + r = super().__call__() + # Special logic for Tensor PyObject resurrection + if hasattr(r, '_fix_weakref'): + r._fix_weakref() # type: ignore[union-attr] + return r + + def __hash__(self): + return self._id + + def __eq__(self, other): + # An attractive but wrong alternate implementation is to only test if + # the stored _ids match. 
This can lead to an ABA problem if you have: + # + # a1 = A() + # w1 = WeakIdRef(a1) + # del a1 + # a2 = A() # suppose it gets the same ID as a1 + # w2 = WeakIdRef(a2) + # print(w1 == w2) + # + # This should be False, as a1 and a2 are unrelated (and a1 is + # dead anyway) + a = self() + b = other() + if a is not None and b is not None: + return a is b + return self is other + +# This is the same as WeakIdRef but equality is checked using hash() rather than id. +# This will be equivalent to the one above except for classes where hash is not their id. +class _WeakHashRef(weakref.ref): + __slots__ = ['_id'] + + def __init__(self, key, callback=None): + # Unlike stock weakref, which preserves hash semantics of the + # original object but lazily defers hash calls until the first + # time the user attempts to hash the weakref, we can eagerly + # cache the id of the key as we know this is definitely the hash + # method + self._id = hash(key) + super().__init__(key, callback) # type: ignore[call-arg] + + def __call__(self): + r = super().__call__() + # Special logic for Tensor PyObject resurrection + if hasattr(r, '_fix_weakref'): + r._fix_weakref() # type: ignore[union-attr] + return r + + def __hash__(self): + return self._id + + def __eq__(self, other): + # Use hash equality to determine ref equality. + # ScriptObject implements __hash__ to return the wrapped IValue's id, so + # this is equivalent to doing an identity comparison. + a = self() + b = other() + if a is not None and b is not None: + return hash(a) == hash(b) + return self is other + +# This is directly adapted from cpython/Lib/weakref.py +class WeakIdKeyDictionary(MutableMapping): + def __init__(self, dict=None, ref_type=WeakIdRef): # CHANGED + self.data = {} + + self.ref_type = ref_type # CHANGED + + def remove(k, selfref=ref(self)): + self = selfref() + if self is not None: + if self._iterating: + self._pending_removals.append(k) + else: + try: + del self.data[k] + except KeyError: + pass + self._remove = remove + # A list of dead weakrefs (keys to be removed) + self._pending_removals = [] + self._iterating = set() + self._dirty_len = False + if dict is not None: + self.update(dict) + + def _commit_removals(self): + # NOTE: We don't need to call this method before mutating the dict, + # because a dead weakref never compares equal to a live weakref, + # even if they happened to refer to equal objects. + # However, it means keys may already have been removed. + pop = self._pending_removals.pop + d = self.data + while True: + try: + key = pop() + except IndexError: + return + + try: + del d[key] + except KeyError: + pass + + def _scrub_removals(self): + d = self.data + self._pending_removals = [k for k in self._pending_removals if k in d] + self._dirty_len = False + + def __delitem__(self, key): + self._dirty_len = True + del self.data[self.ref_type(key)] # CHANGED + + def __getitem__(self, key): + return self.data[self.ref_type(key)] # CHANGED + + def __len__(self): + if self._dirty_len and self._pending_removals: + # self._pending_removals may still contain keys which were + # explicitly removed, we have to scrub them (see issue #21173). 
+ self._scrub_removals() + return len(self.data) - len(self._pending_removals) + + def __repr__(self): + return f"<{self.__class__.__name__} at {id(self):#x}>" + + def __setitem__(self, key, value): + self.data[self.ref_type(key, self._remove)] = value # CHANGED + + def copy(self): + new = WeakIdKeyDictionary() + with _IterationGuard(self): + for key, value in self.data.items(): + o = key() + if o is not None: + new[o] = value + return new + + __copy__ = copy + + def __deepcopy__(self, memo): + from copy import deepcopy + new = self.__class__() + with _IterationGuard(self): + for key, value in self.data.items(): + o = key() + if o is not None: + new[o] = deepcopy(value, memo) + return new + + def get(self, key, default=None): + return self.data.get(self.ref_type(key), default) # CHANGED + + def __contains__(self, key): + try: + wr = self.ref_type(key) # CHANGED + except TypeError: + return False + return wr in self.data + + def items(self): + with _IterationGuard(self): + for wr, value in self.data.items(): + key = wr() + if key is not None: + yield key, value + + def keys(self): + with _IterationGuard(self): + for wr in self.data: + obj = wr() + if obj is not None: + yield obj + + __iter__ = keys + + def values(self): + with _IterationGuard(self): + for wr, value in self.data.items(): + if wr() is not None: + yield value + + def keyrefs(self): + """Return a list of weak references to the keys. + + The references are not guaranteed to be 'live' at the time + they are used, so the result of calling the references needs + to be checked before being used. This can be used to avoid + creating references that will cause the garbage collector to + keep the keys around longer than needed. + + """ + return list(self.data) + + def popitem(self): + self._dirty_len = True + while True: + key, value = self.data.popitem() + o = key() + if o is not None: + return o, value + + def pop(self, key, *args): + self._dirty_len = True + return self.data.pop(self.ref_type(key), *args) # CHANGED + + def setdefault(self, key, default=None): + return self.data.setdefault(self.ref_type(key, self._remove), default) # CHANGED + + def update(self, dict=None, **kwargs): + d = self.data + if dict is not None: + if not hasattr(dict, "items"): + dict = type({})(dict) + for key, value in dict.items(): + d[self.ref_type(key, self._remove)] = value # CHANGED + if len(kwargs): + self.update(kwargs) + + def __ior__(self, other): + self.update(other) + return self + + def __or__(self, other): + if isinstance(other, _collections_abc.Mapping): + c = self.copy() + c.update(other) + return c + return NotImplemented + + def __ror__(self, other): + if isinstance(other, _collections_abc.Mapping): + c = self.__class__() + c.update(other) + c.update(self) + return c + return NotImplemented + + # Default Mapping equality will tests keys for equality, but + # we want to test ids for equality + def __eq__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + return {id(k): v for k, v in self.items()} == {id(k): v for k, v in other.items()} + +# Convenience alias +WeakTensorKeyDictionary = WeakIdKeyDictionary + + +class TensorWeakRef: + """Wrapper around a weak ref of a Tensor that handles the _fix_weakref() call required when unwrapping a Tensor weakref.""" + + ref: WeakRef[Tensor] + + def __init__(self, tensor: Tensor): + assert isinstance(tensor, Tensor) + self.ref = weakref.ref(tensor) + + def __call__(self): + out = self.ref() + if out is None: + return out + assert isinstance(out, Tensor) + # TODO, add 
_fix_weakref type binding + out._fix_weakref() # type: ignore[attr-defined] + return out
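
A minimal sketch of driving the show_pickle helper added above programmatically. The archive name "model.pt" and member path "model/data.pkl" are hypothetical; a TorchScript archive is a zip file, and the actual pickle member path depends on how the archive was written. The same output is available from the command line via `python -m torch.utils.show_pickle model.pt@*/data.pkl`.

import sys
import zipfile

from torch.utils.show_pickle import DumpUnpickler

with zipfile.ZipFile("model.pt") as zf:          # hypothetical archive name
    with zf.open("model/data.pkl") as handle:    # hypothetical member path
        # Renders the pickle with FakeObject placeholders instead of importing
        # the real classes. DumpUnpickler(handle, catch_invalid_utf8=True).load()
        # would additionally substitute a sentinel for strings that fail to
        # decode as UTF-8.
        DumpUnpickler.dump(handle, sys.stdout)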
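
Expanding on the ThroughputBenchmark docstring above, a self-contained sketch; the Linear module and input shapes are arbitrary illustrations. Scripting the module first avoids the per-call GIL overhead the docstring notes for plain nn.Module.

import torch
from torch.utils.throughput_benchmark import ThroughputBenchmark

model = torch.jit.script(torch.nn.Linear(16, 4))  # ScriptModule path
bench = ThroughputBenchmark(model)
for _ in range(10):
    bench.add_input(torch.randn(8, 16))           # inputs are sampled randomly during the run
stats = bench.benchmark(
    num_calling_threads=2,
    num_warmup_iters=10,
    num_iters=100,
)
print(stats)  # ExecutionStats summary: average latency, iteration counts, total time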
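
To illustrate the identity-keyed behaviour described in the weak.py comments above, a short sketch; the tensors are arbitrary and the final assertion assumes CPython's prompt refcount-based collection.

import torch
from torch.utils.weak import WeakTensorKeyDictionary

cache = WeakTensorKeyDictionary()
t = torch.randn(3)
cache[t] = "metadata"                 # keyed by object identity, not elementwise equality
assert t in cache
assert torch.randn(3) not in cache    # an equal-shaped tensor is a different key
del t                                 # with no strong refs left, the finalizer prunes the entry
assert len(cache) == 0                # relies on CPython freeing the tensor promptly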
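
A corresponding sketch for TensorWeakRef, which hides the _fix_weakref() call needed when dereferencing a weakly referenced Tensor; again this assumes CPython reference counting frees the tensor as soon as the last strong reference goes away.

import torch
from torch.utils.weak import TensorWeakRef

t = torch.ones(2)
wr = TensorWeakRef(t)
assert wr() is t        # dereferencing returns the live tensor
del t
assert wr() is None     # cleared once the tensor has been freed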