diff --git a/ckpts/universal/global_step120/zero/10.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/10.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..a6b7f7c845eb75686584f30380802c103fa9c813 --- /dev/null +++ b/ckpts/universal/global_step120/zero/10.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82b2cc573b61810c1df8dc2b03222bd65d297aac9ca5803f3d38f647eff438ca +size 16778411 diff --git a/ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..27ef0d37de9fb07a9a32b58c3aae293d5475b9e3 --- /dev/null +++ b/ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad7b30513b015be4692666eeb92c161196815ac041e1ebd79fe5dc0f1012f17f +size 33555612 diff --git a/ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..f06533799b79ba7fb085abaf1b74fe1e3f3f6f67 --- /dev/null +++ b/ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44e31cb86eaee1ac6d969f86e5db612d9556fb4804e3d992f0d0a9e698be0117 +size 33555627 diff --git a/ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..671ff9542eb5cd932ff8f0f25a3966a1f47ee003 --- /dev/null +++ b/ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ee5ad1b8c398cdd65429a14a609a6d9264b1c8099e07871ac553e74d86bc695 +size 33555533 diff --git a/ckpts/universal/global_step120/zero/9.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/9.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..4614ed77ab68563f6248385861e48932baec1c7c --- /dev/null +++ b/ckpts/universal/global_step120/zero/9.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c42cd830428106a8dec190cb226a0e64959a8fb710a57f636f718bed1bcc16d9 +size 16778396 diff --git a/venv/lib/python3.10/site-packages/torch/lib/libcaffe2_nvrtc.so b/venv/lib/python3.10/site-packages/torch/lib/libcaffe2_nvrtc.so new file mode 100644 index 0000000000000000000000000000000000000000..6327e64186148838900fe219ac2ca2cd69d0e3da Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/lib/libcaffe2_nvrtc.so differ diff --git a/venv/lib/python3.10/site-packages/torch/lib/libtorch_global_deps.so b/venv/lib/python3.10/site-packages/torch/lib/libtorch_global_deps.so new file mode 100644 index 0000000000000000000000000000000000000000..83dc8db4fda0a3eaf07e69b8f762989b2f92a3cb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/lib/libtorch_global_deps.so differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/__init__.py new file mode 
100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a02cd2947b293558104d3201b846358df0397d6f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autocast_test_lists.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autocast_test_lists.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c56f2fe2ab4e21ff94002336a307ab67b801766e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autocast_test_lists.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autograd_function_db.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autograd_function_db.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e1770e41633115bc9286ae0ca61da32afd90744 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autograd_function_db.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6c94eb1702397334868779e0f2f9baad2d73919 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_cuda.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_cuda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d17d375898b3bd0bad4db3d184ce11509b00573 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_cuda.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_device_type.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_device_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f031f8fd3e8db86650de0aaee4184d0a859182b3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_device_type.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dist_composable.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dist_composable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec9d77d4c2135a3ac131152e123613be42c89443 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dist_composable.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_distributed.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_distributed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43671286d9db918ae642716e2f5a62c65993e837 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_distributed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..955c1ae8a7035a35f5461fb338542514fcf998b1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_fsdp.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_fsdp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6008605da4125014e711bae3799a115f65d3fb53 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_fsdp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_jit.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_jit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..667afcd501940ea992186b87c91215e1f9480baa Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_jit.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_methods_invocations.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_methods_invocations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4840c444fae2f649375591bdf758ac7ad3d68bb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_methods_invocations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_mkldnn.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_mkldnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70243090cc17568981844be32b368b175d4a094e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_mkldnn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12433f84d1808ff92f8d9686146505320167d173 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_nn.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_nn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5066a29485dfca924e2b495eed3f84677969c2ac Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_nn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_optimizers.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_optimizers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad77b743e6a6737b08bd6edb812b702d82282c8a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_optimizers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_pruning.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_pruning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c268e43ca5bc25dcf9f99d7fea9e811428fbc44f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_pruning.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f1ee3f3430c48de45fb62921727562cc5ef158a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantized.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantized.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d701599dc25c4441ce1041b01872e4c86c5efed Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantized.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_subclass.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_subclass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70e07040a30cf3edb97ee27a0428b5072696e257 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_subclass.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..757c4cf5464f785c49307d6aa67718fc74b72aed Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/composite_compliance.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/composite_compliance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26d46024b7e1544589cb5bf39b63e8ca6aa49f75 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/composite_compliance.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/control_flow_opinfo_db.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/control_flow_opinfo_db.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a970d4ace076cca55575a8a11be20805335b8910 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/control_flow_opinfo_db.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..130f62dfda3dc591f90ba46af6be683d6768c5eb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dist_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dist_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a842ce0a0e241f43779ebf5e06c8169d228aa0f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dist_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dynamo_test_failures.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dynamo_test_failures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..389153e072b3daa2e9478db873ca4bfac34d016f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dynamo_test_failures.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hypothesis_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hypothesis_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90bdefafa266559e1c8f4a5cf383f1399b511f32 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hypothesis_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/inductor_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/inductor_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dea8e578b6b5ef88971cd7fa51772915385622d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/inductor_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_metaprogramming_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_metaprogramming_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f6c4c8ed071c937876ecc3ddb69e6c891fc65a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_metaprogramming_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6fba4b31f60bf750631af175748aa6c0650e938 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a17c4c10f3e1c564d0317dec2837a5c60bda5d1f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6ad5b51cc3922bb770d34e4d4f16e80c4830173 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..532d6404c867fe0c832683f3031fc74a24efb202 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/static_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/static_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..955b36286190532ac8be76293bf52b5ba222934c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/static_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/triton_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/triton_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdd9c0b00d130a4b982e50d4f4a26df727957c1e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/triton_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/two_tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/two_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46fcc8d00d1978d18c0d14d66e55f7b0cce00b57 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/two_tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py new file mode 100644 index 0000000000000000000000000000000000000000..4e2a654562444085b0bfb4e380e4a6292ff80d8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py @@ -0,0 +1,369 @@ +# mypy: ignore-errors + +import torch +from torch.testing._internal.common_utils import TEST_WITH_ROCM + + +class AutocastTestLists: + def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype): + input = (torch.randn((n, 
n), device=dev, dtype=torch.float32),) + + hx = ((torch.randn((n, n), device=dev, dtype=torch.float32), + torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else + torch.randn((n, n), device=dev, dtype=torch.float32),) + + weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih + torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh + torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih + torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh + + # returns args as a tuple + return input + hx + weights + + # Supplies ops and arguments for test_autocast_* in test/test_cuda.py + def __init__(self, dev): + super().__init__() + n = 8 + # Utility arguments, created as one-element tuples + pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + pointwise2_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + mat0_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) + mat1_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) + mat2_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) + + dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n)) + conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev), + torch.randn(dimset, dtype=torch.float32, device=dev)) + for dimset in dimsets] + bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),) + element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),) + pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + + # The lists below organize ops that autocast needs to test. + # self.list_name corresponds to test_autocast_list_name in test/test_cuda.py. + # Each op is associated with a tuple of valid arguments. + # In addition, cudnn conv ops are not supported on ROCm and hence will + # be skipped by passing TEST_WITH_ROCM flag to those ops in self.torch_fp16 list. + + # Some ops implement built-in type promotion. These don't need autocasting, + # but autocasting relies on their promotion, so we include tests to double-check. 
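# A minimal sketch of the promotion behaviour the (op, args, expected_output_dtype)
# triples below encode, assuming a CUDA device is available: mixed fp16/fp32 inputs
# come out at the promoted dtype even inside an autocast region, and comparisons
# stay boolean.
import torch

if torch.cuda.is_available():
    a = torch.randn(8, dtype=torch.float32, device="cuda")
    b = torch.randn(8, dtype=torch.float16, device="cuda")
    with torch.autocast(device_type="cuda"):
        assert torch.add(a, b).dtype == torch.float32  # arithmetic promotes to the wider dtype
        assert torch.eq(a, b).dtype == torch.bool      # comparison ops always yield bool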
+ self.torch_expect_builtin_promote = [ + ("eq", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ge", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("gt", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("le", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("lt", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ne", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("add", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("div", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("mul", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("cat", (pointwise0_fp16 + pointwise1_fp32,), torch.float32), + ("equal", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("stack", (pointwise0_fp16 + pointwise1_fp32,), torch.float32), + ] + self.methods_expect_builtin_promote = [ + ("__eq__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ge__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__gt__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__le__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__lt__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ne__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__add__", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__div__", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__mul__", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ] + + # The remaining lists organize ops that autocast treats explicitly. + self.torch_fp16 = [ + # deprecated _convolution + ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, + (0, 0), 1, False, True, True)), + # the current _convolution + ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, + (0, 0), 1, False, True, True, True)), + ("conv1d", conv_args_fp32[0]), + ("conv2d", conv_args_fp32[1]), + ("conv3d", conv_args_fp32[2]), + ("conv_tbc", conv_args_fp32[0] + bias_fp32), + ("conv_transpose1d", conv_args_fp32[0]), + ("conv_transpose2d", conv_args_fp32[1]), + ("conv_transpose3d", conv_args_fp32[2]), + ("convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1)), + ("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM), + ("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1), + (1, 1), 1, False, True, True), TEST_WITH_ROCM), + ("prelu", pointwise0_fp32 + element0_fp32), + ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32), + ("addmv", pointwise0_fp32 + mat2_fp32 + pointwise1_fp32), + ("addr", mat0_fp32 + pointwise0_fp32 + pointwise1_fp32), + ("matmul", mat0_fp32 + mat1_fp32), + ("einsum", "bkhd,bqhd->bqkh", mat0_fp32 + mat1_fp32), + ("mm", mat0_fp32 + mat1_fp32), + ("mv", mat0_fp32 + pointwise0_fp32), + ("chain_matmul", mat0_fp32 + mat1_fp32 + mat2_fp32), + ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + # _thnn_fused_lstm_cell and _thnn_fused_gru_cell are not Python-exposed as far as I can tell. 
+ # ("_thnn_fused_lstm_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32), + # ("_thnn_fused_gru_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32), + ("lstm_cell", self._rnn_cell_args(n, num_chunks=4, is_lstm=True, dev=dev, dtype=torch.float32)), + ("gru_cell", self._rnn_cell_args(n, num_chunks=3, is_lstm=False, dev=dev, dtype=torch.float32)), + ("rnn_tanh_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)), + ("rnn_relu_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)), + ] + self.torch_fp32 = [ + ("acos", (pointwise0_fp16[0].clamp(-.9, 0.9),)), + ("asin", (pointwise0_fp16[0].clamp(-.9, 0.9),)), + ("cosh", pointwise0_fp16), + ("erfinv", (pointwise0_fp16[0].clamp(-.9, .9),)), + ("exp", pointwise0_fp16), + ("expm1", pointwise0_fp16), + ("log", (pointwise0_fp16[0].clamp(0.1, 100.0),)), + ("log10", (pointwise0_fp16[0].clamp(0.1, 100.0),)), + ("log2", (pointwise0_fp16[0].clamp(0.1, 100.0),)), + ("log1p", (pointwise0_fp16[0].clamp(-0.9, 100.0),)), + ("reciprocal", pointwise0_fp16), + ("rsqrt", (pointwise0_fp16[0].clamp(0.0, 100.0),)), + ("sinh", pointwise0_fp16), + ("tan", (pointwise0_fp16[0].clamp(-3.1 / 2, 3.1 / 2),)), + ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + pointwise1_fp16), + ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + (1.7,)), + # ("pow", (1.7,) + pointwise0_fp16), # This variant has a backend, but is not documented in the API. + ("softmax", pointwise0_fp16 + (0,)), + ("log_softmax", pointwise0_fp16 + (0,)), + ("layer_norm", pointwise0_fp16 + ((pointwise0_fp16[0].numel(),),)), + ("group_norm", mat0_fp16 + (1,)), + ("norm", pointwise0_fp16), + ("norm", pointwise0_fp16, {"dim": 0}), + # these need magma + # ("norm", mat0_fp16, {"p": "nuc"}), + # ("norm", mat0_fp16, {"p": "nuc", "dim": 0}), + ("norm", pointwise0_fp16, {"p": 1}), + ("norm", pointwise0_fp16, {"p": 1, "dim": 0}), + ("cosine_similarity", mat0_fp16 + mat1_fp16), + ("poisson_nll_loss", mat0_fp16 + mat1_fp16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))), + ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.float16), + torch.tensor([[1, 3, 4]], device=dev, dtype=torch.float16), + torch.tensor([1], device=dev, dtype=torch.int))), + ("hinge_embedding_loss", mat0_fp16 + (torch.ones(n, device=dev, dtype=torch.int),)), + ("kl_div", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)), + ("margin_ranking_loss", mat0_fp16 + mat1_fp16 + (torch.ones((n,), device=dev, dtype=torch.float16),)), + ("triplet_margin_loss", mat0_fp16 + mat1_fp16 + mat2_fp16), + ("binary_cross_entropy_with_logits", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)), + ("cumprod", pointwise0_fp16 + (0,)), + ("cumsum", pointwise0_fp16 + (0,)), + ("dist", pointwise0_fp16 + pointwise1_fp16), + ("pdist", mat0_fp16), + ("cdist", mat0_fp16 + mat1_fp16), + ("prod", pointwise0_fp16), + ("prod", pointwise0_fp16 + (0,)), + ("renorm", mat0_fp16 + (2, 0, 1.0)), + ("sum", pointwise0_fp16), + ("sum", mat0_fp16 + (1,)), + ("logsumexp", mat0_fp16 + (1,)), + ] + self.torch_need_autocast_promote = [ + ("addcdiv", pointwise0_fp32 + pointwise1_fp16 + (pointwise2_fp16[0].clamp(0.1, 100),)), + ("addcmul", pointwise0_fp32 + pointwise1_fp16 + pointwise2_fp16), + ("atan2", pointwise0_fp32 + (pointwise1_fp16[0].clamp(0.1, 100),)), + ("bilinear", (torch.randn((1, 2), dtype=torch.float16, device=dev), + torch.randn((1, 2), dtype=torch.float32, device=dev), + 
torch.randn((1, 2, 2), dtype=torch.float16, device=dev), + torch.randn((1,), dtype=torch.float32, device=dev))), + ("cross", (torch.randn(3, dtype=torch.float32, device=dev), + torch.randn(3, dtype=torch.float16, device=dev))), + ("dot", pointwise0_fp16 + pointwise1_fp32), + ("grid_sampler", (torch.randn((2, 3, 33, 22), dtype=torch.float16, device=dev), + torch.randn((2, 22, 11, 2), dtype=torch.float32, device=dev), + 0, 0, False)), + ("index_put", pointwise0_fp32 + ((torch.tensor([1], device=dev, dtype=torch.long),), + torch.randn(1, device=dev, dtype=torch.float16))), + ("index_put", pointwise0_fp16 + ((torch.tensor([1], device=dev, dtype=torch.long),), + torch.randn(1, device=dev, dtype=torch.float32))), + ("tensordot", (torch.randn((2, 2, 2), dtype=torch.float32, device=dev), + torch.randn((2, 2, 2), dtype=torch.float16, device=dev))), + ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float32, device=dev), + 0, + torch.randint(0, 2, (2, 2, 2), device=dev), + torch.randn((2, 2, 2), dtype=torch.float16, device=dev))), + ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float16, device=dev), + 0, + torch.randint(0, 2, (2, 2, 2), device=dev), + torch.randn((2, 2, 2), dtype=torch.float32, device=dev))), + ] + self.nn_fp16 = [ + ("linear", mat0_fp32 + mat1_fp32 + mat2_fp32), + ] + self.nn_fp32 = [ + ("softplus", pointwise0_fp16), + ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.float), + torch.zeros((n,), device=dev, dtype=torch.long))), + ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.half), + torch.zeros((n, n, n), device=dev, dtype=torch.long))), + ("l1_loss", mat0_fp16 + mat1_fp16), + ("smooth_l1_loss", mat0_fp16 + mat1_fp16), + ("mse_loss", mat0_fp16 + mat1_fp16), + ("multilabel_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + ("soft_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + ("multi_margin_loss", mat0_fp16 + (torch.ones((n,), device=dev, dtype=torch.long),)), + ] + self.linalg_fp16 = [ + ("linalg_vecdot", mat0_fp32 + mat0_fp32), + ("linalg_multi_dot", (mat0_fp32 + mat1_fp32 + mat2_fp32,)), + ] + self.methods_fp16 = [ + ("__matmul__", mat0_fp32 + mat1_fp32) + ] + self.methods_fp32 = [ + ("__pow__", (torch.rand(n, device=dev, dtype=torch.float16), 1.5)), + ] + self.banned = [ + ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.float32), + torch.rand((n, n), device=dev, dtype=torch.float32)), torch._C._nn), + ] + +class AutocastCPUTestLists: + # Supplies ops and arguments for test_autocast_* in test/test_cpu.py + def __init__(self, dev): + super().__init__() + n = 8 + # Utility arguments, created as one-element tuples + pointwise0_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) + pointwise1_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) + pointwise2_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) + mat0_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) + mat1_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) + mat2_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) + + pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + + dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n)) + + dummy_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),) + for dimset in dummy_dimsets] + + dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n)) + conv_args_bf16 = [(torch.randn(dimset, 
dtype=torch.bfloat16, device=dev), + torch.randn(dimset, dtype=torch.bfloat16, device=dev)) + for dimset in dimsets] + conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev), + torch.randn(dimset, dtype=torch.float32, device=dev)) + for dimset in dimsets] + + bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),) + element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),) + pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + + dummy_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),) + for dimset in dummy_dimsets] + # The lists below organize ops that autocast needs to test. + # self.list_name corresponds to test_autocast_list_name in test/test_cpu.py. + # Each op is associated with a tuple of valid arguments. + + # Some ops implement built-in type promotion. These don't need autocasting, + # but autocasting relies on their promotion, so we include tests to double-check. + self.torch_expect_builtin_promote = [ + ("eq", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ge", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("gt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("le", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("lt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ne", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("add", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("div", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("mul", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ] + + self.methods_expect_builtin_promote = [ + ("__eq__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ge__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__gt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__le__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__lt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ne__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__add__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__div__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__mul__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ] + # The remaining lists organize ops that autocast treats explicitly. 
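# A minimal sketch of what the lower-precision lists below (e.g. self.torch_16)
# exercise on CPU: under torch.autocast(device_type="cpu", dtype=torch.bfloat16),
# matmul-style ops take fp32 inputs and produce bfloat16 outputs, while ops in the
# fp32 lists keep running in float32.
import torch

m = torch.randn(8, 8, dtype=torch.float32)
with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
    assert torch.mm(m, m).dtype == torch.bfloat16  # inputs are cast down and the op runs in bf16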
+ self.torch_16 = [ + ("conv1d", conv_args_fp32[0]), + ("conv2d", conv_args_fp32[1]), + ("conv3d", conv_args_fp32[2]), + ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("mm", mat0_fp32 + mat1_fp32), + ("matmul", mat0_fp32 + mat1_fp32), + ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32), + ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("conv_tbc", (torch.randn((10, 7, 3), device=dev, dtype=torch.float32), + torch.randn((5, 3, 5), device=dev, dtype=torch.float32), + torch.randn(5, device=dev, dtype=torch.float32), + 0)), + ("conv_transpose1d", conv_args_fp32[0]), + ("conv_transpose2d", conv_args_fp32[1]), + ("conv_transpose3d", conv_args_fp32[2]), + ("prelu", pointwise0_fp32 + element0_fp32), + ("_native_multi_head_attention", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + n, 4, torch.randn((3 * n, n), device=dev, dtype=torch.float32), + torch.randn((3 * n), device=dev, dtype=torch.float32), + torch.randn((n, n), device=dev, dtype=torch.float32), + torch.randn((n), device=dev, dtype=torch.float32))), + ] + self.torch_fp32 = [ + ("poisson_nll_loss", mat0_bf16 + mat1_bf16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))), + ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.bfloat16), + torch.tensor([[1, 3, 4]], device=dev, dtype=torch.bfloat16), + torch.tensor([1], device=dev, dtype=torch.int))), + ("hinge_embedding_loss", mat0_bf16 + (torch.ones(n, device=dev, dtype=torch.int),)), + ("margin_ranking_loss", mat0_bf16 + mat1_bf16 + (torch.ones((n,), device=dev, dtype=torch.bfloat16),)), + ("triplet_margin_loss", mat0_bf16 + mat1_bf16 + mat2_bf16), + ("binary_cross_entropy_with_logits", mat0_bf16 + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)), + ] + self.nn_16 = [ + ("linear", mat0_fp32 + mat1_fp32, {}), + ] + self.nn_fp32 = [ + ("avg_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}), + ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),) + + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)), + ("reflection_pad1d", dummy_bf16[2], {"padding": (3, 3)}), + ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.bfloat16), + torch.zeros((n,), device=dev, dtype=torch.long))), + ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.bfloat16), + torch.zeros((n, n, n), device=dev, dtype=torch.long))), + ("l1_loss", mat0_bf16 + mat1_bf16), + ("smooth_l1_loss", mat0_bf16 + mat1_bf16), + ("mse_loss", mat0_bf16 + mat1_bf16), + ("multilabel_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + ("soft_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + ("multi_margin_loss", mat0_bf16 + (torch.ones((n,), device=dev, dtype=torch.long),)), + ("huber_loss", mat0_bf16 + mat1_bf16), + ] + self.torch_need_autocast_promote = [ + ("cat", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)), + ("stack", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)), + ] diff --git 
a/venv/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py new file mode 100644 index 0000000000000000000000000000000000000000..4d1a872685514fd7a8bc4b77deb191ecd660cab0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py @@ -0,0 +1,632 @@ +# mypy: ignore-errors + +import torch +from functools import partial +from torch.testing import make_tensor +from torch.testing._internal.opinfo.core import ( + OpInfo, + SampleInput, +) +from torch.testing._internal.common_dtype import all_types_and +import numpy as np + +# Note: [autograd.Function db] +# +# This is a collection of autograd.Function test cases written as OpInfos +# so they can easily be consumed by OpInfo-based tests to check if a subsystem +# supports autograd.Function. +# +# Axes: +# - saves {output, input, intermediate, non-tensor} +# - {inputs, output} x {single tensor, tensors, arbitrary objects} +# - Uses {mark_dirty, mark_non_differentiable, once_differentiable} + + +def to_numpy(tensor): + return tensor.cpu().numpy() + + +class NumpyCube(torch.autograd.Function): + @staticmethod + def forward(input): + input_np = to_numpy(input) + dinput = torch.tensor(3 * input_np ** 2, device=input.device) + return torch.tensor(input_np ** 3, device=input.device), dinput + + @staticmethod + def setup_context(ctx, inputs, output): + ctx.save_for_backward(inputs[0], output[1]) + ctx.save_for_forward(inputs[0], output[1]) + + @staticmethod + def backward(ctx, grad_output, grad_saved): + input, dinput = ctx.saved_tensors + return NumpyMul.apply(grad_output, dinput) + 6 * NumpyMul.apply(grad_saved, input) + + @staticmethod + def vmap(info, in_dims, input): + result = NumpyCube.apply(input) + return result, (in_dims[0], in_dims[0]) + + @staticmethod + def jvp(ctx, input_tangent): + input, dinput = ctx.saved_tensors + return NumpyMul.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input) + + +class CubeGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x): + return x ** 3, 3 * x ** 2 + + @staticmethod + def setup_context(ctx, inputs, outputs): + ctx.save_for_backward(inputs[0], outputs[1]) + ctx.save_for_forward(inputs[0], outputs[1]) + + @staticmethod + def backward(ctx, grad_output, grad_saved): + input, dinput = ctx.saved_tensors + result = grad_output * dinput + 6 * dinput + return result + + @staticmethod + def jvp(ctx, input_tangent): + input, dinput = ctx.saved_tensors + return MulGenVmap.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input) + + +def sample_inputs_numpy_cube(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(1, low=0.8, high=2), args=()) + + +class NumpyCubeNotComposable(torch.autograd.Function): + @staticmethod + def forward(input): + input_np = to_numpy(input) + return torch.tensor(input_np ** 3, device=input.device), input_np + + @staticmethod + def setup_context(ctx, inputs, output): + _, input_np = output + ctx.input_np = input_np + ctx.device = inputs[0].device + + @staticmethod + @torch.autograd.function.once_differentiable + def backward(ctx, grad_output, grad_saved): + result_np = 3 * (ctx.input_np ** 2) + return torch.tensor(result_np, device=ctx.device) + + +class NumpyMul(torch.autograd.Function): + @staticmethod + def forward(x, y): + return torch.tensor(to_numpy(x) * 
to_numpy(y), device=x.device) + + @staticmethod + def setup_context(ctx, inputs, output): + ctx.save_for_backward(*inputs) + ctx.save_for_forward(*inputs) + + @staticmethod + def backward(ctx, grad_output): + x, y = ctx.saved_tensors + gx = None + if ctx.needs_input_grad[0]: + gx = NumpyMul.apply(grad_output, y) + gy = None + if ctx.needs_input_grad[1]: + gy = NumpyMul.apply(grad_output, x) + return gx, gy + + @staticmethod + def vmap(info, in_dims, x, y): + x_bdim, y_bdim = in_dims + x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1) + y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1) + result = NumpyMul.apply(x, y) + result = result.movedim(-1, 0) + return result, 0 + + @staticmethod + def jvp(ctx, x_tangent, y_tangent): + x, y = ctx.saved_tensors + return x_tangent * y + y_tangent * x + +def sample_inputs_numpy_mul(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # Broadcasting + yield SampleInput(make_arg(4, low=0.9, high=2), args=(make_arg(3, 4, low=0.9, high=2),)) + + +class MulGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, y): + return x * y + + @staticmethod + def setup_context(ctx, inputs, outputs): + ctx.save_for_backward(*inputs) + ctx.save_for_forward(*inputs) + + @staticmethod + def backward(ctx, grad_output): + x, y = ctx.saved_tensors + gx = None + if ctx.needs_input_grad[0]: + gx = MulGenVmap.apply(grad_output, y) + gy = None + if ctx.needs_input_grad[1]: + gy = MulGenVmap.apply(grad_output, x) + return gx, gy + + @staticmethod + def jvp(ctx, x_tangent, y_tangent): + x, y = ctx.saved_tensors + return x_tangent * y + y_tangent * x + + +class NumpyExp_(torch.autograd.Function): + @staticmethod + def forward(x): + x_np = to_numpy(x) + np.exp(x_np, x_np) + return x + + @staticmethod + def setup_context(ctx, inputs, output): + x, = inputs + ctx.mark_dirty(x) + ctx.save_for_backward(output) + ctx.save_for_forward(output) + + @staticmethod + def backward(ctx, grad_output): + output, = ctx.saved_tensors + return NumpyMul.apply(grad_output, output) + + @staticmethod + def vmap(info, in_dims, x): + NumpyExp_.apply(x) + return x, in_dims[0] + + @staticmethod + def jvp(ctx, x_tangent): + # Doesn't call numpy operations because I didn't want to write NumpyMul_ + output, = ctx.saved_tensors + x_tangent.mul_(output) + return x_tangent + +class NumpySort(torch.autograd.Function): + @staticmethod + def forward(x, dim): + device = x.device + x = to_numpy(x) + ind = np.argsort(x, axis=dim) + ind_inv = np.argsort(ind, axis=dim) + result = np.take_along_axis(x, ind, axis=dim) + return ( + torch.tensor(x, device=device), + torch.tensor(ind, device=device), + torch.tensor(ind_inv, device=device), + ) + + @staticmethod + def setup_context(ctx, inputs, output): + x, dim = inputs + _, ind, ind_inv = output + ctx.mark_non_differentiable(ind, ind_inv) + ctx.save_for_backward(ind, ind_inv) + ctx.save_for_forward(ind, ind_inv) + ctx.dim = dim + + @staticmethod + def backward(ctx, grad_output, _0, _1): + ind, ind_inv = ctx.saved_tensors + return NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim), None + + @staticmethod + def vmap(info, in_dims, x, dim): + x_bdim, _ = in_dims + x = x.movedim(x_bdim, 0) + # wrap dim + dim = dim if dim >= 0 else dim + x.dim() - 1 + return NumpySort.apply(x, dim + 1), (0, 0, 0) + + @staticmethod + def jvp(ctx, x_tangent, _): + ind, ind_inv = ctx.saved_tensors + return NumpyTake.apply(x_tangent, 
ind, ind_inv, ctx.dim), None, None + +class SortGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, dim): + device = x.device + ind = torch.argsort(x, dim=dim) + ind_inv = torch.argsort(ind, axis=dim) + result = torch.take_along_dim(x, ind, dim=dim) + return result, ind, ind_inv + + @staticmethod + def setup_context(ctx, inputs, outputs): + x, dim = inputs + _, ind, ind_inv = outputs + ctx.mark_non_differentiable(ind, ind_inv) + ctx.save_for_backward(ind, ind_inv) + ctx.save_for_forward(ind, ind_inv) + ctx.dim = dim + + @staticmethod + def backward(ctx, grad_output, _0, _1): + ind, ind_inv = ctx.saved_tensors + return TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim), None + + @staticmethod + def jvp(ctx, x_tangent, _): + ind, ind_inv = ctx.saved_tensors + return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim), None, None + + +def sample_inputs_numpy_sort(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(3, 5), args=(1,)) + + +def sample_inputs_numpy_take(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + tensor = make_arg(3, 5) + dim = 1 + _, ind, ind_inv = NumpySort.apply(tensor, 1) + yield SampleInput(tensor, args=(ind, ind_inv, dim)) + + +class NumpyTake(torch.autograd.Function): + @staticmethod + def forward(x, ind, ind_inv, dim): + device = x.device + x = to_numpy(x) + ind = to_numpy(ind) + return torch.tensor(np.take_along_axis(x, ind, dim), device=device) + + @staticmethod + def setup_context(ctx, inputs, output): + x, ind, ind_inv, dim = inputs + ctx.save_for_backward(ind, ind_inv) + ctx.save_for_forward(ind, ind_inv) + ctx.dim = dim + + @staticmethod + def backward(ctx, grad_output): + ind, ind_inv = ctx.saved_tensors + result = NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim) + return result, None, None, None + + @staticmethod + def vmap(info, in_dims, x, ind, ind_inv, dim): + x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims + + # wrap dim + logical_dim = x.dim() if x_bdim is None else x_bdim - 1 + dim = dim if dim >= 0 else dim + logical_dim + + def expand_bdim(x, x_bdim): + if x_bdim is None: + return x.expand(info.batch_size, *x.shape) + return x.movedim(x_bdim, 0) + + x = expand_bdim(x, x_bdim) + ind = expand_bdim(ind, ind_bdim) + ind_inv = expand_bdim(ind_inv, ind_inv_bdim) + + return NumpyTake.apply(x, ind, ind_inv, dim + 1), 0 + + @staticmethod + def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _): + assert ind_tangent is None + assert ind_inv_tangent is None + ind, ind_inv = ctx.saved_tensors + return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim) + +class TakeGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, ind, ind_inv, dim): + return torch.take_along_dim(x, ind, dim) + + @staticmethod + def setup_context(ctx, inputs, outputs): + x, ind, ind_inv, dim = inputs + ctx.save_for_backward(ind, ind_inv) + ctx.save_for_forward(ind, ind_inv) + ctx.dim = dim + + @staticmethod + def backward(ctx, grad_output): + ind, ind_inv = ctx.saved_tensors + result = TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim) + return result, None, None, None + + @staticmethod + def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _): + ind, ind_inv = ctx.saved_tensors + return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim) + +class Select(torch.autograd.Function): + @staticmethod 
+ def forward(x, idx): + return x[idx] + + @staticmethod + def setup_context(ctx, inputs, output): + x, idx = inputs + ctx.x_shape = x.shape + ctx.idx = idx + + @staticmethod + def backward(ctx, grad_output): + result = grad_output.new_zeros(ctx.x_shape) + result[ctx.idx] = grad_output + return result, None + + @staticmethod + def vmap(info, in_dims, x, idx): + x_bdim, _ = in_dims + x = x.movedim(x_bdim, 1) + return Select.apply(x, idx), 0 + + @staticmethod + def jvp(ctx, x_tangent, _): + return Select.apply(x_tangent, ctx.idx) + +class SelectGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, idx): + return x[idx] + + @staticmethod + def setup_context(ctx, inputs, outputs): + x, idx = inputs + ctx.x_shape = x.shape + ctx.idx = idx + + @staticmethod + def backward(ctx, grad_output): + result = grad_output.new_zeros(ctx.x_shape) + result[ctx.idx] = grad_output + return result, None + + @staticmethod + def jvp(ctx, x_tangent, _): + return SelectGenVmap.apply(x_tangent, ctx.idx) + + +def sample_inputs_select(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(3, 5), args=(2,)) + +class ScaleGradGenVmap(torch.autograd.Function): + generate_vmap_rule = True + scale = 3.14 + + @staticmethod + def forward(x): + return x.clone() + + @staticmethod + def setup_context(ctx, inputs, outputs): + pass + + @staticmethod + def backward(ctx, grad_output): + return grad_output * ScaleGradGenVmap.scale + + @staticmethod + def jvp(ctx, x_tangent): + return x_tangent * ScaleGradGenVmap.scale + +class ZeroGradientsGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, y): + return x.clone(), y.clone() + + @staticmethod + def setup_context(ctx, inputs, outputs): + pass + + @staticmethod + def backward(ctx, gx, gy): + # Intentionally returning torch.zeros instead of zeros_like or new_zeros. + # Also intentionally not None. + return ( + # Intentionally too-large gradient + torch.zeros(3, 4, *gx.shape, dtype=gx.dtype, device=gx.device), + torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device), + ) + + @staticmethod + def jvp(ctx, gx, gy): + # Intentionally returning torch.zeros instead of zeros_like or new_zeros. + # Also intentionally not None. 
+ return ( + torch.zeros(gx.shape, dtype=gx.dtype, device=gx.device), + torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device), + ) + + +def sample_inputs_forward_default_args(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(3, 5)) + + +class ForwardHasDefaultArgs(torch.autograd.Function): + @staticmethod + def forward(x, idx=(2,)): + return x[idx] + + @staticmethod + def setup_context(ctx, inputs, output): + x, idx = inputs + ctx.x_shape = x.shape + ctx.idx = idx + + @staticmethod + def backward(ctx, grad_output): + result = grad_output.new_zeros(ctx.x_shape) + result[ctx.idx] = grad_output + return result, None + + @staticmethod + def vmap(info, in_dims, x, idx): + x_bdim, _ = in_dims + x = x.movedim(x_bdim, 1) + return ForwardHasDefaultArgs.apply(x, idx), 0 + + @staticmethod + def jvp(ctx, x_tangent, _): + return ForwardHasDefaultArgs.apply(x_tangent, ctx.idx) + + +autograd_function_db = [ + OpInfo( + 'NumpyCubeAutogradFunction', + op=NumpyCube.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyExpMarkDirtyAutogradFunction', + op=lambda x: NumpyExp_.apply(x.clone()), + inplace_variant=NumpyExp_.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyMulAutogradFunction', + op=NumpyMul.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyCubeNotComposableAutogradFunction', + op=lambda x: NumpyCubeNotComposable.apply(x)[0], + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpySortAutogradFunction', + op=NumpySort.apply, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + sample_inputs_func=sample_inputs_numpy_sort, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + gradcheck_wrapper=lambda y, ind: y, + ), + OpInfo( + 'NumpyTakeAutogradFunction', + op=NumpyTake.apply, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + sample_inputs_func=sample_inputs_numpy_take, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'SelectAutogradFunction', + op=Select.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_select, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'CubeGenVmapAutogradFunction', + op=CubeGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'MulGenVmapAutogradFunction', + op=MulGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'SortGenVmapAutogradFunction', + op=SortGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_sort, + 
dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + gradcheck_wrapper=lambda y, ind: y, + ), + OpInfo( + 'SelectGenVmapAutogradFunction', + op=SelectGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_select, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'ScaleGradGenVmapAutogradFunction', + op=ScaleGradGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'ZeroGradientsGenVmapAutogradFunction', + op=ZeroGradientsGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'ForwardHasDefaultArgsAutogradFunction', + op=ForwardHasDefaultArgs.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_forward_default_args, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), +] diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py new file mode 100644 index 0000000000000000000000000000000000000000..661614ffc80937930d4d2709adaab0669b4ebe00 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py @@ -0,0 +1,165 @@ +# mypy: ignore-errors + +import os +import re +import sys +from typing import List + +__all__ = [ + "check_code_for_cuda_kernel_launches", + "check_cuda_kernel_launches", +] + +# FILES TO EXCLUDE (match is done with suffix using `endswith`) +# You wouldn't drive without a seatbelt, though, so why would you +# launch a kernel without some safety? Use this as a quick workaround +# for a problem with the checker, fix the checker, then de-exclude +# the files in question. +exclude_files: List[str] = [] + +# Without using a C++ AST we can't 100% detect kernel launches, so we +# model them as having the pattern "<<<parameters>>>(arguments);" +# We then require that `C10_CUDA_KERNEL_LAUNCH_CHECK` be +# the next statement. +# +# We model the next statement as ending at the next `}` or `;`. +# If we see `}` then a clause ended (bad) if we see a semi-colon then +# we expect the launch check just before it. +# +# Since the kernel launch can include lambda statements, it's important +# to find the correct end-paren of the kernel launch. Doing this with +# pure regex requires recursive regex, which aren't part of the Python +# standard library. To avoid an additional dependency, we build a prefix +# regex that finds the start of a kernel launch, use a paren-matching +# algorithm to find the end of the launch, and then another regex to +# determine if a launch check is present. + +# Finds potential starts of kernel launches +kernel_launch_start = re.compile( + r"^.*<<<[^>]+>>>\s*\(", flags=re.MULTILINE +) + +# This pattern should start at the character after the final paren of the +# kernel launch.
It returns a match if the launch check is not the next statement +has_check = re.compile( + r"\s*;(?![^;}]*C10_CUDA_KERNEL_LAUNCH_CHECK\(\);)", flags=re.MULTILINE +) + +def find_matching_paren(s: str, startpos: int) -> int: + """Given a string "prefix (unknown number of characters) suffix" + and the position of the first `(` returns the index of the character + 1 past the `)`, accounting for paren nesting + """ + opening = 0 + for i, c in enumerate(s[startpos:]): + if c == '(': + opening += 1 + elif c == ')': + opening -= 1 + if opening == 0: + return startpos + i + 1 + + raise IndexError("Closing parens not found!") + + +def should_exclude_file(filename) -> bool: + for exclude_suffix in exclude_files: + if filename.endswith(exclude_suffix): + return True + return False + + +def check_code_for_cuda_kernel_launches(code, filename=None): + """Checks code for CUDA kernel launches without cuda error checks. + + Args: + filename - Filename of file containing the code. Used only for display + purposes, so you can put anything here. + code - The code to check + + Returns: + The number of unsafe kernel launches in the code + """ + if filename is None: + filename = "##Python Function Call##" + + # We break the code apart and put it back together to add + # helpful line numberings for identifying problem areas + code = enumerate(code.split("\n")) # Split by line breaks + code = [f"{lineno}: {linecode}" for lineno, linecode in code] # Number the lines + code = '\n'.join(code) # Put it back together + + num_launches_without_checks = 0 + for m in kernel_launch_start.finditer(code): + end_paren = find_matching_paren(code, m.end() - 1) + if has_check.match(code, end_paren): + num_launches_without_checks += 1 + context = code[m.start():end_paren + 1] + print(f"Missing C10_CUDA_KERNEL_LAUNCH_CHECK in '{filename}'. 
Context:\n{context}", file=sys.stderr) + + return num_launches_without_checks + + +def check_file(filename): + """Checks a file for CUDA kernel launches without cuda error checks + + Args: + filename - File to check + + Returns: + The number of unsafe kernel launches in the file + """ + if not (filename.endswith((".cu", ".cuh"))): + return 0 + if should_exclude_file(filename): + return 0 + with open(filename) as fo: + contents = fo.read() + unsafeCount = check_code_for_cuda_kernel_launches(contents, filename) + return unsafeCount + + +def check_cuda_kernel_launches(): + """Checks all pytorch code for CUDA kernel launches without cuda error checks + + Returns: + The number of unsafe kernel launches in the codebase + """ + torch_dir = os.path.dirname(os.path.realpath(__file__)) + torch_dir = os.path.dirname(torch_dir) # Go up to parent torch + torch_dir = os.path.dirname(torch_dir) # Go up to parent caffe2 + + kernels_without_checks = 0 + files_without_checks = [] + for root, dirnames, filenames in os.walk(torch_dir): + # `$BASE/build` and `$BASE/torch/include` are generated + # so we don't want to flag their contents + if root == os.path.join(torch_dir, "build") or root == os.path.join(torch_dir, "torch/include"): + # Curtail search by modifying dirnames and filenames in place + # Yes, this is the way to do this, see `help(os.walk)` + dirnames[:] = [] + continue + + for x in filenames: + filename = os.path.join(root, x) + file_result = check_file(filename) + if file_result > 0: + kernels_without_checks += file_result + files_without_checks.append(filename) + + if kernels_without_checks > 0: + count_str = f"Found {kernels_without_checks} instances in " \ + f"{len(files_without_checks)} files where kernel " \ + "launches didn't have checks." + print(count_str, file=sys.stderr) + print("Files without checks:", file=sys.stderr) + for x in files_without_checks: + print(f"\t{x}", file=sys.stderr) + print(count_str, file=sys.stderr) + + return kernels_without_checks + + +if __name__ == "__main__": + unsafe_launches = check_cuda_kernel_launches() + sys.exit(0 if unsafe_launches == 0 else 1) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..48c8e4dabbbe8159b183e557ec4b09d29c93eaa3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py @@ -0,0 +1,1322 @@ +# mypy: ignore-errors + +import faulthandler +import logging +import multiprocessing +import os +import queue +import subprocess +import sys +import tempfile +import threading +import time +import traceback +import types +import unittest +from contextlib import contextmanager +from dataclasses import dataclass +from datetime import timedelta +from enum import Enum +from functools import partial, reduce, wraps +from io import StringIO +from typing import Dict, NamedTuple, Optional, Union +from unittest.mock import patch + +import torch +import torch._dynamo.test_case +import torch.cuda.nccl +import torch.distributed as c10d +import torch.nn as nn +from torch.testing._internal.common_utils import ( + FILE_SCHEMA, + find_free_port, + IS_SANDCASTLE, + retry_on_connect_failures, + skip_but_pass_in_sandcastle, + skip_but_pass_in_sandcastle_if, + TEST_WITH_ROCM, + TEST_WITH_TSAN, + TestCase, +) +from torch.testing._internal.common_utils import ( + parametrize, + subtest, +) +from 
torch.testing._internal.distributed.multi_threaded_pg import ( + _install_threaded_pg, + _uninstall_threaded_pg, + ProcessLocalGroup, +) +import operator + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +class TestSkip(NamedTuple): + exit_code: int + message: str + + +TEST_SKIPS = { + "backend_unavailable": TestSkip( + 72, "Skipped because distributed backend is not available." + ), + "small_worldsize": TestSkip(73, "Skipped due to small world size."), + "odd_worldsize": TestSkip(87, "Skipped due to odd world size."), + "no_cuda": TestSkip(74, "CUDA is not available."), + "multi-gpu-1": TestSkip(75, "Need at least 1 CUDA device"), + "multi-gpu-2": TestSkip(77, "Need at least 2 CUDA devices"), + "multi-gpu-3": TestSkip(80, "Need at least 3 CUDA devices"), + "multi-gpu-4": TestSkip(81, "Need at least 4 CUDA devices"), + "multi-gpu-5": TestSkip(82, "Need at least 5 CUDA devices"), + "multi-gpu-6": TestSkip(83, "Need at least 6 CUDA devices"), + "multi-gpu-7": TestSkip(84, "Need at least 7 CUDA devices"), + "multi-gpu-8": TestSkip(85, "Need at least 8 CUDA devices"), + "nccl": TestSkip(76, "c10d not compiled with NCCL support"), + "skipIfRocm": TestSkip(78, "Test skipped for ROCm"), + "no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"), + "generic": TestSkip( + 86, "Test skipped at subprocess level, look at subprocess log for skip reason" + ), + "importerror": TestSkip(88, "Test skipped due to missing import"), +} + + +@dataclass +class DistTestCases: + # Backends that do not support a specific collective + skip_collective = {} + skip_collective["allgather_coalesced"] = {"nccl", "mpi", "ucc"} + skip_collective["reduce"] = set() + skip_collective["sendrecv anysource"] = {"nccl", "ucc"} + skip_collective["cpu barrier"] = {"nccl", "ucc"} + + # Sets showing that something is implemented + backend_feature = {} + backend_feature["gpu"] = {"nccl", "gloo", "ucc"} + backend_feature["cuda"] = {"nccl", "gloo", "ucc"} + backend_feature["ddp"] = {"nccl", "gloo", "ucc"} + backend_feature["subgroup"] = {"nccl", "gloo", "ucc"} + backend_feature["plugin"] = set() + + +def skip_if_no_gpu(func): + """Skips if the world size exceeds the number of GPUs, ensuring that if the + test is run, each rank has its own GPU via ``torch.cuda.device(rank)``.""" + + @wraps(func) + def wrapper(*args, **kwargs): + if not torch.cuda.is_available(): + sys.exit(TEST_SKIPS["no_cuda"].exit_code) + world_size = int(os.environ["WORLD_SIZE"]) + if torch.cuda.device_count() < world_size: + sys.exit(TEST_SKIPS[f"multi-gpu-{world_size}"].exit_code) + + return func(*args, **kwargs) + + return wrapper + + +def skip_if_small_worldsize(func): + @wraps(func) + def wrapper(*args, **kwargs): + if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) <= 2: + sys.exit(TEST_SKIPS["small_worldsize"].exit_code) + + return func(*args, **kwargs) + + return wrapper + + +def skip_if_odd_worldsize(func): + @wraps(func) + def wrapper(*args, **kwargs): + if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) % 2 == 1: + sys.exit(TEST_SKIPS["odd_worldsize"].exit_code) + + return func(*args, **kwargs) + + return wrapper + + +def require_n_gpus_for_nccl_backend(n, backend): + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + if backend == "nccl" and torch.cuda.device_count() < n: + sys.exit(TEST_SKIPS[f"multi-gpu-{n}"].exit_code) + else: + return func(*args, **kwargs) + + return wrapper + + return decorator + + +def import_transformers_or_skip(): + def 
decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + try: + from transformers import ( # noqa: F401 + AutoModelForMaskedLM, + BertConfig, + ) + + return func(*args, **kwargs) + except ImportError: + sys.exit(TEST_SKIPS["importerror"].exit_code) + + return wrapper + + return decorator + + +def skip_if_lt_x_gpu(x): + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + if torch.cuda.is_available() and torch.cuda.device_count() >= x: + return func(*args, **kwargs) + sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code) + + return wrapper + + return decorator + + +# This decorator helps avoiding initializing cuda while testing other backends +def nccl_skip_if_lt_x_gpu(backend, x): + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + if backend != "nccl": + return func(*args, **kwargs) + if torch.cuda.is_available() and torch.cuda.device_count() >= x: + return func(*args, **kwargs) + sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code) + + return wrapper + + return decorator + + +def verify_ddp_error_logged(model_DDP, err_substr): + # Verify error was logged in ddp_logging_data. + ddp_logging_data = model_DDP._get_ddp_logging_data() + assert "iteration" in ddp_logging_data + assert "has_error" in ddp_logging_data + assert "error" in ddp_logging_data + logging_err = ddp_logging_data["error"] + # Remove C++ stacktrace if needed. + actual = ( + err_substr + if err_substr.find("\nException raised from ") == -1 + else err_substr.split("\nException raised from ")[0] + ) + assert ( + actual in logging_err + ), f"Did not find expected {actual} in ddp logging data error: {logging_err}" + + +def with_nccl_blocking_wait(func): + """ + Convenience decorator to set/unset TORCH_NCCL_BLOCKING_WAIT flag. Note that use of + this decorator will override the setting of TORCH_NCCL_ASYNC_ERROR_HANDLING for + the particular test. After the test, both TORCH_NCCL_BLOCKING_WAIT and + TORCH_NCCL_ASYNC_ERROR_HANDLING will be restored to their original values. + """ + + @wraps(func) + def wrapper(*args, **kwargs): + # Save and unset TORCH_NCCL_ASYNC_ERROR_HANDLING + try: + cached_nccl_async_error_handling: Union[str, None] = os.environ[ + "TORCH_NCCL_ASYNC_ERROR_HANDLING" + ] + del os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] + except KeyError: + # TORCH_NCCL_ASYNC_ERROR_HANDLING was unset + cached_nccl_async_error_handling = None + + # Save val of TORCH_NCCL_BLOCKING_WAIT and set it. + try: + cached_nccl_blocking_wait: Union[str, None] = os.environ[ + "TORCH_NCCL_BLOCKING_WAIT" + ] + except KeyError: + cached_nccl_blocking_wait = None + finally: + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + + try: + ret = func(*args, **kwargs) + return ret + finally: + # restore old values. + if cached_nccl_async_error_handling is not None: + os.environ[ + "TORCH_NCCL_ASYNC_ERROR_HANDLING" + ] = cached_nccl_async_error_handling + + if cached_nccl_blocking_wait is not None: + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait + + return wrapper + + +def with_dist_debug_levels(levels): + """ + Runs a test for each distributed debug level specified in levels. 
+ """ + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + old_level = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None) + for level in levels: + os.environ["TORCH_DISTRIBUTED_DEBUG"] = level + c10d.set_debug_level_from_env() + ret = func(*args, **kwargs) + c10d.barrier() + if old_level is not None: + os.environ["TORCH_DISTRIBUTED_DEBUG"] = old_level + # Only returns test return for last test, but since these are + # unittests the return value is not really used and earlier tests + # would've raised had they failed. + return ret + + return wrapper + + return decorator + + +def requires_gloo(): + return skip_but_pass_in_sandcastle_if( + not c10d.is_gloo_available(), + "c10d was not compiled with the Gloo backend", + ) + + +def requires_nccl_version(version, msg): + if not c10d.is_nccl_available(): + return skip_but_pass_in_sandcastle( + "c10d was not compiled with the NCCL backend", + ) + else: + return skip_but_pass_in_sandcastle_if( + torch.cuda.nccl.version() < version, + "Requires NCCL version greater than or equal to: {}, found: {}, reason: {}".format( + version, torch.cuda.nccl.version(), msg + ), + ) + + +def requires_nccl(): + return skip_but_pass_in_sandcastle_if( + not c10d.is_nccl_available(), + "c10d was not compiled with the NCCL backend", + ) + +def requires_ucc(): + return skip_but_pass_in_sandcastle_if( + not c10d.is_ucc_available(), + "c10d was not compiled with the UCC backend", + ) + +def requires_mpi(): + return skip_but_pass_in_sandcastle_if( + not c10d.is_mpi_available(), + "c10d was not compiled with the MPI backend", + ) + + +def skip_if_rocm(func): + """Skips a test for ROCm""" + func.skip_if_rocm = True + + @wraps(func) + def wrapper(*args, **kwargs): + if not TEST_WITH_ROCM: + return func(*args, **kwargs) + sys.exit(TEST_SKIPS["skipIfRocm"].exit_code) + + return wrapper + + +def skip_if_win32(): + return skip_but_pass_in_sandcastle_if( + sys.platform == "win32", + "This unit test case is not supported on Windows platform", + ) + + +@retry_on_connect_failures +def create_tcp_store( + addr="localhost", + world_size=1, + is_master=True, + timeout=timedelta(minutes=5), + wait_for_workers=True, + jit_class=False, + use_libuv=False +): + """ + Creates a TCP store. Retries if the chosen port is already in use. + """ + port = find_free_port() + if jit_class: + timeout_millisecond = int(timeout / timedelta(milliseconds=1)) + return torch.classes.dist_c10d.TCPStore( + addr, port, world_size, is_master, timeout_millisecond + ) + else: + return c10d.TCPStore( + addr, port, world_size, is_master, wait_for_workers=wait_for_workers, use_libuv=use_libuv + ) + + +if TEST_WITH_TSAN: + # TSAN runs much slower. 
+ TIMEOUT_DEFAULT = 500 +else: + TIMEOUT_DEFAULT = int(os.getenv('DISTRIBUTED_TESTS_DEFAULT_TIMEOUT', '300')) +TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400} + + +# https://github.com/pytorch/pytorch/issues/75665 +if TEST_WITH_ROCM: + TIMEOUT_OVERRIDE["test_join_kwargs"] = 200 + + +def create_device(interface=None): + if sys.platform == "win32" or interface is None: + return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1") + else: + return c10d.ProcessGroupGloo.create_device(interface=interface) + + +def get_timeout(test_id) -> int: + return TIMEOUT_OVERRIDE.get(test_id.split(".")[-1], TIMEOUT_DEFAULT) + + +@contextmanager +def captured_output(): + new_out, new_err = StringIO(), StringIO() + old_out, old_err = sys.stdout, sys.stderr + try: + sys.stdout, sys.stderr = new_out, new_err + yield sys.stdout, sys.stderr + finally: + sys.stdout, sys.stderr = old_out, old_err + + +def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1): + """ + Generate a number of basic test cases for sparse reduction. + These cover tensors with a varying number of sparse dimensions and a varying + number of dense dimensions. The only reduction operation we support is sum. + """ + + def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0): + # First sparse dimension is [0..rank]. + # Subsequent dimensions are always 0, so we know there is + # a non-empty intersection between any two sparse tensors. + indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1)) + shape = [world_size] + [2 for _ in range(dense_dims)] + for _ in range(sparse_dims - 1): + indices = torch.cat((indices, torch.zeros(1, rank + 1))) + shape.append(world_size) + values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)]) + return torch.sparse_coo_tensor(indices, values, shape) + + def compute_sum(fn, world_size: int): + return reduce( + operator.add, [fn(rank, world_size) for rank in range(world_size)] + ) + + return [ + ( + [ + fn(num_inputs * rank + i, num_inputs * world_size) + for i in range(num_inputs) + ], + [compute_sum(fn, num_inputs * world_size) for i in range(num_inputs)], + ) + for fn in [ + partial(generate, sparse_dims=1), + partial(generate, sparse_dims=2), + partial(generate, sparse_dims=3), + partial(generate, dense_dims=1), + partial(generate, dense_dims=2), + partial(generate, dense_dims=3), + ] + ] + + +# HELPER FOR MULTIGPU TESTS +def init_multigpu_helper(world_size: int, backend: str): + """Multigpu tests are designed to simulate the multi nodes with multi + GPUs on each node. Nccl backend requires equal #GPUs in each process. + On a single node, all visible GPUs are evenly + divided to subsets, each process only uses a subset. + """ + nGPUs = torch.cuda.device_count() + visible_devices = range(nGPUs) + + # If rank is less than or equal to number of available GPU's + # then each rank can be mapped to corresponding GPU. 
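+    # e.g., with world_size=4 and 8 visible GPUs, nGPUs_per_process stays 1 and rank i is mapped to GPU [i] below.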
+ nGPUs_per_process = 1 + if world_size > nGPUs: + nGPUs_per_process = nGPUs // world_size + rank_to_GPU = { + i: list(visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process]) + for i in range(world_size) + } + return rank_to_GPU + + +tmp_dir: Optional[tempfile.TemporaryDirectory] = None + + +def initialize_temp_directories(init_method: Optional[str] = None) -> None: + global tmp_dir + tmp_dir = tempfile.TemporaryDirectory() + os.environ["TEMP_DIR"] = tmp_dir.name + os.mkdir(os.path.join(tmp_dir.name, "barrier")) + os.mkdir(os.path.join(tmp_dir.name, "test_dir")) + init_dir_path = os.path.join(tmp_dir.name, "init_dir") + os.mkdir(init_dir_path) + # Set init method if specified. + if init_method is not None: + os.environ["INIT_METHOD"] = init_method + else: + os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join( + init_dir_path, "shared_init_file" + ) + + +def cleanup_temp_dir() -> None: + if tmp_dir is not None: + tmp_dir.cleanup() + + +# Most tests operate with this worldsize +DEFAULT_WORLD_SIZE = 4 + +# [How does MultiProcessTestCase work?] +# Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by +# default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an +# example which inherits from this class. Its `Setup()` methods calls into +# `MultiProcessTestCase._spawn_processes()` which spawns `world_size()` +# subprocesses. During the spawn, the main process passes the test name to +# subprocesses, and the name is acquired from self.id(). The subprocesses +# then use the provided test function name to retrieve the function attribute +# from the test instance and run it. The main process simply waits for all +# subprocesses to join. + + +class MultiProcessTestCase(TestCase): + MAIN_PROCESS_RANK = -1 + # This exit code is used to indicate that the test code had an error and + # exited abnormally. There are certain tests that might use sys.exit() to + # simulate failures and in those cases, we can't have an exit code of 0, + # but we still want to ensure we didn't run into any other errors. + TEST_ERROR_EXIT_CODE = 10 + + # do not early terminate for distributed tests. + def _should_stop_test_suite(self) -> bool: + return False + + @property + def world_size(self) -> int: + return DEFAULT_WORLD_SIZE + + def join_or_run(self, fn): + @wraps(fn) + def wrapper(self): + if self.rank == self.MAIN_PROCESS_RANK: + self._join_processes(fn) + else: + fn() + + return types.MethodType(wrapper, self) + + # The main process spawns N subprocesses that run the test. + # Constructor patches current instance test method to + # assume the role of the main process and join its subprocesses, + # or run the underlying test function. + def __init__(self, method_name: str = "runTest") -> None: + super().__init__(method_name) + fn = getattr(self, method_name) + setattr(self, method_name, self.join_or_run(fn)) + + def setUp(self) -> None: + super().setUp() + self.skip_return_code_checks = [] # type: ignore[var-annotated] + self.processes = [] # type: ignore[var-annotated] + self.rank = self.MAIN_PROCESS_RANK + self.file_name = tempfile.NamedTemporaryFile(delete=False).name + # pid to pipe consisting of error message from process. + self.pid_to_pipe = {} # type: ignore[var-annotated] + + def tearDown(self) -> None: + super().tearDown() + for p in self.processes: + p.terminate() + # Each Process instance holds a few open file descriptors. The unittest + # runner creates a new TestCase instance for each test method and keeps + # it alive until the end of the entire suite. 
We must thus reset the + # processes to prevent an effective file descriptor leak. + self.processes = [] + + def _current_test_name(self) -> str: + # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank' + return self.id().split(".")[-1] + + def _start_processes(self, proc) -> None: + self.processes = [] + for rank in range(int(self.world_size)): + parent_conn, child_conn = torch.multiprocessing.Pipe() + process = proc( + target=self.__class__._run, + name="process " + str(rank), + args=(rank, self._current_test_name(), self.file_name, child_conn), + ) + process.start() + logger.info("Started process %s with pid %s", rank, process.pid) + self.pid_to_pipe[process.pid] = parent_conn + self.processes.append(process) + + def _spawn_processes(self) -> None: + proc = torch.multiprocessing.get_context("spawn").Process + self._start_processes(proc) + + class Event(Enum): + GET_TRACEBACK = 1 + + @staticmethod + def _event_listener(parent_pipe, signal_pipe, rank: int): + logger.info("Starting event listener thread for rank %s", rank) + while True: + ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe]) + + if parent_pipe in ready_pipes: + + if parent_pipe.closed: + logger.info( + "Pipe closed for process %s, stopping event listener thread", rank + ) + return + + event = parent_pipe.recv() + logger.info("Received event %s on process %s", event, rank) + + if event == MultiProcessTestCase.Event.GET_TRACEBACK: + # Return traceback to the parent process. + with tempfile.NamedTemporaryFile(mode="r+") as tmp_file: + faulthandler.dump_traceback(tmp_file) + # Flush buffers and seek to read from the beginning + tmp_file.flush() + tmp_file.seek(0) + parent_pipe.send(tmp_file.read()) + + logger.info("Process %s sent traceback", rank) + + if signal_pipe in ready_pipes: + return + + @classmethod + def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None: + self = cls(test_name) + self.rank = rank + self.file_name = file_name + self.run_test(test_name, parent_pipe) + + def run_test(self, test_name: str, parent_pipe) -> None: + # Start event listener thread. + signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False) + event_listener_thread = threading.Thread( + target=MultiProcessTestCase._event_listener, + args=(parent_pipe, signal_recv_pipe, self.rank), + daemon=True, + ) + event_listener_thread.start() + if sys.platform != "win32" and sys.platform != "darwin": + # Register signal handler to dump stack traces on FATALs. + # Windows and MacOS do not support the signal handlers. + torch._C._set_print_stack_traces_on_fatal_signal(True) + # Show full C++ stacktraces when a Python error originating from C++ is raised. + os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1" + + # self.id() == e.g. '__main__.TestDistributed.test_get_rank' + # We're retrieving a corresponding test and executing it. + try: + getattr(self, test_name)() + except unittest.SkipTest as se: + logger.info( + "Process %s skipping test %s for following reason: %s", self.rank, test_name, str(se) + ) + sys.exit(TEST_SKIPS["generic"].exit_code) + except Exception as e: + logger.error( + "Caught exception: \n%s exiting " + "process %s with exit code: %s", + traceback.format_exc(), self.rank, MultiProcessTestCase.TEST_ERROR_EXIT_CODE + ) + # Send error to parent process. 
+ parent_pipe.send(traceback.format_exc()) + sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE) + finally: + if signal_send_pipe is not None: + signal_send_pipe.send(None) + + assert event_listener_thread is not None + event_listener_thread.join() + # Close pipe after done with test. + parent_pipe.close() + + def _get_timedout_process_traceback(self) -> None: + pipes = [] + for i, process in enumerate(self.processes): + if process.exitcode is None: + pipe = self.pid_to_pipe[process.pid] + try: + pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK) + pipes.append((i, pipe)) + except ConnectionError as e: + logger.error( + "Encountered error while trying to get traceback for process %s: %s", i, e + ) + + # Wait for results. + for rank, pipe in pipes: + try: + # Wait for traceback + if pipe.poll(5): + if pipe.closed: + logger.info( + "Pipe closed for process %s, cannot retrieve traceback", rank + ) + continue + + traceback = pipe.recv() + logger.error( + "Process %s timed out with traceback: \n\n%s", rank, traceback + ) + else: + logger.error( + "Could not retrieve traceback for timed out process: %s", rank + ) + except ConnectionError as e: + logger.error( + "Encountered error while trying to get traceback for process %s: %s", rank, e + ) + + def _join_processes(self, fn) -> None: + timeout = get_timeout(self.id()) + start_time = time.time() + subprocess_error = False + try: + while True: + # check to see if any subprocess exited with an error early. + for (i, p) in enumerate(self.processes): + # This is the exit code processes exit with if they + # encountered an exception. + if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE: + print( + f"Process {i} terminated with exit code {p.exitcode}, terminating remaining processes." + ) + active_children = torch.multiprocessing.active_children() + for ac in active_children: + ac.terminate() + subprocess_error = True + break + if subprocess_error: + break + # All processes have joined cleanly if they all a valid exitcode + if all(p.exitcode is not None for p in self.processes): + break + # Check if we should time out the test. If so, we terminate each process. + elapsed = time.time() - start_time + if elapsed > timeout: + self._get_timedout_process_traceback() + print( + f"Timing out after {timeout} seconds and killing subprocesses." + ) + for p in self.processes: + p.terminate() + break + # Sleep to avoid excessive busy polling. + time.sleep(0.1) + + elapsed_time = time.time() - start_time + + if fn in self.skip_return_code_checks: + self._check_no_test_errors(elapsed_time) + else: + self._check_return_codes(elapsed_time) + finally: + # Close all pipes + for pipe in self.pid_to_pipe.values(): + pipe.close() + + def _check_no_test_errors(self, elapsed_time) -> None: + """ + Checks that we didn't have any errors thrown in the child processes. + """ + for i, p in enumerate(self.processes): + if p.exitcode is None: + raise RuntimeError( + f"Process {i} timed out after {elapsed_time} seconds" + ) + self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode) + + def _check_return_codes(self, elapsed_time) -> None: + """ + Checks that the return codes of all spawned processes match, and skips + tests if they returned a return code indicating a skipping condition. + """ + # If no processes are spawned, there is nothing to check. 
+ if not self.processes: + logger.warning("Note: no subprocesses were spawned, test was likely skipped.") + return + + first_process = self.processes[0] + # first, we check if there are errors in actual processes + # (via TEST_ERROR_EXIT CODE), and raise an exception for those. + # the reason we do this is to attempt to raise a more helpful error + # message than "Process x terminated/timed out" + # TODO: we should pipe the exception of the failed subprocess here. + # Currently, the actual exception is displayed as a logging output. + errored_processes = [ + (i, p) + for i, p in enumerate(self.processes) + if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE + ] + if errored_processes: + error = "" + for i, process in errored_processes: + # Get error from pipe. + error_message = self.pid_to_pipe[process.pid].recv() + error += ( + "Process {} exited with error code {} and exception:\n{}\n".format( + i, MultiProcessTestCase.TEST_ERROR_EXIT_CODE, error_message + ) + ) + + raise RuntimeError(error) + # If no process exited uncleanly, we check for timeouts, and then ensure + # each process exited cleanly. + for i, p in enumerate(self.processes): + if p.exitcode is None: + raise RuntimeError( + f"Process {i} terminated or timed out after {elapsed_time} seconds" + ) + self.assertEqual( + p.exitcode, + first_process.exitcode, + msg="Expect process {} exit code to match Process 0 exit code of {}, but got {}".format( + i, first_process.exitcode, p.exitcode + ), + ) + for skip in TEST_SKIPS.values(): + if first_process.exitcode == skip.exit_code: + if IS_SANDCASTLE: + # Don't use unittest.skip to skip the test on sandcastle + # since it creates tasks for skipped tests assuming there + # is some follow-up needed. Instead just "pass" the test + # with an appropriate message. + logger.info( + "Skipping %s on sandcastle for the following reason: %s", self.id(), skip.message + ) + return + else: + raise unittest.SkipTest(skip.message) + self.assertEqual( + first_process.exitcode, + 0, + msg=f"Expected zero exit code but got {first_process.exitcode} for pid: {first_process.pid}", + ) + + @property + def is_master(self) -> bool: + return self.rank == 0 + + +# Cannot use functools.cache as it requires python 3.9 +EFA_PROBE_RESULT = None + + +def has_efa() -> bool: + """ + If shell command `fi_info -p efa -t FI_EP_RDM` returns exit code 0 then we assume that the machine has + Libfabric EFA interfaces and EFA software components installed, + see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html. 
+ """ + global EFA_PROBE_RESULT + if EFA_PROBE_RESULT is not None: + return EFA_PROBE_RESULT + + try: + EFA_PROBE_RESULT = ( + subprocess.run(["fi_info", "-p", "efa", "-t", "FI_EP_RDM"], check=False).returncode == 0 + ) + except FileNotFoundError: + EFA_PROBE_RESULT = False + return EFA_PROBE_RESULT + + +def tp_transports(): + """ + If the machine has Libfabric EFA interfaces and EFA software components installed it may cause + 'RuntimeError: In operator() at tensorpipe/common/ibv.h:172 "": Operation not supported' if tensorpipe + uses InfiniBand transport, so we exclude it from tensorpipe transports, + see https://github.com/pytorch/pytorch/issues/73885 and https://github.com/pytorch/pytorch/issues/65022 + """ + return ["shm", "uv"] if has_efa() else None + + +def spawn_threads_and_init_comms( + func=None, timeout=TIMEOUT_DEFAULT, world_size=DEFAULT_WORLD_SIZE +): + """ + Wrapper to use with a test method + """ + if func is None: + return partial( + spawn_threads_and_init_comms, timeout=timeout, world_size=world_size + ) + + + def _run_test_method_with_multi_threads(world_size, callback): + world = _install_threaded_pg() + global_store = c10d.HashStore() + + def world_is_valid(): + return world == c10d.distributed_c10d._world + + def worker(rank, world_pg, store): + c10d.init_process_group( + backend="threaded", rank=rank, world_size=world_size, store=store + ) + try: + callback() + except BaseException as ex: + # Exceptions are handled in MultiThreadedTestCase + MultiThreadedTestCase.exception_queue.put((rank, sys.exc_info())) + ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads + finally: + if world_is_valid(): + c10d.destroy_process_group() + + threads = [] + for rank in range(world_size): + t = threading.Thread(target=worker, args=(rank, world, global_store)) + t.start() + threads.append(t) + + return threads + + + @wraps(func) + def wrapper(self, *args, **kwargs): + # TODO: get test name from kwargs + torch._C._distributed_c10d._set_thread_isolation_mode(True) + try: + threads = _run_test_method_with_multi_threads(world_size, lambda: func(self, *args, **kwargs)) + # join and error handling + MultiThreadedTestCase._join_threads(threads, func) + finally: + torch._C._distributed_c10d._set_thread_isolation_mode(False) + + return wrapper + + +class MultiThreadedTestCase(TestCase): + """ + Test runner that runs all tests with the in-proc process group using + multiple threads with the threaded process group. + + Each test spawns world_size threads and run the test method in each thread. + + Difference from regular MultiProcess test runner: + Must explicitly defines SetUp and call self._spawn_threads() to run the tests. + Cannot use setUp / tearDown (must use perThreadSetup / perThreadShutdown) + to set up / tear down each thread when running each test. + No global state possible + How bad of a limitation is this? 
+ """ + exception_queue = queue.Queue() + + MAIN_THREAD_RANK = -1 + + def join_or_run(self, fn): + @wraps(fn) + def wrapper(self): + if self.rank == self.MAIN_THREAD_RANK: + self._join_threads(self.threads, fn) + else: + fn() + + return types.MethodType(wrapper, self) + + def __init__(self, method_name: str = "runTest") -> None: + super().__init__(method_name) + test_fn = getattr(self, method_name, None) + setattr(self, method_name, self.join_or_run(test_fn)) + + def perThreadSetUp(self): + # super().setUp() # TestCase.setUp() calls torch.manual_seed() + pass + + def perThreadTearDown(self): + pass + + def setUp(self) -> None: + """ + setUp only set up things in the main thread, if you want to configure things + in the spawned threads, use perThreadSetUp + """ + super().setUp() + self.rank = self.MAIN_THREAD_RANK + self.threads = [] + # Show full C++ stacktraces when a Python error originating from C++ is raised. + os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1" + + def tearDown(self): + """ + tearDown only set up things in the main thread, if you want to configure things + in the spawned threads, use perThreadTearDown + """ + super().tearDown() + self.threads = [] + + def _spawn_threads(self): + """ + class method to spawn threads and run test, use this method in the SetUp of your TestCase + """ + torch._C._distributed_c10d._set_thread_isolation_mode(True) + test_name = self._current_test_name + # for each test case, we need to create thread local world, and a global store + world = _install_threaded_pg() + self.__class__.global_store = c10d.HashStore() + + def world_is_valid(): + return world == c10d.distributed_c10d._world + + if not world_is_valid(): + raise RuntimeError("Invalid world") + + for rank in range(self.world_size): + t = threading.Thread(target=self.__class__._run, args=(test_name, rank, self.world_size)) + t.start() + self.threads.append(t) + + @classmethod + def _run(cls, test_name, rank, world_size): + self = cls(test_name) + self.rank = rank + + # precision/rel_tol is a thread-local setting since it may be overridden per test, need to make + # every thread have the same value. This would be relevant when we use op db tests, where it + # needs those states to be set i.e. using instantiate_device_type_tests() + # TODO: figure out a better way to do this + if hasattr(self, "_tls"): + self._tls = threading.local() + self._tls.precision = TestCase._precision + self._tls.rel_tol = TestCase._rel_tol + + self.run_test_with_threaded_pg(test_name, rank, world_size) + + def run_test_with_threaded_pg(self, test_name, rank, world_size): + """ + Run the current test associated with `test_name` using the threaded process group. 
+ """ + c10d.init_process_group( + backend="threaded", rank=rank, world_size=world_size, store=self.__class__.global_store + ) + self.perThreadSetUp() + + try: + getattr(self, test_name)() + except BaseException as ex: + self.exception_queue.put((rank, sys.exc_info())) + ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads + finally: + c10d.destroy_process_group() + self.perThreadTearDown() + + + @classmethod + def _join_threads(cls, threads, fn): + timeout = TIMEOUT_DEFAULT + try: + for idx, thread in enumerate(threads): + thread.join(max(0, timeout)) + if thread.is_alive(): + MultiThreadedTestCase.exception_queue.put( + ( + idx, + ( + TimeoutError, + TimeoutError( + f"Rank failed to join in under {timeout} seconds" + ), + None, + ), + ) + ) + ProcessLocalGroup.reset() + failed_ranks = [] + while not cls.exception_queue.empty(): + failure = cls.exception_queue.get() + failed_ranks.append(failure) + finally: + _uninstall_threaded_pg() + torch._C._distributed_c10d._set_thread_isolation_mode(False) + + cls._check_return_codes(failed_ranks, timeout, fn) + + @classmethod + def _check_return_codes(cls, failed_ranks, timeout, fn): + # Print based on exceptions raised from threads + # SkipTest: print info for each thread + # TimeoutError: raise RuntimeError for any timed out thread + # Normal Exception: print error for each thread that raises exception + # and raise a RuntimeError + error_msg = "" + skip_code = -1 + for rank, exc_info in failed_ranks: + exc = exc_info[1] + if isinstance(exc, unittest.SkipTest): + logger.info( + "Thread %s skipping test %s for following reason: %s", rank, fn, str(exc) + ) + if skip_code < 0: + skip_code = TEST_SKIPS["generic"].exit_code + elif isinstance(exc, TimeoutError): + msg = f"Thread {rank} terminated or timed out after {timeout} seconds\n" + logger.error(msg) + raise RuntimeError(msg) + elif isinstance(exc, Exception): + msg = "".join(traceback.format_exception(*exc_info)) + logger.error( + "Caught exception: \n%s exiting thread %s", msg, rank + ) + error_msg += ( + f"Thread {rank} exited with exception:\n{msg}\n" + ) + elif isinstance(exc, SystemExit): + if type(exc.code) == int and skip_code < 0: + skip_code = exc.code + + # check exceptions + if len(error_msg) > 0: + raise RuntimeError(error_msg) + # check skip + if skip_code > 0: + for skip in TEST_SKIPS.values(): + if skip_code == skip.exit_code: + if IS_SANDCASTLE: + # "pass" the test with an appropriate message. + logger.info( + "Skipping %s on sandcastle for the following reason: %s", fn, skip.message + ) + return + else: + raise unittest.SkipTest(skip.message) + + @property + def world_size(self) -> int: + return DEFAULT_WORLD_SIZE + + @property + def _current_test_name(self) -> str: + # self.id() == e.g. 
'__main__.TestDistributed.TestAdditive.test_get_rank' + return self.id().split(".")[-1] + + def assertEqualOnRank(self, x, y, msg=None, *, rank=0): + """ + The reason why we have this util function instead of + self.assertEqual is all threads are sharing one CPU RNG + so the assertion result is only reliable on rank 0 + """ + if self.rank == rank: + self.assertEqual(x, y, msg) + + def assertNotEqualOnRank(self, x, y, msg=None, *, rank=0): + if self.rank == rank: + self.assertNotEqual(x, y) + + +class SaveForwardInputsModule(nn.Module): + def __init__( + self, + forward_inputs: Dict[nn.Module, torch.Tensor], + cast_forward_inputs: bool, + ) -> None: + super().__init__() + self.l = nn.Linear(100, 100) + self.forward_inputs = forward_inputs + self.cast_forward_inputs = cast_forward_inputs + + def forward(self, x: torch.Tensor) -> torch.Tensor: + self.forward_inputs[self] = x + return self.l(x.to(self.l.weight.dtype) if self.cast_forward_inputs else x) + + +class SaveForwardInputsModel(nn.Module): + def __init__( + self, + forward_inputs: Dict[nn.Module, torch.Tensor], + cast_forward_inputs: bool, + ) -> None: + super().__init__() + self.c1 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs) + self.c2 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs) + self.forward_inputs = forward_inputs + + def forward(self, x: torch.Tensor) -> torch.Tensor: + self.forward_inputs[self] = x + return self.c2(self.c1(x)) + +@contextmanager +def _dynamo_dist_per_rank_init(rank, world_size, init_pg=True): + # To avoid multiple inheritance from _dynamo.test_case.TestCase and MultiProcessTestCase, + # Just manually implement the most important part of the dynamo behavior to reset/clear. + torch.cuda.set_device(rank) + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '6789' + if init_pg: + c10d.init_process_group("nccl", rank=rank, world_size=world_size) + torch._dynamo.reset() + torch._dynamo.utils.counters.clear() + try: + yield + finally: + torch._dynamo.reset() + torch._dynamo.utils.counters.clear() + if init_pg: + c10d.destroy_process_group() + + +class DynamoDistributedSingleProcTestCase(torch._dynamo.test_case.TestCase): + """ + Test harness for single-process dynamo distributed tests, + initializes dist process group. + + Prefer this for simple tests, as it's easier to debug. + """ + + @classmethod + def setUpClass(cls): + super().setUpClass() + # _exit_stack is set up in TestCase + cls._exit_stack.enter_context( + patch.dict( + os.environ, + { + "MASTER_ADDR": "localhost", + "MASTER_PORT": "12355", + }, + ) + ) + cls.rank = 0 + cls.device = f"cuda:{cls.rank}" + cls.device_ids = None if "cuda" in cls.device else [cls.rank] + c10d.init_process_group("nccl", rank=cls.rank, world_size=1) + + @classmethod + def tearDownClass(cls): + c10d.destroy_process_group() + super().tearDownClass() + + +class DynamoDistributedMultiProcTestCase(MultiProcessTestCase): + """ + Use this for tests that actually run on multiple GPUs. + + Decorate tests with @skip_if_lt_x_gpu(ngpu) + + Note: MultiProcTestCase spawns processes per test and is slow. + Prefer MultiThreadedTestCase for most tests. Perhaps use this one + sparingly for integration tests. 
+ """ + def setUp(self): + super().setUp() + self._spawn_processes() + + def tearDown(self): + super().tearDown() + try: + os.remove(self.file_name) + except OSError: + pass + + @property + def world_size(self) -> int: + return torch.cuda.device_count() + + @classmethod + def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None: + # The rest is copypasta from MultiProcessTestCase._run + self = cls(test_name) + self.rank = rank + self.file_name = file_name + self.run_test(test_name, parent_pipe) + + +# NOTE [test parametrization utils for native funcol migration] +# +# Between the time we switch to the native funcol by default and the time when +# we are confident that we can remove the legacy implementation, we want to +# ensure that the legacy funcol remains covered by unit tests. This is to +# prepare for any potential (but unlikely) reverts. The following utilities +# help achieve this goal. +# +# run_with_{native,legacy}_funcol - mark a test to run with only +# {native,legacy} funcol. These decorators are for impl specific tests (e.g. +# verifying generated code with FileCheck). +# +# run_with_both_funcol_impls - parametrize a test to run with both legacy and +# native funcol. +# +# run_with_both_funcol_impls_with_arg - same as run_with_both_funcol_impls, but +# passes `enable_native_funcol` to the test so impl specific checks can be +# carried out. +def with_native_funcol(use_native_funcol: bool, remove_arg: bool): + import torch.distributed._functional_collectives_impl as funcol_impl + + def decorator(fn): + def inner(*args, **kwargs): + if remove_arg: + del kwargs["use_native_funcol"] + with patch.object(funcol_impl, '_use_native_funcol', new=use_native_funcol): + return fn(*args, **kwargs) + + return inner + + return decorator + + +run_with_native_funcol = with_native_funcol(True, remove_arg=False) +run_with_legacy_funcol = with_native_funcol(False, remove_arg=False) + + +run_with_both_funcol_impls = parametrize( + "use_native_funcol", + [ + subtest(True, decorators=[with_native_funcol(True, remove_arg=True)]), + subtest(False, decorators=[with_native_funcol(False, remove_arg=True)]), + ] +) + +run_with_both_funcol_impls_with_arg = parametrize( + "use_native_funcol", + [ + subtest(True, decorators=[with_native_funcol(True, remove_arg=False)]), + subtest(False, decorators=[with_native_funcol(False, remove_arg=False)]), + ] +) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py new file mode 100644 index 0000000000000000000000000000000000000000..087cc854e87908d64a2473211393c8edf96d7420 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py @@ -0,0 +1,1441 @@ +# Owner(s): ["oncall: distributed"] + +import contextlib +import itertools +import os +import re +import sys +import warnings +from abc import ABC, abstractmethod +from contextlib import nullcontext +from copy import deepcopy +from enum import auto, Enum +from functools import partial, wraps +from typing import ( + Any, + Callable, + Dict, + List, + no_type_check, + Optional, + Tuple, + Type, + Union, +) +from unittest import mock + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from torch.distributed._composable.fsdp._fsdp_param_group import ( + FSDPParamGroup, + RegisterPostBackwardFunction, +) +from torch.distributed._tensor import distribute_tensor, DTensor, Shard +from torch.distributed.fsdp import 
CPUOffload, FullyShardedDataParallel as FSDP +from torch.distributed.fsdp._common_utils import TrainingState +from torch.distributed.fsdp._init_utils import NO_RESHARD_AFTER_FORWARD_STRATEGIES +from torch.distributed.fsdp.fully_sharded_data_parallel import ( + BackwardPrefetch, + MixedPrecision, + ShardingStrategy, +) +from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler +from torch.distributed.fsdp.wrap import always_wrap_policy, ModuleWrapPolicy, wrap +from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer +from torch.nn.parallel.distributed import DistributedDataParallel as DDP +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + MultiThreadedTestCase, + TEST_SKIPS, +) +from torch.testing._internal.common_utils import FILE_SCHEMA, get_cycles_per_ms +from torch.utils._triton import has_triton + + +class FSDPInitMode(Enum): + # No FSDP wrapping + NO_FSDP = auto() + # FSDP recursive wrapping + RECURSIVE = auto() + # TODO: FSDP non-recursive wrapping + # NONRECURSIVE = auto() + + +class CUDAInitMode(Enum): + # Move model to CUDA before passing to the FSDP constructor + CUDA_BEFORE = auto() + # Move model to CUDA after passing to the FSDP constructor + CUDA_AFTER = auto() + # Keep on CPU + CUDA_NEVER = auto() + + +class FSDPTestModel(nn.Module, ABC): + """This defines the interface expected from all models used commonly for + FSDP unit tests.""" + + @abstractmethod + def get_input(self, device) -> Tuple[torch.Tensor, ...]: + """Returns an input for the model as as tuple.""" + ... + + @abstractmethod + def get_loss(self, input, output) -> torch.Tensor: + """Returns the loss given the input and output.""" + ... + + @abstractmethod + def run_backward(self, loss) -> None: + """Runs the backward pass (e.g. including ``loss.backward()``).""" + ... + + @staticmethod + @abstractmethod + def init(*args: Any, **kwargs: Any) -> nn.Module: + """Initializes an instance of this model.""" + ... + + +def _assert_module_states( + model: nn.Module, + process_group: dist.ProcessGroup, + assert_fn: Callable, +): + """ + All-gathers module states across ranks and calls ``assert_fn`` on each pair + of corresponding states from rank 0 and a nonzero rank. For example, if + ``assert_fn`` is ``self.assertEqual()``, then this checks that all module + states are equal across ranks. 
+ """ + # Include names for debugging convenience + named_module_states = [ + (param_name, param.detach().cpu()) + for param_name, param in model.named_parameters() + ] + named_module_states += [ + (buffer_name, buffer.detach().cpu()) + for buffer_name, buffer in model.named_buffers() + ] + world_size = dist.get_world_size(process_group) + olist = [None for _ in range(world_size)] + dist.all_gather_object(olist, named_module_states, group=process_group) + rank0_states = olist[0] + assert rank0_states is not None # mypy + for state in olist[1:]: + assert state is not None # mypy + for (_, p1), (_, p2) in zip(rank0_states, state): + assert_fn(p1, p2) + + +def _zero_model( + model: nn.Module, + zero_buffers: bool = False, + summon_full=True, +): + """Zeros the parameters and optionally buffers of ``model`` in place.""" + ctx = FSDP.summon_full_params(model) if summon_full else nullcontext() + with ctx: + for param in model.parameters(): + with torch.no_grad(): + param.zero_() + if zero_buffers: + for buffer in model.buffers(): + with torch.no_grad(): + buffer.zero_() + + +def _get_state_dict(model, cpu_offload=False, half=False): + if not cpu_offload: + model = model.cuda() + if half: + model.half() + + return model.state_dict() + + +def subtest_name(test_name_mapping, *args): + return "_".join( + [test_name_mapping[str(s)] if s is not None else "none" for s in args] + ) + + +def _broadcast_state_dict(rank, state_dict): + # For non-FSDP roots, some parts of the model state on rank 0 may + # not be on CPU, so we move everything to CPU to avoid issues like: + # https://github.com/pytorch/pytorch/issues/77113. + for param_name, param in state_dict.items(): + if param.device != torch.device("cpu"): + state_dict[param_name] = param.cpu() + + olist = [state_dict if rank == 0 else None] + dist.broadcast_object_list(olist) + state_dict = olist[0] + # Ensure that the state is on CUDA + for param_name in state_dict.keys(): + state_dict[param_name] = state_dict[param_name].cuda() + return state_dict + + +def get_full_params(model: nn.Module, recurse: bool = True): + """ + Returns the full unsharded parameters of ``model``. Any FSDP-managed + parameters offloaded to CPU are moved to GPU in the returned list. + + Args: + recurse (bool): If ``False``, only unshards the parameters immediate to + ``model``; if ``True``, recurses through the module hierarchy + rooted at ``model``. 
+ """ + with FSDP.summon_full_params(model, recurse=recurse): + return deepcopy(list(model.parameters())) + + +def _maybe_cuda(model: nn.Module, move_to_cuda: bool): + return model.cuda() if move_to_cuda else model + + +def _maybe_wrap_fsdp(model: nn.Module, wrap_fsdp: bool, *args, **kwargs): + return model if not wrap_fsdp else FSDP(model, *args, **kwargs) + + +class DummyProcessGroup: + def __init__(self, rank: int, size: int): + self._rank = rank + self._size = size + + def rank(self) -> int: + return self._rank + + def size(self) -> int: + return self._size + + def allreduce(self, *args, **kwargs): + dist_wait = mock.Mock() + + def get_future(): + future: torch.futures.Future = torch.futures.Future() + future.set_result(1) + return future + + dist_wait.get_future = get_future + return dist_wait + + +class TransformerWithSharedParams(FSDPTestModel): + def __init__( + self, + group: dist.ProcessGroup, + cuda_init_mode: CUDAInitMode, + add_bn: bool, + deterministic: bool, + ): + super().__init__() + self.rank = group.rank() + self.world_size = group.size() + if deterministic: + torch.manual_seed(0) + d_vocab = 23 + d_model = 16 + + self.embed_tokens = nn.Embedding(d_vocab, d_model) + self.transformer = nn.Transformer( + d_model=d_model, + num_encoder_layers=2, + num_decoder_layers=2, + dim_feedforward=8, + dropout=0.1, + ) + self.output_proj = nn.Linear(d_model, d_vocab) + + # share the embedding and output projection weights + self.output_proj.weight = self.embed_tokens.weight + self.register_buffer( + "vocab_bias", self.embed_tokens.weight.new_ones((d_model,)) + ) + self.register_buffer( + "long_buffer", + torch.zeros_like(self.vocab_bias, dtype=torch.long), + ) # type: ignore[arg-type] + + self.bs = 2 + self.bn = torch.nn.BatchNorm1d(self.bs) if add_bn else torch.nn.Identity() + if cuda_init_mode == CUDAInitMode.CUDA_BEFORE: + self = self.cuda() + if deterministic: + self.eval() + + def get_input(self, device): + torch.manual_seed(1 + self.rank) # keep everything deterministic + src = torch.arange(12, device=device).view(6, self.bs) # T x B + tgt = torch.arange(self.bs * 4, device=device).view(4, self.bs) # T x B + return (src, tgt) + + def forward(self, src_ids, tgt_ids): + src = self.embed_tokens(src_ids) + src = src + self.vocab_bias + self.long_buffer.type_as(src) # type: ignore[operator] + tgt = self.embed_tokens(tgt_ids) + tgt = self.bn(tgt) + x = self.transformer(src, tgt) + return self.output_proj(x) + + def get_loss(self, input, output): + _, tgt = input + return nn.functional.cross_entropy( + output.view(-1, output.size(-1)), tgt.view(-1), reduction="sum" + ) + + def run_backward(self, loss): + loss.backward() + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + add_bn: bool = True, + ) -> Union[nn.Module, FSDP]: + """ + Initializes a :class:`TransformerWithSharedParams` instance. + + Args: + fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap + any modules with FSDP. If ``RECURSIVE``, then wraps with + top-level FSDP. By default, the top-level FSDP uses the + ``ModuleWrapPolicy`` for encoder and decoder layers, but a + different auto wrap policy may be specified via + ``fsdp_kwargs``. + cuda_init_mode (CUDAInitMode): Determines model movement to CUDA. + fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments + forwarded to the FSDP constructor. 
+ deterministic (bool): Whether to make the model deterministic + across constructions. + add_bn (bool): Whether to include batch norm in the model. + """ + + if fsdp_kwargs is None: + fsdp_kwargs = {} + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + if isinstance(group, tuple): + pg = group[0] + else: + pg = group + return TransformerWithSharedParams( + pg, cuda_init_mode, add_bn, deterministic + ) + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + # Default to the `ModuleWrapPolicy` + if "auto_wrap_policy" not in fsdp_kwargs: + auto_wrap_policy = ModuleWrapPolicy( + { + TransformerEncoderLayer, + TransformerDecoderLayer, + } + ) + else: + auto_wrap_policy = fsdp_kwargs.pop("auto_wrap_policy") + + if ( + "sharding_strategy" in fsdp_kwargs + and fsdp_kwargs["sharding_strategy"] + in {ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2} + and not isinstance(group, tuple) + ): + fsdp_pg = None + else: + fsdp_pg = group + + if isinstance(group, tuple): + tformer_pg = group[0] + else: + tformer_pg = group + + m = TransformerWithSharedParams( + tformer_pg, cuda_init_mode, add_bn, deterministic + ) + fsdp_model = FSDP( + m, + fsdp_pg, + auto_wrap_policy=auto_wrap_policy, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + def get_ignored_modules(self): + return [self.transformer] + + +class NestedWrappedModule(FSDPTestModel): + def __init__( + self, + group: dist.ProcessGroup, + wrap_fsdp: bool, + cuda_init_mode: CUDAInitMode, + deterministic: bool, + **fsdp_kwargs, + ): + super().__init__() + self.rank = group.rank() + self.world_size = group.size() + move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE + + def _maybe_wrap(layer): + if wrap_fsdp: + return FSDP(layer, group, **fsdp_kwargs) + return layer + + if deterministic: + torch.manual_seed(0) + self.module = nn.Sequential( + _maybe_cuda(nn.Linear(8, 4), move_to_cuda), + _maybe_wrap( + nn.Sequential( + _maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)), + _maybe_cuda(nn.Linear(16, 16), move_to_cuda), + ), + ), + _maybe_wrap(_maybe_cuda(nn.Linear(16, 4), move_to_cuda)), + _maybe_cuda(nn.Linear(4, 8), move_to_cuda), + ) + + def get_input(self, device): + torch.manual_seed(1 + self.rank) # keep everything deterministic + return (torch.rand(4, 8, device=device),) + + def forward(self, x): + return self.module(x) + + def get_loss(self, input, output): + loss = output.sum() + return loss + + def run_backward(self, loss): + loss.backward() + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + ) -> nn.Module: + """ + Initializes a :class:`NestedWrappedModule` instance. + + Args: + fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap + any modules with FSDP. If ``RECURSIVE``, then wraps some nested + modules with FSDP but not the top-level module. The model may + later be wrapped with a top-level FSDP external to this method + if desired. + cuda_init_mode (CUDAInitMode): Determines model movement to CUDA. + fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments + forwarded to the FSDP constructor. + deterministic (bool): Whether to make the model deterministic + across constructions. 
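# A minimal sketch of the default auto-wrapping used above for the RECURSIVE
# init mode: ``ModuleWrapPolicy`` makes every transformer encoder/decoder layer
# its own FSDP instance, with the remaining parameters flattened into the root.
# Assumes an initialized process group; the function name is illustrative only.
import torch.nn as nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import ModuleWrapPolicy


def wrap_transformer_recursively(model: nn.Transformer) -> FSDP:
    policy = ModuleWrapPolicy(
        {nn.TransformerEncoderLayer, nn.TransformerDecoderLayer}
    )
    return FSDP(model, auto_wrap_policy=policy)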
+ """ + if fsdp_kwargs is None: + fsdp_kwargs = {} + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + return NestedWrappedModule( + group, + wrap_fsdp=False, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + ) + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + # Does not wrap with top-level FSDP + fsdp_model = NestedWrappedModule( + group, + wrap_fsdp=True, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + +class AlwaysWrapNestedWrappedModule(NestedWrappedModule): + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + ): + """ + Initializes a :class:`NestedWrappedModule` instance, but unlike + :meth:`NestedWrappedModule.init`, for the ``RECURSIVE`` init mode, this + wraps with top-level FSDP and the ``always_wrap_policy()`` auto wrap + policy. + """ + model = super( + AlwaysWrapNestedWrappedModule, AlwaysWrapNestedWrappedModule + ).init( + group=group, + fsdp_init_mode=FSDPInitMode.NO_FSDP, + cuda_init_mode=cuda_init_mode, + fsdp_kwargs=fsdp_kwargs, + deterministic=deterministic, + ) + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + return model + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + fsdp_kwargs = fsdp_kwargs or {} + fsdp_model = FSDP(model, auto_wrap_policy=always_wrap_policy, **fsdp_kwargs) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + + +class NonUniformReqGradNWM(NestedWrappedModule): + def __init__( + self, + group: dist.ProcessGroup, + wrap_fsdp: bool, + cuda_init_mode: CUDAInitMode, + deterministic: bool, + **fsdp_kwargs, + ): + super(NestedWrappedModule, self).__init__() + # This `__init__` only differs from `NestedWrappedModule.__init__` in that + # the last two `nn.Linear` layers are FSDP wrapped in a `nn.Sequential` + # container. This arrangement results in all elements of the last two parameters + # residing on a single rank. Freezing all parameters except those two allows us + # to verify that `ShardedGradScaler` accommodates situations where some ranks + # have no (non-zero sized) parameter shards. 
+ self.rank = group.rank() + self.world_size = group.size() + move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE + + def _maybe_wrap(layer): + if wrap_fsdp: + return FSDP(layer, group, **fsdp_kwargs) + return layer + + if deterministic: + torch.manual_seed(0) + self.module = nn.Sequential( + _maybe_cuda(nn.Linear(8, 4), move_to_cuda), + _maybe_wrap( + nn.Sequential( + _maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)), + _maybe_cuda(nn.Linear(16, 16), move_to_cuda), + ), + ), + _maybe_wrap( + nn.Sequential( + _maybe_cuda(nn.Linear(16, 4), move_to_cuda), + _maybe_cuda(nn.Linear(4, 8), move_to_cuda), + ), + ), + ) + + @staticmethod + def _set_nonuniform_req_grad(model, req_grad_mask) -> None: + for n, p in model.named_parameters(): + if not re.match(req_grad_mask, n): + p.requires_grad_(False) + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + ): + """ + Initializes a :class:`NestedWrappedModule` instance, but unlike + :meth:`NestedWrappedModule.init`, it wraps a second :class:`torch.nn.Sequential` + container to enable the desired non-uniform ``requires_grad`` + ``use_orig_params=True`` tests. For both ``RECURSIVE`` and ``NO_FSDP`` + init modes, freezes all parameters except the last two to validate + ``ShardedGradScaler`` support for ranks with no (non-zero sized) local shards in + FSDP ``use_orig_params=True`` mode. + """ + # The parameters that should remain unfrozen are in `module.2.1`. The regex + # pattern below matches the relevant parameter names both with and without + # an interstitial FSDP module indicator (`_fsdp_wrapped_module`) present. + req_grad_pattern = re.compile(r"module\.2.*\.1.*") + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + ddp_model = NonUniformReqGradNWM( + group, + wrap_fsdp=False, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + ) + NonUniformReqGradNWM._set_nonuniform_req_grad(ddp_model, req_grad_pattern) + return ddp_model + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + if fsdp_kwargs is None: + fsdp_kwargs = {} + fsdp_model = NonUniformReqGradNWM( + group, + wrap_fsdp=True, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + NonUniformReqGradNWM._set_nonuniform_req_grad(fsdp_model, req_grad_pattern) + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + +class ModuleWithDelay(FSDPTestModel): + """This class wraps a :class:`FSDPTestModel` to optionally add a delay + after computing the loss and/or before the gradient reduction.""" + + def __init__( + self, + module: nn.Module, + delay_after_loss_ms: int, + delay_before_reduction_ms: int, + ): + super().__init__() + self.delay_after_loss_ms = delay_after_loss_ms + self.delay_before_reduction_ms = delay_before_reduction_ms + self.module = module + + def get_input(self, device): + return self.module.get_input(device) + + def forward(self, x): + return self.module(x) + + def get_loss(self, input, output): + loss = self.module.get_loss(input, output) + if self.delay_after_loss_ms > 0: + torch.cuda._sleep(int(self.delay_after_loss_ms * get_cycles_per_ms())) + return loss + + def run_backward(self, loss): + orig_reduce_scatter = torch.distributed.reduce_scatter_tensor + + def _delayed_reduce_scatter(*args, **kwargs): + if self.delay_before_reduction_ms > 0: + torch.cuda._sleep( + 
int(self.delay_before_reduction_ms * get_cycles_per_ms()) + ) + return orig_reduce_scatter(*args, **kwargs) + + with mock.patch( + "torch.distributed.reduce_scatter_tensor", _delayed_reduce_scatter + ): + self.module.run_backward(loss) + + @staticmethod + def init( + module_class: Type[FSDPTestModel], + *model_args: Any, + delay_after_loss_ms: int, + delay_before_reduction_ms: int, + **model_kwargs: Any, + ): + """ + Args: + module_class (Type[FSDPTestModel]): Wrapped module class to which + to add delays. + model_args: Positional arguments forwarded to the ``module_class`` + ``init()``. + delay_after_loss_ms (int): Delay after computing the loss/before + the optimizer step (in ms). + delay_before_reduction_ms (int): Delay before reduce-scattering + gradients (in ms). + model_kwargs: Keyword arguments forwarded to the ``module_class`` + ``init()``. + """ + return ModuleWithDelay( + module_class.init(*model_args, **model_kwargs), + delay_after_loss_ms, + delay_before_reduction_ms, + ) + + +class NestedWrappedModuleWithDelay(ModuleWithDelay): + @staticmethod + def init( # type: ignore[override] + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode = CUDAInitMode.CUDA_AFTER, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + delay_after_loss_ms: int = 0, + delay_before_reduction_ms: int = 0, + ): + return ModuleWithDelay.init( + NestedWrappedModule, + group=group, + fsdp_init_mode=fsdp_init_mode, + cuda_init_mode=cuda_init_mode, + fsdp_kwargs=fsdp_kwargs, + deterministic=deterministic, + delay_after_loss_ms=delay_after_loss_ms, + delay_before_reduction_ms=delay_before_reduction_ms, + ) + + +class DummyDDP(nn.Module): + def __init__(self, module): + super().__init__() + self.module = module + + def forward(self, *args, **kwargs): + return self.module(*args, **kwargs) + + +class MixtureOfExperts(NestedWrappedModule): + def __init__( + self, + group: dist.ProcessGroup, + wrap_fsdp: bool, + cuda_init_mode: CUDAInitMode, + delay_before_free_ms: int, + deterministic: bool, + **fsdp_kwargs, + ): + super().__init__( + group=group, + wrap_fsdp=wrap_fsdp, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + ) + self.group = group + self.delay_before_free_ms = delay_before_free_ms + self.wrap_fsdp = wrap_fsdp + self.move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE + if deterministic: + # Give each rank different expert parameters + torch.manual_seed(42 + self.rank) + d_expert = 23 + d_shared = 12 + d_input = 8 + expert = _maybe_cuda(nn.Linear(d_expert, d_shared), self.move_to_cuda) + + self.num_expert_params = sum([p.numel() for p in expert.parameters()]) + for p in expert.parameters(): + p.expert = True # type: ignore[attr-defined] + + if deterministic: + # Keep all other parameters the same across ranks + torch.manual_seed(0) + + shared = _maybe_cuda(nn.Linear(d_shared, d_expert), self.move_to_cuda) + + if wrap_fsdp: + # we create a process group of size 1 for the expert params + expert_group = torch.distributed.new_group( + [group.rank()] + ) # world size 1 means no shard + expert = FSDP(expert, expert_group, **fsdp_kwargs) # type: ignore[assignment] + shared = FSDP(shared, group, **fsdp_kwargs) # type: ignore[assignment] + + self.module = nn.Sequential( + _maybe_cuda(nn.Linear(d_input, d_shared), self.move_to_cuda), + shared, + expert, + _maybe_cuda(nn.Linear(d_shared, d_input), self.move_to_cuda), + ) + + def forward(self, x): + if self.delay_before_free_ms > 0: + expert = self.module[2] + if isinstance(expert, 
FSDP): + orig_reshard = torch.distributed.fsdp._runtime_utils._reshard + + def _delayed_reshard(*args, **kwargs): + torch.cuda._sleep( + int(self.delay_before_free_ms * get_cycles_per_ms()) + ) + return orig_reshard(*args, **kwargs) + + # This patch covers any `import torch..._reshard` uses. + with mock.patch( + "torch.distributed.fsdp._runtime_utils._reshard", _delayed_reshard + ): + return self.module(x) + + return self.module(x) + + def run_backward(self, loss): + loss.backward() + # Manually reduce gradients if not wrapped in FullyShardedDataParallel + if not self.wrap_fsdp: + with torch.no_grad(): + for p in self.parameters(): + if hasattr(p, "expert"): + continue # these params don't need grad reduction + if p.grad is not None: + p.grad.div_(self.world_size) + torch.distributed.all_reduce(p.grad, group=self.group) + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + delay_before_free_ms: int = 0, + ): + """ + Initializes a :class:`MixtureOfExperts` instance. + + Args: + fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap + any modules with FSDP. If ``RECURSIVE``, then wraps some nested + modules with FSDP, including the expert and shared layers, but + not the top-level module. The model may later be wrapped with a + top-level FSDP external to this method if desired. + cuda_init_mode (CUDAInitMode): Determines model movement to CUDA. + fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments + forwarded to the FSDP constructor. + deterministic (bool): Whether to make the model deterministic + across constructions. + delay_before_free_ms (int): Delay before resharding expert + parameters in the forward pass (in ms). + """ + if fsdp_kwargs is None: + fsdp_kwargs = {} + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + return MixtureOfExperts( + group, + wrap_fsdp=False, + cuda_init_mode=cuda_init_mode, + delay_before_free_ms=delay_before_free_ms, + deterministic=deterministic, + ) + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + # Does not wrap with top-level FSDP + fsdp_model = MixtureOfExperts( + group, + wrap_fsdp=True, + cuda_init_mode=cuda_init_mode, + delay_before_free_ms=delay_before_free_ms, + deterministic=deterministic, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + +class MLP(nn.Module): + def __init__( + self, + dim: int, + device: Optional[torch.device] = None, + with_buffer: bool = False, + dim_multiplier: int = 4, + ): + super().__init__() + self.in_proj = nn.Linear(dim, dim_multiplier * dim, device=device) + self.out_proj = nn.Linear(dim_multiplier * dim, dim, device=device) + if with_buffer: + self.register_buffer("buffer", torch.randn((dim,), device=device)) + else: + self.buffer = None + + def forward(self, x: torch.Tensor) -> torch.Tensor: + z = self.in_proj(x) + z = F.relu(z) + z = self.out_proj(z) + z = F.relu(z) + if self.buffer is not None: + z = z + self.buffer + return z + + def reset_parameters(self): + if self.buffer is not None: + torch.nn.init.normal_(self.buffer) + + +class DoubleLinear(nn.Module): + """ + This can be used for returning multiple outputs from a module + (``use_second_linear=True``) or for having an unused module (``False``). 
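# Minimal sketch (assumes an initialized process group) of the manual gradient
# averaging that ``MixtureOfExperts.run_backward`` performs when the model is
# not FSDP-wrapped: divide each gradient by the world size, then all-reduce,
# while skipping the rank-local "expert" parameters. The function name is
# illustrative only.
import torch
import torch.distributed as dist
import torch.nn as nn


def average_gradients(model: nn.Module, world_size: int) -> None:
    with torch.no_grad():
        for p in model.parameters():
            if getattr(p, "expert", False):
                continue  # expert params differ per rank and are not reduced
            if p.grad is not None:
                p.grad.div_(world_size)
                dist.all_reduce(p.grad)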
+ """ + + def __init__(self, dim: int, use_second_linear: bool = True): + super().__init__() + self.lin1 = nn.Linear(dim, dim) + self.lin2 = nn.Linear(dim, dim) + self.relu = nn.ReLU() + self.use_second_linear = use_second_linear + + def forward( + self, x: torch.Tensor + ) -> Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]: + if self.use_second_linear: + return self.relu(self.lin1(x)), self.relu(self.lin2(x)) + return self.relu(self.lin1(x)) + + +@contextlib.contextmanager +def patch_all_gather(new_all_gather_into_tensor: Callable): + orig_all_gather = dist.all_gather_into_tensor + dist.all_gather_into_tensor = new_all_gather_into_tensor + try: + yield + finally: + dist.all_gather_into_tensor = orig_all_gather + + +@contextlib.contextmanager +def patch_reduce_scatter(new_reduce_scatter_tensor: Callable): + orig_reduce_scatter = dist.reduce_scatter_tensor + dist.reduce_scatter_tensor = new_reduce_scatter_tensor + try: + yield + finally: + dist.reduce_scatter_tensor = orig_reduce_scatter + + +@no_type_check +@contextlib.contextmanager +def patch_unshard(new_unshard: Callable): + orig_unshard = FSDPParamGroup.unshard + FSDPParamGroup.unshard = new_unshard + try: + yield + finally: + FSDPParamGroup.unshard = orig_unshard + + +@no_type_check +@contextlib.contextmanager +def patch_post_backward(new_post_backward: Callable): + orig_post_backward = FSDPParamGroup.post_backward + FSDPParamGroup.post_backward = new_post_backward + try: + yield + finally: + FSDPParamGroup.post_backward = orig_post_backward + + +@no_type_check +@contextlib.contextmanager +def patch_register_post_backward_hook_backward(new_backward: Callable): + orig_backward = RegisterPostBackwardFunction.backward + RegisterPostBackwardFunction.backward = new_backward + try: + yield + finally: + RegisterPostBackwardFunction.backward = orig_backward + + +def reduce_scatter_with_assert( + cls, + orig_reduce_scatter: Callable, + assert_fn: Callable, # `assert_fn(output: Tensor)` + *args: Any, + **kwargs: Any, +): + if len(args) > 0: + output = args[0] + elif "output" in kwargs: + output = kwargs["output"] + else: + raise AssertionError( + f"Cannot get reduce-scatter output from\nargs: {args}\nkwargs: {kwargs}" + ) + assert_fn(output) + return orig_reduce_scatter(*args, **kwargs) + + +def check_sharded_parity( + cls, # unit test class + replicated_module: nn.Module, + sharded_module: nn.Module, + prefixes_to_ignore: Tuple[str, ...] 
= (), +): + for (replicated_name, replicated_param), (sharded_name, sharded_param) in zip( + replicated_module.named_parameters(), sharded_module.named_parameters() + ): + clean_sharded_name = sharded_name + for prefix in prefixes_to_ignore: + clean_sharded_name = clean_sharded_name.replace(prefix, "") + cls.assertEqual(replicated_name, clean_sharded_name) + cls.assertIsInstance(sharded_param, DTensor) + assert isinstance(sharded_param, DTensor) # mypy + mesh, placements = sharded_param.device_mesh, sharded_param.placements + if tuple(placements) == (Shard(0), Shard(0)): + raise AssertionError( + "FSDP's (Shard(0), Shard(0)) layout differs from distribute_tensor(), " + "so we cannot check for equality using it" + ) + sharded_ref_param = distribute_tensor(replicated_param, mesh, placements) + cls.assertEqual(sharded_param.to_local(), sharded_ref_param.to_local()) + if replicated_param.grad is None: + cls.assertIsNone(sharded_param.grad) + continue + cls.assertIsNotNone(sharded_param.grad) + sharded_ref_grad = distribute_tensor(replicated_param.grad, mesh, placements) + cls.assertIsInstance(sharded_param.grad, DTensor) + assert isinstance(sharded_param.grad, DTensor) # mypy + cls.assertEqual(sharded_param.grad.to_local(), sharded_ref_grad.to_local()) + + +def run_subtests( + cls_inst, + subtest_config: Dict[str, List[Any]], + test_fn: Callable, + *test_args, + **test_kwargs: Any, +): + """ + Runs a test function given by ``test_fn`` as a subtest according to the + configurations specified by ``subtest_config``. This amortizes the + costly setup overhead (including process spawn and initializing the + process group) over the subtests. + + Args: + subtest_config (Dict[str, List[Any]]): A mapping from subtest + keyword argument name to a list of its possible values. + test_fn (Callable): A callable that runs the actual test. + test_args: Positional arguments to pass to ``test_fn``. + test_kwargs: Keyword arguments to pass to ``test_fn``. 
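# A hypothetical, self-contained illustration of the subtest expansion that
# ``run_subtests`` performs: the Cartesian product of the config values is run
# as individual ``unittest`` subtests (the real helper additionally calls
# ``dist.barrier()`` after each combination). The test class and config keys
# below are made up for illustration.
import itertools
import unittest


class ExampleSubtests(unittest.TestCase):
    def test_combinations(self):
        subtest_config = {"use_orig_params": [False, True], "num_iters": [1, 2]}
        keys = list(subtest_config.keys())
        for values in itertools.product(*subtest_config.values()):
            subtest_kwargs = dict(zip(keys, values))
            with self.subTest(**subtest_kwargs):
                self.assertGreaterEqual(subtest_kwargs["num_iters"], 1)


if __name__ == "__main__":
    unittest.main()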
+ """ + # Convert the config mapping to a list to have a fixed order + subtest_config_items: List[Tuple[str, List[Any]]] = list(subtest_config.items()) + subtest_config_keys: List[str] = [item[0] for item in subtest_config_items] + subtest_config_values: List[List[Any]] = [item[1] for item in subtest_config_items] + for values in itertools.product(*subtest_config_values): + # Map keyword to chosen value + subtest_kwargs = dict(zip(subtest_config_keys, values)) + with cls_inst.subTest(**subtest_kwargs): + test_fn(*test_args, **test_kwargs, **subtest_kwargs) + dist.barrier() + + +class FSDPTestMultiThread(MultiThreadedTestCase): + @property + def world_size(self): + return torch.cuda.device_count() if torch.cuda.is_available() else 4 + + def setUp(self): + super().setUp() + self._spawn_threads() + + def run_subtests(self, *args, **kwargs): + return run_subtests(self, *args, **kwargs) + + +class FSDPTest(MultiProcessTestCase): + def setUp(self): + super().setUp() + # Set TORCH_NCCL_DESYNC_DEBUG=0 to disable the NCCL `workCleanupLoop()`, + # which can cause unit test flakiness: + # https://github.com/pytorch/pytorch/issues/90848 + os.environ["TORCH_NCCL_DESYNC_DEBUG"] = "0" + self._spawn_processes() + + @property + def world_size(self): + return min(torch.cuda.device_count(), 8) if torch.cuda.is_available() else 4 + + @property + def process_group(self): + return dist.distributed_c10d._get_default_group() + + @property + def init_method(self): + return f"{FILE_SCHEMA}{self.file_name}" + + def _check_cpu_offload(self, fsdp_model, cpu_offload): + self.assertEqual(cpu_offload, fsdp_model.cpu_offload) + + def _check_backward_prefetch(self, fsdp_model, backward_prefetch): + self.assertEqual(backward_prefetch, fsdp_model.backward_prefetch) + + def _check_forward_prefetch(self, fsdp_model, forward_prefetch): + self.assertEqual(forward_prefetch, fsdp_model.forward_prefetch) + + def run_subtests(self, *args, **kwargs): + return run_subtests(self, *args, **kwargs) + + @classmethod + def _run(cls, rank, test_name, file_name, pipe): + self = cls(test_name) + self.rank = rank + self.file_name = file_name + + print(f"dist init r={self.rank}, world={self.world_size}") + + # Specify gloo backend to make 'init_process_group()' succeed, + # Actual tests will be skipped if there is no enough GPUs. + backend = "nccl" if torch.cuda.is_available() else "gloo" + + try: + dist.init_process_group( + init_method=self.init_method, + backend=backend, + world_size=int(self.world_size), + rank=self.rank, + ) + except RuntimeError as e: + if "recompile" in e.args[0]: + sys.exit(TEST_SKIPS["backend_unavailable"].exit_code) + + raise + + if torch.cuda.is_available() and torch.cuda.device_count(): + torch.cuda.set_device(self.rank % torch.cuda.device_count()) + + # Execute barrier prior to running test to ensure that every process + # has finished initialization and that the following test + # immediately exiting due to a skip doesn't cause flakiness. 
+ dist.barrier() + + self.run_test(test_name, pipe) + + dist.barrier() + + dist.destroy_process_group() + + def _train_for_several_steps( + self, + model: nn.Module, + num_steps: int, + autocast: bool, + lr: float = 0.01, + fsdp_cpu_offload: Optional[CPUOffload] = None, + save_model: bool = False, + mixed_precision: Optional[MixedPrecision] = None, + enable_sharded_grad_scaler: bool = False, + use_pure_fp16: bool = False, + sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None, + ): + cpu_offload_params = fsdp_cpu_offload and fsdp_cpu_offload.offload_params + + model_device = next(model.parameters()).device + if sharded_grad_scaler_kwargs is None: + sharded_grad_scaler_kwargs = {} + sharded_grad_scaler = ShardedGradScaler( + enabled=enable_sharded_grad_scaler, **sharded_grad_scaler_kwargs + ) + # use SGD with momentum instead of Adam, since Adam is scale invariant + # and this makes it bad for tests + optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9) + for _ in range(num_steps): + optim.zero_grad() + with torch.cuda.amp.autocast(enabled=autocast): + # Inputs always cuda regardless of cpu offloading, or model.device + input = model.module.get_input(torch.device("cuda")) + if use_pure_fp16 or (mixed_precision and not isinstance(model, FSDP)): + if isinstance(input, torch.Tensor): + input = input.half() + else: + input = tuple(x.half() for x in input) + output = model(*input) + # Post-forward, if CPU offloading model param should be on CPU. + if ( + cpu_offload_params + and isinstance(model, FSDP) + # If not resharding after forward, the parameters are still + # exposed as unsharded views into the GPU flat parameter + and model.sharding_strategy + not in NO_RESHARD_AFTER_FORWARD_STRATEGIES + ): + for p in model.parameters(): + # Params should always be on CPU + self.assertEqual(p.device, torch.device("cpu")) + + loss = model.module.get_loss(input, output).to(model_device) + loss = sharded_grad_scaler.scale(loss) + + if not mixed_precision and not use_pure_fp16: + assert ( + loss.dtype == torch.float32 + ), "loss data type should be float32, as the original \ + parameter data type is float32." + else: + if use_pure_fp16: + self.assertEqual(loss.dtype, torch.float16) + # FSDP loss is fp16, DDP AMP loss is fp32 + elif isinstance(model, FSDP): + assert mixed_precision is not None # mypy + self.assertEqual(loss.dtype, mixed_precision.param_dtype) + else: + self.assertEqual(loss.dtype, torch.float32) + model.module.run_backward(loss) + # Post-backward, if CPU offloading model params should be on CPU. + if cpu_offload_params and isinstance(model, FSDP): + for p in model.parameters(): + # Params should always be on CPU + self.assertEqual(p.device, torch.device("cpu")) + # Unscale the gradients and step + sharded_grad_scaler.step(optim) + # Update the scale factor + sharded_grad_scaler.update() + # if save_model, simulate save + load. + if save_model: + state_dict = {k: v.clone() for k, v in model.state_dict().items()} + # Zero params, if save/load state_dict did not work properly, this + # would break the parity test with DDP. 
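# Standalone sketch of the save/zero/load round trip performed at this point
# (when ``save_model=True``) to check that ``state_dict()`` followed by
# ``load_state_dict()`` really restores the parameters. Runs on CPU; the tiny
# model is illustrative only.
import torch
import torch.nn as nn

model = nn.Linear(4, 4)
saved = {k: v.clone() for k, v in model.state_dict().items()}
with torch.no_grad():
    for p in model.parameters():
        p.zero_()  # simulate losing the weights
model.load_state_dict(saved)
for name, p in model.named_parameters():
    assert torch.equal(p, saved[name])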
+ _zero_model(model) + model.load_state_dict(state_dict) + + if isinstance(model, FSDP): + model._assert_state(TrainingState.IDLE) + return loss.detach() # type: ignore[possibly-undefined] + + def _test_fsdp_parity( + self, + model_class: Type[FSDPTestModel], + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + ref_init_fn: Optional[Callable] = None, + num_iters: int = 2, + save_model: bool = True, + cpu_offload: CPUOffload = CPUOffload(), + backward_prefetch: Optional[BackwardPrefetch] = None, + sharding_strategy: Optional[ShardingStrategy] = None, + mixed_precision: Optional[MixedPrecision] = None, + forward_prefetch: bool = False, + use_orig_params: bool = False, + enable_sharded_grad_scaler: bool = False, + use_pure_fp16: bool = False, + init_kwargs: Optional[Dict[str, Any]] = None, + sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None, + **fsdp_kwargs, + ): + """ + Tests FSDP training against a reference, which defaults to DDP but + may be customized with ``ref_init_fn``. + + Args: + model_class (Type[FSDPTestModel]): A model class that inherits from + ``FSDPTestModel``, which defines the expected interface. + fsdp_init_mode (FSDPInitMode): The mode to initialize the + FSDP-wrapped model. This should not be ``NO_FSDP``. + ref_init_fn (Optional[Callable]): A callable to invoke that wraps a + non-wrapped model to construct the reference model, where this + wrapper should provide data parallel semantics. If ``None``, + then the callable defaults to the DDP constructor. + """ + assert ( + fsdp_init_mode != FSDPInitMode.NO_FSDP + ), "Expects an FSDP init mode that wraps with FSDP" + if init_kwargs is None: + init_kwargs = {} + lr = 1e-2 + rank = self.process_group.rank() + # Establish reference behavior with DDP + model = model_class.init( + self.process_group, + FSDPInitMode.NO_FSDP, + CUDAInitMode.CUDA_BEFORE, + deterministic=True, + **init_kwargs, + ) + if ref_init_fn is None: + ref_model = DDP(model, device_ids=[rank], output_device=rank) + else: + ref_model = ref_init_fn(model) + if use_pure_fp16: + ref_model = ref_model.half() + ref_loss = self._train_for_several_steps( + ref_model, + num_iters, + autocast=mixed_precision is not None, + lr=lr, + fsdp_cpu_offload=cpu_offload, + mixed_precision=mixed_precision, + enable_sharded_grad_scaler=enable_sharded_grad_scaler, + use_pure_fp16=use_pure_fp16, + sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs, + ) + ddp_params = list(ref_model.parameters()) + # Check against FSDP behavior + fsdp_kwargs.update( + { + "cpu_offload": cpu_offload, + "backward_prefetch": backward_prefetch, + "sharding_strategy": sharding_strategy, + "mixed_precision": mixed_precision, + "forward_prefetch": forward_prefetch, + "use_orig_params": use_orig_params, + } + ) + try: + fsdp_model = model_class.init( + self.process_group, + fsdp_init_mode, + cuda_init_mode, + fsdp_kwargs, + deterministic=True, + **init_kwargs, + ) + except Exception as e: + raise ValueError(f"Initializing {model_class} raised error {str(e)}") from e + if not isinstance(fsdp_model, FSDP): + # Enforce that we wrap with top-level FSDP since we are comparing + # assuming a data parallel reference and some test models may not + # do so in their `init()` method + fsdp_model = FSDP(fsdp_model, self.process_group, **fsdp_kwargs) + if use_pure_fp16: + # Change the model parameter dtype after FSDP initialization + fsdp_model = fsdp_model.half() + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + offload_params = cpu_offload is not None and 
cpu_offload.offload_params + # Offloading parameters with `CUDA_AFTER` should raise an error during + # lazy initialization due to the parameter devices not being CPU; + # otherwise, all parameter devices should be CPU + expects_device_error = ( + offload_params and cuda_init_mode == CUDAInitMode.CUDA_AFTER + ) + expects_cpu_device = ( + offload_params and cuda_init_mode != CUDAInitMode.CUDA_AFTER + ) + if expects_cpu_device: + cpu_device = torch.device("cpu") + for param in fsdp_model.parameters(): + self.assertEqual(param.device, cpu_device) + context = ( + self.assertRaisesRegex( + RuntimeError, + "An FSDP-managed module with parameter CPU offloading enabled " + "has parameters on cuda", + ) + if expects_device_error + else nullcontext() + ) + with context: + fsdp_loss = self._train_for_several_steps( + fsdp_model, + num_iters, + autocast=False, + lr=lr, + fsdp_cpu_offload=cpu_offload, + save_model=save_model, + mixed_precision=mixed_precision, + enable_sharded_grad_scaler=enable_sharded_grad_scaler, + use_pure_fp16=use_pure_fp16, + sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs, + ) + # No need to check for parameter and loss parity if expecting an error + if expects_device_error: + return + # Check parameter devices are CPU if offloading to CPU before calling + # `get_full_params()`, which will cast the parameters to FP32 + if offload_params: + cpu_device = torch.device("cpu") + for param in fsdp_model.parameters(): + self.assertEqual(param.device, cpu_device) + fsdp_loss = fsdp_loss.cuda() + fsdp_unsharded_params = get_full_params(fsdp_model) + # Do not check dtype since the reference DDP loss may not be the same + # dtype as the FSDP loss in the case of mixed precision + torch.testing.assert_close(ref_loss, fsdp_loss, check_dtype=False) + # Do not check for parameter parity if using mixed precision since (1) + # the DDP parameters are in FP16 (from `half()`) while the FSDP + # parameters are in FP32 (from `summon_full_params()`) and (2) DDP runs + # the optimizer in FP16 while FSDP runs it in FP32 + # TODO: Disable checking the parameters for pure FP16 due to floating + # point inaccuracy. 
Note that this means that the backward pass is not + # checked: https://github.com/pytorch/pytorch/issues/90784 + if mixed_precision is None and not use_pure_fp16: + self.assertEqual( + ddp_params, + fsdp_unsharded_params, + exact_device=True, + msg="FSDP did not match DDP", + ) + + +def test_compiled_fsdp(compile_compute_on_module: Optional[type] = None): + def fully_shard_with_compiled_compute(*args, **kwargs): + # compile ``module._call_impl`` + # to showcase how to include user-registered hooks + if compile_compute_on_module is None or isinstance( + args[0], compile_compute_on_module + ): + args[0].compile() + return torch.distributed._composable.fsdp.fully_shard(*args, **kwargs) # type: ignore[operator] + + class FullyShardPatch(Enum): + # apply ``partial`` in order to use ``Enum.value`` + EAGER = partial(torch.distributed._composable.fsdp.fully_shard) # type: ignore[var-annotated, arg-type] + COMPILED_COMPUTE = partial(fully_shard_with_compiled_compute) # type: ignore[arg-type] + # add FULL for tracing FSDP + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + original_fully_shard = torch.distributed._composable.fsdp.fully_shard + for fully_shard_patch in FullyShardPatch: + if fully_shard_patch != FullyShardPatch.EAGER and not has_triton(): + warnings.warn("Inductor on GPU needs Triton and recent GPU arch") + continue + imported_fully_shard = ( + f"{func.__module__}.{original_fully_shard.__name__}" + ) + with mock.patch( + imported_fully_shard, + fully_shard_patch.value, + ): + func(*args, **kwargs) + torch.distributed.barrier() + # mock.patch.__exit__ does not work with multi-thread + # thread 1 set {func.__module__}.fully_shard + # thread 2 read {func.__module__}.fully_shard and thought it is original + # hence we manually reset them after __exit__ + import_path, _ = mock._get_target(imported_fully_shard) # type: ignore[attr-defined] + setattr( + import_path(), original_fully_shard.__name__, original_fully_shard + ) + + return wrapper + + return decorator + + +class SkipModule(nn.Module): + def __init__(self): + super().__init__() + self.lin = nn.Linear(10, 10, bias=False) + + def forward(self, x): + return self.lin(x) + + +class NestedLinear(nn.Module): + def __init__(self, fsdp_wrap): + super().__init__() + if fsdp_wrap: + self.nested_linear = wrap(nn.Linear(10, 10, bias=False).cuda()) + else: + self.nested_linear = nn.Linear(10, 10, bias=False).cuda() + + def forward(self, x): + return self.nested_linear(x) + + +class SkipModel(nn.Module): + def __init__(self, double_nest): + super().__init__() + self.linear = nn.Linear(10, 10, bias=False).cuda() + self.linear_skip = SkipModule().cuda() + self.nested_linear = wrap(NestedLinear(fsdp_wrap=double_nest)) + + def forward(self, x): + x = self.linear(x) + x = self.linear_skip(x) + x = self.nested_linear(x) + return x diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..8a676c7e16c780b52f156cece3a7b7347c6f465b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py @@ -0,0 +1,323 @@ +# mypy: ignore-errors + +# Torch +import torch +import torch.cuda +import torch.jit +import torch.jit._logging +import torch.jit.frontend +import torch.jit.quantized + +# Testing utils +from torch.testing._internal.common_dtype import floating_and_complex_types_and +from torch.testing._internal.common_utils import TestCase, \ + 
freeze_rng_state, TemporaryFileName, enable_profiling_mode_for_profiling_tests, is_iterable_of_tensors +from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401 + +# Standard library +from itertools import chain +from typing import List, Union +from torch._C import TensorType + +import io + +def check_output_types(self, func, ref_outputs, args, kwargs): + graph = getattr(func, 'last_graph', None) + types = [o.type() for o in graph.outputs()] + self.assertTrue(len(types) == 1) + t = types[0] + torch._C._jit_assert_is_instance(ref_outputs, t) + +# Test names in this set are only checked for a single derivative +nn_functional_single_grad = frozenset('test_nn_' + name for name in [ + 'pdist', + 'multilabel_margin_loss', + 'max_unpool3d', + 'multi_margin_loss', + 'binary_cross_entropy', + 'binary_cross_entropy_size_average', + 'ctc_loss', + 'grid_sample', +]) + +def check_against_reference(self, func, reference_func, output_func, args, kwargs=None, + allow_unused=True, check_types=True, no_grad=False, no_gradgrad=False): + """Verifies a function performs identically to some reference implementation. + + Commonly, this is used to verify that a JIT implementation + (output_func) matches the behavior of the eager implementation + (reference_func). + """ + kwargs = kwargs if kwargs else {} + + def allSum(vs): + if isinstance(vs, torch.Tensor): + vs = (vs,) + return sum((i + 1) * v.sum().abs() if v.dtype.is_complex else (i + 1) * v.sum() + for i, v in enumerate(vs) + if v is not None and v.dtype in floating_and_complex_types_and(torch.half, torch.bfloat16)) + + def clone_tensor(t, preserve_requires_grad): + require_grad = preserve_requires_grad and t.requires_grad + return t.detach().clone().requires_grad_(require_grad) + + def clone_inputs(preserve_requires_grad: bool): + inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = [] + + for arg in args: + if isinstance(arg, torch.Tensor): + inputs.append(clone_tensor(arg, preserve_requires_grad)) + elif is_iterable_of_tensors(arg): + inputs.append([clone_tensor(t, preserve_requires_grad) for t in arg]) + else: + inputs.append(arg) + + return inputs + + # Returns tensors in args that requires_grad, including tensors in TensorList args + def get_recording_tensors(args): + recording_tensors: List[torch.Tensor] = [] + + for arg in args: + if isinstance(arg, torch.Tensor) and arg.requires_grad: + recording_tensors.append(arg) + elif is_iterable_of_tensors(arg): + recording_tensors.extend(filter(lambda t: t.requires_grad, arg)) + + return recording_tensors + + # test no gradients case + nograd_inputs = clone_inputs(preserve_requires_grad=False) + outputs = self.runAndSaveRNG(reference_func, nograd_inputs, kwargs) + with enable_profiling_mode_for_profiling_tests(): + outputs_test = self.runAndSaveRNG(func, nograd_inputs, kwargs) + self.assertEqual(outputs, outputs_test) + + if check_types: + check_output_types(self, func, outputs_test, nograd_inputs, kwargs) + + if no_grad: + # skip grad tests + return + + with enable_profiling_mode_for_profiling_tests(): + # test single grad case + recording_inputs = clone_inputs(preserve_requires_grad=True) + recording_tensors = get_recording_tensors(recording_inputs) + outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs)) + grads = torch.autograd.grad(allSum(outputs), recording_tensors, + allow_unused=allow_unused) + outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs)) + grads_test = torch.autograd.grad(allSum(outputs_test), 
recording_tensors, + allow_unused=allow_unused) + self.assertEqual(outputs, outputs_test) + self.assertEqual(grads, grads_test) + # test the grad grad case + if self._testMethodName in nn_functional_single_grad or no_gradgrad: + return + + outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs)) + l1 = allSum(outputs) + grads = torch.autograd.grad(l1, recording_tensors, create_graph=True, + allow_unused=allow_unused) + + l2 = (allSum(grads) * l1) + grads2 = torch.autograd.grad(l2, recording_tensors, allow_unused=allow_unused) + recording_inputs = clone_inputs(preserve_requires_grad=True) + recording_tensors = get_recording_tensors(recording_inputs) + outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs)) + l1_test = allSum(outputs_test) + grads_test = torch.autograd.grad( + l1_test, recording_tensors, create_graph=True, allow_unused=allow_unused) + + l2_test = (allSum(grads_test) * l1_test) + grads2_test = torch.autograd.grad(l2_test, recording_tensors, allow_unused=allow_unused) + + self.assertEqual(outputs, outputs_test) + self.assertEqual(grads, grads_test) + for g2, g2_test in zip(grads2, grads2_test): + if g2 is None and g2_test is None: + continue + self.assertEqual(g2, g2_test, atol=5e-4, rtol=1e-4) + +class JitCommonTestCase(TestCase): + def createFunctionFromGraph(self, trace): + graph = trace if isinstance(trace, torch._C.Graph) else trace.graph() + return torch._C._create_function_from_graph("forward", graph) + + def assertExportImport(self, trace, inputs): + m = self.createFunctionFromGraph(trace) + self.assertExportImportModule(m, inputs) + + def assertExportImportModule(self, m, inputs): + m_import = self.getExportImportCopy(m) + a = self.runAndSaveRNG(m, inputs) + b = self.runAndSaveRNG(m_import, inputs) + self.assertEqual(a, b, "Results of original model and " + "exported/imported version of model differed") + + def runAndSaveRNG(self, func, inputs, kwargs=None): + kwargs = kwargs if kwargs else {} + with freeze_rng_state(): + results = func(*inputs, **kwargs) + return results + + def getExportImportCopy(self, m, also_test_file=True, map_location=None): + buffer = io.BytesIO() + torch.jit.save(m, buffer) + buffer.seek(0) + imported = torch.jit.load(buffer, map_location=map_location) + + if not also_test_file: + return imported + + with TemporaryFileName() as fname: + torch.jit.save(imported, fname) + return torch.jit.load(fname, map_location=map_location) + + def autoDiffErrorMessage(self, should_autodiff_node, nodes_not_in_diff_graph, + fusion_nodes_not_found, non_fusible_nodes_being_fused, + fusion_nodes_found, nodes_in_diff_graph): + err_msg = "\nFailure in testing nodes' autodifferentiation. " + if should_autodiff_node: + err_msg += "One or more nodes were expected to be autodiffed, " \ + "but were not found in specified fusible/nonfusible " \ + "DifferentiableGraph groups. 
\nSpecifically:" + # The node is intended to appear in a differentiable graph but doesn't + diff_nodes_missing = [] + # The node is intended to appear in a differentiable graph + # outside of a fusion group but instead is in a fusion group + diff_nodes_in_fusion = [] + # The node is intended to appear in a fusion group but doesn't + fusion_nodes_missing = [] + # The node is intended to appear in a fusion group but instead + # is just in an outer differentiable graph + fusion_nodes_in_diff = [] + for node in nodes_not_in_diff_graph: + if node in non_fusible_nodes_being_fused: + diff_nodes_in_fusion.append(node) + else: + diff_nodes_missing.append(node) + for node in fusion_nodes_not_found: + if node in nodes_in_diff_graph: + fusion_nodes_in_diff.append(node) + else: + fusion_nodes_missing.append(node) + if len(diff_nodes_missing) > 0: + err_msg += f"\n {diff_nodes_missing} were not in one of the " \ + "DifferentiableGraphs when they were expected to be. " \ + "Did you intend for these nodes to be autodiffed? " \ + "If not, remove them from the list of nonfusible nodes." + if len(diff_nodes_in_fusion) > 0: + err_msg += f"\n {diff_nodes_in_fusion} were found in one of the FusionGroups " \ + "when they were expected to be just in a DifferentiableGraph. If it was " \ + "intended for these nodes to be in FusionGroups, reclassify these nodes as " \ + "fusible nodes. If these nodes were not intended to be fused, your " \ + "autodifferentiation logic might be wrong." + if len(fusion_nodes_missing) > 0: + err_msg += f"\n {fusion_nodes_missing} were not in one of the FusionGroups " \ + "of the DifferentiableGraphs when they were expected to be. " \ + "They were also not found in an outer DifferentiableGraph. Did you " \ + "intend for these nodes to be autodifferentiated? If not, you should " \ + "remove these nodes from the test's fusible nodes. Otherwise your " \ + "autodifferentiation logic might be wrong." + if len(fusion_nodes_in_diff) > 0: + err_msg += f"\n {fusion_nodes_in_diff} were not in one of the FusionGroups " \ + "of the DifferentiableGraphs when they were expected to be, " \ + "instead they were found just in an outer DifferentiableGraph. " \ + "Did you intend for these nodes to be fused? If not, you should " \ + "move these nodes into the test's nonfusible nodes. Otherwise your " \ + "autodifferentiation logic might be wrong." + else: + err_msg += "One or more nodes were not expected to be autodiffed " \ + "but were found in a DifferentiableGraph or in a FusionGroup " \ + "of a DifferentiableGraph. Did you intend for these nodes to be " \ + "autodiffed? If so, change this test to expect autodifferentiation. " \ + "\nSpecifically:" + if len(fusion_nodes_found) > 0: + err_msg += f"\n {fusion_nodes_found} were not expected to be in " \ + "one of the DifferentiableGraphs, but appeared in a FusionGroup " \ + "of a DifferentiableGraph. " + if len(nodes_in_diff_graph) > 0: + err_msg += f"\n {nodes_in_diff_graph} were not expected to " \ + "be in one of the DifferentiableGraphs but were." 
+ return err_msg + + def assertAutodiffNode(self, graph, should_autodiff_node, nonfusible_nodes, fusible_nodes): + diff_nodes = graph.findAllNodes('prim::DifferentiableGraph') + diff_subgraphs = [node.g('Subgraph') for node in diff_nodes] + + # Note: currently no tests have fusible_nodes + fusion_nodes = list(chain.from_iterable([g.findAllNodes('prim::FusionGroup') for g in diff_subgraphs])) + fusion_subgraphs = [node.g('Subgraph') for node in fusion_nodes] + + # For any non-fusible node, it must show up in one of the DifferentiableGraphs. + nodes_in_diff_graph = [] + nodes_not_in_diff_graph = [] + non_fusible_nodes_being_fused = [] + for node in nonfusible_nodes: + if any(g.findNode(node) is not None for g in diff_subgraphs): + nodes_in_diff_graph.append(node) + else: + nodes_not_in_diff_graph.append(node) + if any(g.findNode(node) is not None for g in fusion_subgraphs): + non_fusible_nodes_being_fused.append(node) + found_all_nonfusible_nodes = len(nodes_in_diff_graph) == len(nonfusible_nodes) + + # For any fusible node, it must show up in one of the FusionGroups in one of the DifferentiableGraphs. + fusion_nodes_found = [] + fusion_nodes_not_found = [] + for node in fusible_nodes: + if any(g.findNode(node) is not None for g in fusion_subgraphs): + fusion_nodes_found.append(node) + else: + fusion_nodes_not_found.append(node) + found_all_fusible_nodes = len(fusion_nodes_found) == len(fusible_nodes) + + if should_autodiff_node is not None: + err_msg = self.autoDiffErrorMessage(should_autodiff_node, + nodes_not_in_diff_graph, + fusion_nodes_not_found, + non_fusible_nodes_being_fused, + fusion_nodes_found, + nodes_in_diff_graph) + self.assertEqual(should_autodiff_node, + found_all_nonfusible_nodes and found_all_fusible_nodes, err_msg) + + def checkShapeAnalysis(self, out_sizes: Union[List[int], List[List[int]]], + traced_graph, assert_propagation, constant_prop=True): + # repropagte input shapes provided by tracing, + prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled() + for enable_test_mode in [True, False]: + # here we are testing allowing/disallowing substituting in complete shapes as constants, + # disallowing constants helps stress test partial eval and substitution pipeline + torch._C._jit_set_symbolic_shapes_test_mode(enable_test_mode) + torch._C._jit_erase_non_input_shape_information(traced_graph) + if constant_prop: + torch._C._jit_pass_constant_propagation(traced_graph) + torch._C._jit_pass_propagate_shapes_on_graph(traced_graph) + # Add sizes to default tensor type to avoid checking something out of scope + # and difficulties with tracer leaving in other parts of tensor type + output = next(traced_graph.outputs()).type() + + def test_type(type, actual_size): + sizes = type.symbolic_sizes() + out_type = TensorType.get().with_sizes(sizes) + actual_type = TensorType.get().with_sizes(actual_size) + + # always check actual shape is a subtype of the output + self.assertTrue(actual_type.isSubtypeOf(out_type)) + + # and then if assertion flag is provided, check shape analysis + # is successful + if assert_propagation: + self.assertEqual(out_type.sizes(), actual_size) + + if output.isSubtypeOf(torch._C.TensorType.get()): + test_type(output, out_sizes) + else: + tuple_elements = output.elements() + for i in range(len(tuple_elements)): + test_type(tuple_elements[i], out_sizes[i]) + + torch._C._jit_set_symbolic_shapes_test_mode(prev_symbolic_shapes_test_enabled) diff --git 
a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py new file mode 100644 index 0000000000000000000000000000000000000000..630f8509918e2f6f4e93c5a33d6c3a2caff9be06 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py @@ -0,0 +1,22742 @@ +# mypy: ignore-errors + +from functools import wraps, partial +from itertools import product, chain, islice +import itertools +import functools +import copy +import operator +import random +import unittest +import math +import enum + +import torch +import numpy as np +from torch import inf, nan + +from typing import Any, Dict, List, Tuple, Union, Sequence +from torch.testing import make_tensor +from torch.testing._internal.common_dtype import ( + _dispatch_dtypes, floating_types, floating_types_and, complex_types, floating_and_complex_types, + floating_and_complex_types_and, all_types_and_complex_and, all_types_and, all_types_and_complex, integral_types_and, + all_types, empty_types, complex_types_and, integral_types, custom_types +) +from torch.testing._internal.common_device_type import \ + (onlyCPU, onlyCUDA, onlyNativeDeviceTypes, disablecuDNN, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver, + skipCUDAIfNoCusolver, skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIf, precisionOverride, + skipCPUIfNoMklSparse, + toleranceOverride, tol) +from torch.testing._internal.common_cuda import ( + PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_FUSED_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, + SM53OrLater, SM80OrLater, SM90OrLater, with_tf32_off, TEST_CUDNN, _get_torch_cuda_version, + _get_torch_rocm_version, +) +from torch.testing._internal.common_utils import ( + make_fullrank_matrices_with_distinct_singular_values, + TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY, + torch_to_numpy_dtype_dict, TEST_WITH_ASAN, + GRADCHECK_NONDET_TOL, freeze_rng_state, slowTest, TEST_WITH_SLOW, + TEST_WITH_TORCHINDUCTOR +) + +import torch._refs as refs # noqa: F401 +import torch._refs.nn.functional +import torch._refs.special +import torch._refs.linalg +import torch._prims as prims # noqa: F401 +from torch.utils import _pytree as pytree + + +from packaging import version + +from torch.testing._internal.opinfo.core import ( # noqa: F401 + L, + M, + S, + XS, + _NOTHING, + _getattr_qual, + DecorateInfo, + SampleInput, + ErrorInput, + AliasInfo, + NumericsFilter, + OpInfo, + _generate_reduction_inputs, + _generate_reduction_kwargs, + sample_inputs_reduction, + ReductionOpInfo, + reference_inputs_elementwise_binary, + make_error_inputs_elementwise_binary, + generate_elementwise_binary_tensors, + generate_elementwise_binary_arbitrarily_strided_tensors, + generate_elementwise_binary_small_value_tensors, + generate_elementwise_binary_large_value_tensors, + generate_elementwise_binary_extremal_value_tensors, + generate_elementwise_binary_broadcasting_tensors, + generate_elementwise_binary_with_scalar_samples, + generate_elementwise_binary_with_scalar_and_type_promotion_samples, + generate_elementwise_binary_noncontiguous_tensors, + sample_inputs_elementwise_binary, + BinaryUfuncInfo, + sample_inputs_elementwise_unary, + generate_elementwise_unary_tensors, + generate_elementwise_unary_small_value_tensors, + generate_elementwise_unary_large_value_tensors, + generate_elementwise_unary_extremal_value_tensors, + reference_inputs_elementwise_unary, + UnaryUfuncInfo, + sample_inputs_spectral_ops, + 
SpectralFuncType, + SpectralFuncInfo, + ShapeFuncInfo, + sample_inputs_foreach, + ForeachFuncInfo, + gradcheck_wrapper_hermitian_input, + gradcheck_wrapper_triangular_input, + gradcheck_wrapper_triangular_input_real_positive_diagonal, + gradcheck_wrapper_masked_operation, + gradcheck_wrapper_masked_pointwise_operation, + clone_sample, +) +from torch.testing._internal.opinfo.refs import ( # NOQA: F401 + _find_referenced_opinfo, + _inherit_constructor_args, + PythonRefInfo, + ReductionPythonRefInfo, + ElementwiseUnaryPythonRefInfo, + ElementwiseBinaryPythonRefInfo, +) +from torch.testing._internal.opinfo.utils import ( + np_unary_ufunc_integer_promotion_wrapper, + reference_reduction_numpy, + prod_numpy +) +from torch.testing._internal import opinfo +from torch.testing._internal.opinfo.definitions.linalg import ( + sample_inputs_linalg_cholesky, + sample_inputs_linalg_cholesky_inverse, + sample_inputs_cross, + sample_inputs_linalg_qr_geqrf, + sample_inputs_linalg_invertible, + sample_inputs_lu_solve, + sample_inputs_legacy_solve, + sample_inputs_svd, + sample_inputs_linalg_det_logdet_slogdet, + sample_inputs_linalg_lu, + sample_inputs_diagonal_diag_embed, + error_inputs_diagonal_diag_embed, +) +from torch.testing._internal.opinfo.definitions.special import ( + sample_inputs_i0_i1, + sample_inputs_polygamma, + reference_polygamma, +) +from torch.testing._internal.opinfo.definitions._masked import ( + sample_inputs_softmax_variant, +) +from torch.testing._internal.opinfo.definitions.sparse import ( + error_inputs_sparse_like_fns, + sample_inputs_sparse_like_fns, + error_inputs_sparse_mul, + sample_inputs_sparse_mul, + error_inputs_sparse_reduction_sum, + sample_inputs_sparse_reduction_sum +) + +if TEST_SCIPY: + from scipy import stats + import scipy.spatial + import scipy.special + + +# test if a tensor is close to an integer +def close_to_int(x, eps=0.1): + if x.is_complex(): + y = torch.abs(torch.view_as_complex(torch.frac(torch.view_as_real(x)))) + else: + y = torch.abs(torch.frac(x)) + return (y < eps) | (y > (1 - eps)) + + +def sample_inputs_slice(op_info, device, dtype, requires_grad, **kwargs): + + make_input = partial(make_tensor, device=device, dtype=dtype, + low=None, high=None, requires_grad=requires_grad) + + yield SampleInput(make_input(3), 0) + + yield SampleInput(make_input(20, 30, 40), dim=1, start=1, end=-2) + + yield SampleInput(make_input(20, 30, 40), dim=1, start=1, end=-2, step=3) + + yield SampleInput(make_input(20, 30, 40), dim=0, start=-10, end=-2, step=2) + + +def sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, + low=None, high=None, requires_grad=requires_grad) + + args_cases = ( + # Cases with tensor indices. + (torch.tensor([1, 2, 3]),), + (torch.tensor(1),), + (torch.tensor([1, 2, 3]), 1), + (torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1), + # Cases with list of indices. + ((2, 4),), + ((2, 4), 1), + ((2, 4), -1), + # Cases with integer section. 
+ (3,), + (3, 1), + (3, -1), + ) + + for args in args_cases: + yield SampleInput(make_input((S, S, S)), args=args) + + +def sample_inputs_hsplit(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(6), 2) + yield SampleInput(make_arg(S, S, S), [1, 2, 3]) + +def sample_inputs_vsplit(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(6, S), 2) + yield SampleInput(make_arg(S, S, S), [1, 2, 3]) + +def sample_inputs_dsplit(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(S, S, S), [1, 2, 3]) + yield SampleInput(make_arg(S, S, 6), 2) + +def error_inputs_hsplit(op_info, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + err_msg1 = ("torch.hsplit requires a tensor with at least 1 dimension, " + "but got a tensor with 0 dimensions!") + yield ErrorInput(SampleInput(make_arg(()), 0), error_regex=err_msg1) + + err_msg2 = (f"torch.hsplit attempted to split along dimension 1, " + f"but the size of the dimension {S} " + f"is not divisible by the split_size 0!") + yield ErrorInput(SampleInput(make_arg((S, S, S)), 0), error_regex=err_msg2) + + # Incorrect type for indices_or_section argument + err_msg3 = ("received an invalid combination of arguments.") + yield ErrorInput( + SampleInput(make_arg((S, S, S)), "abc"), + error_type=TypeError, error_regex=err_msg3) + +def error_inputs_vsplit(op_info, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + err_msg1 = ("torch.vsplit requires a tensor with at least 2 dimension, " + "but got a tensor with 1 dimensions!") + yield ErrorInput(SampleInput(make_arg(S), 0), error_regex=err_msg1) + + err_msg2 = (f"torch.vsplit attempted to split along dimension 0, " + f"but the size of the dimension {S} " + f"is not divisible by the split_size 0!") + yield ErrorInput(SampleInput(make_arg(S, S, S), 0), + error_regex=err_msg2) + + # Incorrect type for indices_or_section argument + err_msg3 = ("received an invalid combination of arguments.") + yield ErrorInput(SampleInput(make_arg(S, S, S), "abc"), + error_type=TypeError, error_regex=err_msg3) + +def error_inputs_dsplit(op_info, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + err_msg1 = ("torch.dsplit requires a tensor with at least 3 dimension, " + "but got a tensor with 1 dimensions!") + yield ErrorInput(SampleInput(make_arg(S), 0), error_regex=err_msg1) + + err_msg2 = (f"torch.dsplit attempted to split along dimension 2, " + f"but the size of the dimension {S} " + f"is not divisible by the split_size 0!") + yield ErrorInput(SampleInput(make_arg(S, S, S), 0), error_regex=err_msg2) + + +def sample_inputs_as_strided(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # input shape, output shape, output stride, output storage offset + test_cases = ( + ((1,), (1,), (1,), 0), + ((3, 3), (2, 2), (1, 2), 0), + ((3, 3), (2, 2), (1, 2), 1), + ((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0), + ((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0), + ) + + for input_shape, output_shape, stride, storage_offset in test_cases: + 
input_t = make_arg(input_shape) + kwargs = dict(storage_offset=storage_offset) + yield SampleInput(input_t, args=(output_shape, stride), kwargs=kwargs) + +def sample_inputs_as_strided_partial_views(op_info, device, dtype, requires_grad, **kwargs): + def make_arg(): + base = make_tensor((20,), device=device, dtype=dtype) + return base[5:15].requires_grad_(requires_grad) + + # as_strided on offset, partial views + yield SampleInput(make_arg(), (2, 2), (1, 2)) + yield SampleInput(make_arg(), (2, 2), (1, 2), storage_offset=0) + yield SampleInput(make_arg(), (2, 2), (1, 2), storage_offset=10) + +def sample_inputs_as_strided_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # input shape, output shape, output stride, output storage offset + test_cases = [ + ((1,), (), (), 0), + ((1,), (1,), (1,), 0), + ((3, 3), (2, 2), (1, 2), 0), + ((3, 3), (2, 2), (1, 2), 1), + ((3, 3), (2, 2), (2, 1), 0), + # Scatter to larger dimensions + ((16,), (2, 2, 2, 2), (8, 4, 2, 1), 0), + # Scatter to larger dimensions with strides inverted + ((16,), (2, 1, 1, 2), (1, 2, 4, 8), 0), + ] + + for input_shape, output_shape, stride, storage_offset in test_cases: + input_t = make_arg(input_shape) + input_src = make_arg(output_shape) + yield SampleInput(input_t, input_src, output_shape, stride, storage_offset=storage_offset) + + +def error_inputs_as_strided_scatter(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + + # Create a small tensor and try to scatter it out of bounds + input_t = make_arg([4, 4]) + input_src = make_arg([2, 2]) + yield ErrorInput( + SampleInput(input_t, input_src, [2, 2], [200, 200], storage_offset=0), + error_regex="itemsize 4 requiring a storage size of 1604 are out of bounds for storage of size 64" + ) + + +def sample_inputs_combinations(op_info, device, dtype, requires_grad, **kwargs): + inputs = ( + (0,), + (0, 1), + (0, 1, 2, 3), + ) + + rvals = [1, 2, 4] + + products = product(inputs, rvals, [False, True]) + + for input_data, r, with_replacement in products: + input_t = torch.tensor(input_data, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(input_t, r=r, with_replacement=with_replacement) + +def sample_inputs_cartesian_prod(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(torch.tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # constructs 1-D tensors with varying number of elements + a = make_arg((0,)) + b = make_arg((0, 1)) + c = make_arg((0, 1, 2, 3)) + + # sample with only 1 tensor + yield SampleInput(a) + + # sample with 2 tensors + yield SampleInput(a, b) + + # sample with 3 tensors + yield SampleInput(a, b, c) + +def sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input_shape, dict of dim and eps + cases: Tuple[tuple, dict] = ( # type: ignore[assignment] + ((S, S), {'dim': 1}), + ((S, 2), {'dim': -1}), + ((S,), {'dim': 0, 'eps': 0.5}), + ((), {'dim': 0}), + ((S, S, M), {'dim': 2}), + ((S, S), {}) + ) + + for input_shape, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(make_arg(input_shape),), kwargs=kwargs) + # Test for Broadcasting + yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}) + yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), 
kwargs={'dim': -2}) + yield SampleInput(make_arg((2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}) + + +def sample_inputs_item(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + + cases = ( + (), + (()), + (1), + ((1,)), + ) + + for shape in cases: + yield SampleInput(make_arg(shape)) + +def error_inputs_item(op, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device, requires_grad=False) + + cases = ( + (M), + ((S,)), + (S, S), + (S, M, L), + ) + + for shape in cases: + yield ErrorInput( + SampleInput(make_arg(shape)), error_type=RuntimeError, + error_regex="elements cannot be converted to Scalar") + + +def sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + # Ordered as: input shape, kwargs for training, momentum, eps + cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] + ((S, S, S), {'training': True, 'momentum': 0.5, 'eps': 0.6}), + ((3, 2, 4), {'training': False, 'momentum': -1.2}), + ((3, 1), {'training': True, 'momentum': 0.0}), + ((0,), {'training': True}), + ((0,), {'training': False}), + ((3, 2, 3, 4), {'training': True, 'momentum': -1.0, 'eps': 0.5}), + ((3, 2, 3, 4), {'training': False, 'momentum': -1.0, 'eps': 0.5}), + ((2, 1), {}), + ) + + for input_shape, kwargs in cases: + # args: running mean, running var, weight and bias should necessarily be of shape: (channels,) + channels = input_shape[1] if len(input_shape) > 1 else 0 + weight = make_arg(channels) if channels > 0 else None + bias = make_arg(channels) if channels > 0 else None + running_mean = make_arg_without_requires_grad(channels, low=0) + running_var = make_arg_without_requires_grad(channels, low=0) + + yield SampleInput( + make_arg(input_shape), + args=( + running_mean, + running_var, + weight, + bias + ), + kwargs=kwargs + ) + + # Checking for permutations of weights and biases as `None` + weights = [channels, None, None] + biases = [None, channels, None] + is_training = [True, False, False] + + for weight, bias, training in zip(weights, biases, is_training): + yield SampleInput( + make_arg(input_shape), + args=( + running_mean, + running_var, + make_arg(channels), + make_arg(channels) + ), + kwargs={'training': training} + ) + + # Test case for no optional kwargs + # running_mean and running_var are required in evaluation mode (training: False) but not in training mode + yield SampleInput(make_arg((1, 2, 3)), args=(None, None, None, None), kwargs={'training': True}) + +def sample_inputs_softmax_backward_data(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + cases = [ + ((S,), 0), + ((S, S), 0), + ((S, M, S), -1), + ] + input_dtypes = [dtype] + if dtype == torch.float and device == 'cuda': + input_dtypes += [torch.float16] + + for (shape, dim), input_dtype in product(cases, input_dtypes): + input = make_arg(shape) + output = torch.nn.functional.softmax(input, dim=dim, dtype=input_dtype) + yield SampleInput(make_arg(shape), output, dim, input_dtype) + +def sample_inputs_native_batch_norm(op_info, device, dtype, requires_grad, **kwargs): + samples = sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs) + for sample in samples: + # 
torch.native_batch_norm does not support 0 numel tensors + # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1) + if sample.input.numel() == 0: + continue + args = sample.args + training = sample.kwargs.get('training', True) + momentum = sample.kwargs.get('momentum', 0.5) + eps = sample.kwargs.get('eps', 1e-5) + yield SampleInput(sample.input, args=(args[2], args[3], args[0], args[1], training, momentum, eps)) + + +def sample_inputs__native_batch_norm_legit(op_info, device, dtype, requires_grad, **kwargs): + samples = sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs) + for sample in samples: + # torch.native_batch_norm does not support 0 numel tensors + # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1) + if sample.input.numel() == 0: + continue + args = sample.args + training = sample.kwargs.get('training', True) + momentum = sample.kwargs.get('momentum', 0.5) + eps = sample.kwargs.get('eps', 1e-5) + if args[0] is not None and args[1] is not None: + yield SampleInput(sample.input, args=(args[2], args[3], args[0], args[1], training, momentum, eps)) + else: + yield SampleInput(sample.input, args=(args[2], args[3], training, momentum, eps)) + + +def sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + (()), + ((S, )), + ((S, S)), + ((S, M, S)) + ) + + for shape in cases: + yield SampleInput(make_arg(shape)) + +def sample_inputs_prelu(op_info, device, dtype, requires_grad, **kwargs): + op_kwargs = op_info.sample_kwargs(device, dtype, None)[0] + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad, + op_kwargs=op_kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + (()), + ((S, )), + ((S, S)), + ((S, M, S)) + ) + + for shape in cases: + for weight in [-1., 0., 0.8, 1.]: + weight_tensor = torch.tensor(weight, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(shape), args=(weight_tensor,)) + + channel_size = shape[1] if len(shape) >= 2 else 1 + yield SampleInput(make_arg(shape), args=(make_arg((channel_size,)),)) + + weight_tensor = torch.tensor(1., device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S)), kwargs=dict(weight=weight_tensor,)) + yield SampleInput(make_arg((S, S)), kwargs=dict(weight=make_arg((S,)),)) + +def reference_inputs_prelu(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_prelu(op, device, dtype, requires_grad, **kwargs) + yield from reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs) + +def sample_kwargs_prelu_scalar_weight(device, dtype, input): + weight = torch.rand(tuple(), device=device, dtype=dtype) + # NumPy does not support bfloat16, so we default to float32 (only for NumPy) in that case + if dtype == torch.bfloat16: + weight_cpu = weight.to(dtype=torch.float32, device="cpu") + else: + weight_cpu = weight.cpu() + np_weight = weight_cpu.numpy() + return ({'weight': weight}, {'weight': np_weight}) + +def error_inputs_prelu(op, device): + # Weight has numel != 1, but self.ndim is zero-dim tensor + inp = make_tensor(tuple(), device=device, dtype=torch.float32) + weight = make_tensor((2,), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}), + error_regex="Not allow zero-dim input tensor.") 
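+    # Editorial note, not part of the original file: the error cases in this generator
+    # mirror the prelu contract -- `weight` must be a scalar or a 1-D tensor whose numel
+    # is 1 or matches the channel dimension input.size(1). A rough sanity check along
+    # these lines (shapes here are illustrative only):
+    #   x = torch.randn(2, 8, 3)
+    #   torch.nn.functional.prelu(x, torch.randn(8))   # per-channel weight: accepted
+    #   torch.nn.functional.prelu(x, torch.randn(9))   # channel mismatch: RuntimeError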
+ + # Weight has numel != 1, but numel does not match channel size + inp = make_tensor((2, 8, 3), device=device, dtype=torch.float32) + weight = make_tensor((9,), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}), + error_regex="Mismatch of parameter numbers and input channel size.") + + # Weight is neither a scalar nor 1-D tensor + inp = make_tensor((2, 8, 3), device=device, dtype=torch.float32) + weight = make_tensor((2, 4), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}), + error_regex="prelu: Expected `weight` to be a scalar or 1D tensor, but got: ndim = 2") + + # src and index tensors must have the same # of dimensions +def sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # ord = inf is tested in inputs_norm_inf as it fails on some tests + cases = [ + ((S, S), (2,), '2'), + ((S, S), (0,), '0'), + ((S, S), (0.5,), '0_5'), + ((S, S), (1,), '1'), + ((S, S), (3,), '3'), + ((S, S), (-1,), 'neg_1'), + ((S, S), (-2,), 'neg_2'), + ((S, S), (-0.5,), 'neg_0_5'), + ((S, S), (-1.5,), 'neg_1_5'), + ] + + cases_nonzero_input = ( + ((S, S, S), (1.5,), '1_5_default'), + ((S, S, S), (1.5, 1), '1_5_dim'), + ((S, S, S), (1.5, -1), '1_5_neg_dim'), + ((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'), + ((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'), + ) + + cases_posdim = ( + ((S, S), (-2, 1,), 'neg_2_dim'), + ((S, S), (-1, 1,), 'neg_1_dim'), + ((S, S), (0, 1,), '0_dim'), + ((S, S), (1, 1,), '1_dim'), + ((S, S), (2, 1,), '2_dim'), + ((S, S), (3, 1,), '3_dim'), + ((S, S, S), (2, 1), '2_dim'), + ((S, S, S), (3, 1), '3_dim'), + ((S, S, S), (2, 1, True), 'keepdim_2_dim'), + ((S, S, S), (3, 1, True), 'keepdim_3_dim'), + ((), (2, 0), '2_dim_scalar'), + ((), (3, 0), '3_dim_scalar'), + ((), (2, 0, True), 'keepdim_2_dim_scalar'), + ((), (3, 0, True), 'keepdim_3_dim_scalar'), + ) + + cases_negdim = ((shape, args[:1] + (-args[1],) + args[2:], name.replace("_dim", "_neg_dim")) + for shape, args, name in cases_posdim) + + for shape, args, name in itertools.chain(cases, cases_posdim, cases_negdim): + yield SampleInput(make_arg(shape), args=args, name=name) + + for shape, args, name in cases_nonzero_input: + yield SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name) + + +def sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + ((S, S), (), 'default'), + ((S, S), ('fro',), 'fro_default'), + ((S, S), ('fro', [0, 1],), 'fro'), + ) + + for shape, args, name in cases: + yield SampleInput(make_arg(shape), args=args, name=name) + + +def sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + ((S, S), ('nuc',), 'nuc'), + ((S, S, S), ('nuc', [1, 2]), 'nuc_batched'), + ) + + for shape, args, name in cases: + yield SampleInput(make_arg(shape), args=args, name=name) + + +def sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + ((S, S), (-inf,), '-inf'), + ((S, S), (inf,), 'inf'), + ((S, S), (inf, 1,), 'inf_2_dim'), + ((S, S), (inf, -1,), 'inf_2_neg_dim'), + ) + + for shape, args, name in cases: + yield SampleInput(make_arg(shape), 
args=args, name=name) + + +def sample_inputs_equal(op, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes = ( + ((), ()), + ((S,), ()), + ((), (S,)), + ((S, 1), (S,)), + ((M, S), ()), + ((S, S), (S, S)) + ) + + for shape_lhs, shape_rhs in shapes: + lhs = make_arg(shape_lhs) + rhs = make_arg(shape_rhs) + broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs) + + yield SampleInput(lhs, args=(rhs,), broadcasts_input=broadcasts_input) + if shape_lhs == shape_rhs: + yield SampleInput(lhs, args=(lhs.clone().detach_(),)) + + + +def sample_inputs_jiterator(op, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes = ( + ((), ()), + ((S,), ()), + ((S, 1), (S,)), + ((M, S), ()), + ((S, M, S), (M, S)), + ((S, M, S), (S, M, S)), + ((M, 1, S), (M, S)), + ((M, 1, S), (1, M, S)), + ((0, 1, 3), (0, 10, 3)) + ) + + num_inputs = kwargs.get('num_inputs') + sample_kwargs = kwargs.get('sample_kwargs', {}) + + for shape_lhs, shape_rhs in shapes: + lhs = make_arg(shape_lhs) + + args = [] + for i in range(num_inputs - 1): + args.append(make_arg(shape_rhs)) + broadcasts_input = (shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)) + + yield SampleInput(lhs, args=tuple(args), kwargs=sample_kwargs, broadcasts_input=broadcasts_input) + +def sample_inputs_broadcast_shapes(op, device, dtype, requires_grad, **kwargs): + shapes = ( + ((), ()), + ((S,), ()), + ((S, 1), (S,)), + ((S, 1), S), + ((M, S), ()), + ((S, M, S), (M, S)), + ((S, M, S), (S, M, S)), + ((M, 1, S), (M, S)), + ((M, 1, S), (1, M, S)), + ((0, 1, 3), (0, 10, 3)) + ) + + for shape in shapes: + inp, *arg0 = shape + yield SampleInput(inp, args=tuple(arg0)) + +def sample_inputs_add_sub(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) + + # Adds alpha kwarg cases + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) + rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) + if dtype is not torch.bool: + yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': 2}) + else: + yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': True}) + neg_alpha = -3.125 if (dtype.is_floating_point or dtype.is_complex) else -3 + lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) + rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) + if dtype is not torch.bool: + yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': neg_alpha}) + else: + yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': False}) + +def error_inputs_arange(op, device, **kwargs): + yield ErrorInput(SampleInput(0, args=(3, 0)), error_type=RuntimeError, error_regex='step must be nonzer') + yield ErrorInput(SampleInput(0, args=(-3, 2)), error_type=RuntimeError, error_regex='bound inconsistent with step sign') + yield ErrorInput(SampleInput(0, args=(3, -2)), error_type=RuntimeError, error_regex='bound inconsistent with step sign') + yield ErrorInput(SampleInput(0, args=(float('inf'), 2)), error_type=RuntimeError, error_regex='unsupported range') + yield ErrorInput(SampleInput(float('-inf'), args=(1, 2)), error_type=RuntimeError, error_regex='unsupported range') + +def sample_inputs_arange(op, device, dtype, requires_grad, **kwargs): + int_samples = ( + # positive direction + (-1, 2, 2), + # negative direction + (2, -3, -1), + # start == end + 
(1, 1, 1), + (1, 1, -1), + # divides evenly + (0, -8, -4), + (1, 5, 2), + # bool + (False, True, True), + # default step + (0, 1, None), + # default start + (None, 3, None), + ) + + def to_float(start, end, step): + start = start + 0.1 if start is not None else None + end = end + 0.1 + step = float(step) if step is not None else None + return start, end, step + + float_samples = ( + # includes endpoint + (0., -8. - 1e-6, -4.), + (1., 5. + 1e-6, 2.), + (0., -8., -4.), + (1., 5., 2.), + *(to_float(start, end, step) for (start, end, step) in int_samples), + ) + + large_samples = ( + (0, 10000, None), + ) + + samples = int_samples + float_samples + if dtype not in (torch.int8, torch.uint8): + samples += large_samples + + for start, end, step in samples: + if start is None: + assert step is None + # Pass end as positional arg + yield SampleInput(end, kwargs={"dtype": dtype, "device": device}) + # (Similar to) calling torch.arange(end=3) + yield SampleInput(0, kwargs={"end": end, "dtype": dtype, "device": device}) + elif step is None: + yield SampleInput(start, args=(end,), kwargs={"dtype": dtype, "device": device}) + else: + yield SampleInput(start, args=(end, step), kwargs={"dtype": dtype, "device": device}) + + yield SampleInput(2) + yield SampleInput(1, args=(3, 1)) + +def sample_inputs_randn(op, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + + shapes = ( + (M,), + (S, S) + ) + + for shape in shapes: + yield SampleInput(input=shape, kwargs=dict(dtype=dtype, device=device, requires_grad=requires_grad)) + +def sample_inputs_normal(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((S, S), 0, 5), + ((S, S, S), -2, 0.5), + ) + for shape, mean, std in samples: + yield SampleInput(make_arg(shape), args=(mean, std)) + +def error_inputs_normal(op, device, **kwargs): + t = torch.zeros([10], device=device) + invalid_std = -1 + yield ErrorInput( + SampleInput(t, args=(0, invalid_std)), + error_type=RuntimeError, + error_regex=fr"normal expects std >= 0.0, but found std {invalid_std}", + ) + +def sample_inputs_cauchy(op, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), 0, 0.5), + ((S, S), 0, 1), + ((S, S, S), -2, 1), + ) + for shape, median, gamma in samples: + yield SampleInput(make_arg(shape), args=(median, gamma)) + + +def error_inputs_cauchy(op, device, **kwargs): + t = torch.zeros([10], device=device) + invalid_scale = 0 + yield ErrorInput( + SampleInput(t, args=(0, invalid_scale,)), + error_type=RuntimeError, + error_regex=fr"cauchy_ expects sigma > 0.0, but found sigma={invalid_scale}", + ) + + +def sample_inputs_exponential(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), 0.5), + ((S, S), 1), + ((S, S, S), 1.5), + ) + for shape, rate in samples: + yield SampleInput(make_arg(shape), args=(rate,)) + + +def error_inputs_exponential(op, device, **kwargs): + t = torch.zeros([10], device=device) + invalid_rate = 0 + yield ErrorInput( + SampleInput(t, args=(invalid_rate,)), + error_type=RuntimeError, + error_regex=fr"exponential_ expects lambda > 0.0, but found lambda={invalid_rate}", + ) + + +def sample_inputs_geometric(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, 
requires_grad=False) + samples = ( + ((M,), 0.2), + ((S, S), 0.5), + ((S, S, S), 0.8), + ) + for shape, rate in samples: + yield SampleInput(make_arg(shape), args=(rate,)) + + +def error_inputs_geometric(op, device, **kwargs): + t = torch.zeros([10], device=device) + neg_prob = -1 + yield ErrorInput( + SampleInput(t, args=(neg_prob,)), + error_type=RuntimeError, + error_regex=fr"geometric_ expects p to be in \(0, 1\), but got p={neg_prob}", + ) + + +def sample_inputs_log_normal(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), 0, 0.25), + ((S, S), 0.5, 1), + ((S, S, S), 0, 0.5), + ) + for shape, mean, std in samples: + yield SampleInput(make_arg(shape), args=(mean, std)) + + +def error_inputs_log_normal(op, device, **kwargs): + t = torch.zeros([10], device=device) + invalid_std = 0 + yield ErrorInput( + SampleInput(t, args=(0, invalid_std)), + error_type=RuntimeError, + error_regex=fr"log_normal_ expects std > 0.0, but found std={invalid_std}", + ) + + +def sample_inputs_uniform(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), -100, 100), + ((S, S), 0, 1), + ((S, S, S), 1, 2), + ) + for shape, hi, lo in samples: + yield SampleInput(make_arg(shape), args=(hi, lo)) + +def sample_inputs_ones_zeros(op, device, dtype, requires_grad, **kwargs): + # this is a bit messy, as we want the args to be tuples + # so if we pass size as a tuple, we have a tuple containing a tuple + sizes = ( + (M,), + (S, S), + ) + for size in sizes: + yield SampleInput(size, kwargs={'dtype': dtype, 'device': device}) + +def sample_inputs_full(op, device, dtype, requires_grad, **kwargs): + def get_val(dtype): + return make_tensor([], dtype=dtype, device="cpu").item() + + sizes = ( + (M,), + (S, S), + ) + fill_values = [get_val(dtype), get_val(torch.int)] + + for size, fill_value in product(sizes, fill_values): + yield SampleInput(size, fill_value, dtype=dtype, device=device) + + +def error_inputs_uniform(op, device, **kwargs): + t = torch.zeros([10], device=device) + yield ErrorInput( + SampleInput(t, args=(3, -1)), + error_type=RuntimeError, + error_regex=r"uniform_ expects to return a \[from, to\) range, but found from=3 > to=-1", + ) + + +def error_inputs_linspace(op, device, **kwargs): + yield ErrorInput(SampleInput(0, args=(3, -1)), error_type=RuntimeError, error_regex='number of steps must be non-negative') + yield ErrorInput( + SampleInput(0, args=(3, 1.)), + error_type=TypeError, + error_regex="received an invalid combination of arguments - got \\(int, int, float", + ) + yield ErrorInput( + SampleInput(torch.tensor([1, 1], device=device), args=(torch.tensor([3, 3], device=device), 1)), + error_type=RuntimeError, + error_regex="only supports 0-dimensional start and end tensors" + ) + + +def sample_inputs_linspace(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1, 4, 50) + starts = (-2., 0, 4.3, 50) + nsteps = (0, 1, 50) + # Extra case to replicate off-by-one issue on CUDA + cases = list(product(starts, ends, nsteps)) + [(0, 7, 50)] + for start, end, nstep in cases: + if dtype == torch.uint8 and (end < 0 or start < 0): + continue + yield SampleInput(start, args=(end, nstep), kwargs={"dtype": dtype, "device": device}) + + yield SampleInput(1, args=(3, 1)) + + +def sample_inputs_linspace_tensor_overload(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1, 4, 50) + starts = (-2., 0, 4.3, 
50) + nsteps = (0, 1, 50) + is_start_end_tensors = ((True, True), (True, False), (False, True)) + make_arg = partial(torch.tensor, device=device, requires_grad=False) + + # Extra case to replicate off-by-one issue on CUDA + cases = list(product(starts, ends, nsteps, is_start_end_tensors)) + [(0, 7, 50, (True, True))] + for start, end, nstep, (is_start_tensor, is_end_tensor) in cases: + if dtype == torch.uint8 and (end < 0 or start < 0): + continue + + tensor_options = {"dtype": dtype, "device": device} + if is_start_tensor: + start = make_arg(start, dtype=torch.float32 if isinstance(start, float) else torch.int64) + if is_end_tensor: + end = make_arg(end, dtype=torch.float32 if isinstance(end, float) else torch.int64) + + yield SampleInput(start, args=(end, nstep), kwargs=tensor_options) + + yield SampleInput(1, args=(3, 1)) + + +def sample_inputs_logspace(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1.2, 2, 4) + starts = (-2., 0, 1, 2, 4.3) + nsteps = (0, 1, 2, 4) + bases = (2., 1.1) if dtype in (torch.int8, torch.uint8) else (None, 2., 3., 1.1, 5.) + for start, end, nstep, base in product(starts, ends, nsteps, bases): + if dtype == torch.uint8 and end < 0 or start < 0: + continue + if nstep == 1 and isinstance(start, float) and not (dtype.is_complex or dtype.is_floating_point): + # https://github.com/pytorch/pytorch/issues/82242 + continue + if base is None: + yield SampleInput(start, args=(end, nstep), kwargs={"dtype": dtype, "device": device}) + else: + yield SampleInput(start, args=(end, nstep, base), kwargs={"dtype": dtype, "device": device}) + + yield SampleInput(1, args=(3, 1, 2.)) + + +def sample_inputs_logspace_tensor_overload(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1.2, 2, 4) + starts = (-2., 0, 1, 2, 4.3) + nsteps = (0, 1, 2, 4) + bases = (2., 1.1) if dtype in (torch.int8, torch.uint8) else (None, 2., 3., 1.1, 5.) 
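+    # Editorial note, not part of the original file: torch.logspace(start, end, steps, base)
+    # produces `steps` values of base**x for x evenly spaced between start and end, and the
+    # `None` entries in `bases` above exercise the default base of 10. For instance,
+    # torch.logspace(0, 2, steps=3) gives tensor([1., 10., 100.]).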
+ is_start_end_tensors = ((True, True), (True, False), (False, True)) + make_arg = partial(torch.tensor, device=device) + for start, end, nstep, base, (is_start_tensor, is_end_tensor) in product(starts, ends, nsteps, bases, is_start_end_tensors): + if dtype == torch.uint8 and end < 0 or start < 0: + continue + if nstep == 1 and isinstance(start, float) and not (dtype.is_complex or dtype.is_floating_point): + # https://github.com/pytorch/pytorch/issues/82242 + continue + + tensor_options = {"dtype": dtype, "device": device} + + if (is_start_tensor): + start = make_arg(start, dtype=torch.float32 if isinstance(start, float) else torch.int64) + if (is_end_tensor): + end = make_arg(end, dtype=torch.float32 if isinstance(end, float) else torch.int64) + + if base is None: + yield SampleInput(start, args=(end, nstep), kwargs=tensor_options) + else: + yield SampleInput(start, args=(end, nstep, base), kwargs=tensor_options) + + yield SampleInput(1, args=(3, 1, 2.)) + + +def sample_inputs_isclose(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) + + # Creates additional inputs to test the rtol, atol, and equal_nan params + rtols = [0., 1e-7] + atols = [0., 1e-7] + equal_nans = [False, True] + + products = product(rtols, atols, equal_nans) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + for rtol, atol, equal_nan in products: + lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) + rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) + + yield SampleInput(lhs, args=(rhs,), + kwargs=dict(rtol=rtol, atol=atol, equal_nan=equal_nan)) + + +def error_inputs_isclose(op, device, **kwargs): + make_float_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + + yield ErrorInput(SampleInput(make_float_arg(()), args=(make_float_arg(()),), kwargs={'rtol': -0.4}), + error_type=RuntimeError, + error_regex='rtol must be greater than or equal to zero') + + yield ErrorInput(SampleInput(make_float_arg(()), args=(make_float_arg(()),), kwargs={'atol': -0.4}), + error_type=RuntimeError, + error_regex='atol must be greater than or equal to zero') + + +def sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg((1, 2))) + yield SampleInput(make_arg((2,))) + yield SampleInput(make_arg(())) + + +def sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_arg_conj(size): + return make_arg(size).conj().requires_grad_(requires_grad) + + first_shape, second_shape = (S, M), (M, S) + + yield SampleInput(make_arg(first_shape), args=(make_arg(second_shape),)) + + if dtype.is_complex: + yield SampleInput(make_arg(first_shape), args=(make_arg_conj(second_shape),)) + + # Matmul of empty matrices + yield SampleInput(make_arg((0, S)), args=(make_arg(S, M),)) + yield SampleInput(make_arg((S, 0)), args=(make_arg(0, M),)) + + +def sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs): + alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6) + beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2) + tests_list = [ + ((2, 3), (2, 2), (2, 3), False), + ((3, 3), (3, 3), (3, 3), False), + ] + tests_with_lhs_broadcasting = [ + ((1,), (2, 2), (2, 3), True), + ((), (2, 2), (2, 3), True), + ] + test_cases = 
tests_list + tests_with_lhs_broadcasting # type: ignore[operator] + + kwargs = dict(alpha=alpha_val, beta=beta_val) + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape_a, shape_b, shape_c, broadcasts_input in test_cases: + yield SampleInput( + make_arg(shape_a), + make_arg(shape_b), + make_arg(shape_c), + **kwargs, + ).with_metadata(broadcasts_input=broadcasts_input) + + if dtype.is_complex: + shape = (3, 3) + yield SampleInput( + make_arg(shape), + make_arg(shape, requires_grad=False).mH.requires_grad_(requires_grad), + make_arg(shape), + **kwargs, + ) + yield SampleInput( + make_arg(shape), + make_arg(shape), + make_arg(shape, requires_grad=False).mH.requires_grad_(requires_grad), + **kwargs, + ) + # addmm of empty matrices + if dtype.is_floating_point: + yield SampleInput(make_arg(S, M), make_arg(S, 0), make_arg(0, M), **kwargs) + # empty matmul with broadcastable input + yield SampleInput(make_arg(M), make_arg(S, 0), make_arg(0, M), **kwargs).with_metadata(broadcasts_input=True) + +def sample_inputs_sparse_sampled_addmm(op_info, device, dtype, requires_grad, **kwargs): + alpha = 2 + 3j if dtype.is_complex else 0.6 + beta = 1 + 2j if dtype.is_complex else 0.2 + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # sparse.sampled_addmm performs: alpha * (A @ B) * sparse_ones_like(C) + beta * C + for m, n, k in itertools.product([0, 5], repeat=3): + yield SampleInput( + torch.eye(m, n, device=device, dtype=dtype) + .to_sparse_csr() + .requires_grad_(requires_grad), + make_arg((m, k)), + make_arg((k, n)), + alpha=alpha, + beta=beta, + ) + +def sample_inputs_sparse_mm_reduce(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + reductions = ["sum", "mean", "amax", "amin"] + for m, k, reduce in product([5, 7], [3, 11], reductions): + yield SampleInput( + torch.eye(m, m) + .to(device=device, dtype=dtype) + .to_sparse_csr() + .requires_grad_(requires_grad), + make_arg((m, k)), + reduce, + ) + + +def sample_inputs_mv(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(S, M), make_arg(M)) + +def sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(M, S, M), make_arg(M, M, S)) + +def sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_arg_conj(size): + return make_arg(size).conj().requires_grad_(requires_grad) + + yield SampleInput(make_arg((S, )), make_arg((S, ))) + if dtype.is_complex: + # dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor) + # is tested in test_conj_view (which tests operations with only conjugated input tensor + # -- not conjugated arg tensors) + yield SampleInput(make_arg((S, )), make_arg_conj((S, ))) + + +def error_inputs_dot_vdot(op_info, device, is_ref=False, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + + if not is_ref: + yield ErrorInput(SampleInput(make_input(1), args=(make_input(3, dtype=torch.float16),)), + error_regex='dot : expected both vectors to have same dtype') + yield ErrorInput(SampleInput(make_input(1, 1), 
args=(make_input(3),)), + error_regex='1D tensors expected') + yield ErrorInput(SampleInput(make_input(9), args=(make_input(3),)), + error_regex='inconsistent tensor size') + if device != "cpu" and not is_ref: + yield ErrorInput(SampleInput(make_input(3), args=(make_input(3, device="cpu"),)), + error_regex='Expected all tensors to be on the same device') + + +def sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + test_cases = (((S,), (S, M), (M,), 1, 1, False), + ((S,), (S, M), (M,), 0.2, 0.6, False), + ) + + test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True), + ((1,), (S, M), (M,), 0.2, 0.6, True), + ((), (S, M), (M,), 1, 1, True), + ((), (S, M), (M,), 0.2, 0.6, True), + ) + + cases = test_cases + test_cases_with_broadcast + + # addmv performs: beta * M + alpha * (mat @ vec) + for size, mat, vec, beta, alpha, broadcasts_input in cases: + yield SampleInput(make_arg(size), args=(make_arg(mat), make_arg(vec)), + kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input) + +def sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting + test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False), + ((1,), (S, S, S), (S, S, M), 1, 1, True), + ((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False), + ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True), + ((), (S, S, S), (S, S, M), 1, 1, True), + ((), (S, S, S), (S, S, M), 0.6, 0.2, True), + ] + + for input_shape, batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases: + if dtype.is_complex: + beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j) + yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)), + kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting) + yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)), + kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting) + +def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + test_cases = [(((S, S), (S, S), (S, S)), False), + (((S, S), (S, 1), (1, S)), False), + (((1,), (S, S, 1), (1, S)), True), + (((), (), ()), False), + (((S, S), (), ()), True), + (((), (S, S, 1), (1, S)), True) + ] + + for input_args, broadcasts_input in test_cases: + # addcdiv should accept inputs with zero value + # Currently, it throws ZeroDivisionError when the denominator is zero + # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed + args = tuple(make_arg(arg, exclude_zero=True) if isinstance(arg, tuple) else arg + for arg in input_args) + yield SampleInput(*args).with_metadata(broadcasts_input=broadcasts_input) + + # addcdiv should accept inputs with zero value + # Currently, it throws ZeroDivisionError when the denominator is zero + # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed + args = tuple(make_arg(arg, exclude_zero=True) if isinstance(arg, tuple) else arg + for arg in input_args) + yield SampleInput( + *args, value=3.14 if dtype.is_floating_point or dtype.is_complex else 3 + ).with_metadata(broadcasts_input=broadcasts_input) + +def 
reference_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_addcmul_addcdiv( + op_info, device, dtype, requires_grad, **kwargs) + + # type promotion cases + supported_dtypes = op_info.supported_dtypes(device) + make_arg = partial(make_tensor, device=device, requires_grad=requires_grad) + + types = ( + (torch.float64, torch.complex128), + (torch.bfloat16, torch.float32), + ) + + values = ( + None, + True, False, + 3.14, 3, + 1.0, 1, + 0.0, 0, + -3.14, -3, + 3.14 + 2.71j, + ) + + for (type2, type3), value in product(types, values): + if (type2 not in supported_dtypes or + type3 not in supported_dtypes): + continue + + # RuntimeError: value cannot be converted without overflow + if (type(value) is complex and + type2 is not torch.complex128): + continue + + arg1 = make_arg([5, 5], dtype=dtype) + arg2 = make_arg([5, 5], dtype=type2) + arg3 = make_arg([1, 5], dtype=type3) + + # TypeError: addcdiv(): argument 'value' must be Number, not NoneType + if value is not None: + yield SampleInput(arg1, args=(arg2, arg3), kwargs=dict(value=value)) + else: + yield SampleInput(arg1, args=(arg2, arg3)) + +def sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs): + test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False), + ((1,), (S, S, S), (S, S, M), 1, 1, True), + ((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False), + ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True), + ((), (S, S, S), (S, S, M), 1, 1, True), + ((), (S, S, S), (S, S, M), 0.6, 0.2, True), + ] + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases: + yield SampleInput( + make_arg(input_shape), + make_arg(batch1_shape), + make_arg(batch2_shape), + beta=beta, + alpha=alpha + ).with_metadata(broadcasts_input=broadcasts_input) + + if dtype.is_complex: + yield SampleInput( + make_arg(input_shape), + make_arg(batch1_shape), + make_arg(batch2_shape), + beta=beta * (1 + 2j), + alpha=alpha * (2 + 3j), + ).with_metadata(broadcasts_input=broadcasts_input) + + if dtype.is_complex: + shapes = [(S, S, S), (S, M, S), (S, S, M)] + args = tuple(make_arg(s) for s in shapes) + yield SampleInput( + args[0].transpose_(-1, 1), + args[1].transpose(-1, 1).conj().requires_grad_(requires_grad), + args[2].transpose(-1, 1).conj().requires_grad_(requires_grad), + beta=beta * (1 + 2j), + alpha=alpha * (2 + 3j), + ) + +# TODO: add reduction kwargs +def sample_inputs_multilabel_soft_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes = ( + (S,), + (S, S), + ) + + for shape in shapes: + # Produce one with weight and one without. 
+ yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), kwargs={}) + yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), + kwargs={'weight': _make_tensor(shape, requires_grad=False)}) + +def sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None + ) + yield SampleInput(make_arg(S, M), make_arg(S), make_arg(M)) + + yield SampleInput(make_arg(), make_arg(S), make_arg(M)).with_metadata(broadcasts_input=True) + + if dtype.is_complex: + alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j + elif dtype.is_floating_point: + alpha, beta = 0.2, 0.6 + else: + alpha, beta = 2, 3 + + yield SampleInput(make_arg(S, M), make_arg(S), make_arg(M), beta=beta, alpha=alpha) + + yield SampleInput( + make_arg(), + make_arg(S), + make_arg(M), + beta=beta, + alpha=alpha, + ).with_metadata(broadcasts_input=True) + + # These samples fail gradcheck + if dtype.is_floating_point and not requires_grad: + tensor_options = dict(device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput( + torch.tensor([[math.nan]], **tensor_options), + torch.tensor([0.0], **tensor_options), + torch.tensor([0.0], **tensor_options), + beta=0.0, + alpha=0.0, + ).with_metadata(broadcasts_input=True) + + yield SampleInput( + torch.tensor([[0.0]], **tensor_options), + torch.tensor([math.nan], **tensor_options), + torch.tensor([math.nan], **tensor_options), + beta=0.0, + alpha=0.0, + ).with_metadata(broadcasts_input=True) + +def sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ((), (S, S, S), (S,)) + + for shape in cases: + yield SampleInput(make_arg(shape)) + +def sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + make_weight = partial(_make_tensor, requires_grad=False) + + inputs = ( + ((), make_target([], low=0, high=1), {}), + ((S,), make_target([], low=0, high=S), {"p": 1}), + ((S,), make_target([1], low=0, high=S), {"p": 2}), + ((S, M), make_target([S], low=0, high=M), {"margin": 1.0}), + ((S, M), make_target([S], low=0, high=M), {"margin": -3.14}), + ((M, S), make_target([M], low=0, high=S), {"weight": None}), + ((M, S), make_target([M], low=0, high=S), {"weight": make_weight([S], low=-10., high=10.)}), + ((M, S), make_target([M], low=0, high=S), {"reduction": "none"}), + ((M, S), make_target([M], low=0, high=S), {"reduction": "mean"}), + ((M, S), make_target([M], low=0, high=S), {"reduction": "sum"}), + ) + + for input_shape, target, kwargs in inputs: + yield SampleInput(_make_tensor(input_shape), args=(target,), kwargs=kwargs) + + +def reference_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs) + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + make_weight = partial(_make_tensor, requires_grad=False) + + inputs = ( + ((), make_target([], low=0, high=1)), + ((S,), make_target([], low=0, high=S)), + ((S,), make_target([1], low=0, high=S)), + ((M, S), make_target([M], low=0, 
high=S)), + ) + ps = (1, 2) + margins = (0, 7, -3.14) + weights = (False, True) + reductions = (None, "none", "mean", "sum") + + for (input_shape, target), p, margin, weight, reduction in product(inputs, ps, margins, weights, reductions): + input = _make_tensor(input_shape) + weight_shape = [input.size(-1)] if input.ndim > 0 else [1] + weight = make_weight(weight_shape, low=-10., high=10.) if weight else None + kwargs = {"p": p, "margin": margin, "weight": weight} + if reduction is not None: + kwargs["reduction"] = reduction + yield SampleInput(input, args=(target,), kwargs=kwargs) + + +def error_inputs_multi_margin_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # invalid reduction + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex='abc is not a valid value for reduction') + # invalid input + yield ErrorInput(SampleInput(make_input(5, 0), args=(make_input(5,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[5, 0\]') + yield ErrorInput(SampleInput(make_input(0,), args=(make_input(5,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[0\]') + # invalid target + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={}), + error_type=RuntimeError, error_regex=r'inconsistent target size, expected 5 but got \[5, 4\]') + # invalid target dtype + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={}), + error_type=RuntimeError, error_regex='expected scalar type Long but found Float') + # invalid weight + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(())}), + error_type=ValueError, error_regex='weight must be one-dimensional') + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(5, 4)}), + error_type=ValueError, error_regex='weight must be one-dimensional') + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(5,)}), + error_type=RuntimeError, error_regex=r'inconsistent weight size, expected 4 but got \[5\]') + # invalid p + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'p': 3}), + error_type=ValueError, error_regex='only p == 1 and p == 2 supported') + + +def sample_inputs_logsumexp(self, device, dtype, requires_grad, **kwargs): + inputs = ( + ((), (0,), True), + ((S, S), (1,), True), + ((S, S), (1,), False), + ((S, S), (-2,), False), + ((S, S), (0, 1), False), + ) + # Test large inputs to check numerical stability + lows = (None, 1e3, 1e6) if dtype in (torch.float32, torch.float64) else (None,) + for low in lows: + high = low * 2 if low is not None else None + for shape, dim, keepdim in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=low, high=high, + requires_grad=requires_grad) + yield SampleInput(t, dim, keepdim) + +def reference_inputs_logsumexp(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_logsumexp(op, device, dtype, requires_grad, **kwargs) + + # https://github.com/pytorch/pytorch/issues/91843 + t = torch.tensor([20, 30, 100], dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(t, 0, False) + + t = torch.tensor((), dtype=dtype, device=device, requires_grad=requires_grad) + 
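+    # Editorial note, not part of the original file: the empty-tensor case below is well
+    # defined because logsumexp over an empty slice is log(0); a quick check such as
+    # torch.logsumexp(torch.tensor([]), dim=0) is expected to return tensor(-inf).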
yield SampleInput(t, 0, False) + + # tests masking + # https://github.com/pytorch/pytorch/pull/91860#pullrequestreview-1241344073 + t = torch.tensor(float("inf")) + yield SampleInput(t, 0, True) + +def sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): + inputs = [ + ((), {}), + ((S, S), {}), + ((0, S, 0), {}), + ((S,), {'dtype': dtype, 'device': device}), + # Hard-code some dtypes/devices. We want to test cases where the + # (dtype, device) is different from the input's (dtype, device) + ((S,), {'dtype': torch.double}), + ((S,), {'device': 'cpu'}), + ((S,), {'dtype': torch.double, 'device': 'cpu'}), + ] + if torch.cuda.is_available(): + inputs.append(((S,), {'device': 'cuda'})) + + for shape, kwargs in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(t, **kwargs) + +def reference_inputs_like_fns(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_like_fns(op, device, dtype, requires_grad, **kwargs) + + # shape + cases = ( + (), (0,), (1, 0), (1, 1, 4, 5), (5, 3, 0, 1), (1, 4, 3, 1, 1) + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in cases: + yield SampleInput(make_arg(shape)) + yield SampleInput(make_arg(shape).transpose(0, -1)) + yield SampleInput(make_arg(shape, noncontiguous=True)) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1)) + +def sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + + inputs = ( + ([], make_target([], low=0, high=1), {}), + ([S], make_target([S], low=0, high=S), {}), + ([M, S], make_target([M, S], low=0, high=S), {}), + ([M, S], make_target([M, S], low=0, high=S), {"reduction": "none"}), + ([M, S], make_target([M, S], low=0, high=S), {"reduction": "mean"}), + ([M, S], make_target([M, S], low=0, high=S), {"reduction": "sum"}), + ) + + for shape, target, kwargs in inputs: + yield SampleInput(_make_tensor(shape), args=(target,), kwargs=kwargs) + + +def reference_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs) + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + make_target_tensor = partial(torch.tensor, device=device, dtype=torch.long, requires_grad=False) + + inputs = ( + # random tests including -1 target labels + ([], make_target([], low=-1, high=1)), + ([S], make_target([S], low=-1, high=S)), + ([M, S], make_target([M, S], low=-1, high=S)), + # repeated target labels and -1 (labels after the first -1 are ignored) + ([], make_target_tensor(-1)), + ([7], make_target_tensor([2, 0, 6, -1, 4, -1, 6])), + ([4, 5], make_target_tensor([[4, -1, 0, -1, 2], [0, 0, 4, 1, 4], [-1, 3, -1, 1, 0], [4, 3, 2, 1, 0]])), + ) + reductions = (None, "none", "mean", "sum") + + for (shape, target), reduction in product(inputs, reductions): + kwargs = {} + if reduction is not None: + kwargs["reduction"] = reduction + yield SampleInput(_make_tensor(shape), args=(target,), kwargs=kwargs) + + +def error_inputs_multilabel_margin_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # 
invalid reduction + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex='abc is not a valid value for reduction') + # invalid input + yield ErrorInput(SampleInput(make_input(5, 0), args=(make_input(5, 4),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[5, 0\]') + yield ErrorInput(SampleInput(make_input(0,), args=(make_input(0,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[0\]') + # invalid target + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(4,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'inconsistent target size: \[4\] for input of size: \[5, 4\]') + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input((),),), kwargs={}), + error_type=RuntimeError, + error_regex=r'inconsistent target size: \[\] for input of size: \[5, 4\]') + + +def get_independent_tensor(tensor): + return tensor.clone().requires_grad_(tensor.requires_grad) + +def sample_inputs_randint(self, device, dtype, requires_grad, **kwargs): + low = 2 + high = 10 + + for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): + sample.kwargs.setdefault('device', device) + # With high + yield SampleInput(high, sample.input.shape, *sample.args, **sample.kwargs) + # With low and high + yield SampleInput(low, high, sample.input.shape, *sample.args, **sample.kwargs) + +def sample_inputs_randint_like(self, device, dtype, requires_grad, **kwargs): + low = 2 + high = 10 + + for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): + # With high + yield SampleInput( + sample.input, + high, + *sample.args, + **sample.kwargs) + # With low and high + yield SampleInput( + get_independent_tensor(sample.input), + low, + high, + *sample.args, + **sample.kwargs) + +def sample_inputs_margin_ranking_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes = ( + (), + (S,), + (S, S), + (S, S, S), + ) + + margins = (0., 1.) 
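+    # Editorial note, not part of the original file: margin_ranking_loss computes
+    # max(0, -target * (input1 - input2) + margin) elementwise, so the sweep below covers
+    # each margin with every reduction mode. A minimal illustrative call:
+    #   torch.nn.functional.margin_ranking_loss(torch.tensor([1.0]), torch.tensor([0.5]),
+    #                                           torch.tensor([1.0]), margin=0.0)  # -> tensor(0.)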
+    reductions = ('sum', 'mean', 'none')
+
+    for shape in shapes:
+        for margin, reduction in product(margins, reductions):
+            kwargs = {'margin': margin, 'reduction': reduction}
+            yield SampleInput(_make_tensor(shape),
+                              args=(_make_tensor(shape, requires_grad=False),
+                                    _make_tensor(shape, requires_grad=False)),
+                              kwargs=kwargs)
+
+def reference_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs):
+    yield from sample_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs)
+    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+
+    for reduction in ('sum', 'mean', 'none'):
+        if dtype.is_floating_point:  # only supports ints and floats
+            # NaN propagation
+            inp1 = make_input((10, ))
+            inp1[2] = float('nan')
+            inp2 = make_input((10, ))
+            inp2[4] = float('nan')
+            target = make_input((10, ))
+            target[9] = float('nan')
+            yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})
+
+            # Inf handling
+            inp1 = make_input((10, ))
+            inp1[1] = float('inf')
+            inp2 = make_input((10, ))
+            inp2[4] = float('inf')
+            target = make_input((10, ))
+            target[7] = float('inf')
+            yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})
+
+        # Broadcasting
+        inp1 = make_input((5, 2))
+        inp2 = make_input((5, 1))
+        target = make_input((1, 2))
+        yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})
+
+def error_inputs_margin_ranking_loss(op, device, **kwargs):
+    make_input = partial(make_tensor, device=device, dtype=torch.float32)
+    # invalid reduction value.
+    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5, 4),), kwargs={'reduction': 'abc'}),
+                     error_type=ValueError, error_regex='is not a valid value')
+    # invalid input shapes
+    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5,),)),
+                     error_regex='margin_ranking_loss : All input tensors should')
+
+def sample_inputs_new_fns(self, device, dtype, requires_grad, *, is_strided=False, **kwargs):
+    # input_shape, output_shape, strides, kwargs
+    # lengths of output_shape and strides must be equal
+    inputs = [
+        ((), (), (), {}),
+        ((S, S), (2, 0), (3, 4), {}),
+        ((0, S, 0), (3, 2, 2), (1, 2, 3), {}),
+        ((S,), (2, 3), (7, 8), {'dtype': dtype, 'device': device}),
+        # Hard-code some dtypes/devices.
We want to test cases where the + # (dtype, device) is different from the input's (dtype, device) + ((S,), (10,), (S,), {'dtype': torch.double}), + ((S,), (1, 1, 12), (S, L, M), {'device': 'cpu'}), + ((S,), (2, 2, 2), (L, M, S), {'dtype': torch.double, 'device': 'cpu'}), + ] + if torch.cuda.is_available(): + inputs.append(((S,), (7, 2), (3, 4), {'device': 'cuda'})) + + for input_shape, output_shape, strides, kwargs in inputs: + t = make_tensor(input_shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + if is_strided: + yield SampleInput(t, output_shape, strides, **kwargs) + else: + yield SampleInput(t, output_shape, **kwargs) + +def sample_inputs_empty_strided(op, device, dtype, requires_grad=False, **kwargs): + + inputs = [ + ((), (), {'dtype': dtype, 'device': device}), + ((S,), (4,), {'dtype': dtype, 'device': device}), + ((S, S), (2, 1), {'dtype': dtype, 'device': device}), + ((S, S, S), (2, 0, 1), {'dtype': dtype, 'device': device}), + ] + + for shape, strides, kwargs in inputs: + yield SampleInput(shape, strides, requires_grad=requires_grad, **kwargs) + +def sample_inputs_empty(op, device, dtype, requires_grad, **kwargs): + # shape + cases = ( + (), (0,), (1,), (1, 3, 5), (5, 3, 1), (1, 0, 5, 1), + ) + + for case in cases: + yield SampleInput(case, device=device, dtype=dtype, requires_grad=requires_grad) + +def sample_inputs_empty_permuted(op, device, dtype, requires_grad, **kwargs): + # shape + cases = ( + (), (0,), (1,), (1, 3, 5), (5, 3, 1), (1, 0, 5, 1), + ) + + for case in cases: + for layout in itertools.permutations(range(len(case))): + yield SampleInput(case, layout, device=device, dtype=dtype, requires_grad=requires_grad) + +def error_inputs_empty_permuted(op_info, device, **kwargs): + yield ErrorInput( + SampleInput((2,), args=((0, 1),)), + error_type=RuntimeError, + error_regex="Number of dimensions in size does not match the length of the physical_layout" + ) + yield ErrorInput( + SampleInput((2,), args=((3,),)), + error_type=RuntimeError, + error_regex="Dimension out of range" + ) + yield ErrorInput( + SampleInput((2, 3), args=((0, 0),)), + error_type=RuntimeError, + error_regex="Duplicate dim not allowed" + ) + +def sample_inputs_scalar_tensor(op, device, dtype, requires_grad, **kwargs): + # Not including a scalar tensor in vals because meta tests start failing due to + # lack of meta support for _local_scalar_dense + # torch.tensor(2, device=device) + vals = (-5, 0, 1) + + for item in vals: + yield SampleInput(item, device=device, dtype=dtype, requires_grad=requires_grad) + +def sample_inputs_eye(op, device, dtype, requires_grad, **kwargs): + # only ints >= 0 are allowed for both arguments, unless m is omitted + sizes = (None, 0, 1, 2, 3, 4, 7, L, M, S) + + for n, m in product(sizes, sizes): + if n is None: + continue + + # TODO: no layout + _kwargs = {'device': device, 'dtype': dtype, 'requires_grad': requires_grad} + if m is None: + yield SampleInput(n, args=(), kwargs=_kwargs) + else: + yield SampleInput(n, args=(m,), kwargs=_kwargs) + +def error_inputs_eye(op_info, device, **kwargs): + # TODO: no layout + _kwargs = {'device': device, 'dtype': torch.float32} + + yield ErrorInput( + SampleInput(-1, args=(), kwargs=_kwargs), + error_regex="n must be greater or equal to 0, got -1" + ) + + yield ErrorInput( + SampleInput(-7, args=(42,), kwargs=_kwargs), + error_regex="n must be greater or equal to 0, got -7" + ) + + yield ErrorInput( + SampleInput(0, args=(-3,), kwargs=_kwargs), + error_regex="m must be greater or equal to 0, got -3" 
+ ) + + +def sample_inputs_new_full(self, device, dtype, requires_grad, **kwargs): + def get_val(dtype): + return make_tensor([], dtype=dtype, device="cpu").item() + + for sample in sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs): + # The scalar we are passing to new_full must be the same dtype + # as the one of the resulting tensor + use_dtype = sample.kwargs['dtype'] if 'dtype' in sample.kwargs else dtype + yield SampleInput( + sample.input, *sample.args, get_val(use_dtype), **sample.kwargs) + +def sample_inputs_full_like(self, device, dtype, requires_grad, **kwargs): + def get_val(dtype): + return make_tensor([], dtype=dtype, device="cpu").item() + + inputs = [ + ((), get_val(dtype), {}), + ((S, S), get_val(dtype), {}), + ((0, S, 0), get_val(dtype), {}), + ((S,), get_val(dtype), {'dtype': dtype, 'device': device}), + # Hard-code some dtypes/devices. We want to test cases where the + # (dtype, device) is different from the input's (dtype, device) + ((S,), get_val(torch.double), {'dtype': torch.double}), + ((S,), get_val(dtype), {'device': 'cpu'}), + ((S,), get_val(torch.double), {'dtype': torch.double, 'device': 'cpu'}), + ] + if torch.cuda.is_available(): + inputs.append(((S,), get_val(dtype), {'device': 'cuda'})) + + for shape, fill_value, kwargs in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(t, fill_value, **kwargs) + +def sample_inputs_multinomial(self, device, dtype, requires_grad, **kwargs): + cases = [ + ([3], 3, {}), + ([10], 3, {}), + ([3, 10], 3, {}), + ([3], 3, dict(replacement=False)), + ([3], 3, dict(replacement=True)), + ([3, 4], 4, dict(replacement=True)), + ([3, 4], 4, dict(replacement=False)), + ] + + for shape, num_samples, kwargs in cases: + t = make_tensor(shape, dtype=dtype, device=device, + low=0, high=None, + requires_grad=requires_grad) + yield SampleInput(t, num_samples, **kwargs) + +def sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs): + def get_value_or_make_tensor(value_or_shape): + if isinstance(value_or_shape, list): + return make_tensor(value_or_shape, dtype=dtype, device=device, + low=0, high=None, + requires_grad=requires_grad) + return value_or_shape + + for value_or_mean_shape, value_or_std_shape, kwargs in cases: + mean = get_value_or_make_tensor(value_or_mean_shape) + std = get_value_or_make_tensor(value_or_std_shape) + yield SampleInput(mean, std, **kwargs) + +def sample_inputs_normal_tensor_first(self, device, dtype, requires_grad, **kwargs): + # value_or_size, value_or_size, kwargs + cases = [ + ([], [], {}), + ([3], [3], {}), + ([3, 4, 2], [3, 4, 2], {}), + ([2, 3], 1.1, {}), + ([1, 2, 3], [5, 2, 3], {}), # broadcasting + ] + + return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs) + +def sample_inputs_normal_tensor_second(self, device, dtype, requires_grad, **kwargs): + yield SampleInput(1.6, 0.3, [2, 3], dtype=dtype, device=device) + yield SampleInput(1.6, 0.3, [2, 2, 2], dtype=dtype, layout=torch.strided, device=device) + yield SampleInput(2.7, make_tensor([4, 3], dtype=dtype, device=device, low=0, high=None, requires_grad=requires_grad)) + +def sample_inputs_bernoulli(self, device, dtype, requires_grad, **kwargs): + shapes = [ + [3], + [], + [0, 3], + [2, 3, 4], + ] + + for shape in shapes: + t = make_tensor(shape, dtype=dtype, device=device, + low=0, high=1, + requires_grad=requires_grad) + yield SampleInput(t) + +def error_inputs_bernoulli(op_info, device, **kwargs): + 
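# Editor's note (not part of the patch): a minimal, self-contained sketch of how the
# full_like samples above get exercised; shapes and fill values are illustrative
# stand-ins for the S/L/M size constants, and make_tensor is torch.testing's helper.
import torch
from torch.testing import make_tensor

def demo_full_like_samples(device="cpu", dtype=torch.float32):
    for shape, fill in (((), 1.5), ((3, 3), 2.0), ((0, 3, 0), 0.0)):
        t = make_tensor(shape, dtype=dtype, device=device)
        out = torch.full_like(t, fill)
        assert out.shape == t.shape and bool((out == fill).all())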
# more than one element of the written-to tensor refers to a single memory location + x = torch.rand((1,), device=device).expand((6,)) + err_msg = 'unsupported operation' + yield ErrorInput(SampleInput(torch.rand_like(x), kwargs={'out': x}), + error_regex=err_msg) + +def sample_inputs_logcumsumexp(self, device, dtype, requires_grad, **kwargs): + inputs = ( + ((S, S, S), 0), + ((S, S, S), 1), + ((), 0), + ) + + for large_number in (True, False): + for shape, dim in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + + if large_number and t.dim() > 0: + t[0] = 10000 + yield SampleInput(t, dim) + +def sample_inputs_trace(self, device, dtype, requires_grad, **kwargs): + yield SampleInput( + make_tensor((S, S), dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad)) + + +def error_inputs_trace(op, device): + yield ErrorInput(SampleInput(make_tensor((3, 4, 5), dtype=torch.float32, device=device)), error_regex="expected a matrix") + + +def sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + cases = (((S, S, S), (2, 1, 0.5)), + ((S, S, S), (2, -1, 0.5)), + ((S, S, S), (1, 2, 3)), + ((S, S, S), (float('inf'), 2, 0.5)), + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((1, 2, 3), (-1, -2)), + ((1, 2, 3), (-1, 2)), + ((1, 2, 3), (1, -2)), + ((1, 2, 3), (1, 2)), + ((), (0, 0)), + ((1, ), (0, 0)), + ((M, M), (0, 1)), + ((S, S, S), (2, 0)), ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + +def _numpy_ref_transpose(a, dim0, dim1): + if a.ndim <= 1: + return a + + return np.swapaxes(a, dim0, dim1) + +def sample_inputs_adjoint(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + shapes = ((1, 2, 3), (M, M), (S, S, S), (S, M, S), (M, S, M, S)) + return (SampleInput(make_arg(shape)) for shape in shapes) + +def sample_inputs_T(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + shapes = ((M, M), (M, L)) + return (SampleInput(make_arg(shape)) for shape in shapes) + +def error_inputs_T(self, device, has_ndims_error=False): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # Deprecated behavior in regular PyTorch, but throws an error in primTorch: + # https://github.com/pytorch/pytorch/issues/86968 + if has_ndims_error: + # ndims == 1 + yield ErrorInput(SampleInput(make_arg(M)), + error_regex=(r'The use of `x\.T` on tensors of dimension other than 0 or 2 ' + r'to reverse their shape is not supported\.')) + + # ndims > 2 + yield ErrorInput(SampleInput(make_arg(M, S, L)), + error_regex=(r'The use of `x\.T` on tensors of dimension other than 0 or 2 ' + r'to reverse their shape is not supported\.')) + + +def sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function produces two tensors of shape (*, m, k) and (*, n, k) with k <= min(m, n). + Their matrix product could be used to generate tensor of shape (*, m, n) of rank k. 
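# Editor's note (illustrative, not part of the patch): the bernoulli/cat/gather error
# inputs above all rely on writing through `out=` into a tensor whose elements alias a
# single memory location; a hedged sketch of that failure mode on CPU eager:
import torch

x = torch.rand(1).expand(6)                 # six elements share one storage slot
try:
    torch.bernoulli(torch.rand_like(x), out=x)
    raised = False
except RuntimeError as err:
    raised = "unsupported operation" in str(err)
assert raised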
+ """ + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + batches = [(), (0, ), (2, ), (1, 1)] + size = [1, 5, 10] + + for batch, m, n in product(batches, size, size): + for k in range(min(3, m, n)): + a = make_arg((*batch, m, k)) + b = make_arg((*batch, n, k)) + yield SampleInput(a, b, **kwargs) + + +def sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad=False, **kwargs): + for sample in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad, **kwargs): + *batch, m, k = sample.input.shape + *_, n, _ = sample.args[0].shape + + # NOTE: since svd_lowrank relies on non rank-revealing SVD, + # it inherits the problem of unstable behavior with repeated + # singular values including zeros. + # Since we want to avoid (repeated) zeros as singular values, + # we can only use k for q. + # This issues could be resolved with using a rank-revealing SVD + # which does not include "zero" singular values. + op_kwargs = { + 'q': k, + 'M': None + } + + # without M specified + yield clone_sample(sample, **op_kwargs) + + # now with M + # TODO: fix bug in the documentation for svd_lowrank: + # M has to be (*, m, n), and not (*, 1, n) as written + # in the documentation + op_kwargs['M'] = make_tensor((*batch, m, n), dtype=dtype, device=device, requires_grad=requires_grad) + yield clone_sample(sample, **op_kwargs) + +def chunk_iter(iterable, size): + it = iter(iterable) + while True: + chunk = tuple(islice(it, size)) + if not chunk: + break + yield chunk + +def sample_inputs_pca_lowrank(op_info, device, dtype, requires_grad=False, **kwargs): + # we reuse samples from svd_lowrank which come in group of two with + # kwarg['M'] = None and with kwarg['M'] = + samples = sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad, **kwargs) + for s1, s2 in chunk_iter(samples, 2): + del s1.kwargs['M'] + del s2.kwargs['M'] + s1.kwargs['center'] = False + s2.kwargs['center'] = True + yield s1 + yield s2 + +def np_sinc_with_fp16_as_fp32(x): + # Wraps numpy's sinc function so that fp16 values are promoted to fp32 + # before sinc is invoked. Context: numpy's sinc returns NaN when evaluated + # at 0 for fp16. 
+ if x.dtype == np.float16: + return np.sinc(x.astype(np.float32)) + else: + return np.sinc(x) + +def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs): + test_cases = ( + ((S, 1, 1), (S, S, S)), + ((S, 1, S), (S, S, S)), + ((S, 1), (S, S, S)), + ((1,), (S, S, S)), + ((1, S), (1, 1, S)), + ((), ()), + ((), (1, 3, 2)), + ) + + return ( + SampleInput( + make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), + shape, + ) for size, shape in test_cases) + +def sample_inputs_broadcast_tensors(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + test_cases: Tuple[tuple] = (((3,), (1, 2, 1), (1, 1), (5, 1, 1),),) + + for shape, *other_shapes in test_cases: + yield SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)) + +def reference_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs) + + m = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + n = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True) + + cases = ( + ((), (1, 1), (1, 1, 7, 1), (3, 1, 1)), + ((3, 5, 6), (1, 3, 5, 6), (1, 1, 1, 1, 6), (8, 3, 5, 6)) + ) + + for a, b, c, d in cases: + yield SampleInput(m(a), args=(m(b), m(c), m(d))) + yield SampleInput(n(a), args=(n(b), n(c), n(d))) + +def sample_inputs_block_diag(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + test_cases: Tuple[tuple] = ( + ((1, S), (2, S), (3, S),), + ((S, 1), (S, 2), (S, 3),), + ((1,), (2,), (3,),), + ((2, S), (S,)) + ) + + for shape, *other_shapes in test_cases: + yield SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)) + # We also want to test mixed complex-non-complex inputs to block_diag + if dtype == torch.complex32 or dtype == torch.complex64: + non_complex_dtype = torch.float32 if dtype == torch.complex32 else torch.float64 + make_arg_non_complex = partial(make_tensor, dtype=non_complex_dtype, device=device, requires_grad=requires_grad) + yield SampleInput(make_arg_non_complex(shape), args=tuple(make_arg(s) for s in other_shapes)) + +def sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs): + small_S = 2 + test_cases = ( + ((S, S, 2), (S, S + 1, 2)), + ((S, S), (S, S)), + ((S, S, S), (S, S, S)), + ((3, 5), (3, 5)), + ((2, 3, 5), (2, 3, 5)), + ((1, 2, 3), (1, 2, 3)), + ((1, 1), (S, 1)), + ((0, 5), (4, 5)), + ((4, 5), (0, 5)), + ((0, 4, 5), (3, 5)), + ((4, 5), (0, 3, 5)), + ((0, 4, 5), (1, 3, 5)), + ((1, 4, 5), (0, 3, 5)), + # Using S here would make this one test take 9s + ((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)), + ((small_S, 1, 1, small_S), (1, small_S, small_S)), + ((1, 1, small_S), (small_S, 1, small_S, small_S)), + ) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: + # FIXME add an override for JIT and revert 0. 
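# Editor's note (illustrative): why np_sinc_with_fp16_as_fp32 above promotes to fp32.
# numpy's sinc guards x == 0 with a tiny constant that underflows in float16, so the
# value at 0 is typically NaN on common numpy builds (behaviour may vary by version).
import numpy as np

x16 = np.zeros(3, dtype=np.float16)
print(np.sinc(x16))                          # typically [nan nan nan]
print(np.sinc(x16.astype(np.float32)))       # [1. 1. 1.]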
back to 0 + # since it's accepted by eager + for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float("inf")]: + for t1_size, t2_size in test_cases: + # The args should never be non-contiguous as this is not supported in the backward + yield SampleInput(make_arg(t1_size), make_arg(t2_size), p, cm) + +def _fill_np(a, value): + a = a.copy() + a.fill(value) + return a + +def _fill_sample_kwargs(device, dtype, input): + if dtype is torch.bool: + value = True + else: + value = 3 + + return ({'value': value}, {'value': value}) + +def sample_inputs_comparison_ops(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) + + # Adds a sample input where both tensors have the same values + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + lhs = make_arg((S, S)) + yield SampleInput(lhs, args=(lhs.clone(),)) + +def sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # shape x number of tensors + cases = ( + ((3, 4), 1), + ((1, 2, 1, 4), 3), + ((0, 1, 0), 2),) + + for shape, num_tensors in cases: + tensors = [] + for _ in range(num_tensors): + tensors.append(make_arg(shape)) + for dim in range(-1, len(shape) - 1): + yield SampleInput(tensors, args=(dim,)) + + +def sample_inputs_chunk_cat(op_info, device, dtype, requires_grad, **kwargs): + # 1. If input tensors have different ndims, dim should be non-negative and be less than the ndims of every input tensors. + # If all input tensors have the same ndims, we support both negative and non-negative dim. + # 2. For wrapped_dim, all tensors should have the same size for 0,...,wrapped_dim-1 dimensions. + # No requirements for (wrapped_dim, ...)-th dimension. + # 3. Expect positive num_chunks + # 4. 
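# Editor's note (illustrative): a minimal cdist call matching the (batch, P, M) and
# (batch, R, M) shapes generated above; the p and compute_mode values are examples
# drawn from the lists the sampler iterates over.
import torch

x1 = torch.randn(2, 5, 3)
x2 = torch.randn(2, 4, 3)
d = torch.cdist(x1, x2, p=2.0, compute_mode="use_mm_for_euclid_dist")
assert d.shape == (2, 5, 4)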
Expect non-empty input tensor list and each input tensor should have at least 1 element + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + same_ndim_cases = ( + ( + [ + torch.Size([1, 2, 3]), + torch.Size([1, 2, 3]), + ], -1, 5 + ), + ( + [ + torch.Size([1, 2, 3]), + torch.Size([1, 2, 3]), + ], 1, 5 + ), + ( + [ + torch.Size([3, 3, 2, 1]), + torch.Size([1, 4, 2, 2]), + torch.Size([2, 1, 3, 3]), + ], 0, 2 + ), + ) + for sizes, dim, num_chunks in same_ndim_cases: + tensors = [] + for size in sizes: + tensors.append(make_arg(size)) + yield SampleInput(tensors, args=(dim, num_chunks)) + + different_ndim_case = [ + torch.Size([2, 3, 3]), + torch.Size([2, 3, 1, 2]), + torch.Size([2, 3]), + torch.Size([2, 3, 2]), + ] + max_dim, num_chunks = 2, 3 + for dim in range(max_dim): + tensors = [] + for size in different_ndim_case: + tensors.append(make_arg(size)) + yield SampleInput(tensors, args=(dim, num_chunks)) + + +def error_inputs_chunk_cat(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # input tensors have different ndims but dim is negative + sizes, dim, num_chunks = [torch.Size([2, 3]), torch.Size([4,])], -1, 3 + tensors = [make_arg(size) for size in sizes] + yield ErrorInput( + SampleInput(tensors, args=(dim, num_chunks)), + error_regex='_chunk_cat expects non-negative dim when input tensors have different ndims', + ) + + # input tensors have different ndims but dim >= ndim of some input tensors + sizes, dim, num_chunks = [torch.Size([2, 3]), torch.Size([4,])], 1, 3 + tensors = [make_arg(size) for size in sizes] + yield ErrorInput( + SampleInput(tensors, args=(dim, num_chunks)), + error_regex='_chunk_cat expects dim < ndim for all input tensors', + ) + + # some tensors have different sizes for 0, ..., dim-1 dimensions. 
+ sizes, dim, num_chunks = [torch.Size([2, 3, 4]), torch.Size([4, 3])], 1, 3 + tensors = [make_arg(size) for size in sizes] + yield ErrorInput( + SampleInput(tensors, args=(dim, num_chunks)), + error_regex='_chunk_cat expects same sizes of 0,...,dim-1 dimensions for all tensors', + ) + + # negative num_chunks + sizes, dim, num_chunks = [torch.Size([2,]), torch.Size([3,])], 0, -1 + tensors = [make_arg(size) for size in sizes] + yield ErrorInput( + SampleInput(tensors, args=(dim, num_chunks)), + error_regex='_chunk_cat expects positive num_chunks', + ) + + # zero as num_chunks + sizes, dim, num_chunks = [torch.Size([2,]), torch.Size([3,])], 0, 0 + tensors = [make_arg(size) for size in sizes] + yield ErrorInput( + SampleInput(tensors, args=(dim, num_chunks)), + error_regex='_chunk_cat expects positive num_chunks', + ) + + # empty input tensor list + dim, num_chunks = 0, 1 + yield ErrorInput( + SampleInput([], args=(dim, num_chunks)), + error_regex='_chunk_cat expects a non-empty input tensor list', + ) + + # empty input tensor with 0 elements + sizes, dim, num_chunks = [torch.Size([0,]), torch.Size([3,])], 0, 1 + tensors = [make_arg(size) for size in sizes] + yield ErrorInput( + SampleInput(tensors, args=(dim, num_chunks)), + error_regex='_chunk_cat expects non-empty tensor', + ) + + +def sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases: Tuple[tuple, tuple, dict] = ( # type: ignore[assignment] + ((S, S), (S, S), {'dim': -1}), + ((S, S), (S, S), {'dim': 1}), + ((M, S), (S, S), {'dim': 0}), # different shapes + ((1, 2, 3), (1, 2, 3), {'dim': -2}), + ((0,), (0,), {'dim': 0}), # empty tensor + ((0,), (S, S), {'dim': 1}), # empty tensor with unempty and dim=1 (special case for legacy_cat_wrap_dim) + ((0, S), (S, S), {'dim': 0}), + ((1,), (1,), {}) # dim not passed, fallback to default + ) + + for input_shape1, input_shape2, kwargs in cases: + yield SampleInput([make_arg(input_shape1), make_arg(input_shape2)], kwargs=kwargs) + + # from coat_lite_mini + yield SampleInput([make_arg((2, 2, 2, 2), memory_format=torch.channels_last)], args=(1,),) + +def error_inputs_cat(op_info, device, **kwargs): + + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for more than one element of the written-to tensor refer to a single memory location + yield ErrorInput(SampleInput([make_arg((S, S)), make_arg((S, S))], + kwargs={'out': make_arg((1, S)).expand((2 * S, S))}), + error_regex='unsupported operation') + + # error inputs for empty tensors + yield ErrorInput(SampleInput([], kwargs={'dim': 1}), + error_regex='non-empty list of Tensors') + + # error inputs for different sizes + yield ErrorInput(SampleInput([make_arg((S, S, L, L)), make_arg((S, 0, L - 1, L))], kwargs={'dim': 1}), + error_regex='Sizes of tensors must match except in dimension') + yield ErrorInput(SampleInput([make_arg((S, 0, L - 1, L)), make_arg((S, S, L, L))], kwargs={'dim': 1}), + error_regex='Sizes of tensors must match except in dimension') + + # error inputs for different dimensions + yield ErrorInput(SampleInput([make_arg((S - 1, 0)), make_arg((S, 0, L - 1, L))], kwargs={'dim': 1}), + error_regex='Tensors must have same number of dimensions') + yield ErrorInput(SampleInput([make_arg((S, 0, L - 1, L)), make_arg((S - 1, 0))], kwargs={'dim': 1}), + error_regex='Tensors must have same number of dimensions') + + # error inputs for same memory locations + x = torch.zeros((0), 
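# Editor's note (illustrative): the two torch.cat error classes targeted by the
# error_regex patterns above, reproduced on small tensors; the printed messages are
# what "Sizes of tensors must match ..." and "... same number of dimensions" match
# against (exact wording may vary by release).
import torch

a, b = torch.randn(2, 3), torch.randn(2, 4)
for bad in ([a, b], [a, torch.randn(2)]):
    try:
        torch.cat(bad, dim=0)
    except RuntimeError as err:
        print(err)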
device=device) + y = torch.randn((4, 6), device=device) + + err_msg = "the written-to tensor refer to a single memory location" + + yield ErrorInput(SampleInput((x, y), kwargs={'dim': 0, 'out': x}), + error_regex=err_msg) + yield ErrorInput(SampleInput((x, y), kwargs={'dim': 0, 'out': y}), + error_regex=err_msg) + + z = torch.zeros((4, 6), device=device) + yield ErrorInput(SampleInput((y, z), kwargs={'out': z[:2, :]}), + error_regex=err_msg) + + # error inputs for different devices + if torch.device(device).type == 'cuda': + x_cuda = make_tensor((3, 3), device=device, dtype=torch.float32) + y_cpu = make_tensor((3, 3), device='cpu', dtype=torch.float32) + yield ErrorInput(SampleInput((x_cuda, y_cpu)), + error_regex='Expected all tensors to be on the same device') + + # error inputs for different input sizes for more than 2 tensors + yield ErrorInput(SampleInput([make_arg((L, 1)), make_arg((L, 1, 1)), make_arg((L, 1, 1))]), + error_regex='Tensors must have same number of dimensions') + + yield ErrorInput(SampleInput([make_arg((S, 1, M)), make_arg((S, 1, 1)), make_arg((S, M, 1))], + kwargs={'dim': 1}), + error_regex='Sizes of tensors must match') + + # error inputs for None input + yield ErrorInput(SampleInput((make_arg((S, 1, 1)), None)), error_type=TypeError, + error_regex='got None') + + # error inputs for zero-dimensional tensors + yield ErrorInput(SampleInput([make_arg(()), make_arg(())]), + error_regex='zero-dimensional.*cannot be concatenated') + + # error inputs for different dtype of out tensors + d = make_tensor((2, 3), device=device, dtype=torch.double) + x = make_tensor((2, 3), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'out': d}), error_type=TypeError, + error_regex='invalid combination of arguments') + +def reference_inputs_cat(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_cat_concat(op, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Noncontiguous type promoting tensors + a = make_arg((3, 4, 2)) + b = make_arg((3, 2, 2), noncontiguous=True, dtype=torch.double) + c = make_arg((3, 3, 2), dtype=torch.float16).permute(1, 0, 2) + + yield SampleInput((a, b, c), kwargs={'dim': 1}) + + # Special 1D tensor with dim length of 0 case + a = make_arg((0,)) + b = make_arg((3, 2, 2)) + + yield SampleInput((a, b, a)) + yield SampleInput((a, a, a)) + +def _elementwise_type_promo_np(*args, type_promotion_kind): + def _maybe_torch(x): + if isinstance(x, np.ndarray): + return torch.from_numpy(x) + return x + + flattened = pytree.arg_tree_leaves(*args) + transformed = tuple(_maybe_torch(a) for a in flattened) + result_dtype, _ = prims.utils.elementwise_dtypes( + *transformed, + type_promotion_kind=type_promotion_kind) + return torch_to_numpy_dtype_dict[result_dtype] + +def _cat_np(input_seq, dim=0): + inputs = tuple(a for a in input_seq if not (a.ndim == 1 and a.size == 0)) + + if len(inputs) == 0: + np_dtype = _elementwise_type_promo_np( + input_seq, + type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH) + return np.empty(0, dtype=np_dtype) + + return np.concatenate(inputs, axis=dim) + +def _floor_divide_np(a, b): + dtype = _elementwise_type_promo_np( + a, + b, + type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) + if isinstance(a, np.ndarray): + a = a.astype(dtype) + if isinstance(b, np.ndarray): + b = b.astype(dtype) + return np.floor_divide(a, b) + +def sample_inputs_hstack_dstack_vstack(op_info, 
device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + tensor_shapes = ( + # First Tensor being 1-D is special + # case for hstack + ((S,), (S,), (S,)), + ((S, S), (S, S), (S, S)), + ) + for s1, s2, s3 in tensor_shapes: + tensors = (make_arg(s1,), make_arg(s2,), make_arg(s3)) + yield SampleInput(tensors) + +def error_inputs_hstack_dstack_vstack(op, device): + make_arg = partial(make_tensor, dtype=torch.int32, device=device, requires_grad=False) + tensor_shapes = ( + ((S,), (S, S, S, S), (S,)), + ) + for s1, s2, s3 in tensor_shapes: + tensors = (make_arg(s1,), make_arg(s2,), make_arg(s3)) + # Different dimension tensor + yield ErrorInput(SampleInput(tensors), error_regex="Tensors must have same number of dimensions") + + # empty tensor list + yield ErrorInput(SampleInput(()), error_regex="expects a non-empty TensorList") + +def sample_inputs_unbind(op_info, device, dtype, requires_grad, **kwargs): + # Note: we don't do any tests where we unbind along 0-length dims + # because in that case unbind returns and empty tuple, and that breaks + # some assumptions in some backward tests in test_ops.py + shape_dims = (((S,), 0), + ((S, S), 0), + ((S, S), 1), + ((S, S), -1), + ((S, 0, S), 0), + ((S, S, S), 1), + ) + for shape, dim in shape_dims: + yield SampleInput(make_tensor(shape, dtype=dtype, device=device, + requires_grad=requires_grad), + args=(dim,)) + +def error_inputs_unbind(op_info, device): + make_arg = partial(make_tensor, dtype=torch.int32, device=device, requires_grad=False) + yield ErrorInput(SampleInput(make_arg(()), args=(0,)), error_type=IndexError, + error_regex="Dimension specified as 0 but tensor has no dimensions") + yield ErrorInput(SampleInput(make_arg((2,)), args=(2,)), error_type=IndexError, + error_regex="Dimension out of range") + +def reference_unbind(t, dim): + """A numpy implementation of torch.unbind""" + return tuple(s.squeeze(dim) for s in np.split(t, t.shape[dim], dim)) + +def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput( + make_arg((M, S)), + 0, + gather_variable((S, S), 1, M, True, device=device)) + yield SampleInput( + make_arg((M, S)), + 1, + gather_variable((M, S // 2), 0, S, True, device=device)) + # Empty index tensor case, see: https://github.com/pytorch/pytorch/pull/65006 + yield SampleInput( + make_arg((S,)), + 0, + torch.tensor([], dtype=torch.uint8, device=device)) + # 0D tensor case + yield SampleInput( + make_arg(()), + 0, + torch.tensor([0], dtype=torch.int64, device=device)) + yield SampleInput( + make_arg(()), + 0, + torch.tensor(0, dtype=torch.int64, device=device)) + +def _fill_indices(idx, dim, dim_size, elems_per_row, m, n, o): + for i in range(1 if dim == 0 else m): + for j in range(1 if dim == 1 else n): + for k in range(1 if dim == 2 else o): + ii = [i, j, k] + ii[dim] = slice(0, idx.size(dim) + 1) + idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row] + +def error_inputs_gather(op_info, device, **kwargs): + # src is [1, 2] + # [3, 4] + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + + # idx is [0, 0] + # [1, 0] + idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) + + # Index should be smaller than self except on dimension 1 + bad_src = make_tensor((1, 1), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(bad_src, args=(1, idx,)), + 
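# Editor's note (illustrative): sanity check of the numpy reference_unbind defined
# above against torch.unbind on a small array (shape is arbitrary).
import numpy as np
import torch

def reference_unbind(t, dim):
    return tuple(s.squeeze(dim) for s in np.split(t, t.shape[dim], dim))

a = np.arange(12).reshape(3, 4)
ref = reference_unbind(a, 1)
tor = torch.unbind(torch.from_numpy(a), dim=1)
assert all(np.array_equal(r, t.numpy()) for r, t in zip(ref, tor))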
error_regex="Size does not match at dimension 0") + + # Index must have long dtype + bad_idx = idx.to(torch.int32) + yield ErrorInput(SampleInput(src, args=(1, bad_idx)), + error_regex="Expected dtype int64 for index") + + # TODO: FIXME + # out.dtype must match src.dtype + # Creates new src & idx since SampleInputs can't share tensors + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) + out = torch.empty((2, 2), device=device, dtype=torch.float64) + yield ErrorInput(SampleInput(src, args=(1, idx), kwargs={'out': out}), + error_regex="Expected out tensor to have dtype") + + # src and index tensors must have the same # of dimensions + # idx too few dimensions + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + idx = torch.tensor((0, 0), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(src, args=(1, idx)), + error_regex="Index tensor must have the same number of dimensions") + + # src too few dimensions + src = torch.tensor((1, 2), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(src, args=(0, idx)), + error_regex="Index tensor must have the same number of dimensions") + + # index out of bounds + # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices + if torch.device(device).type == 'cpu': + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 23), (1, 0)), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(src, args=(1, idx,)), + error_regex="index 23 is out of bounds for dimension") + + x = torch.rand((1,), device=device).expand((3,)) + src = torch.rand((6,), device=device) + ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64) + + yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=src)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(ind.clone(), args=(0, ind[1:],), kwargs=dict(out=ind[:1])), + error_type=RuntimeError, + error_regex='unsupported operation') + +def error_inputs_take(op_info, device, **kwargs): + x = torch.rand((1,), device=device).expand((3,)) + src = torch.rand((6,), device=device) + ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64) + + yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=src)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(ind.clone(), args=(ind[1:],), kwargs=dict(out=ind[:-1])), + error_type=RuntimeError, + error_regex='unsupported operation') + +# Error inputs for scatter +def error_inputs_scatter_and_scatter_add(op_info, device, **kwargs): + # Error when self.dtype != src.dtype (and src is not a scalar) + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5), device=device, dtype=torch.double) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Expected self.dtype to be equal to src.dtype") + + # Index dtype must be long + src = make_tensor((2, 5), device=device, dtype=torch.float32) + 
idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.int32) + dst = torch.zeros((3, 5), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Expected dtype int64 for index") + + # Index and destination must have the same number of dimensions + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5, 3), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Index tensor must have the same number of dimensions as self tensor") + + # Index and src must have the same number of dimensions when src is not a scalar + src = make_tensor((2, 5, 2), device=device, dtype=torch.float32) + idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Index tensor must have the same number of dimensions as src tensor") + + # Index out of bounds + # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices + if torch.device(device).type == 'cpu': + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="index 34 is out of bounds for dimension 0 with size 3") + +def error_inputs_renorm(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput(SampleInput(zero_d, args=(0.5, 0, 1.0)), error_type=RuntimeError, + error_regex="needs at least 2 dimensions, got 0 dimensions") + + +def error_inputs_ormqr(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput(SampleInput(zero_d, args=(zero_d, zero_d)), error_type=RuntimeError, + error_regex="input must have at least 2 dimensions") + + # https://github.com/pytorch/pytorch/issues/85218 + tensor_0 = torch.full((5, 0,), 1, device=device) + tensor_1 = torch.full((5,), 1, device=device) + tensor_2 = torch.full((5, 5,), 1, device=device) + bool_3 = True + bool_4 = True + yield ErrorInput(SampleInput(tensor_0, args=(tensor_1, tensor_2, bool_3, bool_4)), error_type=RuntimeError, + error_regex=r"tau.shape\[-1\] must be less than or equal to input.shape\[-1\]") + + +def error_inputs_diag(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput(SampleInput(zero_d, args=(0,)), error_type=RuntimeError, + error_regex="1D or 2D") + zero_d = torch.randn(1, 1, 1, device=device) + yield ErrorInput(SampleInput(zero_d, args=(0,)), error_type=RuntimeError, + error_regex="1D or 2D") + +def error_inputs_embedding(op_info, device, **kwargs): + indices = torch.rand(2, 2, device=device).long() + weights = [ + torch.tensor(1.0, device=device), + torch.tensor(1.0, device=device).reshape(1, 1, 1), + ] + + for weight in weights: + yield ErrorInput(SampleInput(weight, args=(indices,)), error_type=RuntimeError, + error_regex="'weight' must be 2-D") + + +def error_inputs_t(op_info, device, **kwargs): + yield ErrorInput( + SampleInput(torch.randn(2, 3, 4, 5, device=device)), + error_regex="expects a tensor with <= 2", + ) + + +def error_inputs_multinomial(op_info, device, **kwargs): + x = torch.empty(1, 2, 3, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(2,)), + 
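# Editor's note (illustrative): the src/idx pair used by error_inputs_gather above,
# shown in the valid case; with dim=1, out[i][j] = src[i][idx[i][j]].
import torch

src = torch.tensor([[1., 2.], [3., 4.]])
idx = torch.tensor([[0, 0], [1, 0]])
out = torch.gather(src, 1, idx)
assert torch.equal(out, torch.tensor([[1., 1.], [4., 3.]]))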
error_regex="prob_dist must be 1 or 2 dim") + + x = torch.empty(1, 2, dtype=torch.long, device=device) + yield ErrorInput(SampleInput(x, args=(2,)), + error_regex="multinomial only supports floating-point dtypes for input") + + x = torch.empty(1, 2, dtype=torch.double, device=device) + y = torch.empty(1, 2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(2,), kwargs=dict(out=y)), + error_regex="multinomial expects Long tensor out") + + x = torch.empty(2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(0,)), + error_regex="cannot sample n_sample <= 0 samples") + + x = torch.empty(2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(-1,)), + error_regex="cannot sample n_sample <= 0 samples") + + x = torch.empty(2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(3, False,)), + error_regex="cannot sample n_sample > prob_dist") + + x = torch.empty(16777217, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(3,)), + error_regex="number of categories cannot exceed") + + inputs = ((1., -1., 1.), (1., inf, 1.), (1., -inf, 1.), (1., 1., nan)) + + err_msg1 = "probability tensor contains either `inf`, `nan` or element < 0" + err_msg2 = "invalid multinomial distribution" + + rep_arg = (False, True) if torch.device(device).type == 'cpu' else (False,) + + for rep in rep_arg: + kwargs = {'num_samples': 2, 'replacement': rep} + + for shape in inputs: + # error case when input tensor contains `inf`, `nan` or negative element + yield ErrorInput(SampleInput(torch.tensor(shape), kwargs=kwargs), + error_regex=err_msg1 if rep is False else err_msg2) + + # error case for the invalid multinomial distribution (sum of probabilities <= 0), 1-D input + x = torch.zeros(3, device=device) + yield ErrorInput(SampleInput(x, kwargs=kwargs), + error_regex=err_msg2) + + # error case for the invalid multinomial distribution (sum of probabilities <= 0), 2-D input + x = torch.zeros(3, 3, device=device) + yield ErrorInput(SampleInput(x, kwargs=kwargs), + error_regex=err_msg2) + + # error case for the invalid multinomial distribution + x[1, :] = 1 + yield ErrorInput(SampleInput(x, kwargs=kwargs), + error_regex=err_msg2) + +def error_inputs_gradient(op_info, device, **kwargs): + for dtype in [torch.long, torch.float32, torch.complex64]: + t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device, dtype=dtype) + + dim = (1, 0) + spacing = [0.1] + yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)), + error_type=RuntimeError, + error_regex='torch.gradient expected spacing to be unspecified, a scalar ') + + yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=3)), + error_type=RuntimeError, + error_regex='torch.gradient only supports edge_order=1 and edge_order=2.') + + dim = (1, 1) + spacing = 0.1 + yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)), + error_type=RuntimeError, + error_regex='dim 1 appears multiple times in the list of dims') + + dim = (0, 1) + coordinates = [torch.tensor([1, 2, 4], device='cpu'), torch.tensor([1, 2, 4], device='meta')] + yield ErrorInput(SampleInput(t, kwargs=dict(spacing=coordinates, dim=dim, edge_order=1)), + error_type=RuntimeError, + error_regex='torch.gradient expected each tensor to be on the same device,') + + yield ErrorInput(SampleInput(t, kwargs=dict(dim=3)), + error_type=IndexError, error_regex='') + + t = torch.tensor([[1], [2], [3]]) + yield ErrorInput(SampleInput(t, 
kwargs=dict(edge_order=1)), + error_type=RuntimeError, + error_regex='torch.gradient expected each dimension size to be at least') + + t = torch.tensor([[1, 2], [3, 4]]) + yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=2)), + error_type=RuntimeError, + error_regex='torch.gradient expected each dimension size to be at least') + +def error_inputs_rrelu(op_info, device, **kwargs): + input = make_tensor((S, S), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(input, kwargs={'lower': 0.3, 'upper': 0.1}), + error_regex='Lower bound should be less than or equal to the upper bound') + +def error_inputs_masked_select(op_info, device, **kwargs): + x = torch.rand((1,), device=device).expand((3,)) + y = torch.rand((6,), device=device) + mask = torch.tensor([True, False, True, True, False, False], device=device) + + yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=y)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(mask.clone(), args=(mask,), kwargs=dict(out=mask)), + error_type=RuntimeError, + error_regex='unsupported operation') + +def error_inputs_median(op_info, device, **kwargs): + x = torch.tensor([[[[[[[[[[[[[[[[[[[[[[[[[nan], + [nan]]]]]]]]]]]]]]]]]]]]]]]]], device=device) + if device == 'cuda': + yield ErrorInput(SampleInput(x, kwargs=dict(dim=(-1))), + error_type=RuntimeError, + error_regex='CUDA Tensors cannot have more than 25 dimensions') + else: + return + + +def error_inputs_index_select(op_info, device, **kwargs): + x = torch.rand((1, 6), device=device).expand((2, 6)) + y = torch.rand((3, 6), device=device) + ind = torch.tensor([0, 1], dtype=torch.int64, device=device) + + yield ErrorInput(SampleInput(y, args=(1, ind,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + +def error_inputs_index_add(op_info, device, **kwargs): + result = torch.tensor([[1., 2.], [4., 5.], [7., 8.]]) + source = torch.tensor([2., 4.]) + + yield ErrorInput(SampleInput(result, args=(0, torch.tensor([0, 2]), source)), + error_type=RuntimeError, + error_regex=r'source tensor shape must match self tensor shape, ' + r'excluding the specified dimension. Got self.shape = \[3, 2\] source.shape = \[2\]') + +def error_inputs_logcumsumexp(op_info, device, **kwargs): + dim = 3 + srcs = [torch.randn(5, 2, device=device), torch.randn(0, 2, device=device)] + for src in srcs: + yield ErrorInput(SampleInput(src, args=(dim,)), + error_type=IndexError, + error_regex='Dimension out of range') + +def sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput( + make_arg((S, S)), gather_variable((S, S), 1, S, True, device=device), 0) + + # `indices` broadcast + yield SampleInput( + make_arg((S, S)), gather_variable((1, S // 2), 0, S, True, device=device), 1) + + # `self` broadcast + yield SampleInput( + make_arg((1, S)), gather_variable((S, S // 2), 0, S, True, device=device), 1) + + # without `dim` arg + yield SampleInput( + make_arg((S, S)), gather_variable((S, S // 2), 0, S, True, device=device)) + + +def error_inputs_aminmax_amax_amin(op_info, device, is_ref=False, **kwargs): + + # Error Inputs for zero-dim tensors, when 'dim' arg is not provided. 
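# Editor's note (illustrative): torch.take_along_dim against its numpy counterpart,
# mirroring the (self, indices, dim) samples generated above; values are arbitrary.
import numpy as np
import torch

t = torch.arange(12.).reshape(3, 4)
idx = torch.tensor([[0], [2], [3]])
out = torch.take_along_dim(t, idx, dim=1)
assert np.array_equal(out.numpy(), np.take_along_axis(t.numpy(), idx.numpy(), axis=1))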
+ shape = (S, 0, S) + err_msg_amax_amin = "reduction" + err_msg_aminmax = "cannot compute aminmax over an empty dimension as the operation has no identity" + if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: + yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_amax_amin) + elif op_info.name in ['aminmax']: + yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_aminmax) + + # Error Inputs for tensors with more than 64 dimension + sizes = [1] * 65 + err_msg1 = "only tensors with up to 64 dims are supported" + yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': -1}), + error_regex=err_msg1) + yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': 64}), + error_regex=err_msg1) + + # Error Inputs for repeated 'dim' + if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: + dims = [(0, 0), (0, -4)] + err_msg2 = "in the list of dims" + x = torch.randn(S, S, S, S, device=device) + for dim in dims: + yield ErrorInput(SampleInput(x, kwargs={'dim': dim}), error_regex=err_msg2) + + # Error Input for illegal dtype + input5 = torch.randn(L, L, dtype=torch.float32, device=device) + max_values = torch.empty(L, dtype=torch.float32, device=device) + min_values = torch.empty(L, dtype=torch.double, device=device) + illegal_values = torch.empty(L, dtype=torch.int, device=device) + + # Unlike regular PyTorch, amax and amin refs don't require input and out + # dtypes to match exactly: + # https://github.com/pytorch/pytorch/pull/87765#pullrequestreview-1162023824 + if is_ref: + err_msg_amax_amin2 = ("Attempting to cast from torch.float32 to out tensor with dtype " + "torch.int32, but this can't be cast because it is not safe!") + else: + err_msg_amax_amin2 = ("Expected the dtype for input and out to match, but got Float " + "for input's dtype and Int for out's dtype.") + err_msg_aminmax2 = "Expected out tensor to have dtype float, but got double instead" + + if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: + yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': illegal_values}), + error_regex=err_msg_amax_amin2) + elif op_info.name in ['aminmax']: + yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': (max_values, min_values)}), + error_regex=err_msg_aminmax2) + + # Error Inputs for functions to raise an error on specified zero'd dimension as reduction dim + err_msg3 = "reduction" + # FIXME: eager and ref impl throw different types of errors + error_type = IndexError if 'refs' not in op_info.name else RuntimeError + yield ErrorInput(SampleInput(torch.rand(shape, device=device), kwargs={'dim': 1}), + error_type=error_type, error_regex=err_msg3) + +def sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs): + test_cases: Tuple[tuple, dict] = ( # type: ignore[assignment] + ((S, S, S), {}), + ((S, S, S), {'dim': 1}), + ((S, S, S), {'dim': 1, 'keepdim': True}), + ((), {'dim': 0}), + ((), {}), + ((), {'dim': 0, 'keepdim': True}), + ((S, 0, S), {'dim': 0}), + ) + + for shape, kwargs in test_cases: + yield SampleInput( + make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad), + **kwargs) + +def error_inputs_diff(op_info, device, **kwargs): + t = torch.rand((1, 3), device=device) + n = -1 + yield ErrorInput(SampleInput(t, args=(n, ), kwargs=kwargs), + error_type=RuntimeError, + error_regex=f'order must be non-negative but got {n}') + +def sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs): + 
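# Editor's note (illustrative): torch.aminmax for the dim/keepdim samples above;
# it returns a (min, max) pair reduced over the given dimension.
import torch

t = torch.tensor([[1., 5.], [3., 2.]])
mn, mx = torch.aminmax(t, dim=1)
assert torch.equal(mn, torch.tensor([1., 2.])) and torch.equal(mx, torch.tensor([5., 3.]))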
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + test_cases = ( + ((1,), 0, None, None), + ((S,), 0, None, None), + ((S, 1), 0, None, None), + ((S, 1), 1, None, None), + ((S, S), 0, None, None), + ((S, S), 1, None, None), + ((S, S), 0, (1, S), (2, S)), + ((S, S), 0, None, (2, S)), + ((XS, XS, XS), 1, None, None), + ((XS, XS, XS), 2, None, None), + ((XS, XS, XS), 1, (XS, 1, XS), (XS, 1, XS)), + ((XS, XS, XS), 2, (XS, XS, 1), (XS, XS, 1)), + ((XS, XS, XS), 2, (XS, XS, XS), (XS, XS, XS)),) + + sample_inputs = [] + for size, dim, size_prepend, size_append in test_cases: + prepend_size = 0 if (size_prepend is None) else size_prepend[dim] + append_size = 0 if (size_append is None) else size_append[dim] + dim_size = size[dim] + prepend_size + append_size + for n in range(dim_size): + input_tensor = make_arg(size) + prepend = make_arg(size_prepend) if size_prepend else None + append = make_arg(size_append) if size_append else None + yield SampleInput(input_tensor, n, dim, prepend, append) + + # add some samples with n > dim_size + yield SampleInput(make_arg((XS, XS, XS)), S + 1, 1) + yield SampleInput(make_arg((XS, XS, XS)), S * 3 + 2, 2, make_arg((XS, XS, XS)), make_arg((XS, XS, XS))) + +def sample_inputs_histogram(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]): + input_tensor = make_arg(size) + weight_tensor = make_arg(size) if weighted else None + + yield SampleInput(input_tensor, bin_ct, + weight=weight_tensor, density=density) + + bins_tensor = make_arg((bin_ct + 1,)) + yield SampleInput(input_tensor, bins_tensor, + weight=weight_tensor, density=density) + +def sample_inputs_histogramdd(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((S, S), (S, S, S), (S, 1, S), (S, 0, S)) + bin_ct_patterns = ((1, 1, 1, 1, 1), (2, 3, 2, 3, 2), (3, 2, 3, 2, 3)) + + for size, bin_ct_pattern, weighted, density in product(sizes, bin_ct_patterns, [False, True], [False, True]): + input_tensor = make_arg(size) + bin_ct = bin_ct_pattern[:size[-1]] + weight_tensor = make_arg(size[:-1]) if weighted else None + + yield SampleInput(input_tensor, bin_ct, + weight=weight_tensor, density=density) + + bins_tensor = [make_arg(ct + 1) for ct in bin_ct] + yield SampleInput(input_tensor, bins_tensor, + weight=weight_tensor, density=density) + +def error_inputs_histogramdd(opinfo, device, **kwargs): + invalid_bins = [1, 1, 1, 1, 1] + make_arg = partial(make_tensor, dtype=torch.float, device=device, requires_grad=False) + msg = "histogramdd: The size of bins must be equal to the innermost dimension of the input." 
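# Editor's note (illustrative): torch.diff behaviour covered by the n/dim/prepend/
# append combinations enumerated above (values arbitrary).
import torch

x = torch.tensor([1., 4., 9., 16.])
assert torch.equal(torch.diff(x), torch.tensor([3., 5., 7.]))
assert torch.equal(torch.diff(x, n=2), torch.tensor([2., 2.]))
assert torch.equal(torch.diff(x, prepend=torch.tensor([0.])), torch.tensor([1., 3., 5., 7.]))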
+ yield ErrorInput(SampleInput(make_arg(5, 6), invalid_bins), error_regex=msg) + +def sample_inputs_histc(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + for size, min, max in product(sizes, [0, -10], [0, 10]): + # construct sample input omitting bins arg + yield SampleInput(make_arg(size), min=min, max=max) + + # construct sample inputs with a few different bins values + for bins in [1, 3, 10]: + yield SampleInput(make_arg(size), bins=bins, min=min, max=max) + +def sample_inputs_bincount(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + for size, weighted in product((S, M), [False, True]): + input_tensor = torch.randint(0, size, (size,), dtype=dtype, device=device) + weight_tensor = make_arg((size,)) if weighted else None + + max_val = int(input_tensor.max().item()) + + for minlength in [0, max_val // 2, max_val, 2 * max_val]: + yield SampleInput( + input_tensor, weights=weight_tensor, minlength=minlength) + +def sample_inputs_bucketize(op_info, device, dtype, requires_grad, reference_inputs_mode=False, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = (((), S), ((S,), S), ((S, S), S), ((S, S, S), S), ((S, 1, S), S), ((S, 0, S), S)) + + if reference_inputs_mode: + sizes += (((256,), 128), ((128,), 256), ((32, 32), 11), ((32, 4, 32), 33)) + + for (input_shape, nb), out_int32, right in product(sizes, [False, True], [False, True]): + input_tensor = make_arg(input_shape) + boundaries = make_arg(nb).msort() + + yield SampleInput(input_tensor, boundaries, + out_int32=out_int32, right=right) + +reference_inputs_bucketize = partial(sample_inputs_bucketize, reference_inputs_mode=True) + +def error_inputs_bucketize(opinfo, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float, device=device, requires_grad=False) + yield ErrorInput(SampleInput(make_arg((S, S, S)), make_arg((S, S))), + error_regex="boundaries tensor must be 1 dimension") + +def sample_inputs_searchsorted(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # (unsorted tensor size, (input sizes,), is_scalar) + sizes = ( + ((0,), ((0,),), False), + ((M,), ((), (M,), (M, M)), False), + ((0, 0), ((0, 0),), False), + ((M, M), ((M, M),), False), + ((0, 0, 0), ((0, 0, 0),), False), + ((M, M, M), ((M, M, M),), False), + ((L,), ((),), True), + ) + + for (size, input_sizes, is_scalar), noncontiguous, out_int32, right in product( + sizes, [False, True], [False, True], [False, True] + ): + unsorted_tensor = make_arg(size, noncontiguous=noncontiguous) + for input_size in input_sizes: + input = make_arg(input_size, noncontiguous=noncontiguous) + if is_scalar: + input = input.item() + if np.prod(size) == 0: + boundary_tensor = unsorted_tensor + sorter = make_tensor(size, dtype=torch.int64, device=device, noncontiguous=noncontiguous) + else: + boundary_tensor, sorter = torch.sort(unsorted_tensor) + side = "right" if right else "left" + + yield SampleInput(boundary_tensor, input, out_int32=out_int32, right=right) + yield SampleInput(boundary_tensor, input, out_int32=out_int32, side=side) + + yield SampleInput(unsorted_tensor, input, out_int32=out_int32, right=right, sorter=sorter) + yield SampleInput(unsorted_tensor, input, 
out_int32=out_int32, side=side, sorter=sorter) + +def sample_inputs_gradient(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + test_cases_float = ( + ((S,), None, None, 1), + ((S,), 2., None, 1), + ((S, S), None, None, 2), + ((S, S), [2.0, 2.1], None, 1), + ((S, S), [2.0, 2.1], (0, 1), 1), + ((4, 4, 4), [2., 1.], (0, 1), 2), + ) + for size, spacing, dim, edge_order in test_cases_float: + t = make_arg(size) + yield SampleInput(t, dim=dim, spacing=spacing, edge_order=edge_order) + + test_cases_tensor = ( + ((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1), + ((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2), + ) + for size, coordinates, dim, edge_order in test_cases_tensor: + t = make_arg(size) + coordinates_tensor_list = [] + for coords in coordinates: + # `coords` will always contain floating point values and Python 3.10 does not support this + # implicit conversion to an integer using `__int__` + # TODO: this can be simplified after https://github.com/pytorch/pytorch/issues/69316 is fixed + a = torch.tensor(coords, device=device) + coordinates_tensor_list.append(a.to(dtype)) + yield SampleInput(t, dim=dim, spacing=coordinates_tensor_list, edge_order=edge_order) + +def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + test_args = [ + ([1, 2],), + (slice(0, 3),), + ([slice(0, 3), 1],), + ([[0, 2, 3], [1, 3, 3], [0, 0, 2]],), + ([[0, 0, 3], [1, 1, 3], [0, 0, 2]],), + ([slice(None), slice(None), [0, 3]],), + ([slice(None), [0, 3], slice(None)],), + ([[0, 3], slice(None), slice(None)],), + ([[0, 3], [1, 2], slice(None)],), + ([[0, 3], ],), + ([[0, 3], slice(None)],), + ([[0, 3], Ellipsis],), + ([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],), + (index_variable(2, S, device=device),), + (mask_not_all_zeros((S,)),), + ] + + for args in test_args: + yield SampleInput(make_arg((S, S, S)), args=args) + + yield SampleInput(make_arg((S, S, S, S)), args=([slice(None), [0, 1], slice(None), [0, 1]],)) + +def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + for accumulate in [False, True]: + # Test with indices arg + yield SampleInput( + make_arg((S, S,)), + (index_variable(2, S, device=device),), + make_arg((2, S)), + accumulate=accumulate) + + # Test with mask arg + mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,)) + yield SampleInput( + make_arg((S, S)), (mask, ), make_arg((S,)), accumulate=accumulate) + +def sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs): + def small_3d_unique(): + res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S) + res = res.to(dtype).requires_grad_(requires_grad) + return res + + def large_1d_unique(): + res = torch.randperm(L * L * L, dtype=torch.int64, device=device) + res = res.to(dtype).requires_grad_(requires_grad) + return res + + # Test case for large tensor. + yield SampleInput(large_1d_unique()) + + # Test cases for small 3d tensors. 
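# Editor's note (illustrative): the searchsorted contract the samples above exercise;
# boundaries must be sorted along the last dimension, and right/side pick the
# insertion side for ties.
import torch

boundaries = torch.tensor([1., 3., 5., 7.])
values = torch.tensor([3., 6.])
assert torch.equal(torch.searchsorted(boundaries, values), torch.tensor([1, 3]))
assert torch.equal(torch.searchsorted(boundaries, values, right=True), torch.tensor([2, 3]))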
+ # Imitates legacy tests from test/test_torch.py + dims = range(-3, 3) + flag = [True, False] + for dim, descending, stable in product(dims, flag, flag): + # default schema without stable sort + yield SampleInput(small_3d_unique(), dim, descending) + # schema with stable sort, no CUDA support yet + if torch.device(device).type == 'cpu': + yield SampleInput( + small_3d_unique(), dim=dim, descending=descending, stable=stable) + + # Test cases for scalar tensor + tensor_opt = dict(dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(torch.tensor(1, **tensor_opt)) + yield SampleInput(torch.tensor(1, **tensor_opt), 0) + yield SampleInput(torch.tensor(1, **tensor_opt), 0, True) + + # Test cases for empty tensor + yield SampleInput(torch.tensor((), **tensor_opt)) + yield SampleInput(torch.tensor((), **tensor_opt), 0) + yield SampleInput(torch.tensor((), **tensor_opt), 0, True) + + # Test cases for stable sort + yield SampleInput(small_3d_unique(), stable=True) + yield SampleInput(small_3d_unique(), dim=0, stable=True) + yield SampleInput(small_3d_unique(), dim=0, descending=True, stable=True) + +def sample_inputs_threshold(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + sizes = ((), (S,), (S, S), (S, S, S)) + for x_size in sizes: + # threshold and values args must be numbers + yield SampleInput(make_arg(x_size), make_arg(()).item(), make_arg(()).item()) + +def sample_inputs_unique(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + for shape, sorted, return_inverse, return_counts, dim in \ + product(sizes, [False, True], [False, True], [False, True], [None, -2, -1, 0, 1, 2]): + # torch.unique cannot be called if the input tensor has a zero dimension which isn't the selected dim + if 0 in shape and shape.index(0) is not dim: + continue + + # skip invalid dim args + if dim is not None and (dim < -len(shape) or dim >= len(shape)): + continue + + kwargs = dict(sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim) + + # construct a test case with only one distinct value + input_t = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(input_t, **kwargs) + + # construct a test case with mixed 0s and 1s + input_t = make_arg(shape, dtype=torch.bool, requires_grad=False)\ + .to(dtype).requires_grad_(requires_grad) + yield SampleInput(input_t, **kwargs) + + # construct a test case with many different values + yield SampleInput(make_arg(shape), **kwargs) + +def sample_inputs_unique_consecutive(*args, **kwargs): + for sample_input in sample_inputs_unique(*args, **kwargs): + if not sample_input.kwargs["sorted"]: + sample_input.kwargs.pop("sorted") + yield sample_input + +def sample_inputs_adaptive_avg_pool1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + ((0, 8, 8), (5,)), + ((3, 8, 8), 5), + ((3, 8, 8), 1) + ) + + for input_shape, output_size in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=(output_size,)) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=(output_size,)) + + +def error_inputs_adaptive_avg_pool1d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, 
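# Editor's note (illustrative): torch.unique with the sorted/return_inverse/
# return_counts flags iterated above; vals[inverse] reconstructs the input.
import torch

t = torch.tensor([1, 3, 2, 3, 1])
vals, inv, counts = torch.unique(t, sorted=True, return_inverse=True, return_counts=True)
assert torch.equal(vals, torch.tensor([1, 2, 3]))
assert torch.equal(counts, torch.tensor([2, 1, 2]))
assert torch.equal(vals[inv], t)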
device=device, dtype=torch.float32) + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3)), output_size=()), + error_regex="'output_size' should contain one int") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1)), output_size=(-1,)), + error_regex="elements of output_size must be greater than or equal to 0") + + +def sample_inputs_adaptive_avg_pool2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + ((1, 8, 8, 8), (5, 7)), + ((2, 8, 8, 8), (None, 7)), + ((1, 8, 4, 3), (5, None)), + ((1, 8, 4, 3), (None, None)), + ((1, 8, 4, 3), (5)), + ) + + for input_shape, output_size in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=(output_size,)) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=(output_size,)) + + +def error_inputs_adaptive_avg_pool2d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for incorrect input dimension + yield ErrorInput(SampleInput(make_arg((2, 2)), output_size=(2, 2)), + error_type=ValueError, error_regex="Input dimension should be at least 3") + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), + error_regex="output_size must be 2") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1)), output_size=(-1, 0)), + error_regex="elements of output_size must be greater than or equal to 0") + + +def sample_inputs_adaptive_avg_pool3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + ((0, 8, 8, 8, 8), (5, 7, 4)), + ((1, 8, 4, 3, 7), (None, None, None)), + ((1, 8, 4, 3, 7), (1, 1, 1)), + ((3, 3, 8, 8, 6), (5, 7, None)), + ((1, 3, 8, 8, 6), (5, None, 2)), + ((3, 3, 8, 8, 6), (None, 3, 2)), + ) + + for input_shape, output_size in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=(output_size,)) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=(output_size,)) + + +def error_inputs_adaptive_avg_pool3d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for incorrect input dimension + yield ErrorInput(SampleInput(make_arg((2, 2, 2)), output_size=(2, 2, 2)), + error_type=ValueError, error_regex="Input dimension should be at least 4") + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), + error_regex="output_size must be 3") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1, 1)), output_size=(-1, 0, 2)), + error_regex="elements of output_size must be greater than or equal to 0") + + +def sample_inputs_adaptive_max_pool1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + # ((0, 8, 8), (5,)), + # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1] + ((3, 4, 4), 3), + ((3, 4, 4), 1) + ) + + for shapes, return_idx in product(cases, (True, False)): + # Batched + yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx)) + # 
Unbatched + yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx)) + + +def error_inputs_adaptive_max_pool1d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3)), output_size=()), + error_regex="'output_size' should contain one int") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1)), output_size=(-1,)), + error_regex="Trying to create tensor with negative dimension") + +def sample_inputs_adaptive_max_pool2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + # ((0, 8, 8, 8), (5, 7)), + # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1] + ((1, 4, 4, 4), (2, 3)), + ((2, 4, 4, 4), (None, 3)), + ((2, 4, 4, 4), (1, 1)), + ((1, 4, 4, 3), (3, None)), + ((1, 4, 4, 3), (None, None)), + ((1, 4, 4, 3), (3)), + ) + + for shapes, return_idx in product(cases, (True, False)): + # Batched + yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx)) + # Unbatched + yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx)) + +def error_inputs_adaptive_max_pool2d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for incorrect input dimension + yield ErrorInput(SampleInput(make_arg((2, 2)), output_size=(2, 2)), + error_type=ValueError, error_regex="Input dimension should be at least 3") + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), + error_regex="internal error") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1)), output_size=(-1, 0)), + error_regex="Trying to create tensor with negative dimension") + + +def sample_inputs_adaptive_max_pool3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + # ((0, 8, 8, 8, 8), (5, 7, 4)), + # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1] + ((1, 4, 4, 3, 5), (None, None, None)), + ((1, 4, 4, 3, 5), (1, 1, 1)), + ((3, 3, 4, 4, 6), (2, 3, None)), + ((1, 3, 4, 4, 6), (3, None, 2)), + ((3, 3, 4, 4, 6), (None, 3, 2)), + ) + + for shapes, return_idx in product(cases, (True, False)): + # Batched + yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx)) + # Unbatched + yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx)) + +def error_inputs_adaptive_max_pool3d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for incorrect input dimension + yield ErrorInput(SampleInput(make_arg((2, 2, 2)), output_size=(2, 2, 2)), + error_type=ValueError, error_regex="Input dimension should be at least 4") + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), + error_regex="internal error") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1, 1)), output_size=(-1, 0, 2)), + error_regex="Trying to create tensor with negative dimension") + + +class _TestParamsMaxPoolBase: + + def __init__(self): + self.kwargs = { + 'kernel_size': [3], + 'stride': 
[2, None], + 'ceil_mode': [True, False], + 'padding': [0, 1], + 'dilation': [1], + 'return_indices': [True, False] + } + + self.shapes = [ + [1, 2, None], # batch + [2], # channels + [3, 6] # signal + ] + + def _gen_shape(self): + for shape in product(*self.shapes): + # shape[0] is None indicates missing batch dimension + if shape[0] is None: + shape = shape[1:] + + yield shape, torch.contiguous_format + # only 2d (N, C, H, W) rank 4 tensors support channels_last memory format + if len(self.shapes) == 4 and len(shape) == 4: + yield shape, torch.channels_last + + def _gen_kwargs(self): + keys = self.kwargs.keys() + for values in product(*self.kwargs.values()): + yield dict(zip(keys, values)) + + def gen_input_params(self): + yield from product(self._gen_shape(), self._gen_kwargs()) + +class _TestParamsMaxPool1d(_TestParamsMaxPoolBase): + + def __init__(self): + super().__init__() + self.kwargs['kernel_size'] += [(3,)] + self.kwargs['stride'] += [(2,)] + self.kwargs['padding'] += [(1,)] + self.kwargs['dilation'] += [(1,)] + +class _TestParamsMaxPool2d(_TestParamsMaxPoolBase): + + def __init__(self): + super().__init__() + self.kwargs['kernel_size'] += [(3, 2)] + self.kwargs['stride'] += [(2, 1)] + self.kwargs['padding'] += [(1, 1)] + self.kwargs['dilation'] += [(1, 2)] + + self.shapes.append([6]) + +class _TestParamsMaxPool3d(_TestParamsMaxPoolBase): + + def __init__(self): + super().__init__() + self.kwargs['kernel_size'] += [(3, 2, 3)] + self.kwargs['stride'] += [(2, 1, 2)] + self.kwargs['dilation'] += [(1, 2, 1)] + + self.shapes.append([6]) + self.shapes.append([5]) + +def sample_inputs_max_pool(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + params_generator_type_dict = { + 'nn.functional.max_pool1d': _TestParamsMaxPool1d, + 'nn.functional.max_pool2d': _TestParamsMaxPool2d, + 'nn.functional.max_pool3d': _TestParamsMaxPool3d, + 'max_pool2d_with_indices_backward': _TestParamsMaxPool2d, + } + + params_generator = params_generator_type_dict[op_info.name]() + for (shape, memory_format), kwargs in params_generator.gen_input_params(): + arg = make_arg(shape).to(memory_format=memory_format).requires_grad_(requires_grad) + yield SampleInput(arg, kwargs=kwargs) + +def max_pool2d_backward(*args, kernel_size=(), stride=(), padding=(0,), dilation=(1,), ceil_mode=False, **kwargs): + out, indices = torch.nn.functional.max_pool2d_with_indices( + *args, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, ceil_mode=ceil_mode, return_indices=True) + grad_out = torch.ones_like(out) + if stride is None: + stride = kernel_size + out_b = torch.ops.aten.max_pool2d_with_indices_backward.default( + grad_out, *args, kernel_size, stride, padding, dilation, ceil_mode, indices) + return out_b + +def error_inputs_max_pool1d(op_info, device, **kwargs): + # Toggle requires_grad because `max_pool1d` has different path + # based on whether `requires_grad` is set or not. 
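+ # (the autograd path materializes pooling indices for the backward pass, while the inference-only path can skip them)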
+ for requires_grad in (True, False): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=requires_grad) + # error inputs when pad is negative + x = make_arg((0, 1, 49)) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs when pad > ((kernel_size - 1) * dilation + 1) / 2, when dilation is not default + yield ErrorInput(SampleInput(x, + kwargs={'kernel_size': 3, 'dilation': 2, 'stride': 1, 'padding': 3, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs for a 0-dim input tensor + error_msg = r'Expected 2D or 3D \(batch mode\) tensor with optional 0 dim batch size for input' + yield ErrorInput(SampleInput(make_arg((), requires_grad=requires_grad), kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error inputs for empty input + yield ErrorInput(SampleInput(torch.tensor([], device=device, requires_grad=requires_grad), + kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error: unbatched input with 0 sized non-batch dims. + yield ErrorInput(SampleInput(make_arg((0, 10), requires_grad=requires_grad), + kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error: batched input with 0 sized non-batch dims. + yield ErrorInput(SampleInput(make_arg((1, 10, 0), requires_grad=requires_grad), + kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error inputs for stride=0 + error_msg = 'stride must be greater than zero, but got 0' + yield ErrorInput(SampleInput(make_arg((3, 3, 3)), kwargs={'kernel_size': 1, 'stride': 0}), + error_regex=error_msg) + + # error inputs for dilation=0 + error_msg = 'dilation must be greater than zero, but got 0' + yield ErrorInput(SampleInput(make_arg((3, 3, 3)), + kwargs={'kernel_size': 1, 'stride': 1, 'padding': 0, 'dilation': 0}), + error_regex=error_msg) + + # error inputs for invalid output size + error_msg = 'Invalid computed output size: -2' + yield ErrorInput(SampleInput(make_arg((2, 2, 2)), + kwargs={'kernel_size': 5, 'stride': 1, 'padding': 0, 'dilation': 1}), + error_regex=error_msg) + + # error inputs when kernel_size=0 + error_msg = 'kernel_size must be greater than zero' + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 0}), + error_regex=error_msg) + + # error inputs for non-positive stride + error_msg = 'stride must be greater than zero' + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 0}), + error_regex=error_msg) + + +def error_inputs_max_pool2d(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + # error inputs when pad is negative + x = make_arg((0, 1, 49)) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + # 2-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 (kernel_size : int) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), +
error_regex='pad should be at most half of effective kernel size') + + # error inputs when pad > kernel_size / 2 (kernel_size : tuple) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error: unbatched input with 0 sized non-batch dims. + err_msg = r'Expected 3D or 4D \(batch mode\) tensor with optional 0 dim batch size for input' + yield ErrorInput(SampleInput(make_arg((1, 0, 10)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + # error: batched input with 0 sized non-batch dims. + yield ErrorInput(SampleInput(make_arg((2, 1, 10, 0)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + +def error_inputs_max_pool3d(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + # error inputs when pad is negative + x = make_arg((0, 1, 49, 50)) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + # 3-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, + 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 (kernel_size: int) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs when pad > kernel_size / 2 (kernel_size: tuple) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, + 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error: unbatched input with 0 sized non-batch dims. + err_msg = r'Expected input\'s non-batch dimensions to have positive length' + yield ErrorInput(SampleInput(make_arg((0, 1, 2, 10)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + # error: batched inputs with 0 sized non-batch dims. 
+ yield ErrorInput(SampleInput(make_arg((2, 1, 0, 1, 2)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + +def sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad) + + cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] + ((2, 1, 4, 5), {'p': 1., 'dim': 2}), + ((2, 3, 4, 5), {'p': 2., 'dim': 1}), + ((1, 2, 4, 5), {'p': 0.5, 'dim': 0}), + ((1, 3, 4, 5), {'p': -1., 'dim': 1}), + ((1, 3, 4, 5), {'p': 0., 'dim': -1}), + ((), {'p': 1.2, 'dim': 0}), + ((2, 3, 4, 5), {}), + ((2, 3, 4, 5), {'eps': 1e-4})) + + for input_shape, kwargs in cases: + yield SampleInput(make_arg(input_shape), kwargs=kwargs) + + +def complex_conv(fn, input_size, weight, grad_output, stride, padding, dilation, groups): + # conv(W, x, b) = conv(Wr, xr, br) - conv(Wi, xi, 0) + i(conv(Wi, xr, bi) + conv(Wr, xi, 0)) + # a = conv(Wr, xr, br), + # b = conv(Wi, xi, 0), + # c = conv(Wr + Wi, xr + xi, br + bi) + # conv(W, x, b) = a - b + i(c - a - b) + + grad_output_ = torch.view_as_real(grad_output) + grad_output_r = grad_output_[..., 0] + grad_output_i = grad_output_[..., 1] + + weight_ = torch.view_as_real(weight) + weight_r = weight_[..., 0] + weight_i = weight_[..., 1] + + a = fn(input_size, weight_r, grad_output_r, stride, padding, dilation, groups) + b = fn(input_size, weight_i, grad_output_i, stride, padding, dilation, groups) + c = fn(input_size, weight_r + weight_i, grad_output_r + grad_output_i, stride, padding, dilation, groups) + + return (a - b) + 1j * (c - a - b) + + +def conv_transpose_ref(input, weight, bias, stride=1, padding=0, + output_padding=0, dilation=1, groups=1, + fn=None): + # Derivative of `conv` is `conv_transpose`. + # To verify the correctness of `conv_transpose`, + # we rely `torch.nn.grad` implementation (which is tested in test_nn.py) + # for floating dtypes. + + assert fn is not None + + grad_fn_map = {torch.nn.functional.conv_transpose1d: torch.nn.grad.conv1d_input, + torch.nn.functional.conv_transpose2d: torch.nn.grad.conv2d_input, + torch.nn.functional.conv_transpose3d: torch.nn.grad.conv3d_input} + batched_dim_map = {torch.nn.functional.conv_transpose1d: 3, + torch.nn.functional.conv_transpose2d: 4, + torch.nn.functional.conv_transpose3d: 5} + + # Input for `ref` is ndarray. + input, weight = torch.from_numpy(input), torch.from_numpy(weight) + + is_batched = len(input.shape) == batched_dim_map[fn] + if not is_batched: + input = input.unsqueeze(0) + + if bias is not None: + bias = torch.from_numpy(bias) + unsqueeze_dims = input.ndim - 2 + for _ in range(unsqueeze_dims): + bias = bias.unsqueeze(1) + + grad_output = input + # Get the input shape for grad_fn. 
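+ # Running the op on the 'meta' device only propagates shapes and dtypes; no memory is allocated and no real kernel runs.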
+ conv_transpose_output = fn(grad_output.to('meta'), weight.to('meta'), None, + stride=stride, padding=padding, output_padding=output_padding, + groups=groups, dilation=dilation) + input_size = conv_transpose_output.shape + + grad_fn = grad_fn_map[fn] + if weight.dtype.is_complex: + out = complex_conv(grad_fn, input_size, weight, grad_output, stride, padding, dilation, groups) + else: # Floating + out = grad_fn(input_size, weight, grad_output, stride, padding, dilation, groups) + + if bias is not None: + out = out + bias + + return out.squeeze(0) if not is_batched else out + + +def sample_inputs_conv_transpose1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, output_padding, groups, dilation) + cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 3, 4), (3, 3, 3), (3,), + {'stride': (2,), 'padding': 2, 'output_padding': (1,), 'groups': 1}), + ((2, 2, 4), (2, 2, 4), (4,), + {'stride': (3,), 'padding': (1,), 'output_padding': (2,), 'groups': 2, 'dilation': (4,)}), + ((1, 1, 4), (1, 1, 4), (1,), + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2,)}), + ((1, 1, 4), (1, 2, 3), None, + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), + ((1, 4, 5), (4, 8, 3), None, + {}) + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, output_padding, groups, dilation) + cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 3, 4, 4), (3, 3, 3, 3), (3,), + {'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}), + ((2, 2, 4, 4), (2, 2, 4, 5), (4,), + {'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}), + ((1, 1, 4, 5), (1, 1, 4, 3), (1,), + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}), + ((1, 1, 4, 3), (1, 2, 3, 4), None, + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), + ((2, 4, 4, 4), (4, 1, 3, 3), None, {'groups': 4}), + ((1, 2, 5, 5), (2, 4, 3, 3), None, {}) + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + +def sample_inputs_conv_transpose3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, output_padding, groups, dilation) + cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 3, 4, 4, 
4), (3, 3, 3, 3, 3), (3,), + {'stride': (2, 2, 2), 'padding': 2, 'output_padding': (1, 1, 1), 'groups': 1}), + ((2, 2, 4, 4, 4), (2, 2, 4, 5, 6), (4,), + {'stride': (3, 2, 1), 'padding': (1, 2, 3), 'output_padding': (2, 3, 1), 'groups': 2, 'dilation': (4, 4, 4)}), + ((1, 1, 4, 5, 2), (1, 1, 4, 3, 1), (1,), + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3, 2)}), + ((1, 1, 4, 3, 4), (1, 2, 3, 4, 5), None, + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), + ((1, 4, 5, 5, 5), (4, 8, 3, 3, 3), None, + {}) + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def sample_inputs_conv1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias, + # and a dict of values of (stride, padding, dilation, groups) + cases: Tuple = ( + ((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'groups': 1}), + ((2, 4, 8), (2, 2, 3), (2,), {'stride': 3, 'padding': 1, 'groups': 2, 'dilation': 2}), + ((1, 4, 5), (1, 4, 3), None, {'stride': (2,), 'padding': 'valid'}), + ((2, 2, 4), (2, 1, 4), (2,), {'stride': (1,), 'padding': 'same', 'groups': 2, 'dilation': (2,)}), + # With defaults + ((1, 4, 5), (3, 4, 3), None, {}), + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def error_inputs_conv1d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float64) + make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) + make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_int_arg((1, 1, 4)), args=(make_int_arg((1, 1, 2)), make_arg((1,)))), + error_regex="should be the same") + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 2)), make_complex_arg((1,)))), + error_regex="should be the same") + + # error inputs for negative strides + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2, 2)), make_arg((1,))), + kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") + + # error inputs for negative padding + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2, 2)), make_arg((1,))), + kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") + + # error inputs for negative dilation + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 2)), make_arg((1,))), + kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") + + # FIXME: https://github.com/pytorch/pytorch/issues/85656 + # error inputs for bias shape not equal to the output channels + # yield ErrorInput(SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 3)), make_arg((2,)))), + # 
error_regex="expected bias to be 1-dimensional with 1 elements") + + # error inputs for input.ndim != weight.ndim + yield ErrorInput(SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2)), make_arg((1,)))), + error_regex="weight should have at least three dimensions") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': -1}), error_regex="non-positive groups is not supported") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 0}), error_regex="non-positive groups is not supported") + + +def error_inputs_conv2d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float64) + make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) + make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_int_arg((2, 4, 4)), args=(make_int_arg((3, 2, 3, 3)), make_arg((3,)))), + error_regex="should be the same") + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_arg((2, 4, 4)), args=(make_arg((3, 2, 3, 3)), make_complex_arg((3,)))), + error_regex="should be the same") + + # error inputs for negative strides + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4)), args=(make_arg((1, 2, 2, 3)), make_arg((1,))), + kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") + + # error inputs for negative padding + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 3)), args=(make_arg((1, 2, 2, 4)), make_arg((1,))), + kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") + + # error inputs for negative dilation + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 2)), args=(make_arg((1, 1, 2, 5)), make_arg((1,))), + kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") + + # FIXME: https://github.com/pytorch/pytorch/issues/85656 + # error inputs for bias shape not equal to the output channels + # yield ErrorInput(SampleInput(make_arg((1, 1, 4, 4)), args=(make_arg((1, 1, 3, 2)), make_arg((2,)))), + # error_regex="expected bias to be 1-dimensional with 1 elements") + + # error inputs for input.ndim != weight.ndim + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 3)), args=(make_arg((1, 2, 2)), make_arg((1,))), + kwargs={'padding': 'same'}), error_regex="Expected 3-dimensional input for 3-dimensional weight") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 1, 3)), make_arg((2,))), + kwargs={'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for groups the weight[0] are less than the number of groups + 
yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 1, 3)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 5)), args=(make_arg((2, 2, 1, 4)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': -1}), error_regex="non-positive groups is not supported") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 4, 3)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 0}), error_regex="non-positive groups is not supported") + + +def sample_inputs_conv2d(op_info, device, dtype, requires_grad, jit_fail_sample=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, groups, dilation) + cases: Tuple = ( + ((1, 3, 4, 4), (3, 3, 3, 3), (3,), + {'stride': (2, 2), 'padding': 2, 'groups': 1}), + ((2, 4, 8, 8), (2, 2, 3, 3), (2,), + {'stride': (3, 2), 'padding': (2, 1), 'groups': 2, 'dilation': (4, 4)}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}), + ((1, 2, 4, 3), (4, 2, 3, 4), None, + {'stride': 2, 'padding': 1, 'groups': 1}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 2, 'padding': "valid"}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 1, 'padding': "same", 'dilation': 3}), + # Below are the group related samples from common_nn.py + ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4}), + ((2, 4, 6, 6), (8, 1, 3, 3), (8,), {'groups': 4}), + ((2, 4, 6, 6), (8, 1, 3, 3), None, {'groups': 4}), + ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'stride': (3, 2)}), + ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'padding': (1, 1)}), + ((2, 4, 5, 5), (4, 1, 2, 2), (4,), {'groups': 4, 'dilation': (2, 2)}), + ((2, 4, 6, 5), (6, 2, 3, 2), (6,), {'groups': 2}), + # With defaults + ((1, 4, 5, 5), (3, 4, 3, 3), None, {}), + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def sample_inputs_conv3d(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and dict of values of (stride, padding, dilation, groups) + cases: Tuple = ( + ((1, 1, 4, 4, 4), (1, 1, 1, 1, 1), (1,), {'padding': 'same'}), + ((1, 1, 4, 4, 4), (1, 1, 4, 4, 4), (1,), {'stride': (2, 2, 2)}), + ((1, 1, 5, 5, 5), (1, 1, 3, 3, 3), (1,), {'dilation': 2}), + ((1, 1, 1, 1, 10), (1, 1, 1, 1, 4), None, {'padding': 'valid'}), + ((1, 1, 10, 11, 12), (1, 1, 1, 2, 5), None, {'padding': 'same'}), + ((1, 1, 10, 11, 12), (1, 1, 1, 2, 5), None, {'padding': 'same', 'dilation': 2}), + ((1, 1, 10, 11, 12), (1, 1, 4, 4, 4), None, {'padding': 'same', 'dilation': 3}), + ((1, 1, 1, 1, 10), (1, 1, 1, 1, 4), None, {'padding': 'valid'}), + ((3, 9, 3, 1, 9), (3, 3, 3, 1, 9), (3,), {'groups': 3}), + ((3, 9, 3, 1, 9), (3, 3, 3, 1, 9), (3,), {'stride': (2, 2, 2), 
'dilation': 1, 'groups': 3}), + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def error_inputs_conv3d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float64) + make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) + make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_int_arg((1, 1, 4, 4, 4)), args=(make_int_arg((1, 1, 2, 2, 2)), make_arg((1,)))), + error_regex="should be the same") + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_complex_arg((1,)))), + error_regex="should be the same") + + # error inputs for negative strides + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), + kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") + + # error inputs for negative padding + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), + kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") + + # error inputs for negative dilation + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), + kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") + + # FIXME: https://github.com/pytorch/pytorch/issues/85656 + # error inputs for bias shape not equal to the output channels + # yield ErrorInput(SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 3, 3, 3)), make_arg((2,)))), + # error_regex="expected bias to be 1-dimensional with 1 elements") + + # error inputs for input.ndim != weight.ndim + yield ErrorInput( + SampleInput(make_arg((1, 1, 3, 4, 5)), args=(make_arg((1, 1, 4, 3)), make_arg((1,))), + kwargs={'padding': 'same'}), error_regex="Expected 4-dimensional input for 4-dimensional weight") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), + make_arg((2,))), kwargs={'groups': 3}), + error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), + make_arg((2,))), kwargs={'padding': 'same', 'groups': 3}), + error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), + make_arg((2,))), kwargs={'padding': 'same', 'groups': 0}), + error_regex="non-positive groups is not supported") + + # error inputs for padding='same' not supported by strided convolutions + yield ErrorInput( + SampleInput(make_arg((18, 27, 9, 1, 9)), args=(make_arg((9, 9, 9, 1, 9)), + make_arg((9,))), kwargs={'stride': 2, 'padding': 'same', 'groups': 3}), + error_regex="padding='same' is not supported for strided convolutions") + + +def sample_inputs_group_norm(opinfo, device, 
dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, num groups, and kwargs for eps + cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment] + ((1, 6, 3), 2, {'eps' : 0.5}), + ((2, 6, 3), 2, {'eps' : -0.5}), + ((1, 3), 1, {'eps' : 1e-5}), + ((0, 2), 1, {'eps' : 1e-5}), + ((S, S, S), 1, {'eps' : 0.5}), + ) + + # num_channels is inferred to be input.shape[1] dimension + for input_shape, num_groups, kwargs in cases: + # Shape of weight and bias should be the same as num_channels + channels = input_shape[1] if len(input_shape) > 1 else 0 + weight_tensor = make_arg(channels) + bias_tensor = make_arg(channels) + + # Checking for permutations of weights and biases as `None` + weights = [weight_tensor, None] + biases = [bias_tensor, None] + for weight, bias in itertools.product(weights, biases): + kwargs = { + 'weight': weight, + 'bias': bias, + **kwargs + } + yield SampleInput(make_arg(input_shape), num_groups, **kwargs) + + # Without any optional args + yield SampleInput(make_arg((1, 2)), args=(1,)) + +def reference_inputs_group_norm(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_group_norm( + op_info, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, num groups, and kwargs for eps + cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment] + ((20, 6, 10, 10), 3, {'eps' : 1e-5}), + # equivalent with InstanceNorm + # GroupNorm(C, num_groups=C) == InstanceNorm(num_features=C) + ((20, 6, 10, 10), 6, {'eps' : 1e-5}), + # equivalent with LayerNorm + # GroupNorm(C, num_groups=1, affine=False) == LayerNorm(normalized_shape=[C, H, W], elementwise_affine=False) + ((20, 6, 10, 10), 1, {'eps' : 1e-5}), + ) + + # num_channels is inferred to be input.shape[1] dimension + for input_shape, num_groups, kwargs in cases: + # Shape of weight and bias should be the same as num_channels + channels = input_shape[1] if len(input_shape) > 1 else 0 + input_tensor = make_arg(input_shape) + weight_tensor = make_arg(channels) + bias_tensor = make_arg(channels) + + # Checking for permutations of weights and biases as `None` + weights = [weight_tensor, None] + biases = [bias_tensor, None] + for weight, bias in itertools.product(weights, biases): + kwargs = { + 'weight': weight, + 'bias': bias, + **kwargs + } + yield SampleInput(input_tensor, num_groups, **kwargs) + + +def sample_inputs_instance_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + # Ordered as: input shape, kwargs for momentum, eps + cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] + ((S, S, S), {'momentum': 0.5, 'eps': 0.6}), + ((S, S, S), {'momentum': 0.5, 'eps': 0.6, 'use_input_stats': True}), + ((3, 2, 4), {'momentum': -1.2}), + ((3, 2, 4), {'momentum': 0.0}), + ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}), + ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}), + ) + + for input_shape, kwargs in cases: + # args: running mean, running var, weight and bias should necessarily be of shape: (channels,) + channels = input_shape[1] + weight = make_arg(channels) + bias = make_arg(channels) + running_mean = make_arg_without_requires_grad(channels, low=0) + running_var = 
make_arg_without_requires_grad(channels, low=0) + new_kwargs = { + 'running_mean': running_mean, + 'running_var': running_var, + 'weight': weight, + 'bias': bias, + **kwargs + } + + yield SampleInput( + make_arg(input_shape), + args=(), + kwargs=new_kwargs + ) + + # Checking for permutations of weights and biases as `None` + # instance_norm assumes that if there's a bias, there's a weight + weights = [channels, None] + biases = [None, None] + + for weight_channels, bias_channels in zip(weights, biases): + running_mean = make_arg_without_requires_grad(channels, low=0) + running_var = make_arg_without_requires_grad(channels, low=0) + yield SampleInput( + make_arg(input_shape), + args=(), + kwargs={ + 'running_mean': running_mean, + 'running_var': running_var, + 'weight': make_arg(weight_channels) if weight_channels is not None else None, + 'bias': make_arg(bias_channels) if bias_channels is not None else None + } + ) + + # Test case for no optional kwargs + yield SampleInput(make_arg((1, 2, 3)), kwargs={}) + + +def sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, normalized_shape and a kwarg dict for eps + cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 2, 3), (1, 2, 3), {'eps': 0.5}), + ((2, 2, 3), (2, 3), {'eps': -0.5}), + ((1,), (1,), {}), + ((1, 2), (2,), {}), + ((0, 1), (1,), {}), + ) + + for input_shape, normalized_shape, kwargs in cases: + # Shape of weight and bias should be the same as normalized_shape + weight = make_arg(normalized_shape) + bias = make_arg(normalized_shape) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight, bias), + kwargs=kwargs + ) + # Without any optional args + yield SampleInput(make_arg((1, 2)), args=((2,),)) + + # TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs, + # enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400 + + # With weight and a `None` bias + # yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None)) + + # With `None` weight and bias (tests failing for this, see the link above) + # yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,)))) + + +def sample_inputs_native_layer_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, normalized_shape, eps + cases: Tuple[Tuple[int], Tuple[int], float] = ( # type: ignore[assignment] + ((1, 2, 3), (1, 2, 3), 0.5), + ((2, 2, 3), (2, 3), -0.5), + ((1,), (1,), 1e-5), + ((1, 2), (2,), 1e-5), + ((0, 1), (1,), 1e-5), + ) + + for input_shape, normalized_shape, eps in cases: + # Shape of weight and bias should be the same as normalized_shape + weight = make_arg(normalized_shape) + bias = make_arg(normalized_shape) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight, bias, eps), + ) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, None, bias, eps), + ) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight, None, eps), + ) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, None, None, eps), + ) + +def error_inputs_group_norm(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + + # check that input has minimum number of 
dimensions + err_msg1 = "Expected at least 2 dimensions for input tensor but received" + s1 = SampleInput(make_arg(1), args=(1,)) + yield ErrorInput(s1, error_regex=err_msg1) + + # check that the channels dimension is compatible with number of groups + err_msg2 = "Expected number of channels in input to be divisible by num_groups, but got input of shape" + s2 = SampleInput(make_arg((2, 7, 4)), args=(2,)) + yield ErrorInput(s2, error_regex=err_msg2) + +def error_inputs_native_layer_norm(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + input_shape = (1, 2, 3) + + err_msg1 = "Expected normalized_shape to be at least 1-dimensional" + s1 = SampleInput( + make_arg(input_shape), args=(tuple(), None, None, 1e-5) + ) + yield ErrorInput(s1, error_regex=err_msg1) + + normalized_shape = (1, 2, 3) + weight = make_arg((1, 2)) + err_msg2 = "Expected weight to be of same shape as normalized_shape" + s2 = SampleInput( + make_arg(input_shape), args=(normalized_shape, weight, None, 1e-5) + ) + yield ErrorInput(s2, error_regex=err_msg2) + + bias = make_arg((1, 2)) + err_msg3 = "Expected bias to be of same shape as normalized_shape" + s3 = SampleInput( + make_arg(input_shape), args=(normalized_shape, None, bias, 1e-5) + ) + yield ErrorInput(s3, error_regex=err_msg3) + + err_msg4 = "Given normalized_shape=" + s4 = SampleInput( + make_arg((2, 2, 3)), args=((2, 2), None, None, 1e-5) + ) + yield ErrorInput(s4, error_regex=err_msg4) + + +def sample_inputs_local_response_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, size and a kwarg dict for alpha, beta, and k + cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ((1, 6, 3), 2, {'beta': 0.5, 'k': 1.25}), + ((1, 6, 3), 2, {'alpha': 3e-05, 'k': 1.25}), + ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5}), + ((1, 6, 3), 2, {'alpha': 3e-05}), + ((1, 6, 3), 2, {'beta': 0.5}), + ((1, 6, 3), 2, {'k': 1.25}), + ((1, 6, 3), 2, {}), + ((2, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ((1, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ((0, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ) + + for input_shape, size, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(size,), kwargs=kwargs) + +def sample_inputs_hardswish(self, device, dtype, requires_grad, **kwargs): + N = 5 + # make sure we are testing -3 -> 3 range. default is -10 -> 10 so maybe unnecessary ? 
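+ # hardswish(x) = x * relu6(x + 3) / 6: the output is 0 below -3 and the identity above 3, + # so sampling in [-5, 5] exercises both saturated regions and the curved middle.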
+ make_arg = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-5, high=5) + return (SampleInput(make_arg((N * 2, N * 2))) for _ in range(1, N)) + +def sample_inputs_linear(self, device, dtype, requires_grad, **kwargs): + features_options = [[3, 4], [8, 8]] + batch_options: List[List[int]] = [ + [], # no batch + [0], + [8], + [2, 3], + ] + create_tensor = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-2, high=2) + + for has_bias, (in_feat, out_feat), batch_shape in \ + itertools.product([True, False], features_options, batch_options): + input_tensor = create_tensor(batch_shape + [in_feat]) + weight = create_tensor([out_feat, in_feat]) + if not has_bias: + yield SampleInput(input_tensor, weight) + continue + + bias = create_tensor([out_feat]) + yield SampleInput(input_tensor, weight, bias) + + # 5D tensor, used to crash on MPS, see https://github.com/pytorch/pytorch/issues/114942 + yield SampleInput(create_tensor(2, 1, 2, 1, 2), create_tensor(4, 2)) + yield SampleInput(create_tensor(2, 1, 2, 1, 2), create_tensor(4, 2), create_tensor(4)) + +def sample_inputs_bilinear(self, device, dtype, requires_grad, **kwargs): + features_options = [[3, 4, 5], [8, 8, 8]] + batch_options: List[List[int]] = [ + [], # no batch + [0], + [8], + [2, 3], + ] + create_tensor = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-2, high=2) + + for has_bias, (in_feat1, in_feat2, out_feat), batch_shape in \ + itertools.product([True, False], features_options, batch_options): + input_tensor1 = create_tensor(batch_shape + [in_feat1]) + input_tensor2 = create_tensor(batch_shape + [in_feat2]) + weight = create_tensor([out_feat, in_feat1, in_feat2]) + if not has_bias: + yield SampleInput(input_tensor1, input_tensor2, weight) + continue + bias = create_tensor([out_feat]) + yield SampleInput(input_tensor1, input_tensor2, weight, bias) + +def sample_inputs_glu(self, device, dtype, requires_grad, **kwargs): + features_options = [[2], [2, 4], [8, 8], [3, 6, 8], [1, 4, 6, 7]] + batch_options: List[List[int]] = [ + [], # no batch + [0], + [8], + [2, 3], + ] + create_tensor = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-2, high=2) + + for features, batch_shape in itertools.product(features_options, batch_options): + ndim = len(features) + len(batch_shape) + for dim in range(ndim): + input_tensor = create_tensor(batch_shape + features) + dim_size = input_tensor.size(dim) + if dim_size > 0 and dim_size % 2 == 0: + yield SampleInput(input_tensor, dim) + +def sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs): + N, C = 2, 3 + D = 4 + S = 3 + L = 5 + + align_corners_options: Tuple[Any, ...] 
= (None,) + if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'): + align_corners_options = (True, False, None) + ranks_for_mode = { + 'nearest': [1, 2, 3], + 'nearest-exact': [1, 2, 3], + 'linear': [1], + 'bilinear': [2], + 'bicubic': [2], + 'trilinear': [3], + 'area': [1, 2, 3] + } + + def shape(size, rank, with_batch_channel=True): + if with_batch_channel: + return tuple([N, C] + ([size] * rank)) + return tuple([size] * rank) + + if mode in ('bilinear', 'bicubic') and dtype == torch.uint8: + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype + high=256 if dtype == torch.uint8 else None, + ) + # provide few samples for a more close to typical image processing usage + rank = 2 + for memory_format in [torch.contiguous_format, torch.channels_last]: + yield SampleInput( + make_arg(shape(270, rank), memory_format=memory_format), + shape(130, rank, False), + scale_factor=None, + mode=mode, + align_corners=False, + ) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for align_corners in align_corners_options: + for rank in ranks_for_mode[mode]: + yield SampleInput( + make_arg(shape(D, rank)), + shape(S, rank, False), + scale_factor=None, + mode=mode, + align_corners=align_corners, + ) + yield SampleInput( + make_arg(shape(D, rank)), + shape(L, rank, False), + scale_factor=None, + mode=mode, + align_corners=align_corners, + ) + for recompute_scale_factor in [False, True]: + for scale_factor in [1.7, 0.6]: + yield SampleInput( + make_arg(shape(D, rank)), + size=None, + scale_factor=scale_factor, + mode=mode, + align_corners=align_corners, + recompute_scale_factor=recompute_scale_factor, + ) + +def reference_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs) + + if mode in ('bilinear', 'bicubic'): + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype + high=256 if dtype == torch.uint8 else None, + ) + # provide few samples for more typical image processing usage + for memory_format in [torch.contiguous_format, torch.channels_last]: + for aa in [True, False]: + yield SampleInput( + make_arg((2, 3, 345, 456), memory_format=memory_format), + (270, 270), + scale_factor=None, + mode=mode, + align_corners=False, + antialias=aa, + ) + +def sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs): + N, C = 2, 3 + D = 4 + S = 3 + L = 5 + + ranks_for_mode = { + 'nearest': [1, 2, 3], + 'bilinear': [2], + } + + def shape(size, rank, with_batch_channel=True): + if with_batch_channel: + return torch.Size([N, C] + ([size] * rank)) + return torch.Size([size] * rank) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for rank in ranks_for_mode[mode]: + yield SampleInput(make_arg(shape(D, rank)), size=shape(S, rank, False)) + yield SampleInput(make_arg(shape(D, rank)), size=shape(L, rank, False)) + yield SampleInput(make_arg(shape(D, rank)), scale_factor=1.7) + yield SampleInput(make_arg(shape(D, rank)), scale_factor=0.6) + +def reference_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs) + + if mode in ('bilinear', ): + make_arg = 
partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype + high=256 if dtype == torch.uint8 else None, + ) + # provide a single sample for more typical image processing usage + for memory_format in [torch.contiguous_format, torch.channels_last]: + yield SampleInput( + make_arg((2, 3, 345, 456), memory_format=memory_format), + (270, 270), + ) + +def sample_inputs_upsample_aa(mode, self, device, dtype, requires_grad, **kwargs): + N = 6 + C = 3 + H = 10 + W = 20 + S = 3 + L = 5 + + input_tensor = make_tensor(torch.Size([N, C, H, W]), device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scale_factors=None) + yield SampleInput(input_tensor, output_size=torch.Size([L, L]), align_corners=False, scale_factors=None) + yield SampleInput(input_tensor, output_size=None, align_corners=False, scale_factors=[1.7, 0.9]) + yield SampleInput(input_tensor, output_size=None, align_corners=True, scale_factors=[0.8, 1.0]) + + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scales_h=None, scales_w=None) + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scales_h=1.7, scales_w=0.9) + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=True, scales_h=1.7, scales_w=0.9) + +def sample_inputs_gelu(self, device, dtype, requires_grad, **kwargs): + N = 5 + for _ in range(1, N): + for approximate in ['none', 'tanh']: + yield SampleInput( + make_tensor((N * 2, N * 2), device=device, dtype=dtype, + requires_grad=requires_grad, low=-3, high=3), + approximate=approximate) + + +def error_inputs_gelu(op, device, **kwargs): + # Tests that gelu errors out when passed an approximation we don't know. + yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device), kwargs={"approximate": "asdf"}), + error_regex="approximate argument must be either") + + +def sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs): + inputs = [] + args_for_reduction_with_dim = ( + ((S, S, S), (1,),), + ((S, S, S), (1, True, ),), + ((), (0,),), + ((), (0, True,),), + ) + return ((SampleInput(make_tensor(input_tensor, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad), + *args)) + for input_tensor, args in args_for_reduction_with_dim) + +def sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput(make_arg((S, S, S))) + yield SampleInput(make_arg(())) + +def _generate_nan_reduction_inputs(device, dtype, requires_grad, **kwargs): + yield from _generate_reduction_inputs(device, dtype, requires_grad) + # NaN only exists for floating point numbers + if dtype.is_complex or dtype.is_floating_point: + yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad) + yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad) + +def sample_inputs_nan_reduction(supports_multiple_dims): + # Generates sample inputs for reduction ops that contain the input tensor + # and dim and keepdim kwargs. 
If a reduction op needs to test additional + # args/kwargs then create a separate sample_inputs function + def fn(op_info, device, dtype, requires_grad, **kwargs): + for t in _generate_nan_reduction_inputs(device, dtype, requires_grad): + # Add case without dim and keepdim kwargs + yield SampleInput(t.clone().requires_grad_(requires_grad)) + for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims): + yield SampleInput(t.clone().requires_grad_(requires_grad), **kwargs) + + return fn + +def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad, **kwargs): + test_quantiles = (0.5, make_tensor((2,), dtype=dtype, device=device, low=0, high=1, requires_grad=requires_grad)) + test_interpolations = ['linear', 'midpoint'] + + for quantiles in test_quantiles: + for t in _generate_reduction_inputs(device, dtype, requires_grad): + # Add case without dim and keepdim kwargs + input = t.clone().requires_grad_(requires_grad) + yield SampleInput(input, quantiles) + for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False): + # Interpolation kwarg for now is only supported when providing both dim and keepdim + kwargs.setdefault('dim', 0) + kwargs.setdefault('keepdim', False) + for interpolation in test_interpolations: + kwargs['interpolation'] = interpolation + input = t.clone().requires_grad_(requires_grad) + yield SampleInput(input, quantiles, **kwargs) + +def sample_inputs_reduction_count_nonzero(*args, **kwargs): + """Sample inputs for count_nonzero""" + # count_nonzero does not support keepdim yet + for sample in sample_inputs_reduction(*args, **kwargs): + sample.kwargs.pop('keepdim', None) + yield sample + +def sample_inputs_leaky_relu(op_info, device, dtype, requires_grad, **kwargs): + N = 10 + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + return (SampleInput(make_arg((N, N))) for _ in range(1, N)) + +def sample_inputs_fractional_max_pool2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size + cases = (((1, 3, 9, 9), 3), + ((1, 3, 9, 9), (4, 4)), + ((1, 3, 9, 9), (6, 6)), + ((2, 3, 9, 9), (3, 3)), + ((1, 1, 4, 4), (2, 2)), + ((1, 2, 6, 6), (4, 4))) + + for input_shape, kernel_size in cases: + for return_indices in [False, True]: + # test case passing a single output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=2, + return_indices=return_indices, + ) + + # test case passing a tuple output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=(2, 3), + return_indices=return_indices, + ) + + # test case passing an output ratio + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_ratio=(0.5, 0.5), + return_indices=return_indices, + ) + +def sample_inputs_fractional_max_pool3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size + cases = (((2, 3, 5, 5, 5), (2, 2, 2)), + ((1, 2, 6, 5, 4), 2), + ((1, 2, 5, 6, 5), (2, 3, 2)), + ((1, 2, 6, 6, 6), (2, 3, 2)), + ((1, 1, 7, 6, 7), (2, 3, 4)), + ((1, 1, 4, 5, 4), (2, 2, 1)), + ((1, 1, 8, 7, 6), (4, 3, 2)), + ((0, 1, 4, 5, 4), (2, 2, 1))) + + for input_shape, kernel_size in cases: + for return_indices in [False, True]: + # test case passing a single output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=2, + 
return_indices=return_indices, + ) + + # test case passing a tuple output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=(2, 3, 2), + return_indices=return_indices, + ) + + # test case passing an output ratio + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_ratio=(0.5, 0.5, 0.5), + return_indices=return_indices, + ) + +def sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override + cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2), + ((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2), + ((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2), + ((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2), + ((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2), + ((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None)) + + for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases: + yield SampleInput(make_arg(input_shape), + args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)) + # Case with just input_shape and kernel_size + yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3))) + +def sample_inputs_avgpool1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size, kwargs + cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [ + ((2, 3, 9), (3,), {}), + ((1, 3, 9), 3, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False)), + ((1, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=True, count_include_pad=True)), + ((2, 3, 9), (3,), dict(stride=(1,), padding=(1,), ceil_mode=False, count_include_pad=True)), + ((0, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=False, count_include_pad=True)), + ((1, 2, 9), (7,), dict(stride=(3,), padding=(2,), ceil_mode=False)), + ((1, 2, 9), (7,), dict(stride=(3,), padding=(3,), ceil_mode=True)), + ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=False)), + ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=True)), + ] + + for input_shape, kernel_size, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs) + +def sample_inputs_avgpool3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override + cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [ + ((2, 3, 3, 4, 4), (2, 2, 2), {}), + ((1, 2, 4, 4, 4), 2, dict(stride=1, padding=1, ceil_mode=True, + count_include_pad=False, divisor_override=2)), + ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=True, + count_include_pad=True, divisor_override=2)), + ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=False)), + ((1, 1, 7, 5, 7), (6, 3, 4), dict(stride=(2, 3, 2), padding=(3, 1, 0), ceil_mode=False, + count_include_pad=False, divisor_override=2)), + ((1, 1, 4, 5, 4), (2, 2, 3), dict(stride=(2, 2, 1), padding=0, ceil_mode=False, + count_include_pad=True, divisor_override=-2)), + ((1, 1, 6, 5, 6), (4, 5, 6), dict(stride=(2, 3, 2), padding=2, ceil_mode=True, + count_include_pad=True, divisor_override=None)), + ((0, 1, 4, 5, 4), 
(2, 3, 1), dict(stride=(2, 1, 2), padding=0, ceil_mode=False, + count_include_pad=True, divisor_override=None)), + ] + + for input_shape, kernel_size, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs) + +def error_inputs_avg_pool1d(op_info, device, **kwargs): + # error inputs when pad is negative + x = torch.rand([0, 1, 49], dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of effective kernel size') + +def error_inputs_avg_pool2d(op_info, device, **kwargs): + # error inputs when pad is negative + x = torch.rand([0, 1, 49], dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + # 2-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of effective kernel size') + # 2-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs for zero divisor + x = torch.zeros(3, 3, 3) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (2, 2), 'divisor_override': 0}), + error_regex='divisor must be not zero') + +def error_inputs_avg_pool3d(op_info, device, **kwargs): + # error inputs when pad is negative + x = torch.rand([0, 1, 49, 50], dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + # 3-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of effective kernel size') + # 3-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs for zero divisor + x = torch.zeros(3, 3, 3, 3) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (2, 2, 2), 'divisor_override': 0}), + error_regex='divisor must be not zero') + + # error inputs for invalid input dimension + x = torch.rand([0, 1, 49], dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 0}), + error_regex='non-empty 4D or 5D') + + +def sample_inputs_to(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # test_multiple_devices_to_cuda would fail if we use a different device than given + devices = [device] + if torch.device(device).type == 'cpu': + devices = [torch.device('cpu'), torch.device('cuda:0')] if torch.cuda.is_available() else devices + memory_formats = [torch.preserve_format, torch.channels_last] + + # TODO: can't switch `to.device` 
overload to use positional arguments + # https://github.com/pytorch/pytorch/issues/84265 + # to.device overload + for device, nb, cp, mem_f in product(devices, [True, False], [True, False], memory_formats): + kwargs = { + "memory_format": mem_f, + } + yield SampleInput(make_arg((S, S, S, S)), args=(device, torch.float64, nb, cp), kwargs=kwargs) + + # to.dtype overload + for nb, cp, mem_f in product([True, False], [True, False], memory_formats): + kwargs = { + "memory_format": mem_f, + } + yield SampleInput(make_arg((S, S, S, S)), args=(torch.float64, nb, cp), kwargs=kwargs) + + # to.other overload + for device, nb, cp, mem_f in product(devices, [True, False], [True, False], memory_formats): + kwargs = { + "memory_format": mem_f, + } + other = make_arg((S, S, S, S), dtype=torch.float64, device=device) + yield SampleInput(make_arg((S, S, S, S)), args=(other, nb, cp), kwargs=kwargs) + + +def sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs): + def get_tensor_input(size): + return make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad) + + yield SampleInput(get_tensor_input((S, M, S)), 3) + yield SampleInput(get_tensor_input((S, M, S)), 3, 1) + yield SampleInput(get_tensor_input((S, M, S)), 3, -2) + yield SampleInput(get_tensor_input((S, M, S)), 3, 1, True) + yield SampleInput(get_tensor_input((S, M, S)), 3, -2, True) + yield SampleInput(get_tensor_input((S, M, S)), 3, 1, True, True) + yield SampleInput(get_tensor_input((S, M, S)), 3, -2, True, True) + + yield SampleInput(get_tensor_input(()), 1) + yield SampleInput(get_tensor_input(()), 1, 0) + yield SampleInput(get_tensor_input(()), 1, -1) + yield SampleInput(get_tensor_input(()), 1, 0, True) + yield SampleInput(get_tensor_input(()), 1, -1, True) + yield SampleInput(get_tensor_input(()), 1, 0, True, True) + yield SampleInput(get_tensor_input(()), 1, -1, True, True) + +def sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(S), make_arg(M)) + +def sample_inputs_dist(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S)) + ps = (2, 4) + + for size_x, size_y, p in product(sizes, sizes, ps): + yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p)) + +# Missing to test the nondeterminism of the operation +# https://github.com/pytorch/pytorch/issues/53352 +def sample_inputs_index(op_info, device, dtype, requires_grad, reference=False, **kwargs): + # target.index_select(dim, idx) + select = "index_select" in op_info.name + # target.index_add(dim, idx, source, *, alpha=1) + add = "index_add" in op_info.name + # target.index_copy(dim, idx, source) + copy = "index_copy" in op_info.name + # target.index_fill(dim, idx, value) + fill = "index_fill" in op_info.name + + # Extended reference inputs. 
We generate that exercise atomic adds / writing + # several times to one location + if reference: + make_arg = partial(torch.ones, device=device, dtype=dtype, requires_grad=requires_grad) + make_idx = partial(torch.zeros, device=device, dtype=torch.int64) + else: + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # idx They need to be different for copy and add to be deterministic + if copy or add: + make_idx = partial(torch.randperm, device=device, dtype=torch.int64) + else: + def make_idx(n): + return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=n) + + shapes = [(), (1,), (S, S)] + # extra parameter for add + if add: + if dtype == torch.bool: + alphas = (True, False) + else: + alphas = (-1, 0, 2) + else: + alphas = (None,) + + if fill: + # A weird number to catch errors. + # The former one tests `index_fill.int_Scalar`, and the latter one tests `index_fill.int_Tensor`. + values = (make_arg((1,)).item(), make_arg(())) + else: + values = (None,) + + for shape, alpha, value in product(shapes, alphas, values): + t = make_arg(shape) + args = [] + + # dim. We handle the scalar case + dim = -1 if t.ndim == 2 else 0 + args.append(dim) + + idx = make_idx(t.shape[dim] if t.ndim != 0 else 1) + args.append(idx) + + # source + if copy or add: + args.append(make_arg(shape)) + elif fill: + args.append(value) + + args = tuple(args) + kwargs = {} if alpha is None else {"alpha": alpha} + + yield SampleInput(t, args=args, kwargs=kwargs) + +def sample_inputs_index_reduce(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_idx(n, m): + return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=m) + + shapes = [((), ()), ((1,), (1,)), ((S, S), (S, M)), ((S, S, S), (S, M, S))] + include_selfs = (True, False) + reduces = ('prod', 'mean', 'amin', 'amax') + + for shape, include_self, reduce in product(shapes, include_selfs, reduces): + self_shape, src_shape = shape + # dim. 
We handle the scalar case + dim = 1 if len(self_shape) >= 2 else 0 + idx = make_idx(src_shape[dim] if len(src_shape) != 0 else 1, + self_shape[dim] if len(self_shape) != 0 else 1) + args = (dim, idx, make_arg(src_shape), reduce) + yield SampleInput(make_arg(self_shape), + args=args, + kwargs={'include_self' : include_self}) + + # Sample inputs to test edge cases for backward + if requires_grad: + # Check that gradients are propagated correctly for prod when zeros in self/src are reduced + # This sample tests gradients for the following cases + # (a) 1 zero reduced (from source (self[0, 1]), from self (self[0, 0])) + # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0], self[1, 1]) + # (c) no zeros reduced (self[2, 1], self[2, 2]) + # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py + # test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad + input = torch.tensor([[0, 13], [0, 0], [15, 19]], dtype=dtype, device=device, requires_grad=requires_grad) + src = torch.tensor([[2, 0], [0, 0], [2, 3], [2, 2]], dtype=dtype, device=device, requires_grad=requires_grad) + idx = torch.tensor([0, 1, 2, 0], dtype=torch.long, device=device) + + yield SampleInput(input, + args=(0, idx, src, 'prod'), + kwargs={'include_self': True}) + +def sample_inputs_mode(op_info, device, dtype, requires_grad, **kwargs): + args = ( + ((S, S, S), (),), + ((S, S, S), (1, ),), + ((S, S, S), (1, True, ),), + ((), (),), + ((), (0,),), + ((), (0, True,),), + # Non-fused mode kernel on CUDA + ((3000,), ()), + ) + make_arg = partial(make_tensor, dtype=dtype, device=device, + requires_grad=requires_grad, low=None, high=None) + return (SampleInput(make_arg(input_tensor), *args) + for input_tensor, args in args) + +# Missing to test the nondeterminism of the operation +# https://github.com/pytorch/pytorch/issues/53352 +def sample_inputs_put(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False) + + S = 3 + + # Generic inputs + idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S] + idx_list = [idx, -idx - 1] + for idx, acc in product(idx_list, (True, False)): + yield SampleInput(input=make_arg((S, S)), + args=(idx.clone(), + make_arg((S,)), + acc)) + + # Scalar cases + scalar_sizes = [(), (1,)] + tgt_gen = (make_arg(size) for size in scalar_sizes) + idx_gen = (make_idx(size, high=1) for size in scalar_sizes) + src_gen = (make_arg(size) for size in scalar_sizes) + for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)): + yield SampleInput(input=tgt.clone().requires_grad_(requires_grad), + args=(idx.clone(), + src.clone().requires_grad_(requires_grad), + acc)) + + # Empty cases + tgt_sizes = [(0,), (), (1,), (3, 2)] + tgt_gen = (make_arg(size) for size in tgt_sizes) + idx = make_idx((0,), high=1) + src = make_arg((0,)) + for tgt, acc in product(tgt_gen, (True, False)): + yield SampleInput(input=tgt.clone().requires_grad_(requires_grad), + args=(idx.clone(), + src.clone().requires_grad_(requires_grad), + acc)) + +def sample_inputs_take(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False) + + S = 3 + + # Generic inputs: take S elements out of S * S + index = make_idx((S,), high=(S * 
S)) + for idx in (index, -index - 1): + yield SampleInput(input=make_arg((S, S)), args=(idx,)) + + # Scalar cases + scalar_sizes = [(), (1,)] + src_gen = (make_arg(size) for size in scalar_sizes) + idx_gen = (make_idx(size, high=1) for size in scalar_sizes) + for src, idx in product(src_gen, idx_gen): + yield SampleInput(input=src.clone().requires_grad_(requires_grad), + args=(idx.clone(),)) + + # Empty cases + src_sizes = [(0,), (), (1,), (3, 2)] + src_gen = (make_arg(size) for size in src_sizes) + + idx = make_idx((0,), high=1) + for src in src_gen: + yield SampleInput(input=src.clone().requires_grad_(requires_grad), + args=(idx.clone(),)) + +def sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg((4, 3, 2, 1)), [0, 1, 2, 3], [3, 2, 1, 0]) + yield SampleInput(make_arg((4, 3, 2, 1)), [0, -1, -2, -3], [-3, -2, -1, -0]) + +def reference_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # shape, source, destination + args = ( + # empty inputs + ((), (), ()), + # int inputs, negative + ((3, 5, 7, 2), -2, 1), + # swap bounds + ((3, 5, 7, 2), (-1, 0), (0, -1)), + # non-sequential, negative + ((2, 3, 4, 5, 6), (3, -3, 4), (1, 0, -1)), + # idempotence, negative + ((2, 3, 4, 5, 6), (-3, 4, 3, 1), (-3, 4, 3, 1)), + # reverse, sequential, positive + ((6, 2, 3, 5, 4), (4, 3, 2, 1, 0), (0, 1, 2, 3, 4)), + # reverse, non-sequential + ((6, 2, 3, 5, 4), (-3, -2, -4, -5, -1), (2, 1, 3, 4, 0)), + # reverse, sequential, negative + ((6, 2, 3, 5, 4), (4, -2, 2, -4, -5), (-5, 1, 2, -2, -1)), + ) + + for shape, source, destination in args: + yield SampleInput(make_arg(shape), args=(source, destination)) + +def error_movedim_moveaxis(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # source length < destination length + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((3, -3), (1, 0, -1))), + error_regex=(r"movedim: Invalid source or destination dims: source " + r"\(\[3, -3\] dims\) should contain the same number of " + r"dims as destination \(\[1, 0, -1\] dims\)"), + ) + + # source length > destination length + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((3, -3, 4), (1, 0))), + error_regex=(r"movedim: Invalid source or destination dims: source " + r"\(\[3, -3, 4\] dims\) should contain the same number of " + r"dims as destination \(\[1, 0\] dims\)"), + ) + + # repeated source dim, with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((0, 4, -5), (1, 0, 2))), + error_regex=r"movedim: repeated dim in `source` \(\[0, 4, -5\]\)", + ) + + # repeated destination dim, with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 0, 2), (0, 4, -5))), + error_regex=r"movedim: repeated dim in `destination` \(\[0, 4, -5\]\)", + ) + + # repeated dim (both), with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 0, -4), (0, 4, -5))), + error_regex=r"movedim: repeated dim in `source` \(\[1, 0, -4\]\)", + ) + + # out of bounds source inputs, with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((0, 1, -6), (1, 4, 2))), + error_regex=r"Dimension out of range 
\(expected to be in range of \[-5, 4\], but got -6\)", + error_type=IndexError, + ) + + # out of bounds destination inputs, with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 4, 2), (0, 1, -6))), + error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", + error_type=IndexError, + ) + + # out of bounds source input, int + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=(-6, 1)), + error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", + error_type=IndexError, + ) + + # out of bounds destination input, int + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=(3, -6)), + error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", + error_type=IndexError, + ) + +def sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),) + shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1)) + + if requires_grad: + # Tests for variant_consistency_jit, grad, gradgrad + # are slower. Use smaller bags of `rep_dims` and `shapes` + # in this case. + rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1)) # type: ignore[assignment] + shapes = ((), (0,), (2,), (3, 2)) # type: ignore[assignment] + + is_repeat_op = op_info.name in ['repeat', '_refs.repeat'] + for rep_dim, shape in product(rep_dims, shapes): + # `torch.repeat` errors for `len(rep_dims) < t.dim()`, + # so we filter such combinations. + if is_repeat_op and len(rep_dim) < len(shape): + continue + yield SampleInput(make_arg(shape), rep_dim) + + +def sample_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, *, is_narrow, **kwargs): + shapes_and_args = ( + ((S, S, S), 1, 2, 2), + ((S, S, S), -1, 2, 2), + ((S, S, S), 1, 0, 0), + ((S, S, S), -1, 0, 0), + ((S, S, S), 2, 1, 2), + ) + + for shape, dim, start, length in shapes_and_args: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(tensor, dim, start, length) + # narrow also accepts the start argument being a Tensor + if is_narrow: + yield SampleInput(tensor, dim, torch.tensor(start), length) + +def reference_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, *, is_narrow, **kwargs): + yield from sample_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, is_narrow=is_narrow, **kwargs) + + shapes_and_args = ( + # 1-dim + ((M,), 0, 0, 0), # 0 elems from the left + ((M,), -1, -1, 0), # 0 elems from the right + ((M,), 0, 5, 3), # 3 elems from the left + ((M,), 0, -5, 2), # 2 elems from the right + ((M,), -1, 0, M), # M elems from the left + ((M,), 0, -M, M), # M elems from the right + + # 2-dim + ((M, S), 1, 0, 0), # dim 1, 0 elems from the left + ((S, M), -2, -1, 0), # dim 0, 0 elems from the right + ((L, S), 1, 2, 3), # dim 1, 3 elems from the left + ((L, S), -1, 3, 2), # dim 1, 2 elems from the left + ((M, L), 0, 0, M), # dim 0, M elems from the left + ((M, L), -1, -L, L), # dim 1, L elems from the right + + # 3-dim + ((L, M, S), 2, 0, 0), # dim 2, 0 elems from the left + ((M, S, L), -1, -1, 0), # dim 2, 0 elems from the right + ((S, L, M), 2, 0, M), # dim 2, M elems from the left + ((L, S, M), -1, -M, M), # dim 2, M elems from the right + ((S, L, M), 1, 0, 0), # dim 1, 0 elems from the left + ((S, L, M), 0, 2, 1), # dim 
0, 1 elem from the left + ((M, S, M), -1, -5, 4), # dim 2, 4 elems from the right + ) + + for shape, dim, start, length in shapes_and_args: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(tensor, dim, start, length) + # narrow also accepts the start argument being a Tensor + if is_narrow: + yield SampleInput(tensor, dim, torch.tensor(start), length) + +def error_inputs_narrow_narrow_copy(op_info, device, *, is_narrow, is_ref): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # 0-dim + yield ErrorInput(SampleInput(make_arg(()), 0, 0, 1), + error_type=RuntimeError, + error_regex=r"narrow\(\) cannot be applied to a 0-dim tensor\.") + + # out of bounds dim + if not is_narrow and not is_ref and torch.device(device).type == 'cpu': + # narrow_copy_dense_cpu_out + yield ErrorInput(SampleInput(make_arg((M, S, L)), 3, 0, 0), + error_type=RuntimeError, + error_regex=r"Expected dim < static_cast\(self_sizes.size\(\)\) to be true, but got false\.") + else: + yield ErrorInput(SampleInput(make_arg((M, S, L)), 3, 0, 0), + error_type=IndexError, + error_regex=r"Dimension out of range \(expected to be in range of \[-3, 2\], but got 3\)") + # out of bounds dim (negative) + yield ErrorInput(SampleInput(make_arg((L, S, M)), -4, 0, 0), + error_type=IndexError, + error_regex=r"Dimension out of range \(expected to be in range of \[-3, 2\], but got -4\)") + + # out of bounds start + yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, M + 1, 0), + error_type=IndexError, + error_regex=r"start out of range \(expected to be in range of \[-10, 10\], but got 11\)") + # out of bounds start (negative) + yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, -M - 1, 0), + error_type=IndexError, + error_regex=r"start out of range \(expected to be in range of \[-10, 10\], but got -11\)") + + # out of bounds length + yield ErrorInput(SampleInput(make_arg((S, L, M)), 2, 0, M + 1), + error_type=RuntimeError, + error_regex=r"start \(0\) \+ length \(11\) exceeds dimension size \(10\)\.") + # out of bounds length (negative) + if not is_narrow and not is_ref and torch.device(device).type == 'cpu': + # narrow_copy_dense_cpu_out + yield ErrorInput(SampleInput(make_arg((M,)), 0, 0, -1), + error_type=RuntimeError, + error_regex=r"start \(0\) \+ length \(-1\) exceeds dimension size \(10\)\.") + else: + yield ErrorInput(SampleInput(make_arg((M,)), 0, 0, -1), + error_type=RuntimeError, + error_regex=r"narrow\(\): length must be non-negative\.") + + # Test Tensor overload that was added for XLA. Start must be an 0-dim + # integral Tensor. narrow_copy doesn't have this overload. 
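# A minimal sketch, assuming current torch.narrow semantics, of the accepted form of
# the Tensor `start` overload that the error inputs below probe with invalid
# (1-dim / bool) tensors: `start` may be a plain int or a 0-dim integral tensor.
import torch

x = torch.arange(12.).reshape(3, 4)
# both calls select columns 1..2; the second uses the 0-dim Tensor overload
assert torch.equal(x.narrow(1, 1, 2), x.narrow(1, torch.tensor(1), 2))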
+ # https://github.com/pytorch/pytorch/issues/31558 + if is_narrow: + # *1-dim* integral Tensor + yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, make_arg(S, dtype=torch.int), 2), + error_type=RuntimeError, + error_regex=r"start must be an 0-dim integral Tensor\.") + + # 0-dim *bool* Tensor (bools are not allowed) + yield ErrorInput(SampleInput(make_arg((L, M, S)), -3, make_arg((), dtype=torch.bool), 3), + error_type=RuntimeError, + error_regex=r"start must be an 0-dim integral Tensor\.") + + +def sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs): + y_shape_x_shape_and_kwargs = [ + ((2, 3), (2, 3), {}), + ((2, 3), (2, 3), {'dim': 1}), + ((6,), (6,), {}), + ((6,), None, {}), + # When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad + # See Issue #{61619} + # ((6,0), (6,0), {}), + ((2, 3), (1, 3), {}), + ((3, 3), (3, 3), {}), + ((3, 3), (3, 3), {'dim': -2}), + ((5,), None, {'dx': 2.0}), + ((2, 2), None, {'dx': 3.0}) + ] + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs: + y_tensor = make_arg(y_shape) + if x_shape is not None: + x_tensor = make_arg(x_shape) + yield SampleInput(y_tensor, x_tensor, **kwarg) + else: + yield SampleInput(y_tensor, **kwarg) + +def sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs): + + y_shape_x_shape_and_kwargs = [ + ((2, 3), (2, 3), {}), + ((2, 3), (2, 3), {'dim': 1}), + ((6,), (6,), {}), + ((6,), None, {}), + # When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad + # See Issue #{61619} + # ((6,0), (6,0), {}), + ((2, 3), (1, 3), {}), + ((3, 3), (3, 3), {}), + ((3, 3), (3, 3), {'dim': -2}), + ((5,), None, {'dx': 2.0}), + ((2, 2), None, {'dx': 3.0}) + ] + make_arg = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=None, high=None) + for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs: + y_tensor = make_arg(y_shape) + if x_shape is not None: + x_tensor = make_arg(x_shape) + yield SampleInput(y_tensor, x_tensor, **kwarg) + else: + yield SampleInput(y_tensor, **kwarg) + +def sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs): + shapes_and_axes = [ + ((3, 4, 5), 0), + ((3, 4, 5), 1), + ((3, 4, 5), 3), + ((3, 4, 5), -1), + ((3, 4, 5), -3), + ((), 0), + ((), -1), + ((1,), 0), + ((1,), -1), + ] + + for shape, axis in shapes_and_axes: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(tensor, axis) + + +def sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs): + shapes = ((0, 1, 5, 5), (2, 3, 5, 5)) + kernel_sizes = (2, (2, 2), (2, 3)) + dilations = (1, 2, (1, 2)) + paddings = (0, 1, (1, 2)) + strides = (1, 2, (1, 2)) + + cases = product(shapes, kernel_sizes, dilations, paddings, strides) + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + for shape, kernel_size, dilation, padding, stride in cases: + tensor = make_arg(shape) + yield SampleInput(tensor, kernel_size, dilation, padding, stride) + + # With default args + yield SampleInput(make_arg((1, 1, 5, 5)), (3, 3)) + + +def sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs): + shapes_and_args = ( + ((S, 1, S, 1), ()), + ((1, 1, 1, 1), ()), + ((1, 1, 1, 1), (0,)), + ((S, 1, S, 1), (1,)), + ((S, 1, S, 1), (-1,)), + ((S, 1, S, 1), (2,)), + ((S, 1, 
S, 1), (-2,)), + ((), (0, )), + ) + + for shape, args in shapes_and_args: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + + yield SampleInput(tensor, args=args) + + +def sample_inputs_squeeze_multiple(op_info, device, dtype, requires_grad, **kwargs): + shapes_and_args = ( + ((1, 1, 1, 1), ()), + ((S, 1, S, 1), (1,)), + ((S, 1, S, 1), (-1,)), + ((S, 1, S, 1), (1, 3)), + ((S, 1, S, 1), (1, 2,)), + ((), (0,)), + ) + + for shape, dims in shapes_and_args: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + + yield SampleInput(tensor, dims) + + +def _squeeze_ref(x, axis=None): + # NumPy doesn't allow squeezing scalars + if x.ndim == 0: + return x + + if isinstance(axis, Sequence): + # Numpy doesn't allow specifying non-singular dimensions + axis = tuple(a for a in axis if x.shape[a] == 1) + + if isinstance(axis, int) and x.shape[axis] != 1: + return x + + return np.squeeze(x, axis) + +def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs): + assert mode in ('constant', 'reflect', 'replicate', 'circular') + if mode in ['reflect', 'replicate']: + cases: tuple = ( # ignore + ((1, 3), (1, 2)), + ((1, 3), (0, 1)), + ((0, 3, 3), (1, 2)), + ((0, 3, 3), (0, 1)), + ((1, 3, 3), (1, 2)), + ((1, 3, 3), (0, 1)), + ((1, 3, 3), (0, 2, 0, 1)), + ((0, 3, 3, 3), (0, 2, 0, 1)), + ((3, 3, 5, 5), (0, 2, 0, 1)), + ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)), + ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), + ((1, 3, 4, 4), (-1, 1, -2, 1)), + ) + elif mode == 'constant': + cases = ( + ((1, 3), (1, 2)), + ((1, 3), (0, 1)), + ((1, 3), (0, 2, 0, 1)), + ((0, 3, 3), (1, 2)), + ((0, 3, 3), (0, 1)), + ((0, 3, 3), (0, 2, 0, 1)), + ((0, 3, 3), (1, 1, 1, 1, 1, 1)), + ((1, 3, 3), (1, 2)), + ((1, 3, 3), (0, 1)), + ((1, 3, 3), (0, 2, 0, 1)), + ((1, 3, 3), (1, 1, 1, 1, 1, 1)), + ((0, 3, 3, 3), (1, 2)), + ((0, 3, 3, 3), (0, 1)), + ((0, 3, 3, 3), (0, 2, 0, 1)), + ((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)), + ((3, 3, 5, 5), (1, 2)), + ((3, 3, 5, 5), (0, 1)), + ((3, 3, 5, 5), (0, 2, 0, 1)), + ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)), + ((1, 3, 3, 3, 3), (1, 2)), + ((1, 3, 3, 3, 3), (0, 1)), + ((1, 3, 3, 3, 3), (0, 2, 0, 1)), + ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), + ((1, 3, 4, 4), (-1, 1, -2, 1)), + ) + else: # mode == 'circular' + if dtype == torch.bool: + # test_dtypes fails on ASAN with for the case ab + # runtime error: load of value 190, which is not a valid value for type 'bool' + # Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562 + # Reference Issue: https://github.com/pytorch/pytorch/issues/63034 + cases = ( + ((2, 3, 3), (1, 2)), + ((1, 3, 3), (1, 2)), + ) + else: + cases = ( + ((0, 3, 3), (1, 2)), + ((0, 3, 3), (0, 1)), + ((1, 3, 3), (1, 2)), + ((1, 3, 3), (0, 1)), + ((0, 3, 3, 3), (0, 2, 0, 1)), + ((3, 3, 5, 5), (0, 2, 0, 1)), + ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), + ((1, 3, 4, 4), (-1, 1, -2, 1)), + ) + + make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if mode == 'constant': + # Default args + yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),)) + + if mode in ['reflect', 'replicate', 'circular']: + for shape, pad in cases: + yield SampleInput(make_inp(shape), args=(pad, mode)) + else: # mode == 'constant' + for pad_value in (1., 2.): + for shape, pad in cases: + yield SampleInput(make_inp(shape), args=(pad, mode, pad_value)) + +def sample_inputs_nn_pad_replicate_negative(op_info, device, dtype, requires_grad, **kwargs): + 
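# A minimal sketch, assuming torch.nn.functional.pad accepts negative values in
# 'replicate' mode (the case this sampler exercises): a negative entry crops the
# corresponding side, so the padded width is W + pad_left + pad_right.
import torch
import torch.nn.functional as F

x = torch.rand(2, 3, 4, 4)
y = F.pad(x, (-1, 2, 0, 0), mode='replicate')  # crop 1 column on the left, replicate 2 on the right
# expected shape under that assumption: (2, 3, 4, 4 - 1 + 2) == (2, 3, 4, 5)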
cases: tuple = ( + ((5, 3, 4, 4), (-4, 5, 0, 0)), + ((6, 2, 4, 4), (0, 0, 2, -4)), + ((5, 6, 4, 4), (5, -4, -4, 3)), + ((4, 2, 5, 5), (-2, -1, 4, 6)), + ((2, 6, 5, 5), (8, -1, -1, -3)), + ((8, 1, 5, 5), (-2, -1, -1, -3)), + ) + make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for shape, pad in cases: + yield SampleInput(make_inp(shape), args=(pad, 'replicate')) + +def sample_inputs_constant_pad_nd(op_info, device, dtype, *args, **kwargs): + # Inherit sample inputs from nn.pad, but transform them to fit + # constant_pad_nd's interface + nn_samples = sample_inputs_nn_pad(op_info, device, dtype, *args, + mode='constant', **kwargs) + + # NOTE: primTorch is more strict about the type of the fill value argument + # So we must cast it to the correct dtype + from torch._prims_common import dtype_to_type + scalar_type = dtype_to_type(dtype) + + def drop_mode_argument(input, pad, mode=None, value=None): + if value is None: + return SampleInput(input, args=(pad,)) + else: + return SampleInput(input, args=(pad, scalar_type(value))) + + for sample in nn_samples: + yield drop_mode_argument(sample.input, *sample.args, **sample.kwargs) + +def sample_inputs_repeat_interleave(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_input(()), repeats=2) + yield SampleInput(make_input((2, 3, 4)), repeats=2) + yield SampleInput(make_input((2, 3, 4)), repeats=2, dim=1) + yield SampleInput(make_input((2, 3, 4)), repeats=torch.arange(3, device=device), dim=1) + + +def sample_inputs_stft(op_info, device, dtype, requires_grad, **kwargs): + def mt(shape, **kwargs): + return make_tensor(shape, device=device, dtype=dtype, + requires_grad=requires_grad, **kwargs) + + yield SampleInput(mt(100), n_fft=10, return_complex=True) + yield SampleInput(mt(100), n_fft=10, return_complex=False) + if dtype.is_complex: + yield SampleInput(mt(100), n_fft=10) + + for center in [False, True]: + yield SampleInput(mt(10), n_fft=7, center=center, return_complex=True) + yield SampleInput(mt((10, 100)), n_fft=16, hop_length=4, + center=center, return_complex=True) + + window = mt(16, low=.5, high=2.0) + yield SampleInput( + mt((2, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center)) + yield SampleInput( + mt((3, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center)) + if not dtype.is_complex: + yield SampleInput( + mt((10, 100)), n_fft=16, window=window, onesided=False, + return_complex=True) + + +def sample_inputs_istft(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def mt(shape, **kwargs): + real_shape = shape if dtype.is_complex else shape + (2,) + return make_arg(real_shape, **kwargs) + + yield SampleInput(mt((10, 2)), kwargs=dict(n_fft=10)) + yield SampleInput(mt((6, 3)), kwargs=dict(n_fft=6, onesided=False)) + yield SampleInput(mt((6, 4)), kwargs=dict(n_fft=10, onesided=True)) + + for center in [False, True]: + yield SampleInput(mt((10, 10, 6)), kwargs=dict(n_fft=10, center=center)) + yield SampleInput(mt((1, 9, 10)), kwargs=dict(n_fft=16, hop_length=4, center=center)) + + window = make_arg(10, low=.5, high=2.0) + yield SampleInput(mt((10, 10, 6)), kwargs=dict( + n_fft=10, window=window, center=center, return_complex=dtype.is_complex)) + yield SampleInput(mt((10, 10, 10)), kwargs=dict( + n_fft=10, window=window[:8], win_length=8, 
center=center, return_complex=True)) + + real_window = window if not dtype.is_complex else window.real + yield SampleInput(mt((10, 5, 6)), kwargs=dict(n_fft=8, window=real_window[:8], center=center)) + +def sample_inputs_ormqr(op_info, device, dtype, requires_grad, **kwargs): + # create a helper function wrapping `make_tensor` + make_input = partial(make_tensor, dtype=dtype, device=device, low=-1, high=1) + + batches = [(), (0, ), (2, ), (2, 1)] + ns = [5, 2, 0] + tf = [True, False] + for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf): + input = make_input((*batch, m, n)) + reflectors, tau = torch.geqrf(input) + reflectors.requires_grad_(requires_grad) + tau.requires_grad_(requires_grad) + other_matrix_shape = (m, n) if left else (n, m) + other = make_input((*batch, *other_matrix_shape), requires_grad=requires_grad) + yield SampleInput(reflectors, tau, other, left=left, transpose=transpose) + + +def sample_inputs_cholesky_solve(op_info, device, dtype, requires_grad=False, **kwargs): + cholesky_inverse_samples = sample_inputs_linalg_cholesky_inverse( + op_info, device, dtype, requires_grad=False + ) + + for sample in cholesky_inverse_samples: + psd_matrix = sample.input + sample.input = make_tensor(psd_matrix.shape, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None) + sample.args = (psd_matrix.requires_grad_(requires_grad),) + yield sample + + +def sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_fullrank_matrices_with_distinct_singular_values, + dtype=dtype, device=device, requires_grad=requires_grad) + + # not needed once OpInfo tests support Iterables + batch_shapes = ((), (3,), (3, 3)) + for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)): + shape = batch_shape + (S + size_delta, S) + input = make_arg(*shape) + yield SampleInput(input, args=(True, get_infos)) + + +def sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs): + def out_fn(output): + return output[1], output[2] + + for lu_sample in sample_inputs_linalg_lu(op_info, device, dtype, requires_grad, **kwargs): + lu_data, pivots = torch.linalg.lu_factor(lu_sample.input) + lu_data.requires_grad_(requires_grad) + yield SampleInput(lu_data, pivots).with_metadata(output_process_fn_grad=out_fn) + + +def sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2))) + + for arg in args: + yield SampleInput(make_arg((0, 0, 0)), args=arg) + yield SampleInput(make_arg((S, S, S)), args=arg) + + # Scalar tensor + yield SampleInput(make_arg(()), args=(10, )) + +def error_inputs_roll(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + err_msg1 = "`shifts` required" + s1 = SampleInput(make_arg((S,)), ()) + yield ErrorInput(s1, error_regex=err_msg1) + + err_msg2 = ("shifts and dimensions must align") + s2 = SampleInput(make_arg((S, S)), (2, 1), 0) + yield ErrorInput(s2, error_regex=err_msg2) + + err_msg3 = ("out of range") + s3 = SampleInput(make_arg((S, )), 0, 2) + yield ErrorInput(s3, error_regex=err_msg3, error_type=IndexError) + + err_msg4 = ("Dimension specified as 0") + s4 = SampleInput(make_arg(()), 0, 0) + yield ErrorInput(s4, error_regex=err_msg4, error_type=IndexError) + +def sample_inputs_rot90(op_info, device, dtype, 
requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + args = itertools.product(range(-5, 6), [(0, 1), (1, 2), (1, -1)]) + + yield SampleInput(make_arg((S, S, S))) + for arg in args: + yield SampleInput(make_arg((S, S, S)), args=arg) + + +def error_inputs_rot90(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + err_msg1 = "expected total rotation dims" + s1 = SampleInput(make_arg((S, S)), dims=(0,)) + yield ErrorInput(s1, error_regex=err_msg1) + + err_msg2 = "expected total dims >= 2" + s2 = SampleInput(make_arg((S,))) + yield ErrorInput(s2, error_regex=err_msg2) + + err_msg3 = "expected rotation dims to be different" + s3 = SampleInput(make_arg((S, S)), dims=(1, 1)) + yield ErrorInput(s3, error_regex=err_msg3) + + +def sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs): + tensor_nd = partial(make_tensor, (S, S, S), device=device, dtype=dtype, + requires_grad=requires_grad) + tensor_1d = partial(make_tensor, (S,), device=device, dtype=dtype, + requires_grad=requires_grad) + + yield SampleInput(tensor_nd()) + yield SampleInput(tensor_nd(), dim=1) + yield SampleInput(tensor_nd(), dim=1, unbiased=True, keepdim=True) + yield SampleInput(tensor_1d(), dim=0, unbiased=True, keepdim=True) + yield SampleInput(tensor_1d(), dim=0, unbiased=False, keepdim=False) + + yield SampleInput(tensor_nd(), dim=(1,), correction=1.3) + yield SampleInput(tensor_nd(), dim=(1,), correction=S // 2) + yield SampleInput(tensor_nd(), dim=None, correction=0, keepdim=True) + yield SampleInput(tensor_nd(), dim=None, correction=None) + yield SampleInput(tensor_nd(), correction=0, keepdim=True) + yield SampleInput(make_tensor(3, 4, 5, device=device, dtype=dtype, requires_grad=requires_grad), dim=-3) + + +def sample_inputs_std_var_unbiased(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad) + + # Test var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + yield SampleInput(make_arg((S, S)), True) + yield SampleInput(make_arg((S,)), False) + + +def _generate_correlation_inputs(device, dtype, requires_grad, **kwargs): + shapes = [(2,), (1, 2), (3, 2), (2, 3)] + for shape in shapes: + yield make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) + + +def sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs): + return (SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)) + + +def sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs): + for t in _generate_correlation_inputs(device, dtype, requires_grad): + yield SampleInput(t) + num_observations = t.numel() if t.ndimension() < 2 else t.size(1) + fweights = make_tensor((num_observations,), dtype=torch.int, device=device, low=1, high=10) + aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=0, high=1, requires_grad=requires_grad) + for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]): + yield SampleInput(t.clone().requires_grad_(requires_grad), + correction=correction, fweights=fw, aweights=aw) + + +def error_inputs_cov(op_info, device, **kwargs): + a = torch.rand(S, device=device) + yield ErrorInput( + SampleInput(torch.rand(S, S, S, device=device)), + error_regex="expected input to have two or fewer dimensions") + yield ErrorInput( + SampleInput(a, fweights=torch.rand(S, S, device=device)), + 
error_regex="expected fweights to have one or fewer dimensions") + yield ErrorInput( + SampleInput(a, aweights=torch.rand(S, S, device=device)), + error_regex="expected aweights to have one or fewer dimensions") + yield ErrorInput( + SampleInput(a, fweights=torch.rand(S, device=device)), + error_regex="expected fweights to have integral dtype") + yield ErrorInput( + SampleInput(a, aweights=torch.tensor([1, 1], device=device)), + error_regex="expected aweights to have floating point dtype") + yield ErrorInput( + SampleInput(a, fweights=torch.tensor([1], device=device)), + error_regex="expected fweights to have the same numel") + yield ErrorInput( + SampleInput(a, aweights=torch.rand(1, device=device)), + error_regex="expected aweights to have the same numel") + yield ErrorInput( + SampleInput(a, fweights=torch.tensor([-1, -2, -3, -4 , -5], device=device)), + error_regex="fweights cannot be negative") + yield ErrorInput( + SampleInput(a, aweights=torch.tensor([-1., -2., -3., -4., -5.], device=device)), + error_regex="aweights cannot be negative") + + +def sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = [((1, 2, 3, 4), (0, 2, 3, 1)), + ((1, 2, 3, 4), (0, -2, -1, 1)), + ((), ()), + ((1, 2, 3, 4), (2, 1, 3, 0))] + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=(args,)) + +def reference_inputs_permute(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_permute(op, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + ((), ()), + ((1,), (0,)), + ((2, 2), (1, 0)), + ((2, 2), (0, 1)), + ((2, 0, 1), (0, 2, 1)), + ((3, 4, 2), (2, 1, 0)), + ((3, 4, 2), (1, 0, 2)), + ((3, 4, 2), (0, 1, 2)), + ) + + # Adds tricky permutations and permutations with noncontiguity + for shape, permutation in cases: + for p in itertools.permutations(permutation): + a = make_arg(shape).permute(p) + yield SampleInput(a, args=(permutation,)) + + a = make_arg(shape, noncontiguous=True).permute(p) + yield SampleInput(a, args=(permutation,)) + +def error_inputs_softshrink(op, device, **kwargs): + yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device), kwargs={"lambd": -0.5}), + error_regex="lambda must be greater or equal to 0, but found to be -0.5") + +def sample_inputs_softshrink(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # The additional sample is to check additional values of lambd beyond the default + # value (what is already checked by sample_inputs_elementwise_unary) + for lbda in (0., 0.5): + yield SampleInput(make_arg(S, S), kwargs={"lambd": lbda}) + + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) + +def sample_inputs_hardshrink(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # The additional sample is to check additional values of lambd beyond the default + # value (what is already checked by sample_inputs_elementwise_unary) + # Note that unlike softshrink, lambd is allowed to be negative for hardshrink + for lbda in (-0.5, 0., 0.5): + yield SampleInput(make_arg(S, S), kwargs={"lambd": lbda}) + + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) + + +def 
sample_inputs_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # The additional sample is to check additional values of min_val and max_val beyond the default + # value (what is already checked by sample_inputs_elementwise_unary) + for max_val, min_val in ((-0.5, 0.5), (0.5, -0.5), (0., 0.)): + yield SampleInput(make_arg(S, S), kwargs={"min_val": min_val, "max_val": max_val}) + + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) + + +def sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs): + def c(t): + return t.clone().requires_grad_(requires_grad) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + x = make_arg((3,)) + y = make_arg((4,)) + A = make_arg((2, 3,)) + B = make_arg((1, 3,)) + C = make_arg((1, 2, 3,)) + D = make_arg((1, 3, 4,)) + E = make_arg((4, 4,)) + H = make_arg((3, 3,)) + I = make_arg((1, 3, 1,)) + + # Vector operations + yield SampleInput([c(x)], 'i->') # sum + yield SampleInput([c(x), c(y)], 'i,j->ij') # outer + + # Matrix operations + yield SampleInput([c(A)], "ij->i") # col sum + yield SampleInput([c(A), c(B)], "ij,kj->ik") # matmul + yield SampleInput([c(A), c(E)], "ij,Ab->ijAb") # matrix outer product + + # Tensor operations + yield SampleInput([c(C), c(D)], "aij,ajk->aik") # batch matmul + yield SampleInput([c(D), c(E)], "aij,jk->aik") # tensor matrix contraction + yield SampleInput([c(C), c(B)], "ijk,ik->j") # non contiguous + + # Test diagonals + yield SampleInput([c(I)], 'iji->j') # non-contiguous trace + + # Test ellipsis + yield SampleInput([c(H)], "i...->...") + yield SampleInput([c(C), c(x)], '...ik, ...j -> ij') + + +def sample_inputs_flip(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + sizes = ((S, M, S), (S, 0, M)) + all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ()) + + for size, dims in product(sizes, all_dims): + yield SampleInput(make_arg(size), kwargs={"dims": dims}) + +def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs): + shapes = [ + (S, M, S), + (S, 0, M), + ] + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + return (SampleInput(make_arg(shape, low=None, high=None)) for shape in shapes) + +def error_inputs_fliplr(op, device, **kwargs): + yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device)), + error_regex="Input must be >= 2-d.") + +def error_inputs_flipud(op, device, **kwargs): + yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device)), + error_regex="Input must be >= 1-d.") + +def sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + shape = (S, M, S) + + yield SampleInput(make_arg(shape), args=(make_arg(shape), make_arg(shape))) + yield SampleInput(make_arg(shape), args=(make_arg(shape[1:]), make_arg(shape[1:]))) + yield SampleInput(make_arg(shape), args=(make_arg((S, 1, S)),)) + yield SampleInput(make_arg(shape), args=(None, make_arg(shape))) + yield SampleInput(make_arg(shape), args=(make_arg(shape), None)) + +def reference_inputs_elementwise_ternary(op, device, dtype, requires_grad, *, sample_inputs_func, supports_scalars=False, **kwargs): + yield from sample_inputs_func(op, device, dtype, 
requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_scalar_tensor = partial(make_tensor, (), device='cpu', dtype=dtype, requires_grad=requires_grad) + supported_dtypes = op.supported_dtypes(device) + + # broadcasting and oncontiguous cases + cases = ( + ((4, 4), (4, 4), (4, 4)), + ((4, 4), (1, 4, 4), (4, 4)), + ((4, 4), (1, 4, 4), (4, 1, 4)), + ((4, 4, 1), (1, 4, 4), (4, 4)), + ((4, 1), (1, 4, 4), (1, 4)), + ((4, 4), (), (4, 4)), + ((4, 4), (), ()), + ((), (4, 4), (1, 4, 4)), + ) + + for a, b, c in cases: + yield SampleInput(make_arg(a), args=(make_arg(b), make_arg(c))) + yield SampleInput(make_arg(a, noncontiguous=True), + args=(make_arg(b).transpose(0, -1), make_arg(c, noncontiguous=True).transpose(0, -1))) + + # scalar cases + if supports_scalars: + cases = [ + ((), 1, 2,), + ((), 1., 2), + ((4, 4), 1., 2,), + ((3, 4), make_scalar_tensor(), make_scalar_tensor()), + ] + + if torch.complex64 in supported_dtypes: + cases.extend([ + ((3, 1, 4), complex(1, 2), 3.), + ]) + + for a, b, c in cases: + yield SampleInput(make_arg(a), args=(b, c)) + + # type promotion cases + # int x float + if torch.float in supported_dtypes and torch.long in supported_dtypes: + a = make_arg((), dtype=torch.long) + b = make_arg((1, 4), dtype=torch.float) + c = make_arg((3, 4)) + + cases = ( + (a, b, c), + (c, a, b), + ) + + for a, b, c in cases: + yield SampleInput(a, args=(b, c)) + + # NaN propagation + if dtype.is_floating_point or dtype.is_complex: + nan = float('nan') if dtype.is_floating_point else complex(float('nan'), float('nan')) + + a = make_arg((12,)) + a[4] = nan + a[7] = nan + b = make_arg((12,)) + b[1] = nan + b[7] = nan + c = make_arg((12,)) + c[9] = nan + + yield SampleInput(a, args=(b, c)) + + +def _clamp_min_numpy(a, min=None): + return np.maximum(a, min) + + +def _clamp_max_numpy(a, max=None): + return np.minimum(a, max) + + +def _clamp_numpy(a, min=None, max=None): + if min is None: + return np.minimum(a, max) + if max is None: + return np.maximum(a, min) + + return np.minimum(max, np.maximum(a, min)) + + +def sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs): + def make_arg(shape): + # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck + return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) + + def prod_zeros(dim_select): + assert len(dim_select) == 2 + result = make_arg(3 * (S,)) + result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_() + result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_() + result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_() + return result + + for dim in range(3): + yield SampleInput(make_arg((S, S, S)), args=(dim,)) + # Scalar tensors and empty tensor + for size in [(), (1,), (0,)]: + yield SampleInput(make_arg(size), args=(0,)) + + yield SampleInput(prod_zeros([0, 1]), args=(1,)) + yield SampleInput(prod_zeros([0, 2]), args=(1,)) + yield SampleInput(prod_zeros([1, 2]), args=(1,)) + + # test dtype kwarg + yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype}) + +def sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs): + yield SampleInput(make_tensor((S, 2), dtype=dtype, device=device, requires_grad=requires_grad)) + +def sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + sizes = ((S, S), 
()) + return (SampleInput(make_arg(size)) for size in sizes) + +def error_inputs_complex(op_info, device, is_ref=False, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + + if is_ref: + error_float = "Expected both inputs to be Half, Float or Double tensors but got torch.float32 and torch.int32" + error_dtype = "Expected object of scalar type torch.float32 but got scalar type torch.float64 for second argument" + error_out = "Expected out tensor to have dtype torch.complex128 but got torch.complex64 instead" + else: + error_float = "Expected both inputs to be Half, Float or Double tensors but got Float and Int" + error_dtype = "Expected object of scalar type Float but got scalar type Double for second argument" + error_out = "Expected object of scalar type ComplexDouble but got scalar type ComplexFloat for argument 'out'" + + yield ErrorInput(SampleInput(make_arg(M, S), make_arg(M, S, dtype=torch.int)), + error_type=RuntimeError, error_regex=error_float) + + yield ErrorInput(SampleInput(make_arg(M, S), make_arg(M, S, dtype=torch.float64)), + error_type=RuntimeError, error_regex=error_dtype) + + yield ErrorInput(SampleInput(make_arg(M, S, dtype=torch.float64), make_arg(M, S, dtype=torch.float64), + out=make_arg(M, S, dtype=torch.complex64)), + error_type=RuntimeError, error_regex=error_out) + +def sample_inputs_logaddexp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + shape = (S, S) + yield SampleInput(make_arg(shape), make_arg(shape)) + +def sample_inputs_prod(op_info, device, dtype, requires_grad, **kwargs): + def make_arg(shape): + # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck + return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) + + def prod_single_zero(): + result = make_arg(2 * (S,)) + result[0, 1] = 0 + return result + + for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): + # only Tensor, ignore other inputs + yield SampleInput(sample.input.clone().requires_grad_(requires_grad)) + yield sample + + # Generates samples with keepdim = True + for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): + sample.kwargs['keepdim'] = True + yield sample + + yield SampleInput(prod_single_zero()) + yield SampleInput(make_arg((3, 3, 3)), args=(1,)) + yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True}) + + yield SampleInput(make_arg((3, 0)), args=(1,)) + yield SampleInput(make_arg((3, 0)), args=(1,), kwargs={'keepdim': True}) + yield SampleInput(torch.tensor([2., 3, 0, 0], dtype=dtype, device=device, requires_grad=requires_grad)) + + # test zero scalar tensor + zero = make_arg(()) + zero.zero_() + yield SampleInput(zero.clone().requires_grad_(requires_grad)) + yield SampleInput(zero.clone().requires_grad_(requires_grad), args=(0,)) + yield SampleInput(zero.clone().requires_grad_(requires_grad), + args=(0,), + kwargs={'keepdim': True}) + +def error_inputs_neg(op_info, device, **kwargs): + si = SampleInput(torch.tensor((False, True), device=device)) + msg = ("Negation, the `\\-` operator, on a bool tensor is not supported." 
+ " If you are trying to invert a mask, use the `\\~` or" + " `logical_not\\(\\)` operator instead.") + yield ErrorInput(si, error_regex=msg) + +def sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput(make_arg(M)) + + tensors = ( + make_arg((M, M)), + make_arg((3, 5)), + make_arg((5, 3)), + ) + + args = ((), (2,), (-2,), (1,), (2,)) + + for tensor, arg in product(tensors, args): + yield SampleInput(tensor.clone().requires_grad_(requires_grad), *arg) + +def reference_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_diagonal_diag_embed( + op_info, device, dtype, requires_grad, **kwargs) + + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes1d = ((0,), (1,)) + shapes2d = ((L, M),) + shapes3d = ((L, M, S),) + + kwargs1d = {} + + kwargs2d = ( + # dim1 > dim2 is allowed + dict(dim1=1, dim2=0), + # negative dims are allowed + dict(dim1=-2, dim2=-1), + # one dim negative and the other nonnegative is allowed + dict(dim1=-1, dim2=0), + # out of bounds offset should return an empty tensor in diagonal and + # offset the diagonal in diag_embed + dict(offset=100), + ) + + kwargs3d = kwargs2d + ( + # make sure we can use non-sequential dims + dict(offset=-1, dim1=0, dim2=2), + ) + + samples1d = product(shapes1d, kwargs1d) + samples2d = product(shapes2d, kwargs2d) + samples3d = product(shapes3d, kwargs3d) + + for shape, kwargs in chain(samples1d, samples2d, samples3d): + if 'diagonal' in op_info.name: + # these are error inputs for diagonal + if shape in ((0,), (1,)): + continue + yield SampleInput(input=make_arg(shape), kwargs=kwargs) + + +def sample_inputs_diagonal_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # Shapes for 2D Tensors + shapes_2d = ((M, M), (3, 5), (5, 3)) + + # Shapes for 3D Tensors + shapes_3d = ((M, M, M),) + + args_2d = ((), (2,), (-2,), (1,)) + args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1)) + + for input_shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)): + input_ = make_arg(input_shape) + # We can programmatically figure out the right shape for src: + # It should be the same size as input.diagonal(other_args...) 
+ if not isinstance(arg, tuple): + arg_tuple = (arg,) + else: + arg_tuple = arg + src_shape = input_.diagonal(*arg_tuple).size() + src = make_arg(src_shape) + yield SampleInput(input_, args=(src, *arg_tuple)) + + +def sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S))).with_metadata(output_process_fn_grad=lambda x: x.to_dense()) + yield SampleInput(make_arg((S, S)), 1).with_metadata(output_process_fn_grad=lambda x: x.to_dense()) + +def sample_inputs_cross_entropy(op_info, device, dtype, requires_grad, **kwargs): + batch_size, num_classes = shape = (2, 3) + reductions = ("mean", "sum", "none") + + input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [ + (shape, {}), + ((*shape, 1), {}), + ((*shape, 1, 2), {}), + ((*shape, 1, 2, 3), {}), + *[(shape, dict(reduction=reduction)) for reduction in reductions], + *[ + ( + shape, + dict( + weight=make_tensor((num_classes,), device=device, dtype=dtype), + reduction=reduction, + ), + ) + for reduction in reductions + ], + (shape, dict(ignore_index=1)), + ] + + for (input_shape, kwargs), probabilities_target in itertools.product(input_shape_and_kwargs, (False, True)): + input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad) + + if probabilities_target: + # ignore_index is not supported for probabilities target + if "ignore_index" in kwargs: + continue + + target = make_tensor( + input_shape, + low=0, + high=1, + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + else: + target = make_tensor( + (batch_size, *input_shape[2:]), + low=0, + high=num_classes, + device=device, + dtype=torch.long, + ) + + if "ignore_index" in kwargs and torch.all(target == kwargs["ignore_index"]): + # make sure at least one item in target is not ignored + target[0] = random.sample(sorted(set(range(num_classes)) - {kwargs["ignore_index"]}), 1)[0] + + yield SampleInput(input, target, **kwargs) + + +def sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs): + low, high = op_info.domain + + # Note: Operator is very sensitive at points near the + # start and end of domain and leads to NaN for float16 + # if domain_eps is 1e-5. + if dtype.is_floating_point or dtype.is_complex: + domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2 + + low = low + domain_eps + high = high - domain_eps + + make_arg = partial(make_tensor, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S, S))) + yield SampleInput(make_arg((S, S, S)), 0.2) + yield SampleInput(make_arg(())) + yield SampleInput(make_arg(()), 0.2) + +def sample_inputs_isin(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # isin has two paths based on the size of elements and test_elements. 
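+ # (the cutoff is elements.numel() < 10 * test_elements.numel()**0.145, i.e. roughly 12.6 when
+ # test_elements has 5 elements and roughly 15.4 when it has 20)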
+ # if elements.numel() < 10 * pow(test_elements.numel(), 0.145): + yield SampleInput(make_arg((L,)), args=(make_arg((S,)),)) + # else: + yield SampleInput(make_arg((S,)), args=(make_arg((L,)),)) + +def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S)))) + yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S)))) + yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S)))) + yield SampleInput(make_arg((S,)), + args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))), + broadcasts_input=True) + +def error_inputs_masked_scatter(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float) + for mask_dtype in [torch.float, torch.uint8]: + yield ErrorInput(SampleInput(make_arg(1, 3), args=(torch.ones(1, 3, device=device, dtype=mask_dtype), + make_arg(3, 4))), + error_regex=r"masked_scatter_ only supports boolean masks") + +def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10)) + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(()))) + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10)) + yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10)) + yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(()))) + yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10)) + + yield SampleInput(make_arg((S,)), + args=(torch.randn(S, S, device=device) > 0, make_arg(())), + broadcasts_input=True) + yield SampleInput(make_arg((S,)), + args=(torch.randn(S, S, device=device) > 0, 10), + broadcasts_input=True) + + if torch.device(device).type == 'cuda': + # `self` and `mask` on CUDA but `value` is a CPU scalar tensor. + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, torch.randn(()))) + +def error_inputs_masked_fill(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + # `value` is not a 0-D tensor. + yield ErrorInput(SampleInput(make_arg((2, 2)), args=(make_arg(()) > 0, make_arg((1,)))), + error_regex="only supports a 0-dimensional value tensor, but got tensor with 1 dimension") + # downcasting complex value (scalar overload) + yield ErrorInput(SampleInput(make_arg((2, 2)), args=(make_arg(()) > 0, 1j)), + error_regex=r"value cannot be converted to type .* without overflow") + # downcasting complex value (tensor overload) + yield ErrorInput(SampleInput(torch.ones(2, dtype=torch.long, device=device), + args=(make_arg(()) > 0, torch.tensor(1j, device=device))), + error_regex=r"value cannot be converted to type .* without overflow") + + if torch.device(device).type == 'cuda': + # `self` and `mask` on CPU but `value` is a CUDA scalar tensor. 
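+ # (this is the reverse of the CUDA-only sample in sample_inputs_masked_fill above: a CPU self/mask
+ # combined with a CUDA value tensor is expected to fail)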
+ yield ErrorInput(SampleInput(torch.randn((S, S), device='cpu'), + args=(torch.randn(S, S, device='cpu') > 0, + torch.randn((), device='cuda'))), + error_regex=r"to be on same device") + + +def sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + + yield SampleInput(make_arg((M, M)), torch.randn(M, M, device=device) > 0) + + yield SampleInput(make_arg((M, M)), torch.randn((M,), device=device) > 0) + yield SampleInput(make_arg((M,)), torch.randn((M, M), device=device) > 0) + + yield SampleInput(make_arg((M, 1, M)), torch.randn((M, M), device=device) > 0) + + yield SampleInput(make_arg(()), torch.tensor(1, device=device, dtype=torch.bool)) + + yield SampleInput(make_arg((M, M)), torch.tensor(1, device=device, dtype=torch.bool)) + + yield SampleInput(make_arg(()), torch.randn((M, M), device=device) > 0) + +def sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg((S, S))) + yield SampleInput(make_arg((S, S, S))) + +def sample_inputs_matmul(op_info, device, dtype, requires_grad, is_rmatmul=False, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, + high=None, requires_grad=requires_grad) + test_cases = (((L,), (L,)), + ((S, M), (M,)), + ((M,), (M, S)), + ((S, M), (M, S)), + ((S, 0), (0, M)), + ((S, S, M), (M,)), + ((S, S, M), (M, S)), + ((S, S, 0), (0, S)), + ((M,), (S, M, S)), + ((S, M), (S, M, S)), + ((0, 0), (S, 0, 0)), + ((S, S, M, M), (S, S, M, S)), + ((S, S, M, M), (M,)), + ((M,), (S, S, M, S)), + ((S, S, S), (1, S, S)) + ) + for lhs_shape, rhs_shape in test_cases: + lhs = make_arg(lhs_shape) + rhs = make_arg(rhs_shape) + if not is_rmatmul: + yield SampleInput(lhs, rhs) + else: + yield SampleInput(rhs, lhs) + + +def sample_inputs_meshgrid(op_info: OpInfo, device: torch.device, dtype: torch.dtype, + requires_grad: bool, + *, variant: str, **kwargs) -> List[SampleInput]: + if variant == 'variadic': + def make_inputs( + tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor, + List[torch.Tensor]], + Tuple[torch.Tensor, ...]]: + return tensors + elif variant == 'list': + def make_inputs( + tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor, + List[torch.Tensor]], + Tuple[torch.Tensor, ...]]: + return [tensors] + else: + raise ValueError( + 'Unsupported variant, must be one of {"variadic", "list"}. ' + f'Got "{variant}".') + + SCALAR = torch.Size([]) + VECTOR = torch.Size([3]) + test_cases: List[List[torch.Size]] = [ + [SCALAR], + [VECTOR], + [VECTOR, SCALAR], + [VECTOR, SCALAR, VECTOR], + [VECTOR, SCALAR, VECTOR, SCALAR], + ] + + for shapes, indexing in itertools.product(test_cases, {'xy', 'ij'}): + args = make_inputs( + [make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes]) + yield SampleInput(*args, indexing=indexing) + + +def sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + tensor_shapes = ((S, S), ()) + ns = (1, 2, 3, 4, 5) + + # Since the accepted lower bound for input + # to mvlgamma depends on `p` argument, + # the following function computes the lower bound + # which we pass to `make_tensor`. + def compute_min_val(p): + return (p - 1.) 
/ 2 + + for shape, n in product(tensor_shapes, ns): + min_val = compute_min_val(n) + if not dtype.is_floating_point: + # Round-up minimum value for integral dtypes + min_val += 1 + else: + min_val += 2 * torch.finfo(dtype).eps + yield SampleInput(make_arg(shape, low=min_val), args=(n,)) + + +# Since `mvlgamma` has multiple entries, +# there are multiple common skips for the additional +# entries. Following function is a helper to that end. +def skips_mvlgamma(skip_redundant=False): + skips = ( + # outside domain values are hard error for mvlgamma op. + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_float_domains'), + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_extremal'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.float16, torch.int8)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + dtypes=(torch.int8,)), + ) + if skip_redundant: + # Redundant tests + skips = skips + ( # type: ignore[assignment] + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + ) + return skips + + +# To test reference numerics against multiple values of argument `p`, +# we make multiple OpInfo entries with each entry corresponding to different value of p. +# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing. +def make_mvlgamma_opinfo(variant_test_name, domain, skips, sample_kwargs): + return UnaryUfuncInfo('mvlgamma', + ref=reference_mvlgamma if TEST_SCIPY else None, + aliases=('special.multigammaln',), + variant_test_name=variant_test_name, + domain=domain, + decorators=(precisionOverride({torch.float16: 5e-2}),), + dtypes=all_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_mvlgamma, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=skips, + sample_kwargs=sample_kwargs) + + +def sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs): + def _make_tensor_helper(shape, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + yield SampleInput(_make_tensor_helper((S, S, S)), 0) + yield SampleInput(_make_tensor_helper((S, S, S)), 1) + yield SampleInput(_make_tensor_helper(()), 0) + + if supports_dtype_kwargs: + # NOTE: if `dtype` is not same as input, then inplace variants fail with + # `provided dtype must match the dtype of self tensor in cumsum` + yield SampleInput(_make_tensor_helper((S, S, S)), 1, dtype=dtype) + + +def sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs): + test_cases = ( + ((), (0, 1, 1)), + ((S, S, S, S), (0, 3, 1)), + ((S, S, S, S), (1, 3, 1)), + ((S, S, S, S), (2, 3, 1)), + ((S, S, S, S), (3, 3, 1)), + ((S, S, S, S), (0, 3, 2)), + ((S, S, S, S), (1, 3, 2)), + ((S, S, S, S), (2, 3, 2)), + ((S, S, S, S), (3, 3, 2)), + ((S, S, S, S), (0, 4, 1)), + ((S, S, S, S), (1, 4, 1)), + ((S, S, S, S), (2, 4, 1)), + ((S, S, S, S), (3, 4, 1)), + ((M,), (0, 3, 1)), + ((M,), (0, 3, 2)), + ((M,), (0, 3, 3)), + ((1000,), (0, 3, 11)), + ((1000,), (0, 2, 27)), + ((10, 10), (0, 1, 2)), + ((10, 10), (1, 2, 3)), + ((10, 10), (1, 2, 2)), + ((S, S, S), (2, 3, 
2)), + ) + + for shape, arguments in test_cases: + yield SampleInput(make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad), + *arguments) + +def sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if list_args: + cases = ( + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]),)), + ((S, S, S), (torch.Size([int(S / 2), S - int(S / 2) * 2, int(S / 2)]), 2),), + ((S, S, S), (torch.Size([int(S / 2), S - int(S / 2) * 2, int(S / 2)]), -2),) + ) + else: + cases = ( # type: ignore[assignment] + ((S, S, S), (2,)), + ((S, S, S), (S, 1)), + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = (((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]),)), + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3), 0]),)), + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]), 2)), + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]), -2)), + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_msort(op_info, device, dtype, requires_grad, **kwargs): + def apply_grad(t): + if dtype in floating_types_and(torch.float16, torch.bfloat16): + t.requires_grad_(requires_grad) + + def large_1d_unique(dtype, device): + res = torch.randperm(L * L * L, dtype=torch.int64, device=device) + res = res.to(dtype) + apply_grad(res) + return res + + # Test case for large tensor. + yield SampleInput(large_1d_unique(dtype, device)) + + yield SampleInput(make_tensor((S, M, S), dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad)) + +def sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # no broadcast + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 0.4) + # broadcast rhs + yield SampleInput(make_arg((S, S)), make_arg((S,)), 0.4) + # scalar tensor + yield SampleInput(make_arg(()), make_arg(()), 0.4) + # broadcast rhs scalar-tensor + yield SampleInput(make_arg((S, S)), make_arg(()), 0.4) + # broadcast rhs with weight tensor + yield SampleInput(make_arg((S, S)), make_arg((S,)), make_arg((S, S))) + # broadcast rhs and weight tensor + yield SampleInput(make_arg((S, S)), make_arg((S, 1)), make_arg((S,))) + # broadcast lhs + yield SampleInput(make_arg((S,)), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) + # scalar broadcast_lhs + yield SampleInput(make_arg(()), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) + # broadcast all + yield SampleInput(make_arg((S, 1)), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) + # tensor broadcast all + yield SampleInput(make_arg((S, 1)), make_arg((S, S)), make_arg((S, 1))).with_metadata( + broadcasts_input=True) + # no broadcast with weight tensor + yield SampleInput(make_arg((S, S)), make_arg((S, S)), make_arg((S, S))) + # broadcast lhs with weight tensor + yield SampleInput(make_arg((S,)), make_arg((S, S)), make_arg((S, S))).with_metadata( + broadcasts_input=True) + # broadcast lhs and weight tensor + yield SampleInput(make_arg((S,)), make_arg((S, S, S)), make_arg((S, S))).with_metadata( + 
broadcasts_input=True) + # broadcast lhs and weight tensor variant + yield SampleInput(make_arg((S, S)), make_arg((S, S, S)), make_arg((S,))).with_metadata( + broadcasts_input=True) + + if dtype.is_complex: + # no broadcast + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 0.4j) + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 1.2 + 0.1j) + # broadcast rhs + yield SampleInput(make_arg((S, S)), make_arg((S,)), 0.4j) + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 5.4 + 9j) + # scalar tensor + yield SampleInput(make_arg(()), make_arg(()), 0.4j) + yield SampleInput(make_arg(()), make_arg(()), 6.1 + 0.004j) + # broadcast rhs scalar-tensor + yield SampleInput(make_arg((S, S)), make_arg(()), 0.4j) + yield SampleInput(make_arg((S, S)), make_arg(()), 1 + 2j) + +def sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs): + cases = ( + ((2, 2, 2), (2, 2, 2), (2)), + ((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])), + ) + for first_shape, second_shape, dims in cases: + yield SampleInput(make_tensor(first_shape, dtype=dtype, device=device, + requires_grad=requires_grad), + make_tensor(second_shape, dtype=dtype, device=device, + requires_grad=requires_grad), + dims=dims) + +def sample_inputs_kron(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None) + test_cases = ( + ((S, S), (M, L)), + ) + + for input_shape, other_shape in test_cases: + input = make_arg(input_shape) + other = make_arg(other_shape) + yield SampleInput(input, other) + +def sample_inputs_inner(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(make_arg(S), make_arg(S)) + yield SampleInput(make_arg(), make_arg(S, S)) + +def sample_inputs_scatter(op_info, device, dtype, requires_grad, **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + def _gather(shape, index_dim, max_indices): + return gather_variable(shape, index_dim, max_indices, device=device) + + zero = torch.tensor(0, dtype=torch.long, device=device) + test_cases = ( + (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))), + (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))), + (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))), + (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))), + (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), + (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), + (_tensor(()), (0, zero.clone().detach(), _tensor(()))), + (_tensor(()), (0, zero.clone().detach(), 2.5)), + ) + + for tensor, args in test_cases: + yield SampleInput(tensor, *args) + + if not requires_grad: + yield SampleInput(tensor.clone().detach(), *args, reduce='add') + + if dtype.is_floating_point: + yield SampleInput(tensor.clone().detach(), *args, reduce='multiply') + +def sample_inputs_scatter_add(op_info, device, dtype, requires_grad, **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + def _gather(shape, index_dim, max_indices): + return gather_variable(shape, index_dim, max_indices, device=device) + + zero = torch.tensor(0, dtype=torch.long, device=device) + yield SampleInput(_tensor((M, S)), 0, 
_gather((S, S), 1, M), _tensor((S, S))) + yield SampleInput(_tensor((M, S)), 1, _gather((S, S), 0, S), _tensor((S, S))) + yield SampleInput(_tensor((M, S)), -1, _gather((S, S), 0, S), _tensor((S, S))) + yield SampleInput(_tensor((M, S)), 0, _gather((M, S // 2), 1, M), _tensor((M, S // 2))) + yield SampleInput(_tensor((M, S)), 1, _gather((M, S // 2), 0, S), _tensor((M, S // 2))) + yield SampleInput(_tensor((M, S)), -1, _gather((M, S // 2), 0, S), _tensor((M, S // 2))) + yield SampleInput(_tensor(()), 0, zero.clone().detach(), _tensor(())) + +def sample_inputs_scatter_reduce(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + gather = partial(gather_variable, device=device) + + zero = torch.tensor(0, dtype=torch.long, device=device) + test_cases = ( + ((M, S), 0, gather((S, S), 1, M), (S, S)), + ((M, S), 1, gather((S, S), 0, S), (S, S)), + ((M, S), -1, gather((S, S), 0, S), (S, S)), + ((M, S), 0, gather((M, S // 2), 1, M), (M, S // 2)), + ((M, S), 1, gather((M, S // 2), 0, S), (M, S // 2)), + ((M, S), -1, gather((M, S // 2), 0, S), (M, S // 2)), + ((), 0, zero.clone().detach(), ()), + ) + + reduce = op_info.variant_test_name + for (inp_shape, dim, index, src_shape), include_self in product(test_cases, [False, True, False]): + yield SampleInput(make_arg(inp_shape), + args=(dim, index, make_arg(src_shape), reduce), + kwargs={'include_self': include_self}) + + + # Sample inputs to test edge cases for backward + # Check that gradients are propagated correctly for prod when zeros in self/src are reduced + if requires_grad and reduce == 'prod': + # This sample tests gradients for the following cases + # (a) 1 zero reduced (from src (self[0, 1], self[1, 1]), from self (self[0, 0], self[2, 0])) + # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0]) + # (c) no zeros reduced (self([2, 1])) + # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py + # test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad + input = torch.tensor([[0, 13], [0, 17], [0, 19]], dtype=dtype, device=device, requires_grad=requires_grad) + src = torch.tensor([[0, 1, 2, 3], [0, 4, 0, 1], [2, 3, 5, 6]], dtype=dtype, device=device, requires_grad=requires_grad) + idx = torch.tensor([[1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]], dtype=torch.long, device=device) + + yield SampleInput(input, + args=(1, idx, src, reduce), + kwargs={'include_self': True}) + +def sample_inputs_segment_reduce(op_info, device, dtype, requires_grad, *, mode='lengths', **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + zero = torch.tensor(0, dtype=torch.long, device=device) + test_cases = ( + # inp_shape, dim, lengths, unsafe + ((S,), 0, [0, 1, 2, 2], False), + ((S,), 0, [0, 1, 2, 2], True), + ((S,), 0, [2, 0, 3, 0], False), + ((S, S), 0, [0, 1, 2, 2], False), + # test when lengths do not sum to dim size + ((M, S, S), 0, [1, 2, 0, 6, 0], True), + # test for higher dimensions + ((S, S), 1, [[0, 1, 2, 2] for _ in range(S)], False), + ((S, S), 1, [[2, 0, 3, 0], [0, 1, 2, 2], [3, 0, 2, 0], [1, 1, 1, 2], [0, 1, 2, 2]], False), + ((S, S, S), 1, [[0, 1, 2, 2] for _ in range(S)], False), + ((S, S, S), 1, [[2, 0, 3, 0], [0, 1, 2, 2], [3, 0, 2, 0], [1, 1, 1, 2], [0, 1, 2, 2]], False), + ) + + reductions = ["max", "mean", "min", "sum", "prod"] + for args, reduce, initial in product(test_cases, 
reductions, [1, 2]): + inp_shape, dim, lengths, unsafe = args + lengths_t = torch.tensor(lengths, dtype=torch.long, device=device) + sample_input_kwargs = {'axis': dim, 'unsafe': unsafe, 'initial': initial} + if mode == 'lengths': + sample_input_kwargs['lengths'] = lengths_t + elif mode == 'offsets': + zeros_shape = list(lengths_t.shape) + zeros_shape[dim] = 1 + offsets_t = torch.cat((lengths_t.new_zeros(zeros_shape), lengths_t), dim).cumsum_(dim) + sample_input_kwargs['offsets'] = offsets_t + else: + raise RuntimeError(f"mode must be one of 'offsets' or 'lengths', got '{mode}'.") + yield SampleInput(_tensor(inp_shape), + args=(reduce,), + kwargs=sample_input_kwargs) + + +def sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg((S, S, S))) + yield SampleInput(make_arg(())) + yield SampleInput(make_arg((S, S, S), noncontiguous=True)) + +def sample_inputs_unravel_index(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput( + torch.tensor( + [[3, 8, 13], [0, 5, 10]], + device=device, + dtype=dtype), + (4, 5)) + yield SampleInput( + torch.tensor([[3, 8, 13], [0, 5, 10]], device=device, dtype=dtype), + (4, 2**30)) + yield SampleInput( + torch.tensor([[3, 8, 13], [0, 5, 10]], device=device, dtype=dtype), + (2**30, 4)) + yield SampleInput( + torch.tensor(2, device=device, dtype=dtype), + (2, 2)) + max_val = 2**(8 * dtype.itemsize - (1 if dtype.is_signed else 0)) - 1 + yield SampleInput( + torch.tensor(max_val - 1, device=device, dtype=dtype), + (1, max_val)) + yield SampleInput( + torch.tensor([22, 41, 37], device=device, dtype=dtype), + (7, 6)) + yield SampleInput( + torch.tensor(min(1621, max_val), device=device, dtype=dtype), + (6, 7, 8, 9)) + yield SampleInput( + torch.tensor([], device=device, dtype=dtype), + (10, 3, 5)) + yield SampleInput( + torch.tensor( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], + device=device, + dtype=dtype), + (5, 8)) + yield SampleInput( + torch.tensor( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], + device=device, + dtype=dtype), + (5, 8, 10)) + yield SampleInput( + torch.tensor(0, device=device, dtype=dtype), + ()) + + a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]]) + b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]]) + _, i1, i2 = np.intersect1d(a, b, assume_unique=True, return_indices=True) + yield SampleInput(torch.tensor(i1, device=device, dtype=dtype), a.shape) + yield SampleInput(torch.tensor(i2, device=device, dtype=dtype), b.shape) + + a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]]) + b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]]) + _, i1, i2 = np.intersect1d(a, b, return_indices=True) + yield SampleInput(torch.tensor(i1, device=device, dtype=dtype), a.shape) + yield SampleInput(torch.tensor(i2, device=device, dtype=dtype), b.shape) + + +def sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + cases = (((M, M), ()), + ((M, M), (2,),), + ((M, S), ()), + ((M, S), (-1,)), + ((M, M), (2,),), + ((S, M, S), ()), + ((S, M, S), (2,)), + ((3, 3, S, S), ()),) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + +def error_inputs_tril_triu(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + +
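+ # tril/triu accept 2-D (and higher-dimensional) inputs; only tensors with fewer than 2 dimensions raise, as exercised below.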
# error inputs for input.ndim <= 2 + yield ErrorInput(SampleInput(make_arg((4,))), error_regex="input tensor must have at least 2 dimensions") + +def sample_inputs_trilu_indices(op_info, device, dtype, requires_grad, **kwargs): + # (row, col, offset) + args_list = ((0, 0), + (20, 0), + (0, 20), + (20, 21, 0), + (20, 21, 7), + (20, 21, -7), + # Large test cases below are deliberately commented out to speed up CI + # tests and to avoid OOM error. When modifying implementations of + # tril_indices and triu_indices, please enable these tests and make sure + # they pass. + # (2, 68435455, 3), + # (5000, 5000), + # (5000, 5000, 1234), + # (5000, 5000, -1233), + ) + for args in args_list: + yield SampleInput(args[0], args=args[1:], kwargs={"dtype": dtype, "device": device}) + +def sample_inputs_clone_contiguous(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, M, S))) + yield SampleInput(make_arg(())) + +def reference_inputs_clone_contiguous(op, device, dtype, requires_grad, **kwargs): + # NOTE: the default memory format for clone is torch.preserve_format, for contiguous it's torch.contiguous_format + # This exploits that default to test torch.preserve_format for clone, without causing an error when testing contiguous + yield from sample_inputs_clone_contiguous(op, device, dtype, requires_grad, **kwargs) + + shapes = ( + (3, 5, 6), + (1, 1, 3, 5, 6), + (1, 1, 3, 5, 6, 1, 1), + (1, 0, 3, 5, 0, 2), + (1, 0, 3, 5, 0, 0, 1, 1, 2), + (), + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes: + yield SampleInput(make_arg(shape)) + yield SampleInput(make_arg(shape).transpose(0, -1)) + yield SampleInput(make_arg(shape, noncontiguous=True)) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1)) + + yield SampleInput(make_arg(shape), kwargs={'memory_format': torch.contiguous_format}) + yield SampleInput(make_arg(shape).transpose(0, -1), kwargs={'memory_format': torch.contiguous_format}) + yield SampleInput(make_arg(shape, noncontiguous=True), kwargs={'memory_format': torch.contiguous_format}) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), kwargs={'memory_format': torch.contiguous_format}) + + # shape, strides, offset + strided_cases = ( + ((5, 6, 2), (1, 1, 7), 2), + ((5, 5, 4), (1, 1, 7), 2), + ((5, 5, 2), (4, 5, 7), 3), + ((5, 5, 2), (5, 5, 7), 3), + ((5, 5, 2), (5, 5, 5), 3), + ((9, 5, 2), (0, 1, 7), 3), + ) + + for shape, strides, offset in strided_cases: + yield SampleInput(make_arg(500,).as_strided(shape, strides, offset)) + yield SampleInput(make_arg(500,).as_strided(shape, strides, offset), kwargs={'memory_format': torch.contiguous_format}) + + # channels last 2D + yield SampleInput(make_arg((2, 2, 2, 2)), kwargs={'memory_format': torch.channels_last}) + a = make_arg((2, 2, 2, 2)).permute(0, 3, 1, 2) + yield SampleInput(a, kwargs={'memory_format': torch.channels_last}) + + # channels last 3D + yield SampleInput(make_arg((2, 2, 2, 2, 2)), kwargs={'memory_format': torch.channels_last_3d}) + a = make_arg((2, 2, 2, 2, 2)).permute(0, 4, 1, 2, 3) + yield SampleInput(a, kwargs={'memory_format': torch.channels_last_3d}) + + +def sample_inputs_sum_to_size(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # list of tuples (shape, shape) defining the shapes of the input and output 
tensors + sample_shapes = [ + ((), ()), + ((S,), (1,)), + ((S, S), (1, 1)), + ((S, S), (1, S)), + ((S, S), (S, S)), + ((S, S, S), (S, 1, S)), + ] + + for input_shape, output_shape in sample_shapes: + yield SampleInput(make_arg(input_shape), args=(output_shape,)) + if output_shape == (): + continue + yield SampleInput(make_arg(input_shape), args=(list(output_shape),)) + yield SampleInput(make_arg(input_shape), args=(*output_shape,)) + + +def error_inputs_sum_to_size(op_info, device, **kwargs): + shape = (M, S, M) + err_msg = "is not expandable to size" + si = SampleInput(make_tensor(shape, device=device, dtype=torch.float32), args=(M, M)) + yield ErrorInput(si, error_regex=err_msg) + + shape = (M + 1, S, S, M) + err_msg = "is not expandable to size" + si = SampleInput(make_tensor(shape, device=device, dtype=torch.float32), args=(M + 1, 1)) + yield ErrorInput(si, error_regex=err_msg) + + +def sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device) + cases = (((S, S, S), (S * S, S)), + ((), ()), + ((), (1, 1, 1)), + ) + + for shape, args_or_shape in cases: + # Update `args` based on operator + if op_info.name == 'resize_': + # resize_ takes shape/tuple of ints, + args = (args_or_shape, ) + elif op_info.name == 'resize_as_': + # resize_as_ takes another tensor + args = (make_arg(shape, requires_grad=False), ) # type:ignore[assignment] + else: + raise ValueError("sample_inputs_resize_ops is being used with incorrect operator") + + yield SampleInput(make_arg(shape, requires_grad=requires_grad), args=args) + +def sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = ( + # a, b, is_tensor_supported + ((S, S, S), (S * S, S), True), + ((S * S, S), (S, S, S), True), + ((S * S, S), (S, -1, S), False), # neg index + ((S * S * 2, S), (S, -1), False), # neg index + ((S,), (S,), True), + ((), (), False), # empty + ((), (1,), True), + ) + + for a, b, is_tensor_supported in cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + # convert to tensor + if kwargs.get("tensor_arg"): + b = make_arg(b, requires_grad=False) + + yield SampleInput(make_arg(a), args=(b,)) + +def reference_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs) + + cases = ( + # a, b, is_tensor_supported + ((125,), (25, 5), True), + ((25, 25), (1, 5, 5, 1, 5, 1, 5, 1), True), + ((16, 32), (2, 4, 1, 4, 4, 1, 4), True), + ((16, 12), (12, 16), True), + ((1, 16, 12), (12, 16), True), + ((1, 5, 1, 5), (25, 1), True), + ((2, 4, 2), (4, 4), True), + ((1, 4), (1, 1, 2, 1, 2), True), + ((3, 5, 7), (7, 5, 3), True), + ((1,), (), False), # empty + ((5, 0, 2, 3), (5, 0, 2, 3), True), + ((2, 1, 0, 3, 1), (5, 0), True), + ((1,), (), False), # empty + ((4, 5, 6), (4, 5, 6, 1, 1, 1), True), + ((), (1, 1, 1, 1), False), # empty + ) + + irreversible_cases = ( + ((), (-1,), False), # neg index, empty + ((4, 7, 9, 1, 1), (1, 4, 3, -1, 1), False), # neg index + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for a, b, is_tensor_supported in cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + if kwargs.get("tensor_arg"): + # convert to tensor + yield SampleInput(make_arg(a), args=(make_arg(b, 
requires_grad=False),)) + yield SampleInput(make_arg(b), args=(make_arg(a, requires_grad=False),)) + else: + yield SampleInput(make_arg(a), args=(b,)) + yield SampleInput(make_arg(b), args=(a,)) + + for a, b, is_tensor_supported in irreversible_cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + # convert to tensor + if kwargs.get("tensor_arg"): + b = make_arg(b, requires_grad=False) + + yield SampleInput(make_arg(a), args=(b,)) + +def error_inputs_view_reshape(op, device, **kwargs): + + cases = ( + # a, b, is_tensor_supported + # Reshape to different numel + ((2,), (), False), # empty + ((1, 3, 0), (), False), # empty + ((4, 3), (4, 2), True), + ((1, 3, 5), (5, 2, 2), True), + # No valid inference + ((1, 3, 5), (5, -1, 2), False), # neg index + # Two inferred shapes + ((1, 3, 5), (5, -1, -1), False), # neg index + ((1), (0, -1), False), # neg index + ((0, 5), (0, -1), False), # neg index + ) + + make_arg = partial(make_tensor, dtype=torch.float32, device=device, requires_grad=False) + for a, b, is_tensor_supported in cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + if b == (5, -1, -1): + error_regex = "only one dimension can be inferred" + elif a == (0, 5): + error_regex = (r"cannot reshape tensor of 0 elements into shape " + r"\[0, -1\] because the unspecified dimension size " + r"-1 can be any value and is ambiguous") + else: + # to avoid having issues with a regex + shape = ', '.join(map(str, b)) + size = a if type(a) is int else functools.reduce(operator.mul, a, 1) + error_regex = rf"shape '\[{shape}\]' is invalid for input of size {size}" + + # convert to tensor + if kwargs.get("tensor_arg"): + b = make_arg(b, requires_grad=False) + + yield ErrorInput(SampleInput(make_arg(a), args=(b,)), error_type=Exception, + error_regex=error_regex) + + +def sample_inputs_atleast1d2d3d(op_info, device, dtype, requires_grad, **kwargs): + input_list = [] + shapes = ((S, S, S, S), (S, S, S), (S, S), (S, ), (),) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes: + yield SampleInput(make_tensor_partial(shape)) + yield SampleInput([make_tensor_partial(shape) for shape in shapes]) + +def sample_inputs_column_stack(op_info, device, dtype, requires_grad, **kwargs): + cases: Tuple[tuple, tuple] = ( # type: ignore[assignment] + ((S, 2, 1), (S, 3, 1)), + ((S), (S, 5)), ((), (1, S)) + ) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape1, shape2 in cases: + yield SampleInput([make_tensor_partial(shape1), make_tensor_partial(shape2)]) + +def sample_inputs_flatten(op_info, device, dtype, requires_grad, **kwargs): + shapes = ((S, S, S), (S, S), (S, ), (),) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes: + yield SampleInput(make_tensor_partial(shape)) + if len(shape) > 1: + yield SampleInput(make_tensor_partial(shape), start_dim=1, end_dim=-1) + +def reference_inputs_flatten(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_flatten(op, device, dtype, requires_grad, **kwargs) + + # shape x start_dim x end_dim + cases = ( + ((5, 4, 0, 1, 3, 7), 1, 3), + ((5, 4, 0, 1, 3, 7), 4, 5), + ((5, 4, 1, 1, 3, 7), 2, 3), + ((), 0, -1), + ((1,), 0, -1), + ((3, 7, 5), 1, 2), + ((4, 5), 1, 1), + ((1, 5, 5, 1, 5, 1, 5, 1), 0, 2), + ((1, 5, 5, 1, 5, 1, 5, 1), 3, -1), + ((1, 5, 5, 1, 5, 
7, 5, 1), -2, -1), + ((2, 4, 2), 0, 1), + ((4, 2, 2), 1, 2), + ((0, 3, 4, 5), 1, 3), + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape, start, end in cases: + yield SampleInput(make_arg(shape), args=(start, end,)) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), args=(start, end,)) + yield SampleInput(make_arg(shape).transpose(0, -1), args=(start, end,)) + +def sample_inputs_unflatten(op_info, device, dtype, requires_grad, **kwargs): + # in_shape, dim, sizes + args = (((8,), 0, (8,)), + ((8,), 0, (4, 2)), + ((8,), -1, (2, 2, 2)), + ((8,), -1, (-1, 2)), + ((3, 6, 2), 1, (2, 3)), + ((3, 6, 2), -2, (2, 3)), + ((3, 6, 2), -2, (-1, 3)), + ((3, 2, 12), 2, (3, 2, 2)), + ((4, 0), 0, (2, 2)), + ((4, 0), 1, (2, 0, 0, 0)), + ) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for in_shape, dim, sizes in args: + yield SampleInput(make_tensor_partial(in_shape), args=(dim, sizes)) + + +def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, S, S), (1, 2)), + ((S, S, S), (-1, 2)), + ((S, S, S), (-1, -1)), + ((S, S, S), (1, -1)), + ((S,), (0, 2)) + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_select_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, S, S), (S, S), (1, 2)), + ((S, S, S), (S, S), (-1, 2)), + ((S, S, S), (S, S), (-1, -1)), + ((S, S, S), (S, S), (1, -1)), + ((S,), (), (0, 2)) + ) + + for input_shape, src_shape, args in cases: + input_ = make_arg(input_shape) + src = make_arg(src_shape) + yield SampleInput(input_, args=(src, *args)) + + +def sample_inputs_slice_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((L, L, L), (L, L, L,), (0, 0, L, 1)), + ((L, L, L), (L // 2, L, L,), (0, L // 2, L, 1)), + ((L, L, L), (L // 4, L, L,), (0, L // 2, L, 2)), + ((L, L, L), (L, L, L,), (1, 0, L, 1)), + ((L, L, L), (L, L // 2, L,), (1, L // 2, L, 1)), + ((L, L, L), (L, L // 4, L,), (1, L // 2, L, 2)), + ((L, L, L), (L, L, L,), (2, 0, L, 1)), + ((L, L, L), (L, L, L // 2,), (2, L // 2, L, 1)), + ((L, L, L), (L, L, L // 4,), (2, L // 2, L, 2)), + ) + + for input_shape, src_shape, args in cases: + input_ = make_arg(input_shape) + src = make_arg(src_shape) + yield SampleInput(input_, args=(src, *args)) + +def sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, 1, 1), (S, S, S)), + ((S, 1, S), (S, S, S)), + ((S, 1, S), (-1, S, -1)), + ((S, 1, S), (-1, S, S)), + ((S, 1), (S, S, S)), + ((1,), (S, S, S)), + ((1, S), (1, 1, S)), + ((), ()), + ((), (1, 3, 2)), + ) + + for case in cases: + shape, args = case + yield SampleInput(make_arg(shape), args=(args,)) + +def sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + shapes = ((), + (2, 3)) + memory_format_options = [None, torch.contiguous_format] + + for shape, memory_format in itertools.product(shapes, memory_format_options): + yield SampleInput(make_arg(shape), + 
kwargs={'memory_format': memory_format} if memory_format else {}) + yield SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last}) + +def sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device) + + cases = (((S, 1, 1), (S, S, S)), + ((), ()), + ((), (1, 1)), + ) + + for shape, shape_other in cases: + yield SampleInput(make_arg(shape, requires_grad=requires_grad), + args=(make_arg(shape_other, requires_grad=False),)) + + +def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + def make_bool_mask(shape): + # Make sure atleast one element is nonzero, + # except for empty tensor + mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) + + if mask_t.numel() == 0: + return mask_t + elif mask_t.numel() == 1: + mask_t.fill_(True) + return mask_t + + if mask_t.sum() == 0: + def random_index(shape): + return tuple(random.randrange(0, max_idx) for max_idx in shape) + + mask_t[random_index(mask_t.shape)] = True + return mask_t + + return mask_t + + cases = (((M, M), (M, M), (M, M), False), + ((M, 1, M), (M, M), (M, M, 1), True), + ((), (), (), False), + ((M, 1, M), (), (M, M, 1), True), + ((), (M, M), (), True), + ((), (2), (1, 1), True), + ) + + for shape, mask_shape, other_shape, broadcasts_input in cases: + yield SampleInput(make_arg(shape), + args=(make_bool_mask(mask_shape), make_arg(other_shape)), + broadcasts_input=broadcasts_input) + +# TODO: add reference inputs for where(condition) signature +def reference_inputs_where(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_where(op, device, dtype, requires_grad, **kwargs) + + make_cond = partial(make_tensor, dtype=torch.bool, device=device, requires_grad=requires_grad) + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # noncontiguous + c = make_cond((10, 3), noncontiguous=True) + a = make_arg((10, 1), noncontiguous=True) + b = make_arg((3, 10, 3)).transpose(0, -1) + + # NOTE that the OpInfo for where takes samples of the form a, cond, b + yield SampleInput(a, args=(c, b)) + + # type promoting + other_dtype = torch.double if dtype is not torch.double else torch.long + c = make_cond((10, 3), noncontiguous=True) + a = make_arg((10, 1), dtype=torch.long) + b = make_arg((10, 1)) + + yield SampleInput(a, args=(c, b)) + + # two python scalars + c = make_cond((10, 3), noncontiguous=True) + a = make_arg((1,)).item() + b = make_arg((1,)).item() + + yield SampleInput(a, args=(c, b)) + + # NaN propagation + if dtype.is_floating_point or dtype.is_complex: + if dtype.is_floating_point: + nan = float('nan') + else: + # dtype.is_complex + nan = complex(float('nan'), float('nan')) + c = make_cond((1, 10, 3)) + a = make_arg((10, 3), noncontiguous=True) + a[2, 1] = nan + b = make_arg((1, 3)) + b[0, 2] = nan + + yield SampleInput(a, args=(c, b)) + + # Python scalars type promotion + for scalar in (0, 0.0, 2j, False): + yield SampleInput(scalar, args=(c, b)) + yield SampleInput(a, args=(c, scalar)) + + +def error_inputs_where(op_info, device, **kwargs): + shape = (S,) + err_msg = "Expected all tensors to be on the same device" + for devices in product(('cpu', device), repeat=3): + if len(set(devices)) == 2: + si = SampleInput(make_tensor(shape, device=devices[0], dtype=torch.float32), + args=(make_tensor(shape, dtype=torch.bool, device=devices[1]), + 
make_tensor(shape, device=devices[2], dtype=torch.float32))) + yield ErrorInput(si, error_regex=err_msg) + +def sample_inputs_nonzero(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + inputs = [] + for shape in sizes: + # construct input without any non-zero elements + zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) + inputs.append(zeros) + + # construct input with mixed zero and non-zero elements + mixed = make_arg(shape).requires_grad_(False) + mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) + mixed[mask_t] = 0 + inputs.append(mixed) + + for input_t, as_tuple in product(inputs, [False, True]): + yield SampleInput(input_t.clone().requires_grad_(requires_grad), + kwargs=dict(as_tuple=as_tuple)) + +def sample_inputs_nonzero_static(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + inputs = [] + for shape in sizes: + # construct input without any non-zero elements + zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) + inputs.append(zeros) + + # construct input with mixed zero and non-zero elements + mixed = make_arg(shape).requires_grad_(False) + mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) + mixed[mask_t] = 0 + inputs.append(mixed) + + nonzero_sizes = [0, 1, XS, S, M] + + for input_t, nonzero_size in product(inputs, nonzero_sizes): + yield SampleInput(input_t.clone().requires_grad_(requires_grad), + kwargs=dict(size=nonzero_size)) + +def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, S, S), (2,)), + ((S, S, S), (S, 1)), + ((S, S, S), (S, -1))) + + for case in cases: + shape, args = case + yield SampleInput(make_arg(shape), args=args) + +def reference_inputs_chunk(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_chunk(op, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # shape x chunks x dim + cases = ( + ((13, 9, 11), 17, -1), + ((13, 9, 11), 11, -1), + ((13,), 12, -1), + ((15,), 12, -1), + ((15,), 7, 0), + ((15,), 9, 0), + ((3, 7), 9, 1), + ((3, 7), 9, 0), + ((3, 7), 2, 0), + ((3, 7), 3, 0), + ((3, 7), 1, 0), + ((3, 7), 1, 1), + ((4, 4), 2, 0), + ) + + for shape, chunks, dim in cases: + yield SampleInput(make_arg(shape), args=(chunks, dim)) + +def sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + test_cases = [ + ((S, S, S), (2,)), + ((S, S, S), (2, 1,)), + ((S, S, S), (2, -1,)), + ((S, S, S), (2, 1, True,)), + ((S, S, S), (2, -1, True,)), + ((S,), (2, 0,)), + ((S,), (2, 0, True,)), + ((), (1,)), + ((), (1, 0,)), + ((), (1, 0, True)), + ] + + yield from (SampleInput(_tensor(tensor), *args) for tensor, args in test_cases) + +def error_inputs_kthvalue(op_info, device, **kwargs): + # tests overlapping output fails + t = make_tensor(10, dtype=torch.float32, device=device) + indices = torch.empty((), device=device, 
dtype=torch.long) + yield ErrorInput(SampleInput(t, 5, out=(t, indices)), + error_regex="unsupported operation") + + k_out_of_range_err = "selected number k out of range for dimension" + yield ErrorInput(SampleInput(torch.randn(2, 2, device=device), 3, 0), + error_regex=k_out_of_range_err) + yield ErrorInput(SampleInput(torch.randn(2, 2, device=device), 3), + error_regex=k_out_of_range_err) + yield ErrorInput(SampleInput(torch.tensor(2, device=device), 3), + error_regex=k_out_of_range_err) + +def sample_inputs_dropout(op_info, device, dtype, requires_grad, *, + train=None, valid_input_dim=None, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if valid_input_dim: + cases = ((S,) * i for i in valid_input_dim) + else: + cases = ((S, S), (S,), ()) + p_vals = [0.0, 0.5, 1.0] + # This is to handle special case for feature_alpha_dropout which has different + # supported dtypes depending on `train` parameter + training_vals = [train] if train is not None else [True, False] + + for case, p, training in product(cases, p_vals, training_vals): + yield SampleInput(make_arg(case), p=p, training=training) + yield SampleInput(make_arg(case)) + +def sample_inputs_dropout_backward(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_mask = partial(make_tensor, device=device, dtype=torch.bool, requires_grad=False) + + cases = ((S, S, S, S), (S,), ()) + scale_vals = [0.0, 1.0, 2.0] + + for case, scale in product(cases, scale_vals): + yield SampleInput(make_arg(case), make_mask(case), scale) + +def sample_inputs_embedding_bag(op_info, device, dtype, requires_grad, **kwargs): + def make_input(shape): + return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_long_input(shape, *, low, high, noncontiguous=False): + return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high, + noncontiguous=noncontiguous) + + def make_per_sample_weight(flag, idx): + # a tensor of float / double weights, or None + # to indicate all weights should be taken to be 1 + if flag: + return make_input(idx.shape) + return None + + offsets = torch.tensor([0, 3], device=device, dtype=torch.long) + for generate_per_sample_weight in (True, False): + for mode in ('sum', 'mean', 'max'): + # per_sample_weights is only supported for mode='sum' (got mode='****') + if generate_per_sample_weight and mode in ('mean', 'max'): + continue + + # 1-D index tensor + idx = make_long_input((S,), low=0, high=M) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'offsets': offsets, 'mode': mode, + 'per_sample_weights': per_sample_weights}) + + idx = make_long_input((S,), low=0, high=M, noncontiguous=True) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'offsets': offsets, 'mode': mode, + 'per_sample_weights': per_sample_weights}) + + # bag with zero length + idx = make_long_input((S,), low=0, high=M, noncontiguous=True) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'offsets': torch.tensor([0, 0, 3], device=device, dtype=torch.long), + 'mode': mode, + 'per_sample_weights': per_sample_weights}) + + # 2-D index tensor + idx = make_long_input((S, S), low=0, high=M) + 
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'mode': mode, 'per_sample_weights': per_sample_weights}) + + idx = make_long_input((S, S), low=0, high=M, noncontiguous=True) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'mode': mode, 'per_sample_weights': per_sample_weights}) + + # The gradient vector at `padding_idx` is not updated. + # Negative padding_idx + idx = make_long_input((6,), low=0, high=S) + idx[0] = 4 + idx[4] = 4 + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((S, S)), args=(idx,), + kwargs={'padding_idx': -1, 'offsets': offsets, + 'mode': mode, 'per_sample_weights': per_sample_weights},) + + idx = make_long_input((3, 3), low=0, high=S) + # Positive padding_idx + idx[0, 0] = 2 + idx[1, 1] = 2 + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((S, S)), args=(idx,), + kwargs={'padding_idx': 2, 'mode': mode, + 'per_sample_weights': per_sample_weights},) + + idx = make_long_input((6, ), low=0, high=S) + weights = make_input((S, S)) + offsets_ = torch.tensor([0, 3, 6], device=device, dtype=torch.long) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'mode': mode, 'offsets': offsets_, 'include_last_offset': True},) + + if not requires_grad: + # Following inputs return different gradient from the numerical gradient. + # This is expected and relevant tests are present in `test_nn.py`. + + # Due to inplace renorming of weight, the numerical gradient doesn't match the + # analytical gradient. + idx = make_long_input((2, 2), low=0, high=S) + weights = make_input((S, S)) * 2 + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'max_norm': 1., 'mode': mode, + 'per_sample_weights': per_sample_weights},) + + idx = make_long_input((6, ), low=0, high=S) + weights = make_input((S, S)) * 2 + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'max_norm': 1., 'norm_type': 1.0, + 'mode': mode, 'offsets': offsets, + 'per_sample_weights': per_sample_weights},) + + if mode != 'max': + # Scale the gradient based on the inverse frequency of a particular index. + # Note : smax mode does not support sparse weights + idx = make_long_input((2, 2), low=0, high=S) + idx[0, 0] = 1 + idx[0, 1] = 1 + weights = make_input((S, S)) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'scale_grad_by_freq': True, 'mode': mode, + 'per_sample_weights': per_sample_weights},) + + # gradcheck not implemented for sparse tensors. 
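+ # (these sparse samples are only produced when requires_grad is False, since gradcheck cannot handle
+ # the sparse gradients they create)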
+ # Note : max mode does not support sparse weights + idx = make_long_input((6, ), low=0, high=S) + weights = make_input((S, S)) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'sparse': True, 'offsets': offsets, + 'mode': mode, 'per_sample_weights': per_sample_weights}) + + idx = make_long_input((6, ), low=0, high=S) + idx[0] = 1 # freq more than 1 + idx[1] = 1 # freq more than 1 + idx[3] = 0 # padding_idx + weights = make_input((S, S)) * 2 + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0, + 'max_norm': 1., 'offsets': offsets, + 'mode': mode, 'per_sample_weights': per_sample_weights}) + + +def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs): + def make_input(shape): + return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_long_input(shape, *, low, high): + return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high) + + # 0-D index tensor + idx = make_long_input((), low=0, high=M) + yield SampleInput(make_input((M, S)), args=(idx,),) + + # 1-D index tensor + idx = make_long_input((S,), low=0, high=M) + yield SampleInput(make_input((M, S)), args=(idx,),) + + # 2-D index tensor + idx = make_long_input((S, S), low=0, high=M) + yield SampleInput(make_input((M, S)), args=(idx,),) + + if not requires_grad: + # Following inputs return different gradient from the numerical gradient. + # This is expected and relevant tests are present in `test_nn.py`. + + # The gradient vector at `padding_idx` is not updated. + idx = make_long_input((2, 2), low=0, high=S) + idx[0, 0] = 2 + idx[1, 1] = 2 + yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},) + + idx = make_long_input((2, 2), low=0, high=S) + idx[0, 0] = 4 + idx[1, 1] = 4 + yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},) + + # Due to inplace renorming of weight, the numerical gradient doesn't match the + # analytical gradient. + idx = make_long_input((2, 2), low=0, high=S) + weights = make_input((S, S)) * 2 + yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1.},) + + idx = make_long_input((2, 2), low=0, high=S) + weights = make_input((S, S)) * 2 + yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0},) + + # Scale the gradient based on the inverse frequency of a particular index. + idx = make_long_input((2, 2), low=0, high=S) + idx[0, 0] = 1 + idx[0, 1] = 1 + weights = make_input((S, S)) + yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},) + + # gradcheck not implemented for sparse tensors. 
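# --- Editor's illustration (not part of the upstream diff) ---------------------
# With sparse=True the gradient w.r.t. the weight matrix is a sparse tensor, which
# is why the sparse samples are only emitted when requires_grad is False
# (gradcheck only handles dense gradients). Minimal sketch:
import torch
import torch.nn.functional as F

weight = torch.randn(5, 3, requires_grad=True)
out = F.embedding(torch.tensor([0, 1, 1, 4]), weight, sparse=True)
out.sum().backward()
print(weight.grad.is_sparse)  # True
# --------------------------------------------------------------------------------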
+ idx = make_long_input((2, 2), low=0, high=S) + weights = make_input((S, S)) + yield SampleInput(weights, args=(idx,), kwargs={'sparse': True}) + + idx = make_long_input((3, 3), low=0, high=S) + idx[0, 0] = 1 # freq more than 1 + idx[0, 1] = 1 # freq more than 1 + idx[1, 0] = 0 # padding_idx + weights = make_input((S, S)) * 2 + yield SampleInput(weights, args=(idx,), + kwargs={'sparse': True, 'scale_grad_by_freq': True, + 'padding_idx': 0, 'max_norm': 1.}) + + +def sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs): + def make_input(shape, *, low, high): + return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad) + + shapes = ((), (S,), (L, M, S)) + num_classess = (-1, 10) + + return ( + SampleInput( + make_input( + shape, + low=0, + high=10 if num_classes == -1 else num_classes // 2, + ), + kwargs=dict(num_classes=num_classes), + ) + for shape, num_classes in itertools.product(shapes, num_classess) + ) + + +def sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs): + rhs_requires_grad = kwargs.get('rhs_requires_grad', requires_grad) + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Although most losses also support the reduce and size_average combination instead of reduce, the former is + # deprecated since 0.4.1 and thus is not tested + shapes_and_kwargs = ( + ((), None), + ((S,), dict(reduction="mean")), + ((S,), dict(reduction="sum")), + ((S,), dict(reduction="none")), + ((S, S), None), + ((S, S, S), None), + ) + + for shape, kwargs in shapes_and_kwargs: + yield SampleInput(_make_tensor(shape), + args=(_make_tensor(shape, requires_grad=rhs_requires_grad),), + kwargs=kwargs) + +def sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs): + # We get better tests if we change the range of the values to something like [-2,2] + # because for grid (second tensor argument) the "useful" range is [-1,1] and this way + # you get a better combination of out-of-range and in-range test cases + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, + low=-2, high=2) + + batch_size = 2 + num_channels = 3 + modes = ("bilinear", "nearest") + align_cornerss = (False, True) + padding_modes = ("zeros", "border", "reflection") + + for dim in (2, 3): + + modes_ = (*modes, "bicubic") if dim == 2 else modes + + for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss): + yield SampleInput( + _make_tensor((batch_size, num_channels, *[S] * dim)), + _make_tensor((batch_size, *[S] * dim, dim)), + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + ) + +def reference_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs): + + batch_size = 2 + num_channels = 3 + height = 345 + width = 456 + modes = ("bilinear", "nearest", "bicubic") + align_cornerss = (False, True) + padding_modes = ('zeros', 'border', 'reflection') + + # Create an affine transformation matrix + a = torch.deg2rad(torch.tensor(45.0)) + ca, sa = torch.cos(a), torch.sin(a) # rotation angles + s1, s2 = 1.23, 1.34 # scales + + theta = torch.tensor([[ + [ca / s1, sa, 0.0], + [-sa, ca / s2, 0.0], + ]], dtype=dtype, device=device) + theta = theta.expand(batch_size, 2, 3).contiguous() + + x = torch.arange(batch_size * num_channels * height * width, device=device) + x = x.reshape(batch_size, num_channels, height, width).to(torch.uint8) + x = x.to(dtype=dtype) + x.requires_grad_(requires_grad) + + for 
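# --- Editor's illustration (not part of the upstream diff) ---------------------
# The grid argument of grid_sample uses normalized coordinates in [-1, 1], which is
# why the samplers above draw values from [-2, 2] to mix in-range and out-of-range
# locations. An identity theta fed through affine_grid reproduces the input
# (arbitrary values, default align_corners handling assumed):
import torch
import torch.nn.functional as F

x = torch.arange(16.).reshape(1, 1, 4, 4)
theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]])   # identity affine transform
grid = F.affine_grid(theta, size=(1, 1, 4, 4), align_corners=False)
y = F.grid_sample(x, grid, mode='bilinear', align_corners=False)
print(torch.allclose(x, y))  # True
# --------------------------------------------------------------------------------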
mode, padding_mode, align_corners in itertools.product(modes, padding_modes, align_cornerss): + grid = torch.nn.functional.affine_grid( + theta, size=(batch_size, num_channels, height, width), align_corners=align_corners + ) + yield SampleInput( + x, + grid, + mode, + padding_mode, + align_corners, + ) + +def sample_inputs_grid_sampler_2d(op_info, device, dtype, requires_grad, **kwargs): + # We get better tests if we change the range of the values to something like [-2,2] + # because for grid (second tensor argument) the "useful" range is [-1,1] and this way + # you get a better combination of out-of-range and in-range test cases + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, + low=-2, high=2) + + batch_size = 2 + num_channels = 3 + modes = (0, 1, 2) + align_cornerss = (False, True) + padding_modes = (0, 1, 2) + + for mode, padding_mode, align_corners in itertools.product(modes, padding_modes, align_cornerss): + yield SampleInput( + _make_tensor((batch_size, num_channels, S, L)), + _make_tensor((batch_size, M + 3, M, 2)), + mode, + padding_mode, + align_corners, + ) + +def sample_inputs_cosine_embedding_loss(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_target(shape): + shape = () if len(shape) == 1 else (shape[0], ) + t = torch.randint(0, 2, shape, device=device, dtype=torch.long) + # Label with -1 or 1 + t = t * 2 - 1 + target = t.to(dtype=dtype).detach_().requires_grad_(requires_grad) + return target + + shapes = ((S, S), (S,)) + reductions = ('none', 'mean', 'sum') + for s, r in product(shapes, reductions): + yield SampleInput( + make_input(s), + args=(make_input(s), make_target(s)), + kwargs=dict(reduction=r, margin=random.uniform(-1, 1)) + ) + +def sample_inputs_ctc_loss(op_info, device, dtype, requires_grad, **kwargs): + input_length = 50 + batch = 16 + num_char = 20 + target_length = 30 + + def make_log_probs(s): + t = make_tensor(s, device=device, dtype=dtype) + log_probs = t.log_softmax(2).to(device=device, dtype=dtype).detach().requires_grad_(requires_grad=requires_grad) + return log_probs + + reductions = ('none', 'mean', 'sum') + zero_inf = (True, False) + lengths_type = (list, torch.Tensor) + for r, z, lt in product(reductions, zero_inf, lengths_type): + log_probs = make_log_probs((input_length, batch, num_char)) + targets = torch.randint(1, num_char, (batch, target_length), dtype=torch.long, device=device) + input_lengths = torch.full((batch, ), input_length, dtype=torch.long, device=device) + target_lengths = torch.randint(10, target_length, (batch, ), dtype=torch.long, device=device) + + # Dont generate int[] types if reduction = "Mean" since this results in non composite compliant calls + # to ctc_loss.IntList since a tensor needs to be created from the target lengths. + # Creating such a tensor requires the use of pointers to copy data from int[] -> torch.Tensor + # e.g. via std::copy. 
Similarly symbolic/real tracing with fx will also not work + if lt is list and r in ["none", "sum"]: + input_lengths = input_lengths.tolist() + target_lengths = target_lengths.tolist() + + yield SampleInput(log_probs, args=(targets, input_lengths, target_lengths,), kwargs=dict(reduction=r, zero_infinity=z)) + +def sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs): + shape = (2, 3) + num_classes = shape[1] + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # FIXME: Derivative wrt. weight not implemented + make_weight = partial(make_tensor, num_classes, device=device, dtype=dtype, requires_grad=False) + + def make_target(shape, zeros=False): + s = (shape[0], *shape[2:]) if len(shape) > 1 else () + if zeros: + return torch.zeros(s, device=device, dtype=torch.long) + else: + return make_tensor(s, + low=0, + high=shape[1] if len(shape) > 1 else shape[0], + device=device, + dtype=torch.long) + + + def gen_shape_kwargs(): + # Batched, non-batched and 2d + shapes = (shape, (num_classes,), shape + (2, 2)) + reductions = ('none', 'mean', 'sum') + for reduction, s in product(reductions, shapes): + yield make_input(s), make_target(s), dict(reduction=reduction) + yield make_input(s), make_target(s), dict(weight=make_weight(), reduction=reduction) + yield make_input(s), make_target(s), dict(weight=make_weight(low=0), reduction=reduction) + yield make_input(s), make_target(s), dict(weight=make_weight(high=0), reduction=reduction) + t = make_target(s) + ignore = num_classes // 2 + # If "mean", nll returns NaN, so it's not differentiable at those points + if t.eq(ignore).all() and reduction == "mean": + t.fill_(0) + yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction) + yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction, weight=make_weight()) + # Test ignoring all the targets + # If "mean", nll returns NaN, so it's not differentiable at those points + if reduction != "mean": + yield make_input(s), make_target(s, zeros=True), dict(ignore_index=0, reduction=reduction) + + for input, target, kwargs in gen_shape_kwargs(): + yield SampleInput(input, args=(target,), kwargs=kwargs) + + target = torch.tensor([-1, 2], device=device, dtype=torch.long) + yield SampleInput(make_input(shape), args=(target,), kwargs={'ignore_index': -1}) + + +def sample_inputs_binary_cross_entropy_with_logits( + op_info, device, dtype, requires_grad, **kwargs +): + make = partial(make_tensor, device=device, dtype=dtype) + make_prob = partial(make, low=0, high=1) + reductions = ("mean", "sum", "none") + + def make_weight_shape_kwargs(): + kwargs = [] + for shape in ((1,), (1, S), (S), (S, S)): + kwargs.extend([((S, S), dict(reduction=reduction, weight=make(shape))) for reduction in reductions]) + return kwargs + + shapes_and_kwargs = [ + *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))], + *[((S, S), dict(reduction=reduction)) for reduction in reductions], + *make_weight_shape_kwargs(), + *[((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions], + *[((S, S), dict(reduction=reduction, weight=make((S, S)), pos_weight=make((S,), low=0))) for reduction in reductions], + ] + + for shape, kwargs in shapes_and_kwargs: + yield SampleInput( + make(shape, requires_grad=requires_grad), + args=(make_prob(shape, requires_grad=requires_grad),), + kwargs=kwargs, + ) + +def sample_inputs_argwhere(op_info, device, dtype, requires_grad, **kwargs): + yield 
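# --- Editor's illustration (not part of the upstream diff) ---------------------
# The nll_loss sampler above avoids the case where every target equals ignore_index
# under reduction='mean': the mean is then 0/0, i.e. NaN, as this sketch shows:
import torch
import torch.nn.functional as F

logits = torch.randn(2, 3).log_softmax(dim=1)
target = torch.full((2,), 1, dtype=torch.long)        # every target is ignored below
print(F.nll_loss(logits, target, ignore_index=1, reduction='mean'))  # tensor(nan)
print(F.nll_loss(logits, target, ignore_index=1, reduction='sum'))   # tensor(0.)
# --------------------------------------------------------------------------------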
SampleInput(torch.tensor([1, 0, 2, 0], dtype=dtype, device=device, requires_grad=requires_grad)) + mask = torch.tensor([[0, 1, 0, 1, 0], + [1, 1, 1, 1, 0], + [0, 0, 0, 1, 0], + [1, 0, 1, 1, 0], + [1, 0, 0, 1, 0]], dtype=torch.bool, device=device) + t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad) + t[mask] = 0 + yield SampleInput(t) + + t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True) + t[mask] = 0 + yield SampleInput(t) + + t = make_tensor((S, 0), dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(t) + + yield SampleInput(torch.zeros((S,), dtype=dtype, device=device, requires_grad=requires_grad)) + yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad)) + +def _generate_sample_shape_reduction(): + shapes = ((S,), (S, S), (S, S, S)) + reductions = ('none', 'mean', 'sum') + yield from product(shapes, reductions) + +def sample_inputs_gaussian_nll_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # Set low slightly above 0 so gradcheck doesn't accidentally dip below 0 + make_var = partial(make_tensor, low=0.1, device=device, dtype=dtype, requires_grad=requires_grad) + + def gen_shape(shape): + yield shape + # Broadcast + yield (*shape[:-1], 1) + yield shape[:-1] + + def gen_shape_kwargs(): + for s, r in _generate_sample_shape_reduction(): + for t_s, v_s in product(gen_shape(s), gen_shape(s)): + yield _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(reduction=r) + yield ( + _make_tensor(s), _make_tensor(t_s), make_var(v_s), + dict(full=True, reduction=r) + ) + yield ( + _make_tensor(s), _make_tensor(t_s), make_var(v_s), + dict(eps=random.uniform(1e-6, 1e-3), reduction=r) + ) + yield ( + _make_tensor(s), _make_tensor(t_s), make_var(v_s), + dict(full=True, eps=random.uniform(1e-6, 1e-3), reduction=r) + ) + + for input, target, var, kwargs in gen_shape_kwargs(): + yield SampleInput(input, args=(target, var, ), kwargs=kwargs) + +def error_inputs_gaussian_nll_loss(op_info, device, **kwargs): + _make = partial(make_tensor, device=device, dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 3), _make((10, 2, 3), low=0), reduction="abc"), + error_type=ValueError, error_regex="abc is not valid") + + # var is of incorrect shape + yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 3), _make((10, 2, 2), low=0)), + error_type=ValueError, error_regex="var is of incorrect size") + + # target is of incorrect shape + yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 2), _make((10, 2, 3), low=0)), + error_type=RuntimeError, + error_regex=(r"The size of tensor a \(3\) must match the size of tensor b \(2\) " + r"at non-singleton dimension 2")) + +def _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for s, r in _generate_sample_shape_reduction(): + yield _make_tensor(s), _make_tensor(s), dict(reduction=r) + +def sample_inputs_hinge_embedding_loss(op_info, device, dtype, requires_grad, **kwargs): + for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): + # target should contain either 1 or -1 as per docs + mask = torch.rand_like(target) > 0.5 + target[mask] = 1 + target[~mask] = -1 + d['margin'] = 
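# --- Editor's illustration (not part of the upstream diff) ---------------------
# gen_shape in the gaussian_nll_loss sampler above exercises the documented
# broadcasting of `var`: it may match the input shape, have a trailing dimension of
# size 1, or drop the last dimension entirely. Sketch with arbitrary shapes;
# the +0.1 mirrors the "low slightly above 0" trick used above:
import torch
import torch.nn.functional as F

inp = torch.randn(4, 3)
target = torch.randn(4, 3)
var_full = torch.rand(4, 3) + 0.1
var_bcast = torch.rand(4, 1) + 0.1      # one variance per row, broadcast over dim -1
print(F.gaussian_nll_loss(inp, target, var_full))
print(F.gaussian_nll_loss(inp, target, var_bcast, full=True, eps=1e-4))
# --------------------------------------------------------------------------------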
random.uniform(-9, 9) + yield SampleInput(input, args=(target, ), kwargs=d) + + # scalar input and target. + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(_make_tensor(()), args=(_make_tensor(()), )) + +def error_inputs_hinge_embedding_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # invalid reduction value + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex='is not a valid value') + +def reference_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs) + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for reduction in ('sum', 'mean', 'none'): + if dtype.is_floating_point: # only supports ints and floats + # NaN propagation + inp = make_input((10, )) + inp[2] = float('nan') + target = make_input((10, )) + # target should contain either 1 or -1 as per docs + mask = torch.rand_like(target) > 0.5 + target[mask] = -1 + target[~mask] = 1 + yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) + + # Inf Handling + inp = make_input((10, )) + inp[4] = float('inf') + target = make_input((10, )) + mask = torch.rand_like(target) > 0.5 + target[mask] = -1 + target[~mask] = 1 + yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) + + # Broadcasting + inp = make_input((5, 5)) + target = make_input((1, 5)) + mask = torch.rand_like(target) > 0.5 + target[mask] = -1 + target[~mask] = 1 + yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) + +def sample_inputs_huber_loss(op_info, device, dtype, requires_grad, **kwargs): + for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): + d['delta'] = random.uniform(1e-3, 9) + yield SampleInput(input, args=(target, ), kwargs=d) + +def error_inputs_huber_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # invalid reduction value + err = 'is not a valid value for reduction' + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex=err) + # delta <= 0 + for delta in (0, -1): + err = 'huber_loss does not support non-positive values for delta.' 
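# --- Editor's illustration (not part of the upstream diff) ---------------------
# Aside on the delta values sampled above: with delta == beta == 1 huber_loss and
# smooth_l1_loss coincide, which is a quick sanity check on these samples
# (arbitrary inputs, claim limited to the default delta/beta):
import torch
import torch.nn.functional as F

x, y = torch.randn(5, 4), torch.randn(5, 4)
print(torch.allclose(F.huber_loss(x, y, delta=1.0),
                     F.smooth_l1_loss(x, y, beta=1.0)))  # True
# --------------------------------------------------------------------------------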
+ yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'delta': delta}), + error_type=RuntimeError, error_regex=err) + +def sample_inputs_poisson_nll_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def gen_shape_kwargs(): + for s, r in _generate_sample_shape_reduction(): + for li in (True, False): + for f in (True, False): + i1 = _make_tensor(s) + i2 = _make_tensor(s) + # For Poisson NLL Loss, + # target is assumed to be from + # Poisson Distribution which + # always has positive samples + t1 = _make_tensor(s, low=0) + t2 = _make_tensor(s, low=0) + + if not li: + i1.abs_() + i2.abs_() + t1.abs_() + t2.abs_() + + yield ( + i1, t1, + dict(log_input=li, full=f, reduction=r) + ) + yield ( + i2, t2, + dict(log_input=li, full=f, + eps=random.uniform(1e-8, 1e-3), + reduction=r) + ) + + for input, target, kwargs in gen_shape_kwargs(): + yield SampleInput(input, args=(target, ), kwargs=kwargs) + + # test INT_TO_FLOAT promotion + if dtype.is_complex: + for d in (torch.bool, torch.int64): + yield SampleInput(_make_tensor(dtype=dtype), args=(_make_tensor(dtype=d),)) + yield SampleInput(_make_tensor(dtype=d), args=(_make_tensor(dtype=dtype),)) + +def error_inputs_poisson_nll_loss(op_info, device, **kwargs): + make = partial(make_tensor, device=device, dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), + kwargs={'reduction': 'abc'}), + error_type=ValueError, + error_regex='abc is not a valid value for reduction') + # invalid input shapes + yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), + error_regex=(r'(Attempting to broadcast a dimension of length|' + r'The size of tensor a \(5\) must match the ' + r'size of tensor b \(4\) at non-singleton ' + r'dimension 1)')) + +def error_inputs_soft_margin_loss(op_info, device, **kwargs): + make = partial(make_tensor, device=device, dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), + kwargs={'reduction': 'abc'}), + error_type=ValueError, + error_regex='abc is not a valid value for reduction') + # invalid input shapes + yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), + error_regex=(r'(Attempting to broadcast a dimension of length|' + r'The size of tensor a \(4\) must match the ' + r'size of tensor b \(5\) at non-singleton ' + r'dimension 1)')) + +def sample_inputs_triplet_margin_loss(op_info, device, dtype, requires_grad, with_distance=False, **kwargs): + make = partial(make_tensor, (S, M), device=device, dtype=dtype, requires_grad=requires_grad) + + kwargss = ( + *[dict(margin=margin) for margin in (1e-6, 1.0, 10.0)], + dict(swap=True), + *[dict(reduction=reduction) for reduction in ("mean", "sum", "none")], + ) + + for kwargs in kwargss: + input = make() + args = (make(), make()) + if with_distance: + kwargs["distance_function"] = torch.nn.PairwiseDistance() + yield SampleInput(input, args=args, kwargs=kwargs) + +def error_inputs_triplet_margin_loss(op_info, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + + samples = ( + # input, args, kwargs, error_type, error_regex + # invalid reduction + (make_input(3, 4), (make_input(3, 4), make_input(3, 4)), + dict(reduction="abc"), + ValueError, "abc is not a valid value for reduction"), + + # shape mismatch + (make_input(3, 5), (make_input(3, 4), make_input(3, 4)), + dict(), + RuntimeError, + 
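# --- Editor's illustration (not part of the upstream diff) ---------------------
# The abs_() calls in the poisson_nll_loss sampler above exist because with
# log_input=False the input is interpreted as the Poisson rate itself (and so must
# be non-negative), while with log_input=True it is the log-rate. Up to the eps
# term the two parameterisations agree:
import torch
import torch.nn.functional as F

log_rate = torch.randn(4, 3)
target = torch.poisson(torch.rand(4, 3) * 4)
a = F.poisson_nll_loss(log_rate, target, log_input=True)
b = F.poisson_nll_loss(log_rate.exp(), target, log_input=False)  # rate = exp(log_rate)
print(torch.allclose(a, b, atol=1e-4))  # True; residual comes only from eps
# --------------------------------------------------------------------------------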
(r'(Attempting to broadcast a dimension of length|' + r"The size of tensor a \(5\) must match the size of tensor b \(4\) " + r"at non-singleton dimension 1)")), + (make_input(3, 4), (make_input(3, 5), make_input(3, 4)), + dict(), + RuntimeError, + (r'(Attempting to broadcast a dimension of length|' + r"The size of tensor a \(4\) must match the size of tensor b \(5\) " + r"at non-singleton dimension 1)")), + (make_input(3, 4), (make_input(3, 4), make_input(3, 5)), + dict(), + RuntimeError, + (r'(Attempting to broadcast a dimension of length|' + r"The size of tensor a \(4\) must match the size of tensor b \(5\) " + r"at non-singleton dimension 1)")), + + # different dimensions + (make_input(3,), (make_input(3, 4), make_input(3, 4)), + dict(), + RuntimeError, + (r"The anchor, positive, and negative tensors are expected to have " + r"the same number of dimensions, but got: anchor 1D, positive 2D, " + r"and negative 2D inputs")), + (make_input(3, 4), (make_input(3,), make_input(3, 4)), + dict(), + RuntimeError, + (r"The anchor, positive, and negative tensors are expected to have " + r"the same number of dimensions, but got: anchor 2D, positive 1D, " + r"and negative 2D inputs")), + (make_input(3, 4), (make_input(3, 4), make_input(3,)), + dict(), + RuntimeError, + (r"The anchor, positive, and negative tensors are expected to have " + r"the same number of dimensions, but got: anchor 2D, positive 2D, " + r"and negative 1D inputs")), + ) + + for input, args, kwargs, error_type, error_regex in samples: + yield ErrorInput(SampleInput(input, args=args, kwargs=kwargs), + error_type=error_type, error_regex=error_regex) + +def sample_inputs_scaled_mm(op_info, device, dtype, requires_grad, **kwargs): + make_mat_e4m3 = partial(make_tensor, device=device, dtype=torch.float8_e4m3fn, requires_grad=requires_grad) + make_mat_e5m2 = partial(make_tensor, device=device, dtype=torch.float8_e5m2, requires_grad=requires_grad) + M, N, K = 15, 32, 16 + samples = [] + # two e4m3 + mat1 = make_mat_e4m3((M, K)) + mat2 = make_mat_e4m3((K, N)).t().contiguous().t() + samples.append(SampleInput(mat1, mat2)) + # mat1 e4m3 mat2 e5m2 + mat1 = make_mat_e4m3((M, K)) + mat2 = make_mat_e5m2((K, N)).t().contiguous().t() + samples.append(SampleInput(mat1, mat2)) + # mat1 e5m2 mat2 e4m3 + mat1 = make_mat_e5m2((M, K)) + mat2 = make_mat_e4m3((K, N)).t().contiguous().t() + samples.append(SampleInput(mat1, mat2)) + + yield from samples + +def sample_inputs_scaled_dot_product_attention(op_info, device, dtype, requires_grad, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + batch, seq_q, seq_kv, num_heads, head_dim = 4, 3, 6, 4, 8 + + dim_3_q_shape = (batch, seq_q, head_dim) + dim_3_kv_shape = (batch, seq_kv, head_dim) + dim_4_q_shape = (batch, num_heads, seq_q, head_dim) + dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim) + + broadcast_tuple = ((num_heads, seq_q, head_dim), (batch, num_heads, seq_kv, head_dim)) + + qkv_shapes = [(dim_3_q_shape, dim_3_kv_shape), (dim_4_q_shape, dim_4_kv_shape), broadcast_tuple] + samples = [] + for qkv_shape, is_causal, dropout_p in product( + qkv_shapes, [True, False], [0.0, 0.5]): + shape_q, shape_kv = qkv_shape + samples.append(SampleInput( + make(shape_q), + make(shape_kv), + make(shape_kv), + is_causal=is_causal, + dropout_p=dropout_p + )) + + # Add non standard shapes + diff_v_head_dim = SampleInput( + make((batch, num_heads, seq_q, head_dim)), + make((batch, num_heads, seq_kv, head_dim)), + make((batch, num_heads, seq_kv, head_dim + 8)), + 
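# --- Editor's illustration (not part of the upstream diff) ---------------------
# In the 4-D samples above the query is (batch, heads, seq_q, head_dim) and
# key/value share (batch, heads, seq_kv, head_dim); the output keeps the query's
# sequence length. Small sketch with arbitrary sizes:
import torch
import torch.nn.functional as F

q = torch.randn(2, 4, 3, 8)
k = torch.randn(2, 4, 6, 8)
v = torch.randn(2, 4, 6, 8)
out = F.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False)
print(out.shape)  # torch.Size([2, 4, 3, 8])
# --------------------------------------------------------------------------------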
is_causal=is_causal, + dropout_p=dropout_p + ) + + # Add an attn_mask + samples.append( + SampleInput( + make((batch, num_heads, seq_q, head_dim)), + make((batch, num_heads, seq_kv, head_dim)), + make((batch, num_heads, seq_kv, head_dim)), + attn_mask=make((seq_q, seq_kv)), + is_causal=False, + dropout_p=0.0) + ) + + yield from samples + + +def sample_inputs_efficient_attention_forward(op_info, device, dtype, requires_grad, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + batch, num_heads, head_dim = 4, 4, 8 + seq_q = 11 + seq_kv = 32 + + dim_4_q_shape = (batch, num_heads, seq_q, head_dim) + dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim) + + qkv_shapes = [(dim_4_q_shape, dim_4_kv_shape)] + samples = [] + mask_types = [1, 2] # UpperLeft, LowerRight + scales = [None, 1.0] + + for qkv_shape, is_causal, dropout_p, mask_type, scale in product( + qkv_shapes, [True, False], [0.0, 0.5], mask_types, scales): + shape_q, shape_kv = qkv_shape + samples.append(SampleInput( + make(shape_q).transpose(1, 2), + make(shape_kv).transpose(1, 2), + make(shape_kv).transpose(1, 2), + bias=None, + cu_seqlens_q=None, + cu_seqlens_k=None, + max_seqlen_q=None, + max_seqlen_k=None, + dropout_p=dropout_p, + custom_mask_type=mask_type, + compute_log_sumexp=requires_grad, + scale=scale, + causal_diagonal=None, + seqlen_k=None + )) + + # Add non standard shapes + diff_v_head_dim = SampleInput( + make((batch, seq_q, num_heads, head_dim)), + make((batch, seq_kv, num_heads, head_dim)), + make((batch, seq_kv, num_heads, head_dim + 8)), + bias=None, + cu_seqlens_q=None, + cu_seqlens_k=None, + max_seqlen_q=None, + max_seqlen_k=None, + dropout_p=dropout_p, + custom_mask_type=0, # No Mask + compute_log_sumexp=requires_grad, + scale=None, + causal_diagonal=None, + seqlen_k=None + ) + + # Add an attn_mask + samples.append( + SampleInput( + make((batch, seq_q, num_heads, head_dim)), + make((batch, seq_kv, num_heads, head_dim)), + make((batch, seq_kv, num_heads, head_dim)), + bias=make(batch, num_heads, seq_q, seq_kv), + cu_seqlens_q=None, + cu_seqlens_k=None, + max_seqlen_q=None, + max_seqlen_k=None, + dropout_p=dropout_p, + custom_mask_type=0, # No Mask + compute_log_sumexp=requires_grad, + scale=None, + causal_diagonal=None, + seqlen_k=None + ) + ) + + # jagged (with query/keys offsets) + cu_seqlens_k = torch.arange(-1, 32 * 2 + 1, dtype=torch.int32, device=device) + cu_seqlens_k[-1] = 62 + cu_seqlens_k[0] = 0 + samples.append( + SampleInput( + make((32, 2, 64)).view(-1, 8, 8).unsqueeze(0), + make((6, 64)).view(-1, 8, 8).unsqueeze(0), + make((6, 64)).view(-1, 8, 8).unsqueeze(0), + bias=None, + cu_seqlens_q=torch.arange(0, 32 * 2 + 2, dtype=torch.int32, device=device), + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=2, + max_seqlen_k=2, + dropout_p=0.0, + custom_mask_type=0, # No Mask + compute_log_sumexp=requires_grad, + scale=None, + causal_diagonal=None, + seqlen_k=None, + ) + ) + + yield from samples + +def sample_inputs_flash_attention_forward(op_info, device, dtype, requires_grad, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + batch, num_heads, head_dim = 4, 4, 8 + seq_q = 11 + seq_kv = 32 + + dim_4_q_shape = (batch, num_heads, seq_q, head_dim) + dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim) + + qkv_shapes = [(dim_4_q_shape, dim_4_kv_shape)] + samples = [] + scales = [None, 1.0] + + for qkv_shape, is_causal, dropout_p, scale in product( + qkv_shapes, [True, False], [0.0, 0.5], scales): + shape_q, shape_kv = 
qkv_shape + samples.append(SampleInput( + make(shape_q).transpose(1, 2), + make(shape_kv).transpose(1, 2), + make(shape_kv).transpose(1, 2), + cum_seq_q=None, + cum_seq_k=None, + max_q=seq_q, + max_k=seq_kv, + dropout_p=dropout_p, + is_causal=is_causal, + return_debug_mask=False, + scale=scale, + )) + + yield from samples + +def sample_inputs_pairwise_distance(op_info, device, dtype, requires_grad, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shape = (3,) + batched_shape = (2, *shape) + shapes_and_kwargs = [ + (shape, None), + (batched_shape, None), + (shape, dict(keepdim=True)), + (batched_shape, dict(keepdim=True)), + (shape, dict(p=5.0)), + (shape, dict(p=-1.0)), + (shape, dict(eps=1.0)), + ] + + return ( + SampleInput(make(shape), args=(make(shape),), kwargs=kwargs) for shape, kwargs in shapes_and_kwargs + ) + +def sample_inputs_pixel_shuffle(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield from ( + SampleInput(make_arg((1, 9, 2, 2)), upscale_factor=upscale_factor) + for upscale_factor in (1, 3) + ) + yield from ( + SampleInput(make_arg(shape), upscale_factor=1) + for shape in [ + (1, 0, 1, 1), + (1, 1, 0, 1), + (1, 1, 1, 0), + ] + ) + +def sample_inputs_pixel_unshuffle(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield from ( + SampleInput(make_arg((1, 1, 6, 6)), downscale_factor=downscale_factor) + for downscale_factor in (1, 3) + ) + yield from ( + SampleInput(make_arg(shape), downscale_factor=1) + for shape in [ + (1, 0, 1, 1), + (1, 1, 0, 1), + (1, 1, 1, 0), + ] + ) + +def sample_inputs_binary_cross_entropy(op_info, device, dtype, requires_grad, logits=False, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype) + # Lower bounds must be greater than 'eps' defined in gradcheck.py::gradgradcheck() -> eps + # otherwise perturbation calculation causes Tensor value to become negative triggering + # a device-side hardware assertion + make_prob = partial(make, low=1e-6, high=1) + + reductions = ("mean", "sum", "none") + + shapes_and_kwargs = [ + *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))], + *[((S, S), dict(reduction=reduction)) for reduction in reductions], + *[((S, S), dict(reduction=reduction, weight=make((S, S)))) for reduction in reductions], + ] + + if logits: + shapes_and_kwargs.extend( + [((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions] + ) + + for shape, kwargs in shapes_and_kwargs: + yield SampleInput( + (make if logits else make_prob)(shape, requires_grad=requires_grad), + args=(make_prob(shape, requires_grad=requires_grad),), + kwargs=kwargs, + ) + +def sample_inputs_allclose(op_info, device, dtype, requires_grad, **kwargs): + sample_shapes = [(), (S), (S, S, S)] + atols = [1e-2, 1e-16] + rtols = [1e-1, 0.5] + eps = 1e-8 + for s, rtol, atol in product(sample_shapes, rtols, atols): + # close sample + t = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) + close = (t + atol).detach().requires_grad_(requires_grad) + yield SampleInput(t, close, rtol=rtol, atol=atol) + + # random sample + a = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) + b = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(a, b, rtol=rtol, atol=atol) + + +def sample_inputs_l1_loss(op_info, 
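# --- Editor's illustration (not part of the upstream diff) ---------------------
# pixel_shuffle folds channel blocks of size r*r into the spatial dims and
# pixel_unshuffle is its exact inverse, which is what the shapes sampled above rely
# on ((1, 9, 2, 2) with upscale_factor=3 becomes (1, 1, 6, 6)):
import torch
import torch.nn.functional as F

x = torch.randn(1, 9, 2, 2)
y = F.pixel_shuffle(x, upscale_factor=3)       # (1, 1, 6, 6)
z = F.pixel_unshuffle(y, downscale_factor=3)   # back to (1, 9, 2, 2)
print(y.shape, torch.equal(x, z))              # torch.Size([1, 1, 6, 6]) True
# --------------------------------------------------------------------------------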
device, dtype, requires_grad, **kwargs): + yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) + + # test COMPLEX_TO_FLOAT promotion + if dtype.is_complex: + make = partial(make_tensor, (), device=device, requires_grad=requires_grad) + yield SampleInput(make(dtype=dtype), args=(make(dtype=torch.double),)) + yield SampleInput(make(dtype=torch.double), args=(make(dtype=dtype),)) + +def error_inputs_l1_loss(op_info, device, **kwargs): + make = partial(make_tensor, device=device, dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), + kwargs={'reduction': 'abc'}), + error_type=ValueError, + error_regex='abc is not a valid value for reduction') + # invalid input shapes + yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), + error_regex=(r'(Attempting to broadcast a dimension of length|' + r'The size of tensor a \(4\) must match the ' + r'size of tensor b \(5\) at non-singleton ' + r'dimension 1)') + ) + +def sample_inputs_smooth_l1_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) + + make = partial(make_tensor, (S, S), device=device, dtype=dtype, requires_grad=requires_grad) + + # This test case always triggers the smooth condition, since absolute difference of input and target + # is smaller than beta + yield SampleInput(make(low=0, high=2), args=(make(low=-2, high=0),), kwargs=dict(beta=5)) + yield SampleInput(make(), args=(make(),), kwargs=dict(beta=0)) + +def sample_inputs_kl_div(op_info, device, dtype, requires_grad, **kwargs): + # kl_div works with inputs in [0, 1] (aka the pdf of a probability measure) + # Then log [0, 1] = (-inf, 0], so this is the log space + make_arg = partial(make_tensor, low=0., device=device, dtype=dtype, requires_grad=requires_grad) + + def make_log(shape): + out = torch.nn.functional.log_softmax(make_arg(shape), -1) + out.requires_grad_(requires_grad) + return out + + def make_prob(shape): + out = torch.nn.functional.softmax(make_arg(shape), -1) + out.requires_grad_(requires_grad) + return out + + shapes = ((2,), (2, 3)) + reductions = ("none", "mean", "batchmean", "sum") + for shape, reduction, log_target in product(shapes, reductions, (True, False)): + input = make_log(shape) + target = make_log(shape) if log_target else make_prob(shape) + yield SampleInput(input, args=(target,), kwargs=dict(reduction=reduction, log_target=log_target)) + +def sample_inputs_pdist(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield from (SampleInput(make_input((n, m))) for n, m in itertools.product((1, S), repeat=2)) + yield from (SampleInput(make_input((S, S)), kwargs=dict(p=p)) for p in (0.0, 1.0, 2.0, 10.0, float("inf"))) + +def reference_pdist(input, p=2): + pdist = scipy.spatial.distance.pdist + if p == 0: + output = pdist(input, "hamming") * input.shape[1] + elif p == float("inf"): + output = pdist(input, lambda x, y: np.abs(x - y).max()) + else: + output = pdist(input, "minkowski", p=p) + return output.astype(input.dtype) + +def sample_inputs_diagflat(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_input(())) + yield SampleInput(make_input((2,))) + yield SampleInput(make_input((2, 2))) + yield SampleInput(make_input((2,)), offset=1) + yield SampleInput(make_input((2,)), 
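# --- Editor's illustration (not part of the upstream diff) ---------------------
# As the kl_div comment above notes, the *input* is expected in log space; the
# target may be given either as probabilities or as log-probabilities via
# log_target, and the two forms agree:
import torch
import torch.nn.functional as F

p = torch.softmax(torch.randn(3, 5), dim=-1)           # target distribution
log_q = torch.log_softmax(torch.randn(3, 5), dim=-1)   # input: log-probabilities
a = F.kl_div(log_q, p, reduction='batchmean', log_target=False)
b = F.kl_div(log_q, p.log(), reduction='batchmean', log_target=True)
print(torch.allclose(a, b))  # True
# --------------------------------------------------------------------------------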
offset=-1) + +def sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs): + unpool_name_to_pool_method_dict = { + 'nn.functional.max_unpool1d': torch.nn.functional.max_pool1d, + 'nn.functional.max_unpool2d': torch.nn.functional.max_pool2d, + 'nn.functional.max_unpool3d': torch.nn.functional.max_pool3d + } + + unpool_name_to_dim = { + 'nn.functional.max_unpool1d': 1, + 'nn.functional.max_unpool2d': 2, + 'nn.functional.max_unpool3d': 3 + } + + unpool_to_pool_name_dict = {k: f'nn.functional.{v.__name__}' for k, v in unpool_name_to_pool_method_dict.items()} + + pool_dim = unpool_name_to_dim[op_info.name] + pool_method = unpool_name_to_pool_method_dict[op_info.name] + + pool_op_info = copy.copy(op_info) + pool_op_info.name = unpool_to_pool_name_dict[op_info.name] + + for sample in sample_inputs_max_pool(pool_op_info, device, dtype, requires_grad, **kwargs): + # shapes (C, ...) do not work as of now, + # see https://github.com/pytorch/pytorch/issues/68337 + # TODO: remove once the issue is resolved + if sample.input.dim() != pool_dim + 2: + continue + + # No dilation > 1 for max_unpool, + # see https://github.com/pytorch/pytorch/issues/68420 + if sample.kwargs['dilation'] != 1: + continue + + # Can't unpool without indices + if sample.kwargs['return_indices']: + pool, indices = pool_method(sample.input, **sample.kwargs) + # arg has to be a leaf + arg = pool.detach().requires_grad_(requires_grad) + sample_kwargs = { + 'kernel_size': sample.kwargs['kernel_size'], + 'stride': sample.kwargs['stride'], + 'padding': sample.kwargs['padding'], + # output_size could be None but we specify it explicitly + # to compensate for the information lose in pool due + # to the floor/ceil operation used to compute the shapes + 'output_size': sample.input.size() + } + + yield SampleInput(arg, args=(indices,), kwargs=sample_kwargs) + +def sample_inputs_max_unpool_grad(op_info, device, dtype, requires_grad, **kwargs): + for sample in sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs): + indices = sample.args[0] + # The samples for max_unpool are generated with max_pool. + # It could be that a single element from the max_pool's + # input is mapped to several locations in its output. + # This situation leads to failed gradchecks because + # the finite difference algorithm perturbs the elements + # of the output one by one, and not in classes of + # equivalences determined by whether two elements + # in the output are coming from the same location in the + # input (simply put, they have the same corresponding index). + # So, there are two ways to resolve this issue: + # 1. Extract a perturbation for one element and apply it all + # the elements from the same equivalence class, or + # 2. Make sure that the equivalence classes are all singletons, + # i.e. the index tensor has to be comprised of only unique + # indices. + # Here we go with the solution 2, the easiest of all. + if indices.unique().numel() == indices.numel(): + yield sample + +def sample_inputs_multi_head_attention_forward(opinfo, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if requires_grad: + # backward tests would take too long to complete, causing the job timeout. 
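# --- Editor's illustration (not part of the upstream diff) ---------------------
# The max_unpool sampler above derives its inputs from the matching pool op because
# max_unpool needs the indices produced by return_indices=True, and output_size is
# passed explicitly for the reason given in its comment. Minimal round trip:
import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 8)
pooled, indices = F.max_pool1d(x, kernel_size=2, stride=2, return_indices=True)
restored = F.max_unpool1d(pooled, indices, kernel_size=2, stride=2, output_size=x.size())
print(restored.shape)  # torch.Size([1, 1, 8]); non-max positions are filled with zeros
# --------------------------------------------------------------------------------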
+ bsz = 2 + is_batcheds = (True,) + use_separate_proj_weights = (False,) + emb_sizes = (2,) + src_lens = (XS,) + tgt_lens = (XS,) + heads = (2,) + dropouts = (0.5,) + mask_types = ("2d",) + else: + bsz = 2 + is_batcheds = (False, True) + use_separate_proj_weights = (False, True) + emb_sizes = (2, 4) + src_lens = (XS,) + tgt_lens = (XS, S) + heads = (1, 2) + dropouts = (0.0, 0.5) + mask_types = (None, "2d", "3d") + + for is_batched, use_separate_proj_weight, mask_type, emb_size, src_len, tgt_len, num_heads, dropout_p in itertools.product( + is_batcheds, use_separate_proj_weights, mask_types, emb_sizes, src_lens, tgt_lens, heads, dropouts + ): + attn_mask = None + if mask_type == "2d": + attn_mask = make_input(src_len, tgt_len) + elif mask_type == "3d": + attn_mask = make_input((bsz if is_batched else 1) * num_heads, src_len, tgt_len) + + if is_batched: + q = make_input(src_len, bsz, emb_size) + k = make_input(tgt_len, bsz, emb_size) + v = make_input(tgt_len, bsz, emb_size) + else: + q = make_input(src_len, emb_size) + k = make_input(tgt_len, emb_size) + v = make_input(tgt_len, emb_size) + if use_separate_proj_weight: + in_proj_weight = None + q_proj_weight = make_input(emb_size, emb_size) + k_proj_weight = make_input(emb_size, emb_size) + v_proj_weight = make_input(emb_size, emb_size) + else: + in_proj_weight = make_input(emb_size * 3, emb_size) + q_proj_weight = None + k_proj_weight = None + v_proj_weight = None + + bias_k = make_input(emb_size) + bias_v = make_input(emb_size) + in_proj_bias = make_input(emb_size * 3) + out_proj_weight = make_input(emb_size, emb_size) + out_proj_bias = make_input(emb_size) + sample_args = ( + k, v, emb_size, num_heads, in_proj_weight, + in_proj_bias, bias_k, bias_v, False, + dropout_p, out_proj_weight, out_proj_bias + ) + sample_kwargs = { + "q_proj_weight" : q_proj_weight, + "k_proj_weight" : k_proj_weight, + "v_proj_weight" : v_proj_weight, + "attn_mask" : attn_mask, + "training" : True if dropout_p > 0.0 else False, + "use_separate_proj_weight" : use_separate_proj_weight + } + + yield SampleInput(q, args=sample_args, kwargs=sample_kwargs) + + +# Includes some values such that N * N won't be a multiple of 4, +# which should ensure we test the vectorized and non-vectorized +# kernel code paths. +NUM_SIZE0_TENSORS = 10000 +foreach_num_tensors = [20, 23] if not TEST_WITH_SLOW else [23, 30, 300] +_foreach_inputs_default_kwargs = {"noncontiguous": False, "same_size": False, "low": None, "high": None} + + +class ForeachRightmostArgType(enum.Enum): + TensorList = enum.auto() + ScalarList = enum.auto() + Scalar = enum.auto() + Tensor = enum.auto() + + +class ForeachSampleInput(SampleInput): + # For TensorList Scalar/Tensor, we compute the reference + # by converting it into TensorList ScalarList/TensorList and + # then converting into multiple Tensor Scalar/Tensor. 
+ # ref_args contains the args converted to TensorList ScalarList/TensorList + ref_args: Any + disable_fastpath: bool + + def __init__(self, *args, disable_fastpath=False, ref_args=None, **kwargs): + super().__init__(*args, **kwargs) + self.ref_args = ref_args or self.args + self.disable_fastpath = disable_fastpath + + +class foreach_inputs_sample_func: + def __init__( + self, + arity: int, + rightmost_supports_scalar: bool, + rightmost_supports_scalarlist: bool, + rightmost_supports_tensor: bool = False, + ) -> None: + self.arity = arity + self._set_rightmost_arg_types( + rightmost_supports_scalar, rightmost_supports_scalarlist, rightmost_supports_tensor, + ) + + def _set_rightmost_arg_types( + self, + rightmost_supports_scalar: bool, + rightmost_supports_scalarlist: bool, + rightmost_supports_tensor: bool, + ) -> None: + self._rightmost_arg_types = [ForeachRightmostArgType.TensorList] + if self.arity > 1: + if rightmost_supports_scalar: + self._rightmost_arg_types.append(ForeachRightmostArgType.Scalar) + if rightmost_supports_scalarlist: + self._rightmost_arg_types.append(ForeachRightmostArgType.ScalarList) + if rightmost_supports_tensor: + self._rightmost_arg_types.append(ForeachRightmostArgType.Tensor) + + def _sample_rightmost_arg(self, opinfo, rightmost_arg_type, device, dtype, num_tensors, **_foreach_inputs_kwargs): + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + return [sample_inputs_foreach(None, device, dtype, num_tensors, **_foreach_inputs_kwargs)] + if rightmost_arg_type == ForeachRightmostArgType.Tensor: + return [make_tensor( + (), device=device, dtype=dtype, + noncontiguous=_foreach_inputs_kwargs["noncontiguous"], + requires_grad=_foreach_inputs_kwargs.get("requires_grad", False), + )] + should_use_simpler_scalars = opinfo.name == "_foreach_pow" and dtype in (torch.float16, torch.bfloat16) + + def sample_float(): + s = random.random() + if should_use_simpler_scalars: + return 1.0 if s > 0.5 else 2.0 + else: + return 1.0 - s + + high = 2 if should_use_simpler_scalars else 9 + if rightmost_arg_type == ForeachRightmostArgType.ScalarList: + return [ + [random.randint(0, high) + 1 for _ in range(num_tensors)], + [sample_float() for _ in range(num_tensors)], + [complex(sample_float(), sample_float()) for _ in range(num_tensors)], + [True for _ in range(num_tensors)], + [1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 3)], + [True, 1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 4)], + ] + if rightmost_arg_type == ForeachRightmostArgType.Scalar: + return ( + random.randint(1, high + 1), + sample_float(), + True, + complex(sample_float(), sample_float()), + ) + raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}") + + def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): + if self.arity == 1: + if "foreach_abs" in opinfo.name and dtype in complex_types(): + return True + # unary + if opinfo.ref in (torch.abs, torch.neg): + return False + return dtype in integral_types_and(torch.bool) + if self.arity < 2 or rightmost_arg_type == ForeachRightmostArgType.Tensor: + return None + if "foreach_pow" in opinfo.name and dtype in integral_types(): + return True + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + disable_fastpath = "foreach_div" in opinfo.name and dtype in integral_types_and(torch.bool) + if "foreach_add" in opinfo.name and dtype == torch.bool: + disable_fastpath = True + return disable_fastpath + elif rightmost_arg_type == ForeachRightmostArgType.Scalar: + disable_fastpath = 
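# --- Editor's illustration (not part of the upstream diff) ---------------------
# The rightmost-argument machinery above mirrors the overloads the _foreach ops
# accept: another TensorList, a single Scalar, a per-tensor ScalarList (and, for
# some ops, a single Tensor), e.g.:
import torch

xs = [torch.arange(3.), torch.arange(4.)]
ys = [torch.ones(3), torch.ones(4)]
print(torch._foreach_add(xs, ys))           # TensorList
print(torch._foreach_add(xs, 2.0))          # Scalar
print(torch._foreach_add(xs, [1.0, 2.0]))   # ScalarList (one scalar per tensor)
# --------------------------------------------------------------------------------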
"foreach_div" in opinfo.name and dtype in integral_types_and(torch.bool) + if isinstance(rightmost_arg, bool): + disable_fastpath |= dtype == torch.bool + if opinfo.ref in (torch.add, torch.mul): + disable_fastpath = False + elif isinstance(rightmost_arg, int): + disable_fastpath |= dtype == torch.bool + elif isinstance(rightmost_arg, float): + disable_fastpath |= dtype in integral_types_and(torch.bool) + elif isinstance(rightmost_arg, complex): + disable_fastpath |= dtype not in complex_types() + else: + raise AssertionError(f"Invalid scalar of type {rightmost_arg_type} - {rightmost_arg}") + return disable_fastpath + elif rightmost_arg_type == ForeachRightmostArgType.ScalarList: + disable_fastpath = opinfo.ref == torch.div and dtype in integral_types_and(torch.bool) + elmt_t = type(rightmost_arg[0]) + has_same_type = all(isinstance(v, elmt_t) for v in rightmost_arg) + if not has_same_type: + return dtype not in complex_types() + if isinstance(rightmost_arg[0], bool): + if ("foreach_add" in opinfo.name or "foreach_mul" in opinfo.name) and dtype == torch.bool: + disable_fastpath = False + elif isinstance(rightmost_arg[0], int): + disable_fastpath |= dtype == torch.bool + elif isinstance(rightmost_arg[0], float): + disable_fastpath |= dtype in integral_types_and(torch.bool) + elif isinstance(rightmost_arg[0], complex): + disable_fastpath |= dtype not in complex_types() + else: + raise AssertionError(f"Invalid scalarlist of {rightmost_arg}") + return disable_fastpath + else: + raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}") + + def _sample_kwargs(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): + kwargs = {} + if rightmost_arg_type == ForeachRightmostArgType.TensorList and opinfo.supports_alpha_param: + if dtype in integral_types_and(torch.bool): + kwargs["alpha"] = 3 + elif dtype.is_complex: + kwargs["alpha"] = complex(3, 3) + else: + kwargs["alpha"] = 3.14 + if self.arity > 1: + kwargs["disable_fastpath"] = self._should_disable_fastpath(opinfo, rightmost_arg, rightmost_arg_type, dtype) + return kwargs + + def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + assert "num_input_tensors" not in kwargs + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + for rightmost_arg_type in self._rightmost_arg_types: + zero_size_foreach_inputs_kwargs = copy.deepcopy(_foreach_inputs_kwargs) + zero_size_foreach_inputs_kwargs["zero_size"] = True + input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, **zero_size_foreach_inputs_kwargs) + if self.arity > 1: + args = [ + sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, **zero_size_foreach_inputs_kwargs) + for _ in range(self.arity - 2) + ] + args.append( + self._sample_rightmost_arg( + opinfo, ForeachRightmostArgType.TensorList, device, dtype, NUM_SIZE0_TENSORS, + **zero_size_foreach_inputs_kwargs)[0]) + kwargs = self._sample_kwargs( + opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype, zero_size=True) + else: + args = [] + kwargs = {} + if opinfo.ref in (torch.abs, torch.neg): + kwargs["disable_fastpath"] = False + else: + kwargs["disable_fastpath"] = dtype in integral_types_and(torch.bool) + yield ForeachSampleInput(input, *args, **kwargs) + + def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): + num_input_tensors_specified = "num_input_tensors" in kwargs + num_input_tensors = kwargs.pop("num_input_tensors") if 
num_input_tensors_specified else foreach_num_tensors + assert isinstance(num_input_tensors, list) + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + _foreach_inputs_kwargs["zero_size"] = False + + # add empty tensor interspersion to test fully fixing #100701 + for num_tensors, rightmost_arg_type, intersperse_empty_tensors in itertools.product( + num_input_tensors, self._rightmost_arg_types, (True, False)): + if intersperse_empty_tensors and (num_tensors != max(num_input_tensors) or str(device) == 'cpu'): + # generate interspersed empty tensors for only 1 N on non-cpu device to lessen redundancy + continue + _foreach_inputs_kwargs["intersperse_empty_tensors"] = intersperse_empty_tensors + input = sample_inputs_foreach( + None, device, dtype, num_tensors, **_foreach_inputs_kwargs) + args = [] + if self.arity > 1: + args = [ + sample_inputs_foreach( + None, device, dtype, num_tensors, **_foreach_inputs_kwargs) + for _ in range(self.arity - 2) + ] + rightmost_arg_list = self._sample_rightmost_arg( + opinfo, rightmost_arg_type, device, dtype, num_tensors, + **_foreach_inputs_kwargs) + for rightmost_arg in rightmost_arg_list: + args.append(rightmost_arg) + kwargs = self._sample_kwargs(opinfo, rightmost_arg, rightmost_arg_type, dtype) + ref_args = args + if rightmost_arg_type in (ForeachRightmostArgType.Scalar, ForeachRightmostArgType.Tensor): + ref_args = args[:-1] + [[args[-1] for _ in range(num_tensors)]] + sample = ForeachSampleInput(input, *args, ref_args=ref_args, **kwargs) + yield sample + args.pop() + else: + yield ForeachSampleInput( + input, + *args, + disable_fastpath=self._should_disable_fastpath(opinfo, None, None, dtype), + ) + + +class foreach_norm_sample_func(foreach_inputs_sample_func): + def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + assert "num_input_tensors" not in kwargs + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + for ord in (0, 1, 2, -1, -2, float('inf'), float('-inf')): + input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) + disable_fastpath = True + if ord in (1, 2, float('inf')) and dtype in floating_types_and(torch.half, torch.bfloat16): + disable_fastpath = False + yield ForeachSampleInput(input, ord=ord, disable_fastpath=disable_fastpath) + + def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): + num_input_tensors = kwargs.pop("num_input_tensors", foreach_num_tensors) + assert isinstance(num_input_tensors, list) + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + + for num_tensors, ord in product(num_input_tensors, (0, 1, 2, -1, -2, float('inf'), float('-inf'))): + input = sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + disable_fastpath = True + if ord in (1, 2, float('inf')) and dtype in floating_types_and(torch.half, torch.bfloat16): + disable_fastpath = False + yield ForeachSampleInput(input, ord=ord, disable_fastpath=disable_fastpath) + + # Also test nan propagation with a single tensor, but skip autograd testing + if not requires_grad: + nan_inputs = [ + [float('nan')], + [float('nan'), 1.0], + [1.0, float('nan')], + [1.0, 2.0, 3.0, float('nan'), float('nan'), 7.0, 
float('nan'), float('nan'), -1.5, 6.0], + [7.0, 3.0, float('nan'), float('nan'), -1.5, 6.0], + [3.0, float('nan'), float('nan'), -1.5, 6.0], + ] + for input in nan_inputs: + x = torch.tensor(input, device=device) + disable_fastpath = True + if ord in (1, 2, float('inf')) and dtype in floating_types_and(torch.half, torch.bfloat16): + disable_fastpath = False + yield ForeachSampleInput([x], ord=ord, disable_fastpath=disable_fastpath) + + + + +class foreach_lerp_sample_func(foreach_inputs_sample_func): + def _sample_rightmost_arg(self, opinfo, rightmost_arg_type, device, dtype, num_tensors, **_foreach_inputs_kwargs): + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + return [sample_inputs_foreach(None, device, dtype, num_tensors, **_foreach_inputs_kwargs)] + if rightmost_arg_type == ForeachRightmostArgType.ScalarList: + return [ + [random.randint(0, 9) + 1 for _ in range(num_tensors)], + [1.0 - random.random() for _ in range(num_tensors)], + [complex(1.0 - random.random(), 1.0 - random.random()) for _ in range(num_tensors)], + [True for _ in range(num_tensors)], + [1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 3)], + [True, 1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 4)], + ] + if rightmost_arg_type == ForeachRightmostArgType.Scalar: + return [random.random()] + raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}") + + +class foreach_pointwise_sample_func(foreach_inputs_sample_func): + + def __init__( + self, + arity: int = 3, + rightmost_supports_scalar: bool = False, + rightmost_supports_scalarlist: bool = False, + ): + super().__init__(arity, rightmost_supports_scalar, rightmost_supports_scalarlist) + + def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): + return dtype in integral_types_and(torch.bool) and opinfo.ref in (torch.addcmul,) + + def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + assert "num_input_tensors" not in kwargs + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + # zero_size tensor + input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) + args = [ + sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) + for _ in range(2) + ] + if "scalars" in kwargs: + del kwargs["scalars"] + kwargs.update(self._sample_kwargs(opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype)) + yield ForeachSampleInput(input, *args, **kwargs) + + def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): + num_input_tensors_specified = "num_input_tensors" in kwargs + num_input_tensors = kwargs.pop("num_input_tensors") if num_input_tensors_specified else foreach_num_tensors + assert isinstance(num_input_tensors, list) + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + + for num_tensors, rightmost_arg_type in itertools.product(num_input_tensors, self._rightmost_arg_types): + input = sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + args = [ + sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + for _ in range(2 - int(rightmost_arg_type == ForeachRightmostArgType.TensorList)) + ] + rightmost_arg_list = self._sample_rightmost_arg( + opinfo, 
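# --- Editor's illustration (not part of the upstream diff) ---------------------
# In foreach_pointwise_sample_func a Scalar rightmost argument is passed through
# the `value` keyword and a ScalarList through `scalars`; both overloads exist on
# the pointwise ops, e.g. torch._foreach_addcmul:
import torch

xs = [torch.zeros(3), torch.zeros(2)]
t1 = [torch.ones(3), torch.ones(2)]
t2 = [torch.full((3,), 2.0), torch.full((2,), 2.0)]
print(torch._foreach_addcmul(xs, t1, t2, value=0.5))            # single scalar
print(torch._foreach_addcmul(xs, t1, t2, scalars=[1.0, 2.0]))   # per-tensor scalars
# --------------------------------------------------------------------------------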
rightmost_arg_type, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + for rightmost_arg in rightmost_arg_list: + kwargs = {} + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + args.append(rightmost_arg) + elif rightmost_arg_type in [ForeachRightmostArgType.Tensor, ForeachRightmostArgType.ScalarList]: + kwargs["scalars"] = rightmost_arg + else: + kwargs["value"] = rightmost_arg + kwargs.update(self._sample_kwargs(opinfo, rightmost_arg, rightmost_arg_type, dtype)) + assert len(args) == 2, f"{len(args)=}" + sample = ForeachSampleInput(input, *args, **kwargs) + yield sample + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + args.pop() + + +foreach_unary_op_db: List[OpInfo] = [ + ForeachFuncInfo( + 'exp', + foreach_inputs_sample_func(1, False, False), + backward_requires_result=True, + ), + ForeachFuncInfo( + 'acos', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'asin', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'atan', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'cos', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'cosh', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'log', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'log10', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'log2', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'tan', + foreach_inputs_sample_func(1, False, False), + backward_requires_result=True, + decorators=( + # due to https://github.com/pytorch/pytorch/pull/102427 enabling jiterator for complex + DecorateInfo( + toleranceOverride( + { + torch.complex64: tol(atol=3e-04, rtol=2e-05) + } + ), + 'TestForeach', + 'test_parity', + device_type='cuda' + ), + ), + ), + ForeachFuncInfo( + 'tanh', + foreach_inputs_sample_func(1, False, False), + backward_requires_result=True, + decorators=( + DecorateInfo( + toleranceOverride( + {torch.complex64: tol(atol=5e-03, rtol=1e-04)} + ), + 'TestForeach', + 'test_parity', + device_type='cuda' + ), + ), + ), + ForeachFuncInfo( + 'sin', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'sinh', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'neg', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and_complex(), + dtypesIfCUDA=all_types_and_complex(), + ), + ForeachFuncInfo( + 'sqrt', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_and_complex_types_and(torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half), + backward_requires_result=True, + ), + ForeachFuncInfo( + 'ceil', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'erf', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'erfc', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'expm1', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_and_complex_types_and(torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + backward_requires_result=True, + ), + ForeachFuncInfo( + 'floor', + 
foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'log1p', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_and_complex_types_and(torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half), + ), + ForeachFuncInfo( + 'round', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'frac', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'reciprocal', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half), + backward_requires_result=True, + ), + ForeachFuncInfo( + 'sigmoid', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half), + backward_requires_result=True, + ), + ForeachFuncInfo( + 'trunc', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'abs', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestMeta", + "test_dispatch_symbolic_meta_inplace", dtypes=complex_types()), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestMeta", + "test_dispatch_meta_inplace", dtypes=complex_types()), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestMeta", + "test_meta_inplace", dtypes=complex_types()), + ), + ), + ForeachFuncInfo( + 'zero', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_out=False, + ), + ForeachFuncInfo( + 'sign', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_types_and(torch.bool, torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + ), + ForeachFuncInfo( + 'lgamma', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + dtypesIfCUDA=all_types_and(torch.bool, torch.float16), + skips=( + DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", + "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", + "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", + "test_meta_inplace", dtypes=integral_types_and(torch.bool)), + ), + ), +] + +foreach_binary_op_db: List[OpInfo] = [ + ForeachFuncInfo( + "add", + foreach_inputs_sample_func(2, True, True, True), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_alpha_param=True, + skips=( + # These tests fail with aten._local_scalar_dense not being implemented. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + # Samples have complex types and inplace only works if the dtype is complex. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + ), + ), + ForeachFuncInfo( + "sub", + foreach_inputs_sample_func(2, True, True), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_alpha_param=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + ForeachFuncInfo( + "mul", + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), + skips=( + # Samples have complex types and inplace only works if the dtype is complex. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + ), + ), + ForeachFuncInfo( + "div", + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), + skips=( + # Samples have complex types and inplace only works if the dtype is complex. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + # fails with div_cpu is not implemented with ComplexHalf + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=(torch.float16,), device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.float16,), device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", + dtypes=(torch.float16,), device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=(torch.float16,), device_type='cpu'), + ), + ), + ForeachFuncInfo( + "clamp_min", + foreach_inputs_sample_func(2, True, True), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + ForeachFuncInfo( + "clamp_max", + foreach_inputs_sample_func(2, True, True), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + # note(crcrpar): forward ad not implemented. 
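+ # (Hence the 'minimum' entry below, like 'maximum' after it, sets supports_forward_ad=False
+ # and supports_inplace_autograd=False.)
+ # Rough sketch of how this database is consumed (illustrative only; the real harness is the
+ # TestForeach suite referenced by the DecorateInfos above):
+ #   @ops(foreach_binary_op_db)
+ #   def test_parity(self, device, dtype, op): ...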
+ ForeachFuncInfo( + "minimum", + foreach_inputs_sample_func(2, True, True), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=False, + supports_inplace_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + # note(crcrpar): forward ad not implemented. + ForeachFuncInfo( + "maximum", + foreach_inputs_sample_func(2, True, True), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=False, + supports_inplace_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + ForeachFuncInfo( + "pow", + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + supports_alpha_param=False, + supports_scalar_self_arg=True, + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_autograd=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + supports_forward_ad=True, + backward_requires_result=True, + ), + ForeachFuncInfo( + "copy", + foreach_inputs_sample_func(2, False, False), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_out=False, + supports_forward_ad=False, + supports_autograd=False, + ) +] + +foreach_pointwise_op_db: List[ForeachFuncInfo] = [ + ForeachFuncInfo( + "addcmul", + foreach_pointwise_sample_func(4, True, True), + dtypes=all_types_and_complex(), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + 
skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + ForeachFuncInfo( + "addcdiv", + sample_inputs_func=foreach_pointwise_sample_func(4, True, True), + dtypes=all_types_and_complex(), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), +] + +foreach_reduce_op_db: List[ForeachFuncInfo] = [ + ForeachFuncInfo( + "norm", + foreach_norm_sample_func(1, False, False), + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + ), + ), +] + +foreach_other_op_db: List[ForeachFuncInfo] = [ + ForeachFuncInfo( + "lerp", + foreach_lerp_sample_func(3, True, False), + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), + ), +] + +def reference_sign(x): + if x.dtype == np.bool_: + # `np.sign` doesn't support `bool`. + # >>> np.sign(True) + # ufunc 'sign' did not contain a loop + # with signature matching types dtype('bool') -> dtype('bool') + return np.sign(x, dtype=np.uint8).astype(np.bool_) + return np.sign(x) + + +def reference_sgn(x): + # NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex. + # For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j. + # while `torch.sgn` returns, 0 if abs(input) == 0 else input/abs(input) + if x.dtype not in [np.complex64, np.complex128]: + return reference_sign(x) + + out = (x / np.abs(x)) + if out.ndim == 0: + # Handle x == 0 case + if (x == 0): + # Can't assign to np.complex object + # So make a new one. 
+ return np.array(complex(0, 0), dtype=x.dtype) + return out + + # Handle x == 0 case + mask = (x == 0) + out[mask] = complex(0, 0) + return out + + +def reference_sigmoid(x): + # 'scipy.special.expit' not supported for the input types + if x.dtype in [np.complex64, np.complex128]: + return (1 / (1 + np.exp(-x))) + return scipy.special.expit(x) + + +def reference_logsigmoid(x): + return np.where( + x < 0, + x - np.log1p(np.exp(x)), + -np.log1p(np.exp(-x))) + + +def reference_hardsigmoid(x): + intermediate = x / 6 + 0.5 + y = np.clip(intermediate, 0, None) + return np.where(y > 1, 1, y).astype(x.dtype) + + +def reference_lgamma(x): + # scipy.special.gammaln returns `-inf` when input is `-inf`. + # While Pytorch, C and C++, all return `inf` when input is `-inf`. + # Reference: + # https://en.cppreference.com/w/cpp/numeric/math/lgamma + # https://en.cppreference.com/w/c/numeric/math/lgamma + + # To handle the above discrepancy, + # we replace -inf with inf so values + # that were originally -inf map to inf as expected + if x.dtype.kind == 'f': + x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x) + + out = scipy.special.gammaln(x) + + if x.dtype == np.float16: + # `scipy.special.gammaln` returns output of float32 when input is float16, + # while `torch.lgamma` preserves `float16`. But due to smaller range of float16, + # Pytorch version outputs `inf` while SciPy returns finite values. + out = out.astype(np.float16) + + return out + + +def reference_mvlgamma(x, d): + if x.dtype == np.float16: + return scipy.special.multigammaln(x, d).astype(np.float16) + + return scipy.special.multigammaln(x, d) + +def reference_softplus(input, beta=1, threshold=20): + non_linear = input * beta <= threshold + output = input.copy() + output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta + return output + +def reference_gelu(X, *, approximate='none'): + def _gelu_ref(X): + return X * stats.norm.cdf(X) + + def _tanh_gelu_ref(X): + M_SQRT_2_PI = math.sqrt(2 / math.pi) + Z = M_SQRT_2_PI * (X + 0.044715 * np.power(X, 3.0)) + return 0.5 * X * (1.0 + np.tanh(Z)) + + if approximate == 'tanh': + return _tanh_gelu_ref(X) + else: + return _gelu_ref(X) + + +def reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray: + if num_classes == -1: + num_classes = int(np.amax(a) + 1) + + idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes + one_hot = np.zeros((a.size, num_classes), dtype=a.dtype) + np.put(one_hot, idcs, 1) + return one_hot.reshape(*a.shape, -1) + + +def reference_mse_loss(input, target, reduction="mean"): + se = (input - target) ** 2 + if reduction == "mean": + return np.mean(se) + elif reduction == "sum": + return np.sum(se) + else: # reduction == "none" + return se + + +def wrapper_set_seed(op, *args, **kwargs): + """Wrapper to set seed manually for some functions like dropout + See: https://github.com/pytorch/pytorch/pull/62315#issuecomment-896143189 for more details. 
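+
+    For example, the 'uniform' OpInfo below wraps its in-place sampler as
+    op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.uniform_, inp, *args, **kwargs)
+    so that every invocation of the op sees the same RNG state.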
+ """ + with freeze_rng_state(): + torch.manual_seed(42) + output = op(*args, **kwargs) + + if isinstance(output, torch.Tensor) and output.device.type == "lazy": + # We need to call mark step inside freeze_rng_state so that numerics + # match eager execution + torch._lazy.mark_step() + + return output + + +def reference_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, bias=None, eps=1e-5): + return reference_native_layer_norm(inp, normalized_shape, weight, bias, eps)[0] + + +def reference_native_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight, bias, eps): + feature_size = np.prod(normalized_shape) + inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload] + mean = inp_view.mean(axis=-1, keepdims=True) + var = inp_view.var(axis=-1, ddof=0, keepdims=True) + Y = (inp_view - mean) / np.sqrt(var + eps) + if weight is None and bias is not None: + Y = Y + bias.reshape(-1) + elif weight is not None and bias is None: + Y = Y * weight.reshape(-1) + elif weight is not None and bias is not None: + Y = Y * weight.reshape(-1) + bias.reshape(-1) + axis = inp.ndim - len(normalized_shape) + stat_shape = inp.shape[:axis] + (1,) * len(normalized_shape) + return Y.reshape(*inp.shape), mean.reshape(stat_shape), (1.0 / np.sqrt(var + eps)).reshape(stat_shape) + + +def reference_group_norm(inp: np.ndarray, num_groups: int, weight=None, bias=None, eps=1e-5): + inp_view = inp + if np.prod(inp.shape) != 0: + inp_view = inp.reshape((inp.shape[0], num_groups, -1)) + mean = inp_view.mean(axis=-1, keepdims=True) + var = inp_view.var(axis=-1, ddof=0, keepdims=True) + Y = (inp_view - mean) / np.sqrt(var + eps) + Y = Y.reshape(inp.shape) + if weight is not None: + # weight is a vector of length equal to the channel + if len(Y.shape) > 2: + weight = np.expand_dims(weight, [0] + [idx + 2 for idx in range(inp.ndim - 2)]) + Y = Y * weight + if bias is not None: + # bias is a vector of length equal to the channel + if len(Y.shape) > 2: + bias = np.expand_dims(bias, [0] + [idx + 2 for idx in range(inp.ndim - 2)]) + Y = Y + bias + return Y + + +# using a custom reference function since numpy only has a string side arg (instead of right and side) and doesn't +# have an out_int32 arg. 
Additionally, numpy doesn't support searchsorted with ND arrays, so this splits those into +# stacked 1D cases +def reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None): + side = 'right' if (right or side == 'right') else 'left' + if len(sorted_sequence.shape) == 1 : + ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter) + return ret.astype(np.int32) if out_int32 else ret + elif sorted_sequence.shape[0] == 0: + if sorter is not None: + sorter = sorter.flatten() + ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter) + ret = ret.astype(np.int32) if out_int32 else ret + return ret.reshape(boundary.shape) + else: + # numpy searchsorted only supports 1D inputs so we split up ND inputs + orig_shape = boundary.shape + num_splits = np.prod(sorted_sequence.shape[:-1]) + splits = range(0, num_splits) + sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1) + if sorter is not None: + sorter = sorter.reshape(num_splits, -1) + + split_sequence = [sorted_sequence[i] for i in splits] + split_boundary = [boundary[i] for i in splits] + split_sorter = [sorter[i] if (sorter is not None) else None for i in splits] + + split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort) + for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter)] + split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret + return np.stack(split_ret).reshape(orig_shape) + +def loss_reference_reduction_wrapper(fn): + def wrapper(input, target, *, size_average=None, reduce=None, reduction="mean", **other_kwargs): + if size_average is not None or reduce is not None: + raise RuntimeError( + "The keyword arguments 'size_average' and 'reduce' are deprecated and not supported by this wrapper" + ) + output = fn(input, target, **other_kwargs) + if reduction == "mean": + return np.mean(output) + elif reduction == "sum": + return np.sum(output) + else: # reduction == "none" + return output + + return wrapper + +@loss_reference_reduction_wrapper +def reference_smooth_l1_loss(input, target, beta=1.0): + diff = input - target + abs_diff = np.abs(diff) + above_threshold = abs_diff >= beta + + loss = np.empty_like(input) + loss[above_threshold] = abs_diff[above_threshold] - 0.5 * beta + loss[~above_threshold] = diff[~above_threshold] ** 2 / (2 * beta) + + return loss + +def reference_std_var(f): + """Forwards unbiased/correction kwargs as NumPy's equivalent ddof""" + g = reference_reduction_numpy(f) + + @wraps(g) + def wrapper(x: np.ndarray, *args, **kwargs): + assert not ('unbiased' in kwargs and 'correction' in kwargs) + + if 'unbiased' in kwargs: + kwargs['ddof'] = int(kwargs.pop('unbiased')) + elif 'correction' in kwargs: + kwargs['ddof'] = kwargs.pop('correction') + + return g(x, *args, **kwargs) + + return wrapper + +def generate_std_var_kwargs(t: torch.Tensor, **kwargs): + """Generates unbiased/correction kwargs for std/var operators""" + yield ((), {'unbiased': True}) + yield ((), {'unbiased': False}) + + # Currently, calling std with correction is only enabled when + # both dim and keepdim are provided. 
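+ # e.g. with t of shape (4, 5), generate_std_var_kwargs(t, dim=0, keepdim=True) also yields
+ # correction=0, correction=1, and correction=numel // 2, where numel is the product of the
+ # sizes of the reduced dimension(s) (here 4, so correction=2).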
+ if 'dim' in kwargs and 'keepdim' in kwargs: + yield ((), {'correction': 0}) + yield ((), {'correction': 1}) + + numel = torch.tensor(t.shape)[kwargs.get('dim')].prod() + yield ((), {'correction': numel // 2}) + +def error_inputs_mean(op_info, device, is_ref=False, **kwargs): + if is_ref: + err_msg1 = (r"mean\(\): could not infer output dtype. " + r"Input dtype must be either a floating point or complex dtype. " + r"Got: torch.int64") + else: + err_msg1 = (r"mean\(\): could not infer output dtype. " + r"Input dtype must be either a floating point or complex dtype. " + r"Got: Long") + yield ErrorInput( + SampleInput(make_tensor((3, 4, 5), dtype=torch.int64, device=device), []), + error_regex=err_msg1, + ) + + if is_ref: + err_msg2 = (r"mean\(\): could not infer output dtype. " + r"Optional dtype must be either a floating point or complex dtype. " + r"Got: torch.int64") + else: + err_msg2 = (r"mean\(\): could not infer output dtype. " + r"Optional dtype must be either a floating point or complex dtype. " + r"Got: Long") + yield ErrorInput( + SampleInput( + make_tensor((3, 4, 5), dtype=torch.float32, device=device), + [], + dtype=torch.int64), + error_regex=err_msg2 + ) + + if is_ref: + err_msg3 = "Expected out tensor to have dtype torch.float64, but got torch.float32 instead" + else: + err_msg3 = "Expected out tensor to have dtype double, but got float instead" + yield ErrorInput( + SampleInput( + make_tensor((3, 4, 5), dtype=torch.int64, device=device), + [], + dtype=torch.float64, + out=make_tensor([], dtype=torch.float32, device=device), + ), + error_regex=err_msg3 + ) + +# numpy implementation of torch.flatten +# unfortunately there's no np.flatten. we figure out the desired shape and call np.reshape +def reference_flatten(input, start_dim=0, end_dim=-1): + in_shape = input.shape + in_rank = len(in_shape) + for d in start_dim, end_dim: + if not ((in_rank == 0 and d in (-1, 0)) or -in_rank <= d < in_rank): + raise IndexError(f"Dimension out of range (expected to be in range of [{-in_rank}, {in_rank-1}], but got {d}") + end_dim = end_dim if end_dim >= 0 else in_rank + end_dim + start_dim = start_dim if start_dim >= 0 else in_rank + start_dim + if in_rank == 0: + end_dim = start_dim + if end_dim < start_dim: + raise RuntimeError("flatten() has invalid args: start_dim cannot come after end_dim") + flatten_bit_dim = functools.reduce(operator.mul, in_shape[start_dim:end_dim + 1], 1) + out_shape = in_shape[:start_dim] + (flatten_bit_dim,) + in_shape[end_dim + 1:] + return np.reshape(input, out_shape) + +# Operator database (sorted alphabetically) +op_db: List[OpInfo] = [ + UnaryUfuncInfo('abs', + aliases=('absolute', ), + ref=np.abs, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + skips=( + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestBwdGradients', + 'test_inplace_grad', dtypes=(torch.cdouble,)), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestBwdGradients', + 'test_inplace_gradgrad', dtypes=(torch.cdouble,)), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestFwdGradients', + 'test_inplace_forward_mode_AD', dtypes=(torch.cdouble,)), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestSparseUnaryUfuncs", + "test_inplace", dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + # Reference: 
https://github.com/pytorch/pytorch/issues/49224 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=[torch.int8], active_if=TEST_WITH_ASAN), + # TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input) + # We can break the logic of the loop over all possible types but it is OK. + # https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes', + dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace_all_strides', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + ), + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True), + # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952) + UnaryUfuncInfo('acos', + aliases=('arccos', ), + ref=np.arccos, + domain=(-1, 1), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-1, + torch.complex64: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_method_grad', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_inplace_grad', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 
'test_forward_mode_AD', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_inplace_forward_mode_AD', + dtypes=[torch.cdouble], active_if=IS_WINDOWS),)), + # NOTE: the derivative for inplace acosh is not implemented + UnaryUfuncInfo('acosh', + aliases=('arccosh', ), + ref=np.arccosh, + domain=(1, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + # acosh is not defined at x < 1 (real) + reference_numerics_filter=NumericsFilter( + condition=lambda x: (x < 1 if not x.is_complex() else torch.zeros_like(x, dtype=torch.bool)), + safe_val=2)), + BinaryUfuncInfo('add', + # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate + ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ + else np.add(input, np.multiply(alpha, other)), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, + torch.float16, torch.chalf), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_add_sub, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + supports_two_python_scalars=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + ), + skips=( + # boolean alpha not handled properly + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bool,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestCommon', + 'test_numpy_refs', + dtypes=(torch.complex128,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('item', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.item, inp, *args, **kwargs), + ref=np.ndarray.item, + method_variant=None, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf, torch.bool), + supports_out=False, + supports_autograd=False, + error_inputs_func=error_inputs_item, 
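+ # (torch.Tensor.item() returns a plain Python scalar rather than a tensor, consistent with
+ # supports_out=False and supports_autograd=False above.)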
+ sample_inputs_func=sample_inputs_item, + skips=( + # Error testing item function variant + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float32, torch.complex64)), + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: Composite compliance check failed with the above error. + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + # Booleans mismatch: AssertionError: False is not true + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_autocast'), + # Booleans mismatch: AssertionError: False is not true + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake'), + )), + OpInfo('arange', + dtypes=all_types_and(torch.bfloat16, torch.float16), + supports_out=True, + supports_autograd=False, + is_factory_function=True, + error_inputs_func=error_inputs_arange, + sample_inputs_func=sample_inputs_arange, + skips=( + # https://github.com/pytorch/pytorch/issues/81774 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Lazy tensor failures + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), + + # Exception raised from analyzeImpl at ../torch/csrc/jit/ir/alias_analysis.cpp:608 + # We don't have an op for aten::arange but it isn't a special case. + # Argument types: bool, bool, bool, int, int, Device, boo + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + + # Captured graph does not contain aten::arange (succeeds on complex!) + # g: graph(): + # %25 : Long(1, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value={1}]() + # return (%25) + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('cauchy', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.cauchy_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.cauchy_, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_cauchy, + error_inputs_func=error_inputs_cauchy, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('exponential', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.exponential_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.exponential_, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_exponential, + error_inputs_func=error_inputs_exponential, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.expectedFailure, "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.expectedFailure, "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('geometric', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.geometric_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.geometric_, + dtypes=floating_types_and(torch.float16, torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64, torch.uint8), + supports_out=False, + supports_autograd=False, + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_geometric, + error_inputs_func=error_inputs_geometric, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('log_normal', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.log_normal_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.log_normal_, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_log_normal, + error_inputs_func=error_inputs_log_normal, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('normal', + variant_test_name='in_place', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.normal_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.normal_, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_normal, + error_inputs_func=error_inputs_normal, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + )), + OpInfo('uniform', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.uniform_, inp, *args, **kwargs), + method_variant=None, + inplace_variant=torch.Tensor.uniform_, + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + supports_out=False, + supports_autograd=False, + is_factory_function=False, + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_uniform, + error_inputs_func=error_inputs_uniform, + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # aten.uniform was not decomposed + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + BinaryUfuncInfo('clamp_max', + ref=_clamp_max_numpy, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_rhs_python_scalar=False, + supports_fwgrad_bwgrad=True, + rhs_make_tensor_kwargs=dict(exclude_zero=False), + skips=( + # RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + # dispatch to lazy test failed + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'), + )), + BinaryUfuncInfo('clamp_min', + ref=_clamp_min_numpy, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_rhs_python_scalar=False, + supports_fwgrad_bwgrad=True, + rhs_make_tensor_kwargs=dict(exclude_zero=False), + skips=( + # RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + # dispatch to lazy test failed + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'), + )), + BinaryUfuncInfo('mul', + aliases=('multiply',), + dtypes=all_types_and_complex_and(torch.chalf, torch.float16, torch.bfloat16, torch.bool), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + error_inputs_sparse_func=error_inputs_sparse_mul, + 
sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_bsc)), + BinaryUfuncInfo('sub', + # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate + ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)), + aliases=('subtract',), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_add_sub, + supports_two_python_scalars=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0), + torch.bfloat16: tol(atol=1e-5, rtol=5e-3), + torch.complex32: tol(atol=1e-5, rtol=1e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_comprehensive', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_quick', device_type='cpu'), + ), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.uint8,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + )), + OpInfo('addmm', + # This addmm OpInfo is for when alpha and beta are not both equal to 1. + # alpha=beta=1 is tested in the following opinfo, because that special case will + # trigger addmm being decomposed by a jit pass. + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=sample_inputs_addmm, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('addmm', + # When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add. 
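+ # (addmm computes beta * input + alpha * (mat1 @ mat2), so the alpha=beta=1 case reduces to
+ # input + mat1.mm(mat2); hence the autodiff_nonfusible_nodes of 'aten::add' and 'aten::mm'
+ # below.)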
+ variant_test_name='decomposed', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + autodiff_nonfusible_nodes=['aten::add', 'aten::mm'], + sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1), + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + # https://github.com/pytorch/pytorch/issues/71784 + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.float16,)), + )), + OpInfo('addmv', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, + torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-5, rtol=3e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ], + sample_inputs_func=sample_inputs_addmv), + OpInfo('addbmm', + ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M), + np.multiply(np.asarray(alpha, dtype=batch1.dtype), + np.sum(np.matmul(batch1, batch2), axis=0))), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05), + torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_numpy_refs'), + # MPS has slightly worse precision. Is this acceptable? 
+ DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-04), + torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_numpy_ref_mps'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + 'TestConsistency', + 'test_output_match', + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.5e-05, rtol=1e-05)}), + 'TestCommon', 'test_out'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=6e-3, rtol=6e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ], + skips=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + # addbmm does not correctly warn when resizing out= inputs + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # https://github.com/pytorch/pytorch/issues/55907 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + ), + sample_inputs_func=sample_inputs_addbmm), + OpInfo('baddbmm', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, + torch.bfloat16), + backward_dtypesIfCUDA=floating_types_and(torch.float16, + *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else [], + torch.complex64, torch.complex128), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestMathBits', 'test_conj_view', device_type='cuda'), + ], + sample_inputs_func=sample_inputs_baddbmm, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('dot', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_dot_vdot, + error_inputs_func=error_inputs_dot_vdot, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('vdot', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_dot_vdot, + error_inputs_func=error_inputs_dot_vdot, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('bmm', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + 
*[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + "TestCommon", "test_out") + ), + sample_inputs_func=sample_inputs_bmm), + OpInfo('mv', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_mv), + OpInfo('addr', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + # Reference: https://github.com/pytorch/pytorch/issues/50747 + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/50747 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)), + ), + sample_inputs_func=sample_inputs_addr, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('addcmul', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # TODO: update sample inputs with for_inplace_variant kwarg to support this test + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + ), + sample_inputs_func=sample_inputs_addcmul_addcdiv, + reference_inputs_func=partial( + reference_inputs_elementwise_ternary, sample_inputs_func=reference_inputs_addcmul_addcdiv)), + OpInfo('addcdiv', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # TODO: update sample inputs with for_inplace_variant kwarg to support this test + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_variant_consistency_eager'), + ), + sample_inputs_func=sample_inputs_addcmul_addcdiv, + reference_inputs_func=partial( + reference_inputs_elementwise_ternary, sample_inputs_func=reference_inputs_addcmul_addcdiv)), + UnaryUfuncInfo('asin', + aliases=('arcsin', ), + ref=np.arcsin, + domain=(-1, 1), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}), + 'TestUnaryUfuncs', device_type='cuda'), + precisionOverride({torch.bfloat16: 1e-2}), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + 
device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + # NOTE: derivative for inplace asinh is not implemented + UnaryUfuncInfo('asinh', + aliases=('arcsinh', ), + ref=np.arcsinh, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('atan', + aliases=('arctan', ), + ref=np.arctan, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + active_if=TEST_WITH_ROCM, device_type='cuda', dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=TEST_WITH_ROCM, device_type='cuda', dtypes=[torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + BinaryUfuncInfo('atan2', + aliases=('arctan2',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + supports_rhs_python_scalar=False, + skips=( + # Incorrectly attempts to use a scalar for the second argument + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + )), + UnaryUfuncInfo('atanh', + aliases=('arctanh', ), + ref=np.arctanh, + domain=(-1, 1), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=TEST_WITH_ROCM, device_type='cuda', dtypes=[torch.complex128]), + )), + OpInfo('allclose', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + ref=np.allclose, + supports_autograd=False, + supports_forward_ad=False, + sample_inputs_func=sample_inputs_allclose, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + ), + supports_out=False), + OpInfo('broadcast_to', + ref=np.broadcast_to, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_broadcast_to), + OpInfo('broadcast_shapes', + op=torch.broadcast_shapes, + ref=np.broadcast_shapes if np.lib.NumpyVersion(np.__version__) >= '1.20.0' else None, + dtypes=_dispatch_dtypes((torch.float32,)), + supports_out=False, + supports_gradgrad=False, + assert_autodiffed=False, + supports_autograd=False, + supports_scripting=False, + sample_inputs_func=sample_inputs_broadcast_shapes, + skips=( + # https://github.com/pytorch/pytorch/issues/64997 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # skip dtype tests since broadcast_shape is not device dependent. + # having dtypes limited to torch.float32 would cause test_dtypes to report unexpected success + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('broadcast_tensors', + ref=np.broadcast_arrays, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_broadcast_tensors, + reference_inputs_func=reference_inputs_broadcast_tensors, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # https://github.com/pytorch/pytorch/issues/64997 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + )), + OpInfo('block_diag', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # Default batching rule in core doesn't work for ops with TensorList args + check_batched_forward_grad=False, + skips=( + # https://github.com/pytorch/pytorch/issues/64997 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + sample_inputs_func=sample_inputs_block_diag), + UnaryUfuncInfo('bitwise_not', + ref=np.bitwise_not, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.invert, + supports_autograd=False), + BinaryUfuncInfo('bitwise_left_shift', + op=torch.bitwise_left_shift, + dtypes=integral_types(), + dtypesIfCUDA=integral_types(), + operator_variant=operator.lshift, + inplace_operator_variant=operator.ilshift, + supports_autograd=False, + supports_one_python_scalar=True, + rhs_make_tensor_kwargs=dict(low=0), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + )), + BinaryUfuncInfo('bitwise_right_shift', + op=torch.bitwise_right_shift, + dtypes=integral_types(), + dtypesIfCUDA=integral_types(), + operator_variant=operator.rshift, + inplace_operator_variant=operator.irshift, + supports_autograd=False, + supports_one_python_scalar=True, + rhs_make_tensor_kwargs=dict(low=0), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('combinations', + op=torch.combinations, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + supports_out=False, + sample_inputs_func=sample_inputs_combinations), + OpInfo('cartesian_prod', + op=torch.cartesian_prod, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_cartesian_prod, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 + DecorateInfo(unittest.expectedFailure, + 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + )), + 
OpInfo('cdist', + dtypes=floating_types(), + supports_out=False, + supports_gradgrad=False, + assert_autodiffed=False, + sample_inputs_func=sample_inputs_cdist), + UnaryUfuncInfo('ceil', + ref=np.ceil, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + ), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + OpInfo('cholesky', + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_cholesky, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],), + OpInfo('cholesky_inverse', + dtypes=floating_and_complex_types(), + backward_dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + check_batched_gradgrad=True, + sample_inputs_func=sample_inputs_linalg_cholesky_inverse, + gradcheck_wrapper=gradcheck_wrapper_triangular_input_real_positive_diagonal, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # Strides are not the same! Original strides were ((4, 2, 1),) and strides are now ((4, 1, 2),) + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),)), + OpInfo('cholesky_solve', + op=torch.cholesky_solve, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_cholesky_solve, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), + OpInfo('chunk', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_chunk, + reference_inputs_func=reference_inputs_chunk, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('unsafe_chunk', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_chunk, + check_batched_forward_grad=False, + reference_inputs_func=reference_inputs_chunk, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('clone', + ref=np.copy, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_clone_contiguous, + reference_inputs_func=reference_inputs_clone_contiguous, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + # TypeError: _copy_dispatcher() got an unexpected keyword argument 'memory_format' + # (NumPy reference needs to be extended with memory_format) + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'), + ),), + OpInfo('contiguous', + op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_clone_contiguous, + reference_inputs_func=reference_inputs_clone_contiguous, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_fusible_nodes=['aten::contiguous'], + 
assert_jit_shape_analysis=True, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + OpInfo('sum_to_size', + op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_sum_to_size, + error_inputs_func=error_inputs_sum_to_size, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float,)), + )), + OpInfo('clamp', + aliases=('clip',), + ref=_clamp_numpy, + dtypes=all_types_and(torch.bfloat16, torch.half), + sample_inputs_func=sample_inputs_clamp, + reference_inputs_func=partial(reference_inputs_elementwise_ternary, sample_inputs_func=sample_inputs_clamp), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # NNC appear to not handle boolean clamp + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bool,)), + )), + UnaryUfuncInfo('positive', + ref=np.positive, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + ), + UnaryUfuncInfo('conj', + ref=np.conj, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, + torch.half, torch.chalf), + supports_sparse=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + supports_out=False), + UnaryUfuncInfo('conj_physical', + decomp_aten_name='_conj_physical', + ref=np.conj, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, + torch.half, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + # RuntimeError: inputSet && outputSet + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118, + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )), + DecorateInfo(unittest.skip("Skipped! 
conj_physical_ not implemented for sparse"), + 'TestSparseUnaryUfuncs', 'test_inplace'), + )), + OpInfo('resolve_conj', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_view_as_real, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo('resolve_neg', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_view_as_real, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo('view_as_real', + dtypes=complex_types(), + supports_forward_ad=True, + supports_out=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_view_as_real, + test_conjugated_samples=False, + ), + OpInfo('view_as_complex', + dtypes=floating_types_and(torch.half), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + test_neg_view=False, + sample_inputs_func=sample_inputs_view_as_complex, + skips=( + # RuntimeError: Tensor must have a last dimension with stride 1 + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"), + # RuntimeError: "eq_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.half,)), + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + BinaryUfuncInfo('complex', + dtypes=floating_types_and(torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + error_inputs_func=error_inputs_complex, + skips=( + # Tests don't account for complex's type promotion semantics + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),)), + BinaryUfuncInfo('copysign', + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + promotes_int_to_float=True, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True), + OpInfo('corrcoef', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_corrcoef, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + ), + supports_out=False), + UnaryUfuncInfo('cos', + ref=np.cos, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + handles_large_floats=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), + # This fails on CUDA but passes on 
ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! + # Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + )), + UnaryUfuncInfo('cosh', + ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (6000,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + )), + OpInfo('cov', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_cov, + error_inputs_func=error_inputs_cov, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + # Float did not match double + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), + # Jacobian mismatch + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.skip("Barely fails"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507) + # RuntimeError: + # undefined value tensor: + # File "", line 3 + # def the_method(i0): + # return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True)) # noqa: B950 + # ~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('cross', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_cross, + supports_fwgrad_bwgrad=True, + supports_out=True, + supports_forward_ad=True), + OpInfo('cumsum', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # cumsum does not handle correctly out= dtypes + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + sample_inputs_func=sample_inputs_cumulative_ops), + OpInfo('cumprod', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # cumprod does not handle correctly out= dtypes + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + # gradgradcheck fails in fast_mode=True: #56275 + sample_inputs_func=sample_inputs_cumprod, + gradcheck_fast_mode=False), + OpInfo('cummax', + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('cummin', + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + UnaryUfuncInfo('deg2rad', + ref=np.radians, + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + 
promotes_int_to_float=True), + OpInfo('diff', + op=torch.diff, + # np.diff has np._NoValue as default values for prepend and append, compare_with_reference breaks if prepend/append + # are set as None when converting to numpy + ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: ( + np.diff(input, n, dim, np._NoValue if prepend is None else prepend, np._NoValue if append is None else append) + ), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diff, + error_inputs_func=error_inputs_diff, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + )), + BinaryUfuncInfo('div', + aliases=('divide',), + variant_test_name='no_rounding_mode', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + promotes_int_to_float=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + assert_autodiffed=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True),), + BinaryUfuncInfo('div', + aliases=('divide',), + variant_test_name='trunc_rounding', + dtypes=all_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="trunc")), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + assert_autodiffed=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True), + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + skips=( + # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), + )), + BinaryUfuncInfo('div', + aliases=('divide',), + variant_test_name='floor_rounding', + dtypes=all_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="floor")), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + assert_autodiffed=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True), + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + skips=( + # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), + )), + BinaryUfuncInfo('true_divide', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + promotes_int_to_float=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True)), + OpInfo('equal', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + 
ref=lambda input, other: (input == other).all(), + sample_inputs_func=sample_inputs_equal, + supports_autograd=False, + supports_tracing=False, + skips=( + )), + UnaryUfuncInfo('exp', + ref=np_unary_ufunc_integer_promotion_wrapper(np.exp), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + OpInfo('expand', + op=lambda self, shape: self.expand(shape), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_expand, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + OpInfo('expand_as', + op=lambda self, other: self.expand_as(other), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_expand_as, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),), + ), + OpInfo('diag', + ref=np.diag, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_diag, + error_inputs_func=error_inputs_diag), + OpInfo('diag_embed', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + reference_inputs_func=reference_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed), + OpInfo('diagonal', + aten_backward_name='diagonal_backward', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + reference_inputs_func=reference_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed), + OpInfo('diagonal_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + reference_inputs_func=reference_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed), + OpInfo('diagonal_scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + 
sample_inputs_func=sample_inputs_diagonal_scatter), + BinaryUfuncInfo('eq', + ref=np.equal, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + always_returns_bool=True, + supports_autograd=False, + sample_inputs_func=sample_inputs_comparison_ops, + skips=( + )), + BinaryUfuncInfo('fmax', + op=torch.fmax, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + )), + BinaryUfuncInfo('fmin', + op=torch.fmin, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + )), + BinaryUfuncInfo('fmod', + ref=np.fmod, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=None, + rhs_make_tensor_kwargs={'exclude_zero': True}, + decorators=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_contig_vs_every_other', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_non_contig', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + )), + BinaryUfuncInfo('remainder', + ref=np.remainder, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=None, + operator_variant=operator.mod, + inplace_operator_variant=operator.imod, + supports_one_python_scalar=True, + rhs_make_tensor_kwargs={'exclude_zero': True}, + decorators=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_contig_vs_every_other', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_non_contig', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bfloat16,)), + # Fails on XLA + # False is not true : Tensors failed to compare as equal! 
+ # Attempted to compare equality of tensors with different dtypes + DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), + )), + UnaryUfuncInfo('frac', + ref=lambda x: np.modf(x)[0], + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)), + # 76047 + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.bfloat16, torch.float32, torch.float64)), + )), + OpInfo('stft', + decorators=[ + skipCPUIfNoFFT, + DecorateInfo(unittest.skip("Skipped! stft does not match the native function"), + 'TestJit', 'test_variant_consistency_jit'), + ], + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_stft, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + ), + OpInfo('istft', + dtypes=complex_types(), + sample_inputs_func=sample_inputs_istft, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + decorators=( + DecorateInfo(unittest.skip("Skipped! 
istft does not match the native function"), + 'TestJit', 'test_variant_consistency_jit'), + ), + skips=( + skipCPUIfNoFFT, + # gradcheck fails on ROCm (gh-68429) + # grad is computed improperly (probably for weights tensor) + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), + # Pre-existing condition (calls .item); needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + )), + UnaryUfuncInfo('floor', + ref=np.floor, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + ), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + OpInfo('flip', + op=torch.flip, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_flip, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('fliplr', + op=torch.fliplr, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_fliplr_flipud, + error_inputs_func=error_inputs_fliplr, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('flipud', + op=torch.flipud, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_fliplr_flipud, + error_inputs_func=error_inputs_flipud, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('sparse.sampled_addmm', + dtypes=floating_and_complex_types(), + supports_autograd=True, + sample_inputs_func=sample_inputs_sparse_sampled_addmm, + decorators=[ + skipCUDAIf(not ((_get_torch_cuda_version() >= (11, 3)) + or (_get_torch_rocm_version() >= (5, 2))), + "cusparseSDDMM was added in 11.2.1"), + skipCPUIfNoMklSparse, ], + skips=( + # NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # RuntimeError: Sparse CSR tensors do not have strides. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), + # RuntimeError: sampled_addmm: Expected result to have sparse csr layout, but got Strided + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: unsupported memory format option Preserve + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: sparse_mask does not support automatic differentiation for outputs with complex dtype + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + # RuntimeError: sparse_mask does not support automatic differentiation for outputs with complex dtype. + # RuntimeError: Sparse CSR tensors do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # NotImplementedError: Could not run 'aten::sparse_sampled_addmm' with arguments from the 'SparseCsrMeta' backend. + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_crossref_backward_no_amp'), + )), + OpInfo('sparse.mm', + dtypes=floating_types_and(torch.bfloat16), + variant_test_name='reduce', + supports_autograd=True, + supports_out=False, + supports_gradgrad=False, + supports_forward_ad=False, + sample_inputs_func=sample_inputs_sparse_mm_reduce, + decorators=[onlyCPU], + skips=( + # NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # RuntimeError: Sparse CSR tensors do not have strides. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: unsupported memory format option Preserve + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # RuntimeError: Sparse CSR tensors do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_fail_gradgrad'), + # NotImplementedError: Could not run 'aten::_sparse_mm_reduce_impl' with arguments from the 'SparseCsrMeta' backend + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_meta_outplace'), + )), + UnaryUfuncInfo('i0', + ref=np_unary_ufunc_integer_promotion_wrapper( + scipy.special.i0) if TEST_SCIPY else None, + aliases=('special.i0',), + decorators=(precisionOverride({torch.bfloat16: 3e-1, + torch.float16: 5e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + backward_dtypes=floating_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_i0_i1, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.int8,)), + )), + BinaryUfuncInfo('floor_divide', + ref=_floor_divide_np, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_autograd=False, + rhs_make_tensor_kwargs=dict(exclude_zero=True), + supports_two_python_scalars=True, + skips=( + # AssertionError: Results of original model and exported/imported version of model differed + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + # bfloat16 floor_divide compared with a float32 reference works inconsistently + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + dtypes=(torch.bfloat16,)), + # int8 floor divide has different results for -128 // -1 vs. NumPy + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=(torch.int8,)), + # The following tests fail on some jobs + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', + dtypes=(torch.float16,)), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=5e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + )), + UnaryUfuncInfo('frexp', + op=torch.frexp, + ref=np.frexp, + dtypes=floating_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half), + # skip testing torch.frexp as it is not supported by ROCm platform yet + decorators=[], + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # skips the below tests as torch.frexp returns tuple-like (mantissa, exponent) as outputs, + # while these tests currently require the output to be a single tensor. + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_non_contig_expand'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), + + # skips test_reference_numerics due to an error in Windows CI. 
+ # np.frexp returns the exponent as np.intc dtype on the Windows platform, + # and np.intc does not have a corresponding torch dtype + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=IS_WINDOWS), + )), + UnaryUfuncInfo('log1p', + ref=np.log1p, + aliases=('special.log1p',), + domain=(-1, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 1e-1}),), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True, + promotes_int_to_float=True), + BinaryUfuncInfo('ge', + ref=np.greater_equal, + aliases=('greater_equal',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('geqrf', + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_qr_geqrf, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + supports_autograd=False, + skips=( + # FIXME: geqrf can't forward with complex inputs that require grad + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + # Strides are not the same! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + )), + BinaryUfuncInfo('gt', + ref=np.greater, + aliases=('greater',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + UnaryUfuncInfo('imag', + ref=np.imag, + dtypes=complex_types_and(torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + # RuntimeError: view_as_real doesn't work on unresolved conjugated tensors. + check_batched_forward_grad=False, + skips=( + # Skip since real and imag don't have out variants. + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), + )), + OpInfo('gradient', + dtypes=floating_and_complex_types_and(torch.int8, torch.int16, + torch.int32, torch.int64, + torch.bfloat16, torch.half), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # the following tests give a runtime error with an undefined value tensor + # see discussion: https://github.com/pytorch/pytorch/issues/56660 + # RuntimeError: + # Arguments for call are not valid.
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # noqa: B950 + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + ), + supports_inplace_autograd=False, + sample_inputs_func=sample_inputs_gradient, + error_inputs_func=error_inputs_gradient), + OpInfo('isin', + dtypes=all_types(), + dtypesIfCUDA=all_types_and(torch.half), + supports_autograd=False, + sample_inputs_func=sample_inputs_isin), + OpInfo('kthvalue', + dtypes=all_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_kthvalue, + error_inputs_func=error_inputs_kthvalue), + BinaryUfuncInfo('le', + ref=np.less_equal, + aliases=('less_equal',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('linspace', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_linspace, + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + OpInfo('linspace', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_linspace_tensor_overload, + variant_test_name="tensor_overload", + skips=( + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + OpInfo('logspace', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_logspace, + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + OpInfo('logspace', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_logspace_tensor_overload, + variant_test_name="tensor_overload", + skips=( + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + UnaryUfuncInfo('log', + ref=np.log, + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.chalf), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + # log(z)->-inf for |z|->0 + reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), + UnaryUfuncInfo('log10', + ref=np.log10, + domain=(0, None), + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + # log10(z)->-inf for |z|->0 + reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), + UnaryUfuncInfo('log2', + ref=np.log2, + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-1}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + ), + # log2(z)->-inf for |z|->0 + 
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), + BinaryUfuncInfo('ldexp', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_inplace_autograd=False, + promotes_int_to_float=True, + supports_out=True, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: mul(): functions with out=... arguments don't support + # automatic differentiation, but one of the arguments requires grad + # https://github.com/pytorch/pytorch/issues/68966 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.complex64: tol(atol=1e-05, rtol=1e-05) + }), + 'TestCommon', device_type='cpu', + ), + ], ), + BinaryUfuncInfo('logaddexp', + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + skips=( + # TODO: FIXME: RuntimeError: not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), + )), + OpInfo('logaddexp2', + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_logaddexp), + UnaryUfuncInfo('logical_not', + ref=np.logical_not, + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 5e-1}),), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + skips=( + # The function variant always returns BoolTensor + # while the inplace variant preserves the input dtype. 
+ # >>> t = torch.randn(3) + # >>> torch.logical_not(t) + # tensor([False, False, False]) + # >>> torch.logical_not(t).dtype + # torch.bool + # >>> t.logical_not_().dtype + # torch.float32 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), + )), + BinaryUfuncInfo('lt', + ref=np.less, + aliases=('less',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('lu_unpack', + op=torch.lu_unpack, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=(skipCPUIfNoLapack,), + sample_inputs_func=sample_inputs_lu_unpack), + OpInfo('lu', + op=torch.lu, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # we skip jit tests because `lu` is a torch function + # RuntimeError: + # 'Tensor (inferred)' object has no attribute or method 'lu'.: + # File "", line 3 + # def the_method(i0): + # return i0.lu(True, True) + # ~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError not raised: Expected RuntimeError when calling with input.device=cpu and out.device=cuda + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
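+ # For context, a sketch of the resize warning that test_out_warning checks for
+ # (the op and shapes below are arbitrary and the exact wording may vary):
+ # >>> out = torch.empty(2)
+ # >>> torch.add(torch.ones(3), torch.ones(3), out=out)
+ # UserWarning: An output with one or more elements was resized since it had
+ # shape [2], which does not match the required output shape [3]. ...
+ # For `lu` this warning is currently not emitted, hence the expectedFailure below.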
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('lu_solve', + op=torch.lu_solve, + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_lu_solve, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Tests different backward paths"), + "TestCommon", "test_floating_inputs_are_differentiable"),), + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver]), + OpInfo('masked_fill', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_masked_fill, + error_inputs_func=error_inputs_masked_fill, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + supports_out=False), + OpInfo('masked_scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_scatter, + error_inputs_func=error_inputs_masked_scatter, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_out=False, + skips=( + )), + OpInfo('masked_select', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_masked_select, + error_inputs_func=error_inputs_masked_select, + skips=( + # Compiler issue on ROCm. 
Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + )), + OpInfo('matrix_exp', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + aliases=('linalg.matrix_exp',), + sample_inputs_func=sample_inputs_matrix_exp, + # Needs to construct a 2nx2n matrix by copy_ ing into it + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # mexp does not support bf16 and fp16 + DecorateInfo(unittest.skip('Skipped!'), 'TestInductorOpInfo', 'test_comprehensive', + dtypes=[torch.half], device_type="cpu"), + ), + supports_out=False, + ), + OpInfo('matmul', + aliases=('linalg.matmul',), + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + sample_inputs_func=partial(sample_inputs_matmul, is_rmatmul=False), + decorators=[ + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + # ROCm intermittently fails the test with standard atol/rtol + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), + 'TestCommon', 'test_noncontiguous_samples', device_type='cuda', + active_if=TEST_WITH_ROCM), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), + 'TestCommon', 'test_out', device_type='cuda', + active_if=TEST_WITH_ROCM), + # mv for the sample with shapes (S, S, M, M), (M,) has some variance in the + # backward on CPU + DecorateInfo(toleranceOverride({torch.float32: tol(atol=0, rtol=1e-5)}), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cpu'), + DecorateInfo( + toleranceOverride({ + torch.float32: tol(atol=1e-5, rtol=1e-5), + torch.complex64: tol(atol=1e-5, rtol=1e-5), + }), + "TestDecomp", "test_comprehensive", device_type="cuda", + ), + ], + skips=( + # Strides are not the same! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # https://github.com/pytorch/pytorch/issues/67470 + DecorateInfo(unittest.skip("67470!"), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cpu', dtypes=(torch.long,)), + # AssertionError: False is not true : Tensors failed to compare as equal! 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', + device_type='xla', dtypes=(torch.long,)), + # https://github.com/pytorch/pytorch/issues/71774 + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.long,)), + )), + OpInfo('max', + variant_test_name='reduction_with_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + sample_inputs_func=sample_inputs_max_min_reduction_with_dim, + supports_fwgrad_bwgrad=True, + skips=( + ), + supports_forward_ad=True), + OpInfo('max', + variant_test_name='reduction_no_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_max_min_reduction_no_dim, + skips=( + )), + OpInfo('median', + dtypes=all_types_and(torch.bfloat16, torch.float16), + # TODO: some signatures of median do support out + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_median, + sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), + OpInfo('nanmedian', + dtypes=all_types_and(torch.bfloat16, torch.float16), + # TODO: some signatures of nanmedian do support out + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), + OpInfo('var_mean', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + # TODO: some signatures of var_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('var_mean', + variant_test_name='unbiased', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + # TODO: some signatures of var_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('std_mean', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + # TODO: some signatures of std_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('std_mean', + variant_test_name='unbiased', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + # TODO: some signatures of var_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('meshgrid', + variant_test_name='variadic_tensors', + ref=np.meshgrid, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), + 
sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'), + skips=[ + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # meshgrid is defined in torch.functional to take a + # variadic list of tensors. Variadic parameters are not + # compatible with the normalize operator tests. + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Skip operator schema test because this is a functional and not an operator + DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + ], + supports_out=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False,), + OpInfo('meshgrid', + variant_test_name='list_of_tensors', + # Unlike the variant above, we do not use np.meshgrid as a + # ref since it does not officially support list of numpy + # arrays. + dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), + sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'), + skips=[ + # meshgrid is defined in torch.functional to take a + # variadic list of tensors. Variadic parameters are not + # compatible with the normalize operator tests. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + ], + assert_autodiffed=True, + supports_out=False, + autodiff_nonfusible_nodes=[], + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False,), + OpInfo('min', + variant_test_name='reduction_with_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + sample_inputs_func=sample_inputs_max_min_reduction_with_dim, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + skips=( + )), + OpInfo('min', + variant_test_name='reduction_no_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_max_min_reduction_no_dim, + skips=( + )), + OpInfo('quantile', + dtypes=floating_types(), + sample_inputs_func=sample_inputs_reduction_quantile, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which + # does not have a batching rule in core + check_batched_forward_grad=False), + OpInfo('nanquantile', + dtypes=floating_types(), + sample_inputs_func=sample_inputs_reduction_quantile, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which + # does not have a batching rule in core + check_batched_forward_grad=False), + BinaryUfuncInfo( + 'max', + aliases=('maximum',), + variant_test_name='binary', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + ref=np.maximum, + supports_rhs_python_scalar=False, + skips=( + # Incorrectly attempts to use a scalar for the 
second argument + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), + )), + BinaryUfuncInfo( + 'maximum', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ref=np.maximum, + supports_rhs_python_scalar=False, + skips=( + # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), + )), + BinaryUfuncInfo( + 'min', + aliases=('minimum',), + variant_test_name='binary', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + ref=np.minimum, + supports_rhs_python_scalar=False, + skips=( + # Incorrectly attempts to use a scalar for the second argument + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + )), + BinaryUfuncInfo( + 'minimum', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ref=np.minimum, + supports_rhs_python_scalar=False, + skips=( + # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + ), + ), + BinaryUfuncInfo('logical_and', + ref=np.logical_and, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + always_returns_bool=True, + supports_rhs_python_scalar=False), + BinaryUfuncInfo('logical_or', + ref=np.logical_or, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + always_returns_bool=True, + supports_rhs_python_scalar=False), + BinaryUfuncInfo('logical_xor', + ref=np.logical_xor, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + always_returns_bool=True, + supports_rhs_python_scalar=False, + skips=( + )), + BinaryUfuncInfo('bitwise_and', + ref=np.bitwise_and, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.and_, + inplace_operator_variant=operator.iand, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # RuntimeError: "bitwise_and_cuda" not implemented for 'Half' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', + 'test_type_promotion', device_type='cuda'), + )), + BinaryUfuncInfo('bitwise_or', + ref=np.bitwise_or, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.or_, + inplace_operator_variant=operator.ior, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # TODO: FIXME: RuntimeError: "bitwise_or_cuda" not implemented for 'Half' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + )), + BinaryUfuncInfo('bitwise_xor', + ref=np.bitwise_xor, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.xor, + inplace_operator_variant=operator.ixor, + supports_autograd=False, + 
supports_one_python_scalar=True, + skips=( + # TODO: FIXME: RuntimeError: "bitwise_xor_cuda" not implemented for 'Half' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + )), + BinaryUfuncInfo('heaviside', + ref=lambda a, b: ( + # necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64 + np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype == np.int64 else np.heaviside(a, b) + ), + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + supports_autograd=False, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: heaviside is not yet implemented for tensors with different dtypes. + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + # PyTorch's heaviside does not appear to propagate NaNs + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + )), + BinaryUfuncInfo('lcm', + ref=np.lcm, + dtypes=integral_types_and(), + supports_autograd=False, + supports_rhs_python_scalar=False), + BinaryUfuncInfo('gcd', + ref=np.gcd, + dtypes=integral_types_and(), + supports_autograd=False, + supports_rhs_python_scalar=False, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.int8,)),)), + BinaryUfuncInfo('isclose', + ref=np.isclose, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_isclose, + error_inputs_func=error_inputs_isclose, + supports_autograd=False, + supports_out=False, + supports_rhs_python_scalar=False, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_numpy_refs', dtypes=(torch.complex128,)), + # RuntimeError: Short did not match Int + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + )), + # `softmax` supports different dtypes based on whether `dtype` argument, + # is passed or not. Hence two OpInfo entries, one with dtype and other without. 
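+ # Roughly, for an integer input (output values below are illustrative only):
+ # >>> x = torch.arange(3)
+ # >>> torch.softmax(x, dim=0)            # fails: no integer kernel unless `dtype` is given
+ # >>> torch.softmax(x, dim=0, dtype=torch.float32)
+ # tensor([0.0900, 0.2447, 0.6652])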
+ # https://github.com/pytorch/pytorch/issues/68752 + OpInfo('softmax', + aliases=('special.softmax', 'nn.functional.softmax',), + aten_name='softmax', + aten_backward_name='_softmax_backward_data', + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_softmax_variant, + assert_jit_shape_analysis=True, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=True), + OpInfo('softmax', + aliases=('special.softmax', 'nn.functional.softmax',), + variant_test_name="with_dtype", + aten_name='softmax', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=True), + OpInfo( + '_softmax_backward_data', + op=torch.ops.aten._softmax_backward_data, + aten_name='_softmax_backward_data', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_softmax_backward_data, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cpu'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ), + ), + # `softmin` supports different dtypes based on whether `dtype` argument, + # is passed or not. Hence two OpInfo entries, one with dtype and other without. + # https://github.com/pytorch/pytorch/issues/68752 + OpInfo('nn.functional.softmin', + aten_name='softmin', + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_softmax_variant, + assert_jit_shape_analysis=False, + assert_autodiffed=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('nn.functional.softmin', + variant_test_name="with_dtype", + aten_name='softmin', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), + assert_autodiffed=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo( + "nn.functional.cross_entropy", + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_cross_entropy, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-3)}), + "TestJit", + "test_variant_consistency_jit", + device_type="cpu", + ), + ), + skips=( + # AssertionError: False is not true : Scalars failed to compare as equal! 
0 != 1536 + # test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked + # 1536 bytes CUDA memory on device 0 + DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ) + ), + OpInfo('nn.functional.normalize', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_normalize, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True), + OpInfo('aminmax', + ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)), + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + decorators=(onlyNativeDeviceTypes,), + supports_autograd=False, + sample_inputs_func=sample_inputs_aminmax, + error_inputs_func=error_inputs_aminmax_amax_amin), + OpInfo('as_strided', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided, + skips=( + # Note: This xfail is fine -- it's inherent to how as_strided works + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), + # AssertionError: False is not true : Scalars failed to compare as equal! + DecorateInfo(unittest.skip("Errors when storage_offset is included"), + 'TestCommon', 'test_variant_consistency_eager'), + # Not close + DecorateInfo(unittest.skip("Errors when storage_offset is included"), + 'TestCommon', 'test_complex_half_reference_testing'), + # Not close + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Numerous errors"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Numerous errors"), 'TestBwdGradients'), + )), + OpInfo('as_strided', + variant_test_name='partial_views', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided_partial_views, + skips=( + # Note: This xfail is fine -- it's inherent to how as_strided works + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), + # RuntimeError: This operator is not Composite Compliant: the + # storage_offset of the tensor was modified directly without + # going through the PyTorch dispatcher. 
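+ # A sketch of the kind of view the partial_views samples exercise (base values,
+ # sizes and strides here are arbitrary): a nonzero storage_offset re-anchors the
+ # view inside the same storage, and the composite-compliance failures noted above
+ # arise when that offset is adjusted without going through the dispatcher.
+ # >>> base = torch.arange(6.)
+ # >>> v = base.as_strided((2, 2), (2, 1), storage_offset=1)
+ # >>> v
+ # tensor([[1., 2.],
+ #         [3., 4.]])
+ # >>> v.storage_offset()
+ # 1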
+ DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + + # These fail because the test changes the input's in-memory layout + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_inplace_grad'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_inplace_gradgrad'), + DecorateInfo(unittest.expectedFailure, 'TestProxyTensorOpInfo', + 'test_make_fx_symbolic_exhaustive_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + # Fail but are also flaky + DecorateInfo(unittest.skip("Test changes in memory layout"), 'TestMathBits'), + DecorateInfo(unittest.skip("Modifies input strides and storage_offset"), 'TestCommon', + 'test_non_standard_bool_values'), + # RuntimeError: setStorage: sizes [2, 2], strides [1, 2], storage offset 10, and itemsize 2 requiring a + # storage size of 28 are out of bounds for storage of size 20 + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace_all_strides'), + )), + OpInfo('as_strided_scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided_scatter, + error_inputs_func=error_inputs_as_strided_scatter, + skips=( + DecorateInfo(unittest.skip('Works for int64, fails for everything else'), 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950 + DecorateInfo(unittest.skip('Fails in most cases, passes on LAZY for some reason'), 'TestCommon', 'test_variant_consistency_eager'), # noqa: B950 + DecorateInfo(unittest.skip('Fails on cuda + rocm'), 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.skip('Passes on complex128 and float64 only'), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # AssertionError: Tensor-likes are not close! 
(new_empty_strided.default) + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'),)), + OpInfo('native_layer_norm', + aten_name='native_layer_norm', + ref=reference_native_layer_norm, + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + assert_jit_shape_analysis=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_native_layer_norm, + error_inputs_func=error_inputs_native_layer_norm, + skips=( + # IndexError: tuple index out of range + DecorateInfo(unittest.skip('Skipped!'), 'TestFwdGradients', 'test_forward_mode_AD'), + # Tests fail when weight=None and bias is defined + # https://github.com/pytorch/pytorch/issues/79705 + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), + # JIT test also tries to compute double backward, which fails + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + )), + OpInfo('native_batch_norm', + aten_name='native_batch_norm', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_native_batch_norm, + skips=( + # NotImplementedError: Could not run + # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), + # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), + # Problem with _get_numerical_jacobian + # IndexError: tuple index out of range + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # https://github.com/pytorch/pytorch/issues/85960 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + # AssertionError: Booleans mismatch: True is not False + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_autocast'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), + "TestCompositeCompliance", "test_forward_ad"), + ) + ), + OpInfo('_native_batch_norm_legit', + aten_name='_native_batch_norm_legit', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs__native_batch_norm_legit, + skips=( + # NotImplementedError: Could not run + # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), + # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), + # Problem with _get_numerical_jacobian + # IndexError: tuple index out of range + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # https://github.com/pytorch/pytorch/issues/85960 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), + "TestCompositeCompliance", "test_forward_ad"), + ) + ), + OpInfo('nn.functional.cosine_similarity', + aten_name="cosine_similarity", + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_cosine_similarity), + OpInfo('nn.functional.adaptive_avg_pool1d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_avg_pool1d, + sample_inputs_func=sample_inputs_adaptive_avg_pool1d), + OpInfo('nn.functional.adaptive_avg_pool2d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, int]'. : + # File "", line 3 + # def the_method(i0): + # return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_avg_pool2d, + sample_inputs_func=sample_inputs_adaptive_avg_pool2d), + OpInfo('nn.functional.adaptive_avg_pool3d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, NoneType, NoneType]'. 
: + # File "", line 3 + # + # def the_method(i0): + # return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + # + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_avg_pool3d, + sample_inputs_func=sample_inputs_adaptive_avg_pool3d), + OpInfo('nn.functional.adaptive_max_pool1d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_max_pool1d, + sample_inputs_func=sample_inputs_adaptive_max_pool1d), + OpInfo('nn.functional.adaptive_max_pool2d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, int]'. : + # File "", line 3 + # def the_method(i0): + # return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_max_pool2d, + sample_inputs_func=sample_inputs_adaptive_max_pool2d), + OpInfo('nn.functional.adaptive_max_pool3d', + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, NoneType, NoneType]'. 
: + # File "", line 3 + # + # def the_method(i0): + # return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + # + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_max_pool3d, + sample_inputs_func=sample_inputs_adaptive_max_pool3d), + OpInfo('nn.functional.avg_pool1d', + aten_name='avg_pool1d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_avg_pool1d, + sample_inputs_func=sample_inputs_avgpool1d), + OpInfo('nn.functional.avg_pool3d', + aten_name='avg_pool3d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.int64), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_avg_pool3d, + sample_inputs_func=sample_inputs_avgpool3d, + skips=( + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), + )), + OpInfo( + "nn.functional.binary_cross_entropy_with_logits", + aten_name="binary_cross_entropy_with_logits", + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + dtypes=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=sample_inputs_binary_cross_entropy_with_logits, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + 'TestJit', + 'test_variant_consistency_jit', + dtypes=(torch.float32,) + ), + ), + ), + UnaryUfuncInfo( + 'nn.functional.relu', + aten_name="relu", + ref=lambda a: np.where(a <= 0, 0, a), + supports_autograd=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + dtypes=all_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_nn_activation_relu, + supports_out=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True), + OpInfo('nn.functional.conv_transpose1d', + # `ref` for this function is backward of + # corresponding `conv*d` + ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose1d), + aten_name='conv_transpose1d', + aliases=('conv_transpose1d',), + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=sample_inputs_conv_transpose1d, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + 
toleranceOverride({torch.chalf: tol(atol=5e-2, rtol=5e-2), }), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo( + toleranceOverride({torch.float: tol(atol=1.5e-5, rtol=1.5e-5), }), + 'TestCommon', 'test_numpy_ref_mps'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-3, rtol=2e-3), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ), + skips=( + # Reason for Skip: https://github.com/pytorch/pytorch/pull/79694#issuecomment-1186949486 + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.complex64,)), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float,)), + # RuntimeError: "slow_conv2d_cpu_grad_input" not implemented for 'Long' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.int64,)), + ), + supports_out=False,), + OpInfo('nn.functional.conv_transpose2d', + aten_name='conv_transpose2d', + aliases=('conv_transpose2d',), + # `ref` for this function is backward of + # corresponding `conv*d` + ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose2d), + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=sample_inputs_conv_transpose2d, + # Runs very slowly on slow-gradcheck for complex. + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=2e-05, rtol=5e-05), }), + 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=8e-2, rtol=8e-2), }), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-3, rtol=2e-3), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu')], + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: "slow_conv2d_cpu_grad_input" not implemented for 'Long' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.int64,)), + # Reference: https://github.com/pytorch/pytorch/issues/86356 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.double, torch.cdouble)), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + # AssertionError: None mismatch: torch.complex64 is not None + DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', 'test_custom_rules', + dtypes=(torch.complex64, torch.complex128)), + ), + supports_out=False,), + OpInfo('nn.functional.conv_transpose3d', + aten_name='conv_transpose3d', + aliases=('conv_transpose3d',), + # `ref` for this function is backward of + # corresponding `conv*d` + ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose3d), + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and( + torch.float16, torch.chalf, torch.bfloat16), + sample_inputs_func=sample_inputs_conv_transpose3d, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=5e-2, rtol=5e-2), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), + torch.complex64: tol(atol=1.3e-04, rtol=1.3e-05)}), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=2e-04, rtol=2e-04), }), + 'TestCompositeCompliance', 'test_operator', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-06), + torch.complex64: tol(atol=1.3e-04, rtol=1.3e-05)}), + 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=2e-05), }), + 'TestCompositeCompliance', 'test_forward_ad', device_type='cuda', + active_if=TEST_CUDNN), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1e-4)}), + "TestMathBits", "test_conj_view", device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=9e-2, rtol=9e-2), }), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-3, rtol=2e-1), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu')], + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: "slow_conv3d_cpu_grad_input" not implemented for 'Long' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.int64,)), + # Reference: https://github.com/pytorch/pytorch/issues/86356 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.double, torch.cdouble)), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip('Skipped for ROCm!'), 'TestCommon', 'test_complex_half_reference_testing', + dtypes=[torch.complex32], active_if=TEST_WITH_ROCM), + ), + supports_out=False,), + OpInfo('nn.functional.conv1d', + aliases=('conv1d',), + aten_name='conv1d', + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=sample_inputs_conv1d, + error_inputs_func=error_inputs_conv1d, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=5e-2)}), + 'TestCommon', 'test_complex_half_reference_testing' + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-3, rtol=1e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda', + ), + ), + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Ref: https://github.com/pytorch/pytorch/issues/75309 + # AssertionError: None mismatch: torch.complex128 is not None + DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', + 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), + # Ref: https://github.com/pytorch/pytorch/issues/75309 + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), + ), + supports_expanded_weight=True, + supports_out=False,), + OpInfo('nn.functional.conv2d', + aliases=('conv2d',), + aten_name='conv2d', + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=partial(sample_inputs_conv2d), + error_inputs_func=error_inputs_conv2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}), + 'TestCommon', 'test_complex_half_reference_testing', + ), + ), + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. + DecorateInfo(unittest.skip("Works on some configs!"), 'TestJit', 'test_variant_consistency_jit'), + # Ref: https://github.com/pytorch/pytorch/issues/75309 + # AssertionError: None mismatch: torch.complex128 is not None + DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', + 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), + ), + supports_expanded_weight=True, + supports_out=False,), + OpInfo('nn.functional.conv3d', + aliases=('conv3d',), + aten_name='conv3d', + dtypes=floating_and_complex_types_and(torch.int64, torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, torch.bfloat16), + sample_inputs_func=sample_inputs_conv3d, + error_inputs_func=error_inputs_conv3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}), + 'TestCommon', 'test_complex_half_reference_testing', + ), + # TF32 + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=5e-3, rtol=1e-3)}), + 'TestCommon', 'test_noncontiguous_samples', + ), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=5e-5, rtol=5e-6)}), + 'TestMathBits', 'test_conj_view', + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-6)}), + 'TestOperators', 'test_vjpvmap', + ), + ), + skips=( + # RuntimeError: !lhs.isAliasOf(rhs) INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: Conv3D is not supported on MPS + DecorateInfo(unittest.expectedFailure, 'TestConsistency'), + # AssertionError: Tensor-likes are not close! + # break slow tests + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), + ), + supports_expanded_weight=True, + supports_out=False,), + OpInfo('nn.functional.group_norm', + aten_name='group_norm', + aliases=('group_norm',), + ref=reference_group_norm, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + error_inputs_func=error_inputs_group_norm, + decorators=[ + # RuntimeError: Cannot insert a Tensor that requires grad as a constant. + # Consider making it a parameter or input, or detaching the gradient + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)) + ], + sample_inputs_func=sample_inputs_group_norm, + reference_inputs_func=reference_inputs_group_norm, + supports_expanded_weight=True,), + OpInfo('nn.functional.instance_norm', + # no ref because instance_norm will often have numerical instability (large numbers or nan) + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=[ + # RuntimeError: Cannot insert a Tensor that requires grad as a constant. + # Consider making it a parameter or input, or detaching the gradient + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', + active_if=TEST_WITH_ROCM) + ], + sample_inputs_func=sample_inputs_instance_norm, + supports_expanded_weight=True,), + OpInfo('nn.functional.layer_norm', + aten_name='layer_norm', + aten_backward_name='layer_norm_backward', + aliases=('layer_norm',), + ref=reference_layer_norm, + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}), + 'TestCommon', 'test_numpy_refs' + ), + DecorateInfo(unittest.skip("Bug in MPS backend!"), 'TestCommon', 'test_numpy_ref_mps'), + ], + sample_inputs_func=sample_inputs_layer_norm, + supports_expanded_weight=True,), + OpInfo('nn.functional.local_response_norm', + dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ], + sample_inputs_func=sample_inputs_local_response_norm,), + OpInfo('constant_pad_nd', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + sample_inputs_func=sample_inputs_constant_pad_nd, + supports_out=False, + skips=( + # bool can't be passed to Scalar arguments in JIT tracer because + # BoolType is not a subtype of ScalarType. + DecorateInfo( + unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.bool,)), + )), + OpInfo('nn.functional.pad', + variant_test_name='constant', + aten_name='constant_pad_nd', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'), + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='reflect', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + dtypes=all_types_and_complex_and(torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'), + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='replicate', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + dtypes=all_types_and_complex_and(torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'), + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='replicate_negative', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + dtypes=all_types_and_complex_and(torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_nn_pad_replicate_negative, + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + # Some negative padding cases cause a segfault on MPS + DecorateInfo(unittest.skip("Not fully supported on MPS"), 'TestConsistency'), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='circular', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + # Difference from is larger with decomposition new_empty_strided.default than original on output 0 + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'), + ), + supports_out=False), + OpInfo('nn.functional.hardswish', + aten_name="hardswish", + aten_backward_name='hardswish_backward', + supports_autograd=True, + assert_autodiffed=True, + sample_inputs_func=sample_inputs_hardswish, + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_gradgrad=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + autodiff_nonfusible_nodes=["aten::hardswish"]), + OpInfo('nn.functional.unfold', + aten_name='im2col', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_nn_unfold, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + # NOTE: this failure may not reproduce consistently on different systems + # false INTERNAL ASSERT FAILED at "...torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185 + DecorateInfo(unittest.skip("Internal assert failed!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='nearest', + supports_autograd=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='nearest-exact', + supports_autograd=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16, torch.uint8), + sample_inputs_func=partial(sample_inputs_interpolate, 'nearest-exact'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: aten::_upsample_nearest_exact*d hit the vmap fallback which is currently disabled + DecorateInfo(unittest.expectedFailure, 'TestOperators', 'test_vmapjvpall_has_batch_rule'), + DecorateInfo(unittest.expectedFailure, 'TestOperators', 'test_vmapvjp_has_batch_rule'), + DecorateInfo(unittest.expectedFailure, 'TestVmapOperatorsOpInfo', 'test_op_has_batch_rule'), + # NotImplementedError: The operator 'aten::_upsample_nearest_exact3d.out' is not currently implemented + # for the MPS device. + DecorateInfo(unittest.expectedFailure, 'TestConsistency'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='linear', + supports_autograd=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'linear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='bilinear', + supports_fwgrad_bwgrad=True, + supports_autograd=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'), + reference_inputs_func=partial(reference_inputs_interpolate, 'bilinear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='bicubic', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'), + reference_inputs_func=partial(reference_inputs_interpolate, 'bicubic'), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='trilinear', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='area', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'area'), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.upsample_bilinear', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'), + reference_inputs_func=partial(reference_inputs_upsample, 'bilinear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('_upsample_bilinear2d_aa', + op=torch.ops.aten._upsample_bilinear2d_aa, + aten_name='_upsample_bilinear2d_aa', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_upsample_aa, 'bilinear'), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), + DecorateInfo(unittest.expectedFailure, 'TestEagerFusionOpInfo', 'test_aot_autograd_symbolic_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + )), + OpInfo( + "nn.functional.soft_margin_loss", + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + # doesn't support grad on target + sample_inputs_func=partial(sample_inputs_loss, rhs_requires_grad=False), + error_inputs_func=error_inputs_soft_margin_loss, + ), + OpInfo('nn.functional.upsample_nearest', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_upsample, 'nearest'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo( + "nn.functional.margin_ranking_loss", + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_margin_ranking_loss, + error_inputs_func=error_inputs_margin_ranking_loss, + reference_inputs_func=reference_inputs_margin_ranking_loss, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True), + OpInfo( + "nn.functional.multi_margin_loss", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + supports_out=False, + supports_gradgrad=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_multi_margin_loss, + reference_inputs_func=reference_inputs_multi_margin_loss, + error_inputs_func=error_inputs_multi_margin_loss, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + "TestJit", + "test_variant_consistency_jit", + ), + ), + ), + OpInfo( + "nn.functional.multilabel_margin_loss", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + supports_out=False, + supports_gradgrad=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_multilabel_margin_loss, + reference_inputs_func=reference_inputs_multilabel_margin_loss, + error_inputs_func=error_inputs_multilabel_margin_loss, + ), + OpInfo('nn.functional.leaky_relu', + aliases=None, + aten_name="leaky_relu", + aten_backward_name='leaky_relu_backward', + sample_inputs_func=sample_inputs_leaky_relu, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + inplace_variant=lambda x, negative_slope=0.01: + torch.nn.functional.leaky_relu(x, negative_slope, inplace=True), + supports_autograd=True, + assert_autodiffed=True, + supports_gradgrad=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::leaky_relu"]), + OpInfo( + "nn.functional.multilabel_soft_margin_loss", + supports_out=False, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_multilabel_soft_margin_loss, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + "TestJit", + "test_variant_consistency_jit", + ), + ), + skips=( + # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 4096 + # __main__.TestJitCUDA.test_variant_consistency_jit_nn_functional_multilabel_soft_margin_loss_cuda_float32 + # leaked 4096 bytes CUDA memory on device 0 + DecorateInfo( + # Skip instead of expectedFailure because this fails + # locally for me but passes in CI. 
+ unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), + OpInfo('nn.functional.avg_pool2d', + aten_name='avg_pool2d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + error_inputs_func=error_inputs_avg_pool2d, + sample_inputs_func=sample_inputs_avgpool2d, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), + )), + OpInfo('nn.functional.fractional_max_pool2d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.fractional_max_pool2d, input, *args, **kwargs), + # vmap does not support random operations + check_batched_forward_grad=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + test_neg_view=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_fractional_max_pool2d, + decorators=( + # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')), + skips=( + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),)), + OpInfo('nn.functional.fractional_max_pool3d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.fractional_max_pool3d, input, *args, **kwargs), + # vmap does not support random operations + check_batched_forward_grad=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + test_neg_view=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=sample_inputs_fractional_max_pool3d, + decorators=( + # FIXME: both derivatives are implemented incorrectly + # https://github.com/pytorch/pytorch/issues/69322 + # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')), + skips=( + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),)), + OpInfo('nn.functional.max_pool1d', + aten_name='max_pool1d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + # TODO: add shape checks + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # Pre-existing condition; Needs to be fixed + DecorateInfo(unittest.skip("Works on some configs"), 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.bfloat16,)), + # RuntimeError: The tensor has a non-zero number of elements, but its data is not allocated yet. + # Caffe2 uses a lazy allocation, so you will need to call mutable_data() or raw_mutable_data() + # to actually allocate memory + DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), + ), + error_inputs_func=error_inputs_max_pool1d, + sample_inputs_func=sample_inputs_max_pool), + OpInfo('nn.functional.max_pool2d', + aten_name='max_pool2d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + # Vmap is not happy with non-contiguous (channels_last) inputs + check_batched_gradgrad=False, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + assert_jit_shape_analysis=True, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + error_inputs_func=error_inputs_max_pool2d, + sample_inputs_func=sample_inputs_max_pool), + OpInfo('max_pool2d_with_indices_backward', + op=max_pool2d_backward, + # We've defined a custom op, so there's no corresponding aten op + aten_name=None, + method_variant=None, + inplace_variant=None, + operator_variant=None, + inplace_operator_variant=None, + check_batched_gradgrad=False, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_max_pool, + skips=( + # We've defined a custom op here, and we don't handle the case where we receive an out kwarg + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_out"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # object has no attribute max_pool2d_with_indices_backward (It's not available on torch -- so expected) + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit') + )), + OpInfo('nn.functional.max_pool3d', + aten_name='max_pool3d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + # TODO: add shape checks + assert_jit_shape_analysis=False, + dtypes=all_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + # TODO: investigate nondeterminism + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_max_pool3d, + sample_inputs_func=sample_inputs_max_pool), + OpInfo('nn.functional.max_unpool1d', + aten_name='max_unpool1d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool, + skips=( + # Gradients are tested in `variant_test_name=grad` below. + # We skip tests here because there is non-determinism in backward + # with gather, when there are writes into the same memory location, + # and if there are several indices pointing to the same memory, + # gradcheck is oblivious about that and cannot perturb them all at once + # (see sample_inputs_max_unpool_grad to find out more). + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + active_if=(not IS_MACOS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad', + device_type='cpu'), + )), + OpInfo('nn.functional.max_unpool1d', + variant_test_name='grad', + aten_name='max_unpool1d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool_grad), + OpInfo('nn.functional.max_unpool2d', + aten_name='max_unpool2d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool, + skips=( + # Gradients are tested in `variant_test_name=grad` below. + # We skip tests here because there is non-determinism in backward + # with gather, when there are writes into the same memory location, + # and if there are several indices pointing to the same memory, + # gradcheck is oblivious about that and cannot perturb them all at once + # (see sample_inputs_max_unpool_grad to find out more). 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + active_if=(not IS_MACOS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), + )), + OpInfo('nn.functional.max_unpool2d', + variant_test_name='grad', + aten_name='max_unpool2d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # Vmap is not happy with non-contiguous (channels_last) inputs + check_batched_grad=False, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool_grad), + OpInfo('nn.functional.max_unpool3d', + aten_name='max_unpool3d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool, + skips=( + # Gradients are tested in `variant_test_name=grad` below. + # We skip tests here because there is non-determinism in backward + # with gather, when there are writes into the same memory location, + # and if there are several indices pointing to the same memory, + # gradcheck is oblivious about that and cannot perturb them all at once + # (see sample_inputs_max_unpool_grad to find out more). + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + active_if=(not IS_MACOS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), + )), + OpInfo('nn.functional.max_unpool3d', + variant_test_name='grad', + aten_name='max_unpool3d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool_grad), + OpInfo('nn.functional.linear', + aten_name='linear', + supports_autograd=True, + supports_gradgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_linear, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # linear calls mm under the hood which is nondeterministic on CUDA + # https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_expanded_weight=True, + decorators=( + # Strides are not the same! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + )), + OpInfo('nn.functional.bilinear', + aten_name='bilinear', + supports_autograd=True, + sample_inputs_func=sample_inputs_bilinear, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, + *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else []), + decorators=( + DecorateInfo(toleranceOverride({torch.float16: tol(atol=5e-05, rtol=1e-03)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ), + skips=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), + ), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('nn.functional.glu', + aten_name='glu', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + sample_inputs_func=sample_inputs_glu, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + UnaryUfuncInfo( + 'nn.functional.elu', + aten_backward_name='elu_backward', + ref=lambda x, alpha=1.0, inplace=False: + np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: + ({'alpha': 0.8}, {'alpha': 0.8}), + inplace_variant=lambda x, alpha=1.0: + torch.nn.functional.elu(x, alpha, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + # Marked as a Unary function because it has some rather odd broadcasting semantics in its + # second argument + UnaryUfuncInfo( + 'nn.functional.prelu', + aten_backward_name='_prelu_kernel_backward', + ref=lambda x, weight: + np.maximum(0., x) + np.minimum(0., x) * + (weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + # test_reference_numerics only tests the case when the weight tensor is a scalar + sample_kwargs=sample_kwargs_prelu_scalar_weight, + error_inputs_func=error_inputs_prelu, + sample_inputs_func=sample_inputs_prelu, + reference_inputs_func=reference_inputs_prelu, + decorators=[ + # RuntimeError: Cannot insert a Tensor that requires grad as a constant. 
+ # Consider making it a parameter or input, or detaching the gradient + # https://github.com/pytorch/pytorch/issues/68752 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ], + ), + UnaryUfuncInfo( + 'nn.functional.celu', + ref=lambda x, alpha=1.0, inplace=False: + np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: + ({'alpha': 0.8}, {'alpha': 0.8}), + inplace_variant=lambda x, alpha=1.0: + torch.nn.functional.celu(x, alpha, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + UnaryUfuncInfo( + 'nn.functional.rrelu', + aten_backward_name='rrelu_with_noise_backward', + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.rrelu, input, *args, inplace=True, **kwargs), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_wrapper=wrapper_set_seed, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_kwargs=lambda device, dtype, input: + (dict(lower=0., upper=1., training=True), dict(lower=0., upper=1., training=True)), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs=dict(lower=0., upper=1., training=True)), + error_inputs_func=error_inputs_rrelu, + decorators=( + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ),), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # In-place operations do not play well with forward AD + # https://github.com/pytorch/pytorch/issues/77447 + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', + 'test_inplace_forward_mode_AD'), + # The noise vector that's generated in these tests is not the same elementwise + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'), + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'), + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_non_contig_expand'), + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'))), + UnaryUfuncInfo( + 'nn.functional.selu', + ref=lambda x, inplace=False: + 1.0507009873554804934193349852946 * ( + np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1)) + ), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, # depends on 'elu' + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + 
inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-2, rtol=1.8e-2), + torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + OpInfo( + 'torch._scaled_mm', + sample_inputs_func=sample_inputs_scaled_mm, + dtypes=empty_types(), + dtypesIfCUDA=empty_types() + (torch.float8_e4m3fn,), + supports_out=True, + supports_forward_ad=False, + supports_autograd=False, + decorators=[skipCUDAIf(not SM90OrLater or TEST_WITH_ROCM, 'Requires CUDA SM >= 9.0')], + skips=( + # Sample inputs isn't really parametrized on dtype + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', + device_type='cuda'), + # "mul_cuda" not implemented for float8_e4m3fn + # https://github.com/pytorch/pytorch/issues/107256 + DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', + dtypes=(torch.float8_e4m3fn,)), + ) + ), + OpInfo( + 'nn.functional.scaled_dot_product_attention', + op=lambda *args, **kwargs: + wrapper_set_seed(torch.nn.functional.scaled_dot_product_attention, *args, **kwargs), + sample_inputs_func=sample_inputs_scaled_dot_product_attention, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=False, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=[DecorateInfo(toleranceOverride( + {torch.float32: tol(atol=5e-05, rtol=5e-6)}), 'TestCommon',), ], + skips=( + # When attn mask is a composite tensor this fails backward by returning a none + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward', device_type='cuda'), + # This is only failing on Linux Bionic 3.10 Cuda 11.6 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', + device_type='cuda', active_if=_get_torch_cuda_version() >= (11, 6)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples', + dtypes=(torch.float32,)), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Forward works for dtype=float64 which is the math path + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # Not implemented for Forward AD + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', + device_type='cpu'), + # Not implemented for backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad', + device_type='cpu'), + # CPU and CUDA have inconsistencies for intermediate outputs + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace', + device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace', + device_type='cpu'), + # TODO: Do not work even on MI200 because of stride mismatching. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace', + device_type='cuda', dtypes=[torch.float16, torch.bfloat16], + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace', + device_type='cuda', dtypes=[torch.float16, torch.bfloat16], + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_crossref_backward_amp', + device_type='cuda', active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + # When changing input from Tensor to CompositeCompliantTensor, input.requires_grad() changes from true to false + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward', + device_type='cpu'), + # OpInfo was implemented with a lambda + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TODO Need to understand what this is testing and why it doesn't work + DecorateInfo(unittest.skip("Skipped"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic (when dropout_p > 0)'), 'TestCommon', 'test_compare_cpu'), + # TODO skip this for now since we can't skip on runtime arch support + DecorateInfo(unittest.skip('This is '), 'TestInductorOpInfo', 'test_comprehensive'), + # skip for sm < 80 + DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', + device_type='cuda', dtypes=(torch.bfloat16,), active_if=not SM80OrLater), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_meta_outplace', + device_type='cuda', dtypes=(torch.bfloat16,), active_if=not SM80OrLater), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace', + device_type='cuda', dtypes=(torch.bfloat16,), active_if=not SM80OrLater), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace', + device_type='cuda', dtypes=(torch.bfloat16,), active_if=not SM80OrLater), + # registered in fake_impls.py instead of _meta_registrations.py, so meta kernels will fail. + # However, for implementations that fall back to the constituent ops, the meta kernels may not + # fail. Fused kernels will fail, whereas unfused kernels will not fail. + # All fused kernels support bf16 and fp16 - so if fused attention is supported, the test will fail. + # mem_eff_attention also supports fp32 - so if it is supported the test will fail. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=(torch.bfloat16, torch.float16), active_if=PLATFORM_SUPPORTS_FUSED_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=(torch.float32,), active_if=PLATFORM_SUPPORTS_MEM_EFF_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.bfloat16, torch.float16), active_if=PLATFORM_SUPPORTS_FUSED_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.float32,), active_if=PLATFORM_SUPPORTS_MEM_EFF_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=(torch.bfloat16, torch.float16,), active_if=PLATFORM_SUPPORTS_FUSED_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=(torch.float32,), active_if=PLATFORM_SUPPORTS_MEM_EFF_ATTENTION),), + ), + OpInfo( + 'torch.ops.aten._flash_attention_forward', + sample_inputs_func=sample_inputs_flash_attention_forward, + dtypes=empty_types(), + dtypesIfCUDA=custom_types(torch.float16) + if not SM80OrLater + else custom_types(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=True, + supports_fwgrad_bwgrad=False, + supports_forward_ad=False, + check_batched_forward_grad=False, + decorators=[skipCUDAIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "This platform doesn't support Flash Attention")], + skips=( + # Device mismatch due to philox seed and offset + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_autocast', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake', device_type='cuda'), + # meta implementation is in fake_impls.py instead of being a meta registration + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + # Checking the scalar value of the philox seed and offset + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', device_type='cuda'), + # None Mismatch Tensor + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cuda'), + # TODO: Do not work on MI200 because of stride mismatching. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace', + device_type='cuda', dtypes=[torch.float16, torch.bfloat16], + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace', + device_type='cuda', dtypes=[torch.float16, torch.bfloat16], + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + ) + ), + OpInfo( + 'torch.ops.aten._efficient_attention_forward', + sample_inputs_func=sample_inputs_efficient_attention_forward, + dtypes=empty_types(), + dtypesIfCUDA=custom_types(torch.float16, torch.float32) + if not SM80OrLater + else custom_types(torch.float16, torch.float32, torch.bfloat16), + supports_out=False, + supports_autograd=True, + supports_fwgrad_bwgrad=False, + supports_forward_ad=False, + check_batched_forward_grad=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=[skipCUDAIf(TEST_WITH_ROCM, "ROCm doesn't support efficient attention")], + skips=( + # Device mismatch due to philox seed and offset + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_autocast', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake', device_type='cuda'), + # meta implementation is in fake_impls.py instead of being a meta registration + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + # Checking the scaler value of the philox seed and offset + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', device_type='cuda'), + # None Mismatch Tensor + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cuda'), + ) + ), + UnaryUfuncInfo( + 'nn.functional.silu', + aten_backward_name='silu_backward', + ref=lambda x, inplace=False: x / (1 + np.exp(-x)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_autograd=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + supports_out=False, + inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-3, rtol=1e-3), + torch.bfloat16: tol(atol=1e-4, rtol=1e-4) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + dtypes=(torch.cfloat,), device_type='cpu'), + ), + autodiff_nonfusible_nodes=["aten::silu"], + ), + # TODO: combine this with the nn.functional.silu OpInfo when + # complex autodiff for silu is supported or when + # the forward bug is fixed + # Note: silu errors when given inputs that require grad + # but it doesn't support grad in their dtype + # This is why the dtypes list above passes test_dtypes, + # because it's getting lucky and failing in forward + # because test_dtypes sets requires_grad to True + # THIS IS A BUG + UnaryUfuncInfo( + 'nn.functional.silu', + variant_test_name='complex', + ref=lambda x, 
inplace=False: + x / (1 + np.exp(-x)), + dtypes=complex_types(), + dtypesIfCUDA=complex_types(), + supports_forward_ad=False, + supports_autograd=False, + assert_autodiffed=False, + supports_out=False, + inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-3, rtol=1e-3), + torch.bfloat16: tol(atol=1e-4, rtol=1e-4) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + dtypes=(torch.cfloat,)), + # FIXME: intentionally misreports dtypes + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + # FIXME: numpy reference diverges: Comparing (nan+nanj) and (-0+0j) + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.complex64, torch.cdouble)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=(torch.complex64,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.complex64,)))), + UnaryUfuncInfo( + 'nn.functional.hardsigmoid', + aten_backward_name='hardsigmoid_backward', + ref=reference_hardsigmoid, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=False, + supports_forward_ad=True, + supports_out=False, + inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ], + skips=[ + # still want to test that first derivative works though second derivative isn't supported + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', "test_inplace_gradgrad"), + # produces 0 instead of nan on ROCM + DecorateInfo(unittest.expectedFailure, + 'TestUnaryUfuncs', "test_reference_numerics_extremal", + device_type='cuda', + active_if=(TEST_WITH_ROCM)), ] + ), + UnaryUfuncInfo( + 'nn.functional.logsigmoid', + aten_name="log_sigmoid", + aten_backward_name='log_sigmoid_backward', + ref=reference_logsigmoid, + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_autograd=True, + assert_autodiffed=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_gradgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + # autodiff_nonfusible_nodes=["aten::log_sigmoid"], + decorators=[ + DecorateInfo( + precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), + 'TestUnaryUfuncs', 'test_reference_numerics_small'), + DecorateInfo( + precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), + 'TestUnaryUfuncs', 'test_reference_numerics_large'), + DecorateInfo( + precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), + ], + skips=( + # Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cpu'), + ), + ), + UnaryUfuncInfo( + 'nn.functional.mish', + aten_backward_name='mish_backward', + ref=lambda x: x * np.tanh(reference_softplus(x)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + inplace_variant=partial(torch.nn.functional.mish, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs',), ], + ), + UnaryUfuncInfo( + 'nn.functional.softsign', + ref=lambda x: x / (np.abs(x) + 1), + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=(torch.int, torch.int8)),), + ), + UnaryUfuncInfo( + 'nn.functional.tanhshrink', + ref=lambda x: x - np.tanh(x), + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + decorators=[ + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo( + toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=6e-04, rtol=1e-05), + torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + # in each case, pytorch will produce a nan while numpy will not + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_large", + dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)), + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_extremal", + dtypes=(torch.complex64, torch.complex128), device_type='cpu', + active_if=(IS_MACOS or IS_WINDOWS)), + ), + # tan(j * pi/2 * odd_number) is nan which also make tanhshrink nan. 
+ reference_numerics_filter=NumericsFilter( + condition=lambda x: (close_to_int(x / (math.pi * 0.5j)) + if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), + safe_val=0) + ), + UnaryUfuncInfo( + 'nn.functional.threshold', + ref=lambda x, threshold, value: np.where(x <= threshold, value, x).astype(x.dtype), + dtypes=all_types_and(torch.half, torch.bfloat16), + inplace_variant=lambda x, threshold, value: + torch.nn.functional.threshold(x, threshold, value, inplace=True), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_kwargs=lambda device, dtype, input: ({'threshold': float.fromhex('0x1.3ap-3'), + 'value': -9}, + {'threshold': float.fromhex('0x1.3ap-3'), + 'value': -9}), + # TODO(whc) should not need sample_inputs_func, but without it + # kwargs aren't being hooked up properly + sample_inputs_func=sample_inputs_threshold, + ), + OpInfo( + "nn.functional.triplet_margin_loss", + sample_inputs_func=sample_inputs_triplet_margin_loss, + error_inputs_func=error_inputs_triplet_margin_loss, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo( + "nn.functional.triplet_margin_with_distance_loss", + sample_inputs_func=partial(sample_inputs_triplet_margin_loss, with_distance=True), + error_inputs_func=error_inputs_triplet_margin_loss, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # This test cannot handle a callable passed to `distance_function`. If we would use + # `distance_function=None`, the test would pass fine. + DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + ), + ), + BinaryUfuncInfo('nextafter', + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16), + supports_autograd=False, + supports_rhs_python_scalar=False), + OpInfo( + "to", + op=lambda x, *args, **kwargs: x.to(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=sample_inputs_to, + skips=( + # RuntimeError: undefined value cpu + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cpu", + ), + # NotImplementedError: Cannot copy out of meta tensor; no data! 
+ DecorateInfo( + unittest.skip("Skipped!"), + "TestMeta", + "test_meta_outplace", + ), + # https://github.com/pytorch/pytorch/issues/84335 + DecorateInfo( + unittest.skip("Skipped!"), + "TestProxyTensorOpInfo", + "test_make_fx_symbolic_exhaustive", + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + ), + ), + OpInfo('topk', + dtypes=all_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + sample_inputs_func=sample_inputs_topk), + # Multiple variants for batch_norm to test with and without cuDNN disabled + # See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details + OpInfo('nn.functional.batch_norm', + aten_name='batch_norm', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_batch_norm, + skips=( + # see https://github.com/pytorch/pytorch/issues/71286 + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.bfloat16, torch.float16)), + # Trying to use forward AD with miopen_batch_norm that does not support it + # because it has not been implemented yet. + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', + device_type="cuda", active_if=TEST_WITH_ROCM), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-05, rtol=1e-05)}), + 'TestCompositeCompliance', 'test_forward_ad', device_type="cpu"), + )), + # This variant tests batch_norm with cuDNN disabled only on CUDA devices + OpInfo('nn.functional.batch_norm', + variant_test_name='without_cudnn', + aten_name='batch_norm', + dtypes=empty_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=[onlyCUDA, disablecuDNN], + skips=( + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-04)}), + 'TestJit', 'test_variant_consistency_jit'), + ), + sample_inputs_func=sample_inputs_batch_norm), + OpInfo( + "nn.functional.binary_cross_entropy", + aten_backward_name='binary_cross_entropy_backward', + sample_inputs_func=sample_inputs_binary_cross_entropy, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + gradcheck_fast_mode=False, + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=( + # RuntimeError: expected int at position 0, but got: Tensor + DecorateInfo( + unittest.skip("Skipped!"), + "TestCudaFuserOpInfo", + ), + # RuntimeError: expected int at position 0, but got: Tensor + DecorateInfo( + unittest.skip("Skipped!"), + "TestNNCOpInfo", + "test_nnc_correctness", + ), + # Fails for unknown reason: https://github.com/pytorch/pytorch/issues/120783 + DecorateInfo( + unittest.skip("Skipped!"), + "TestCompositeCompliance", + "test_cow_input", + device_type='cuda', + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-3, rtol=1e-3)}), + "TestJit", + 
"test_variant_consistency_jit", + ), + # RuntimeError: output with shape [] doesn't match the broadcast shape [5, 5] + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + ), + skips=( + # RuntimeError: expected int at position 0, but got: Tensor + DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + ), + ), + ), + # We have to add 2 OpInfo entry for `igamma` and `igammac`.First is the + # standard entry, second is to run gradcheck tests on the second argument. + BinaryUfuncInfo('igamma', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + aliases=('torch.special.gammainc',), + dtypesIfCUDA=floating_types(), + # TODO: FIXME + supports_rhs_python_scalar=False, + supports_autograd=False, + skips=( + # FIXME: incorrectly tries to pass a rhs scalar + DecorateInfo(unittest.expectedFailure, 'TestJit', + 'test_jit_alias_remapping'), + )), + # TODO: FIXME, ideally by implemented grad for both inputs + # BinaryUfuncInfo('igamma', + # variant_test_name='grad_other', + # # Since autograd formula is implemented only for other and + # # gradcheck test verifies the formula for input in SampleInput, + # # we permute the arguments. + # op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs), + # inplace_variant=None, + # method_variant=None, + # supports_rhs_python_scalar=False, + # rhs_make_tensor_kwargs=dict(requires_grad=False), + # dtypes=floating_types_and(torch.bfloat16, torch.float16), + # backward_dtypesIfCPU=floating_types_and(torch.bfloat16), + # dtypesIfCUDA=floating_types(), + # backward_dtypesIfCUDA=floating_types(), + # supports_inplace_autograd=False, + # skips=( + # # Derivative wrt first tensor not implemented + # DecorateInfo(unittest.expectedFailure, "TestCommon", + # "test_floating_inputs_are_differentiable"),"), + # # test does not work with passing lambda for op + # # AssertionError: False is not true : Tensors failed to compare as equal! + # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # # test fails are we permute the arguments function variant + # # but not for inplace or method. 
+ # DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # # TypeError: igamma(): argument 'input' (position 1) must be Tensor, not float + # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), + # )), + BinaryUfuncInfo('igammac', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + aliases=('torch.special.gammaincc',), + dtypesIfCUDA=floating_types(), + supports_autograd=False, + supports_rhs_python_scalar=False, + skips=( + # FIXME: incorrectly tries to pass a rhs scalar + DecorateInfo(unittest.expectedFailure, 'TestJit', + 'test_jit_alias_remapping'), + )), + # TODO: FIXME, ideally by implementing grad for both inputs + # BinaryUfuncInfo('igammac', + # variant_test_name='grad_other', + # # Since autograd formula is implemented only for other and + # # gradcheck test verifies the formula for input in SampleInput, + # # we permute the arguments + # op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs), + # inplace_variant=None, + # method_variant=None, + # supports_rhs_python_scalar=False, + # rhs_make_tensor_kwargs=dict(requires_grad=False), + # dtypes=floating_types_and(torch.bfloat16, torch.float16), + # backward_dtypesIfCPU=floating_types_and(torch.bfloat16), + # dtypesIfCUDA=floating_types(), + # backward_dtypesIfCUDA=floating_types(), + # supports_inplace_autograd=False, + # decorators=[ + # # Derivative wrt first tensor not implemented + # DecorateInfo(unittest.expectedFailure, "TestCommon", + # "test_floating_inputs_are_differentiable"), + # ], + # skips=( + # # test does not work with passing lambda for op + # # AssertionError: False is not true : Tensors failed to compare as equal! + # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # # test fails as we permute the arguments for the function variant + # # but not for inplace or method.
+ # DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # # TypeError: igammac(): argument 'input' (position 1) must be Tensor, not float + # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), + # )), + UnaryUfuncInfo('nn.functional.softshrink', + aten_name="softshrink", + aten_backward_name='softshrink_backward', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + sample_inputs_func=sample_inputs_softshrink, + error_inputs_func=error_inputs_softshrink), + UnaryUfuncInfo('nn.functional.hardshrink', + aten_name="hardshrink", + aten_backward_name='hardshrink_backward', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_hardshrink, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::hardshrink"]), + UnaryUfuncInfo('nn.functional.hardtanh', + aten_name="hardtanh", + aten_backward_name='hardtanh_backward', + dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.half, torch.bfloat16), + backward_dtypes=all_types_and(torch.half, torch.bfloat16), + backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_hardtanh, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::hardtanh"]), + OpInfo('nn.functional.gelu', + aten_name="gelu", + aten_backward_name='gelu_backward', + ref=reference_gelu if TEST_SCIPY else None, + error_inputs_func=error_inputs_gelu, + supports_autograd=True, + assert_autodiffed=True, + sample_inputs_func=sample_inputs_gelu, + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_gradgrad=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + autodiff_nonfusible_nodes=["aten::gelu"], + skips=( + # AssertionError: Tensor-likes are not close! 
+ # May not replicate in CI + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + )), + UnaryUfuncInfo('nn.functional.relu6', + aten_name="relu6", + dtypes=all_types_and(torch.half, torch.bfloat16), + backward_dtypes=floating_types_and(torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::relu6"]), + OpInfo('mm', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_mm, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('mode', + op=torch.mode, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Resized a non-empty tensor but did not warn about it + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + ), + sample_inputs_func=sample_inputs_mode,), + make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_1', + domain=(1, None), + skips=skips_mvlgamma(), + sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})), + make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_3', + domain=(2, None), + skips=skips_mvlgamma(), + sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})), + make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_5', + domain=(3, None), + skips=skips_mvlgamma(), + sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})), + BinaryUfuncInfo('ne', + ref=np.not_equal, + aliases=('not_equal',), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('narrow', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=partial(sample_inputs_narrow_narrow_copy, is_narrow=True), + reference_inputs_func=partial(reference_inputs_narrow_narrow_copy, is_narrow=True), + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=True, is_ref=False), + skips=( + # Use of .item() + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + )), + OpInfo('narrow_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=True, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + supports_autograd=False, + # https://github.com/pytorch/pytorch/issues/86931 + sample_inputs_func=partial(sample_inputs_narrow_narrow_copy, is_narrow=False), + reference_inputs_func=partial(reference_inputs_narrow_narrow_copy, is_narrow=False), + 
error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=False, is_ref=False), + skips=( + # https://github.com/pytorch/pytorch/issues/84577 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # Lazy tensor failures: mutating and aliasing ops should all have codegen'd kernels + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_correctness'), + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), + # Could not run 'aten::narrow_copy.out' with arguments from the 'CUDA' backend + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_outplace', + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace', + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace', + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + )), + OpInfo('view_copy', + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + ref=lambda x, newshape: np.reshape(x, newshape).copy(), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + sample_inputs_func=sample_inputs_view_reshape, + error_inputs_func=error_inputs_view_reshape), + UnaryUfuncInfo('neg', + aliases=('negative', ), + ref=np.negative, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + error_inputs_func=error_inputs_neg, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + OpInfo('dist', + op=torch.dist, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: + # Could not allocate memory to change Tensor SizesAndStrides! + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_dist), + OpInfo('outer', + op=torch.outer, + aliases=('ger', ), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_outer,), + OpInfo('ormqr', + op=torch.ormqr, + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_ormqr, + error_inputs_func=error_inputs_ormqr, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack], + skips=( + # Strides are not the same! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + )), + OpInfo('permute', + ref=np.transpose, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_varargs=True, + sample_inputs_func=sample_inputs_permute, + reference_inputs_func=reference_inputs_permute), + BinaryUfuncInfo('pow', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + ref=np.power, + # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled + # for Float16, causing this test to fail. pow's autograd for Float16 is thus currently + # unsupported on CPU. + backward_dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + supports_one_python_scalar=True, + # Integer types do not support negative exponents + rhs_make_tensor_kwargs=dict(low=0), + # Raising negative real numbers to fractional powers is not supported + lhs_make_tensor_kwargs=dict(low=0), + decorators=( + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), + torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_scalar_support'), + ), + skips=( + # Skipping integers because raising them to negative powers causes an error + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]), + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_large_values', + dtypes=[torch.int16, torch.int32, torch.int64]), + # FIXME Complex values error with: Greatest absolute difference: nan at index + # Ref: https://github.com/pytorch/pytorch/issues/76853 + # For `chalf`, reference computation in `numpy` is computed in `cfloat`. + # Output of `chalf` saturates to `inf` quicker than reference due to its small range + # which leads to failure of this test.
+ DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick', + dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive', + dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing', + dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_batch_vs_slicing', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + )), + BinaryUfuncInfo('float_power', + ref=np.float_power, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool), + promotes_int_to_float=True, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + # Integer types do not support negative exponentes + rhs_make_tensor_kwargs=dict(low=0), + # Raising negative real numbers to fractional powers is not supported + lhs_make_tensor_kwargs=dict(low=0), + decorators=( + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), + torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_scalar_support'), + ), + skips=( + # FIXME + # AssertionError: Object comparison failed: torch.float64 != torch.float32 + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # -3.43399e+38 is outside the range of representable values of type 'float' + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Complex values error with: Greatest absolute difference: nan at index + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', + dtypes=[torch.complex64, torch.complex128]), + # Inplace always promotes to double and thus other floating dtypes are not supported + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', + dtypes=[torch.bfloat16, torch.float16, torch.float32]), + )), + OpInfo('qr', + op=torch.qr, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_qr_geqrf, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # In-place ops + check_batched_gradgrad=False, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]), + UnaryUfuncInfo('rad2deg', + ref=np.degrees, + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + dtypes=all_types_and(torch.bool, 
torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True), + UnaryUfuncInfo('real', + ref=np.real, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # Skip since real and imag don't have out variants. + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), + )), + OpInfo( + "roll", + ref=np.roll, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + error_inputs_func=error_inputs_roll, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_roll, + decorators=(onlyNativeDeviceTypes,), + ), + OpInfo( + "rot90", + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + error_inputs_func=error_inputs_rot90, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_rot90, + ), + # To test reference numerics against multiple values of argument `decimals`, + # we make multiple OpInfo entries with each entry corresponding to different value of decimals. + UnaryUfuncInfo('round', + ref=np.round, + aliases=('special.round',), + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bfloat16,)), + ), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True, + ), + UnaryUfuncInfo('round', + ref=np.round, + variant_test_name='decimals_0', + aliases=('special.round',), + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_kwargs=lambda device, dtype, input: ({'decimals': 0}, {'decimals': 0}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 0}), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_sparse_csr=False), + UnaryUfuncInfo('round', + ref=np.round, + variant_test_name='decimals_3', + aliases=('special.round',), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_kwargs=lambda device, dtype, input: ({'decimals': 3}, {'decimals': 3}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 3}), + skips=( + # test_ops already tested for this overload with `decimals_0` opinfo entry + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", 
"test_reference_numerics_extremal", + device_type="cuda"), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_normal", + device_type="cuda"), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_sparse_csr=False), + UnaryUfuncInfo('round', + ref=np.round, + variant_test_name='decimals_neg_3', + aliases=('special.round',), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_kwargs=lambda device, dtype, input: ({'decimals': -3}, {'decimals': -3}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': -3}), + skips=( + # test_ops already tested for this overload with `decimals_0` opinfo entry + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_sparse_csr=False), + UnaryUfuncInfo('sin', + ref=np.sin, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + handles_large_floats=False, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + decorators=(precisionOverride({torch.bfloat16: 1e-2}),)), + UnaryUfuncInfo('sinc', + ref=np_sinc_with_fp16_as_fp32, + aliases=('special.sinc',), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + handles_large_floats=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('sinh', + ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.float16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cdouble,)), + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('sign', + ref=reference_sign, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), + )), + UnaryUfuncInfo('sgn', + ref=reference_sgn, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + OpInfo('split', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=partial(sample_inputs_split, list_args=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_autodiffed=True), + OpInfo('split', + # Cannot declare this aten_name because of + # test_variant_consistency_jit_split_list_args_cpu_float32 + decomp_aten_name='split_with_sizes', + variant_test_name='list_args', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + sample_inputs_func=partial(sample_inputs_split, list_args=True), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + # `unsafe_split` supports only `int` for split_size argument + OpInfo('unsafe_split', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=partial(sample_inputs_split, list_args=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_autodiffed=True, + check_batched_forward_grad=False), + OpInfo('split_with_sizes', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=sample_inputs_split_with_sizes, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + OpInfo('split_with_sizes_copy', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=sample_inputs_split_with_sizes, + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # No error raised + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_requires_grad_error"), + )), + BinaryUfuncInfo('__radd__', + op=torch.Tensor.__radd__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + + ), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=['aten::add'],), + BinaryUfuncInfo('__rdiv__', + op=torch.Tensor.__rdiv__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + promotes_int_to_float=True, + lhs_make_tensor_kwargs={'exclude_zero': True}, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + skips=( + # https://github.com/pytorch/pytorch/issues/76806 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::mul', 
'aten::reciprocal'],), + BinaryUfuncInfo('__rmul__', + op=torch.Tensor.__rmul__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=['aten::mul'],), + BinaryUfuncInfo('__rand__', + op=torch.Tensor.__rand__, + dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + supports_forward_ad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + BinaryUfuncInfo('__ror__', + op=torch.Tensor.__ror__, + dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + supports_forward_ad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + BinaryUfuncInfo('__rxor__', + op=torch.Tensor.__rxor__, + dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + supports_forward_ad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + OpInfo('__rmatmul__', + op=torch.Tensor.__rmatmul__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + assert_autodiffed=True, + sample_inputs_func=partial(sample_inputs_matmul, is_rmatmul=True), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + decorators=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestMathBits', 'test_conj_view'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1e-05)}), + "TestDecomp", "test_comprehensive", device_type="cuda", + active_if=TEST_WITH_ROCM), + ), + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + # https://github.com/pytorch/pytorch/issues/67470 + DecorateInfo(unittest.skip("67470!"), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cpu', dtypes=(torch.long,)), + # Fails on XLA. 
+ # AssertionError: False is not true : Tensors failed to compare as equal + DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), + # https://github.com/pytorch/pytorch/issues/71774 + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.long,)), + )), + BinaryUfuncInfo('__rmod__', + op=torch.Tensor.__rmod__, + dtypes=floating_types_and(torch.bfloat16, torch.half,), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + # Support autograd after torch.remainder(Tensor, Tensor) supports + # autograd of the second argument. + # https://github.com/pytorch/pytorch/pull/58476/files#r637167630 + # supports_autograd=False, + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::remainder'],), + BinaryUfuncInfo('__rpow__', + op=torch.Tensor.__rpow__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + # Reference: https://github.com/pytorch/pytorch/issues/54774 + # "log2" "_vml_cpu" not implemented for Half + backward_dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + # TODO: FIXME tolerance is too high + DecorateInfo(unittest.skip('Skipped!'), 'TestFwdGradients'), + DecorateInfo(unittest.skip('Skipped!'), 'TestBwdGradients'), + ), + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::pow'],), + BinaryUfuncInfo('__rsub__', + op=torch.Tensor.__rsub__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + supports_one_python_scalar=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::rsub'],), + BinaryUfuncInfo('rsub', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + supports_inplace_autograd=False, + assert_autodiffed=None, + sample_inputs_func=sample_inputs_add_sub), + OpInfo('select', + aten_backward_name='select_backward', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=sample_inputs_select, + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('select_scatter', + dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool), + sample_inputs_func=sample_inputs_select_scatter, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('slice', + op=torch.ops.aten.slice.Tensor, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + 
sample_inputs_func=sample_inputs_slice, + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_scripting=False, + supports_inplace_autograd=False, + supports_out=False), + OpInfo('slice_scatter', + dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool), + sample_inputs_func=sample_inputs_slice_scatter, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: Internal error: pybind11::error_already_set called while + # Python error indicator not set. + # TODO: Investigate this more + DecorateInfo(unittest.expectedFailure, 'TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive_out'), + ), + supports_out=True), + UnaryUfuncInfo('signbit', + ref=np.signbit, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False,), + UnaryUfuncInfo('tan', + ref=np.tan, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=1e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda'),), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + # tan(pi/2 * odd_number) is nan + reference_numerics_filter=NumericsFilter( + condition=lambda x: close_to_int(x / (math.pi * 0.5)), safe_val=math.pi)), + UnaryUfuncInfo('tanh', + ref=np.tanh, + aten_backward_name='tanh_backward', + aliases=('nn.functional.tanh',), + decorators=(precisionOverride({torch.bfloat16: 1e-2}), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=2e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda'),), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + # tan(j * pi/2 * odd_number) is nan + reference_numerics_filter=NumericsFilter( + condition=lambda x: (close_to_int(x / (math.pi * 0.5j)) + if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), + safe_val=0)), + OpInfo('tensor_split', + ref=np.array_split, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Pre-existing condition; Needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + ), + sample_inputs_func=sample_inputs_tensor_split,), + OpInfo('hsplit', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_hsplit, + error_inputs_func=error_inputs_hsplit,), + OpInfo('vsplit', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_vsplit, + error_inputs_func=error_inputs_vsplit,), + OpInfo('dsplit', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_dsplit, + 
error_inputs_func=error_inputs_dsplit,), + OpInfo('triangular_solve', + op=torch.triangular_solve, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_legacy_solve, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # AssertionError: Scalars are not equal! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # Gradcheck fails + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', + dtypes=floating_and_complex_types()), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + )), + UnaryUfuncInfo('trunc', + aliases=('fix', ), + ref=np.trunc, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + ), + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + UnaryUfuncInfo('exp2', + aliases=('special.exp2', ), + ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + )), + UnaryUfuncInfo('expm1', + aliases=('special.expm1', ), + ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + assert_autodiffed=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.complex128]), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('nan_to_num', + ref=np.nan_to_num, + dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + skips=( + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + # Passing numpy_kwargs via sample_kwargs, as numpy does comparison + # with BFloat16 in float, since it currently doesn't support BFloat16. + # Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556 + sample_kwargs=lambda device, dtype, input: ({}, + {'posinf': torch.finfo(torch.bfloat16).max, + 'neginf': torch.finfo(torch.bfloat16).min}) + if dtype is torch.bfloat16 else ({}, {})), + UnaryUfuncInfo('reciprocal', + ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/45690 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + )), + UnaryUfuncInfo('rsqrt', + ref=lambda x: np.reciprocal(np.sqrt(x)), + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.half: 5e-2}),), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble)), + # AssertionError: Tensor-likes are not close! + # Greatest absolute difference: nan at index (700,) (up to 0.01 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.chalf,)), + )), + UnaryUfuncInfo('sqrt', + ref=np.sqrt, + supports_sparse=True, + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=( + precisionOverride({torch.bfloat16: 7e-2}), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestUnaryUfuncs', 'test_reference_numerics_large'), + ), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/47358 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=(torch.cfloat, torch.cdouble), + active_if=IS_MACOS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('square', + ref=np.square, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/52549 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble]), + # >>> t = torch.tensor(complex(-0.01, float("inf"))) + # >>> np.square(t.numpy()) + # (-inf-infj) + # >>> t.square() + # tensor(-inf-infj) + # >>> t.cuda().square() + # tensor(inf+nanj, device='cuda:0') + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', + dtypes=[torch.bool]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace', + dtypes=[torch.bool]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace', + dtypes=[torch.bool]), + ),), + OpInfo('lerp', + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_and_complex_types_and(torch.chalf, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_lerp, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + UnaryUfuncInfo('angle', + ref=np.angle, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2}),), + backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_complex_to_float=True, + skips=( + # Ref: https://github.com/pytorch/pytorch/issues/78413 + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64),), + )), + UnaryUfuncInfo('isfinite', + ref=np.isfinite, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_autograd=False), + UnaryUfuncInfo('isinf', + ref=np.isinf, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + UnaryUfuncInfo('isposinf', + ref=np.isposinf, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + UnaryUfuncInfo('isneginf', + ref=np.isneginf, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + 
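
# --- Illustrative sketch (not part of the patch above) ------------------------------
# The nan_to_num entry earlier builds its kwargs through `sample_kwargs`, a callable
# mapping (device, dtype, input) to a pair of dicts; judging by the comment there, the
# first dict goes to the torch op and the second to the NumPy reference. The function
# name below is hypothetical; only the returned 2-tuple structure mirrors that entry.
import torch

def nan_to_num_sample_kwargs_sketch(device, dtype, input):
    if dtype is torch.bfloat16:
        # NumPy has no bfloat16, so the reference gets explicit finite replacement bounds
        numpy_kwargs = {'posinf': torch.finfo(torch.bfloat16).max,
                        'neginf': torch.finfo(torch.bfloat16).min}
        return {}, numpy_kwargs
    return {}, {}

torch_kwargs, numpy_kwargs = nan_to_num_sample_kwargs_sketch('cpu', torch.bfloat16, None)
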
supports_autograd=False), + UnaryUfuncInfo('isreal', + ref=np.isreal, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_autograd=False), + UnaryUfuncInfo('isnan', + ref=np.isnan, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + OpInfo('einsum', + # we need this lambda because SampleInput expects tensor input as the first argument + # TODO(@heitorschueroff) update SampleInput to handle such cases + op=lambda tensors, equation: torch.einsum(equation, tensors), + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + # See https://github.com/pytorch/pytorch/issues/66357 + sample_inputs_func=sample_inputs_einsum, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # test does not work with passing lambda for op + # there's a test `test_einsum` in `test_jit.py` to handle this case + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('svd', + op=torch.svd, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_svd, + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + # We're using at::allclose, which does not have a batching rule + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + )), + OpInfo('svd_lowrank', + op=lambda *args, **kwargs: wrapper_set_seed( + lambda a, b, **kwargs: torch.svd_lowrank(a @ b.mT, **kwargs), + *args, **kwargs + ), + dtypes=floating_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + sample_inputs_func=sample_inputs_svd_lowrank, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off, + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cuda')], + skips=( + # test does not work with passing lambda for op + 
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(slowTest, 'TestCompositeCompliance', 'test_forward_ad'), + )), + OpInfo('pca_lowrank', + op=lambda *args, **kwargs: wrapper_set_seed( + lambda a, b, **kwargs: torch.pca_lowrank(a @ b.mT, **kwargs), + *args, **kwargs + ), + dtypes=floating_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_pca_lowrank, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off, + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cuda')], + skips=( + # test does not work with passing lambda for op + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + BinaryUfuncInfo('polar', + dtypes=floating_types(), + # this function is undefined if 'abs' values are <0 + supports_forward_ad=True, + lhs_make_tensor_kwargs=dict(low=0), + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: Expected object of scalar type Float but got scalar type Double for second argument + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + # GradcheckError: Jacobian computed with forward mode mismatch for output 0 with respect to input 0 + # Numerical: + # tensor([[0.]], dtype=torch.float64) + # Analytical: + # tensor([[-0.0047]], dtype=torch.float64, grad_fn=) + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + )), + # TODO(@kshitij12345): Refactor similar to `mvlgamma` entries. + # To test reference numerics against multiple values of argument `n`, + # we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4). + # We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing. 
+ UnaryUfuncInfo('polygamma', + op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), + variant_test_name='polygamma_n_0', + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_polygamma, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0}), + # polygamma functions have multiple singularities at x having non-positive integer value + reference_numerics_filter=NumericsFilter(condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4), + safe_val=1)), + *(UnaryUfuncInfo('polygamma', + op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), + variant_test_name=f'polygamma_n_{n_}', + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_polygamma, + decorators=( + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-3)}), 'TestUnaryUfuncs'), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e1, rtol=1e-1), + torch.float32: tol(atol=1e-4, rtol=1e-2)}), + 'TestUnaryUfuncs', 'test_reference_numerics_normal', + active_if=IS_WINDOWS), + ), + skips=( + # Redundant tests + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + # Mismatch: https://github.com/pytorch/pytorch/issues/55357 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large'), + ), + sample_kwargs=lambda device, dtype, input: ({'n': n_}, {'n': n_}), + # polygamma functions have multiple singularities at x having non-positive integer value + reference_numerics_filter=NumericsFilter(condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4), + safe_val=1)) + for n_ in (1, 2, 3, 4)), + OpInfo('ravel', + ref=np.ravel, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_ravel, + ), + OpInfo('unravel_index', + ref=np.unravel_index, + dtypes=integral_types_and(), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_unravel_index, + ), + OpInfo('reshape', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_view_reshape, + reference_inputs_func=reference_inputs_view_reshape, + error_inputs_func=error_inputs_view_reshape, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo('reshape_as', + op=lambda x, other: x.reshape_as(other), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + 
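
# --- Illustrative sketch (not part of the patch above) ------------------------------
# Several entries above (einsum earlier, and the polygamma variants just shown) wrap
# the op in a lambda. Per the einsum comment, the harness passes the sample's tensor as
# the first positional argument while the underlying torch call wants it elsewhere, so
# the lambda only reorders arguments. A minimal stand-alone illustration:
import torch

polygamma_op_sketch = lambda x, n: torch.polygamma(n, x)        # tensor first, order second
einsum_op_sketch = lambda tensors, equation: torch.einsum(equation, tensors)

x = torch.rand(3) + 0.5
trigamma = polygamma_op_sketch(x, 1)            # same as torch.polygamma(1, x)
dot = einsum_op_sketch((x, x), 'i,i->')         # same as torch.einsum('i,i->', x, x)
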
sample_inputs_func=partial(sample_inputs_view_reshape, tensor_arg=True), + reference_inputs_func=partial(reference_inputs_view_reshape, tensor_arg=True), + error_inputs_func=partial(error_inputs_view_reshape, tensor_arg=True), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + )), + OpInfo('view', + op=lambda x, shape: x.view(shape), + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + sample_inputs_func=sample_inputs_view_reshape, + reference_inputs_func=reference_inputs_view_reshape, + error_inputs_func=error_inputs_view_reshape, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: view size is not compatible with input tensor's size and stride + # (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + OpInfo('view_as', + op=lambda x, other: x.view_as(other), + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=partial(sample_inputs_view_reshape, tensor_arg=True), + reference_inputs_func=partial(reference_inputs_view_reshape, tensor_arg=True), + error_inputs_func=partial(error_inputs_view_reshape, tensor_arg=True), + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides") + )), + OpInfo('atleast_1d', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_atleast1d2d3d, + skips=( + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + ), + OpInfo('atleast_2d', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + sample_inputs_func=sample_inputs_atleast1d2d3d, + ), + OpInfo('atleast_3d', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + sample_inputs_func=sample_inputs_atleast1d2d3d, + ), + OpInfo('flatten', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + ref=reference_flatten, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_flatten, + reference_inputs_func=reference_inputs_flatten, + ), + OpInfo('unflatten', + op=torch.unflatten, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_unflatten, + ), + OpInfo('column_stack', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),), + sample_inputs_func=sample_inputs_column_stack,), + OpInfo('pinverse', + op=torch.pinverse, + dtypes=floating_and_complex_types(), + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False, + sample_inputs_func=sample_inputs_linalg_invertible, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + )), + OpInfo('gather', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, 
torch.bfloat16), + sample_inputs_func=sample_inputs_gather, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_gather, + ), + OpInfo('index_fill', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # RuntimeError: Mismatch on aten._unique.default: Shapes torch.Size([2]) and torch.Size([1]) are not equal! + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_no_amp'), + # RuntimeError: Mismatch on aten._unique.default: Shapes torch.Size([2]) and torch.Size([1]) are not equal! + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_amp'), + ), + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True)), + OpInfo('index_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('index_select', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True), + error_inputs_func=error_inputs_index_select, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('index_add', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True), + error_inputs_func=error_inputs_index_add, + skips=( + # boolean alpha not handled properly + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bool,)), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('index_reduce', + dtypes=all_types_and(torch.float16, torch.bfloat16), + supports_out=True, + sample_inputs_func=sample_inputs_index_reduce), + OpInfo('__getitem__', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_inplace_autograd=False, + supports_scripting=False, + op=torch.Tensor.__getitem__, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: False is not true : Scalars failed to compare as equal! 
0 != 104448 + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),), + sample_inputs_func=sample_inputs_getitem), + OpInfo('index_put', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_inplace_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + test_neg_view=False, + sample_inputs_func=sample_inputs_index_put, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.skip("Skipped"), 'TestBwdGradients', 'test_fn_grad', dtypes=[torch.float64], + device_type='cuda', active_if=(TEST_WITH_ROCM and TEST_WITH_TORCHINDUCTOR)), + )), + OpInfo('sort', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_sort, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + )), + OpInfo('unique', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.float16), + sample_inputs_func=sample_inputs_unique, + supports_out=False, + supports_autograd=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Output order is undefined when sorted=False'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('unique_consecutive', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.float16), + sample_inputs_func=sample_inputs_unique_consecutive, + supports_out=False, + supports_autograd=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('put', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + check_batched_gradgrad=False, # vmap complains of the sizes + sample_inputs_func=sample_inputs_put), + OpInfo('take', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + check_batched_grad=False, # vmap complains of the sizes + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_take, + error_inputs_func=error_inputs_take), + OpInfo('scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter, + error_inputs_func=error_inputs_scatter_and_scatter_add), + UnaryUfuncInfo( + 'bfloat16', + op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", 
"test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + )), + UnaryUfuncInfo( + 'bool', + op=lambda x, *args, **kwargs: x.bool(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attributis not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'byte', + op=lambda x, *args, **kwargs: x.byte(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + # The autograd test runner cannot handle functions that change dtype + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'char', + op=lambda x, *args, **kwargs: x.char(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + # The autograd test runner cannot handle functions that change dtype + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'double', + op=lambda x, *args, **kwargs: x.double(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'float', + op=lambda x, *args, **kwargs: x.float(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'half', + 
op=lambda x, *args, **kwargs: x.half(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=True, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'int', + op=lambda x, *args, **kwargs: x.int(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'long', + op=lambda x, *args, **kwargs: x.long(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'short', + op=lambda x, *args, **kwargs: x.short(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'cdouble', + op=torch.Tensor.cdouble, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + )), + UnaryUfuncInfo( + 'cfloat', + op=torch.Tensor.cfloat, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + 
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + )), + UnaryUfuncInfo( + 'chalf', + op=lambda x, *args, **kwargs: x.chalf(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + # use of lambda doesn't work with test_normalize_operator_exhaustive + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager', + device_type='cpu'), + # TypeError: 'int' object is not iterable + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view', + device_type='cpu'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view', + device_type='cpu'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + # RuntimeError: "neg_conj_cuda" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + OpInfo('empty_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + reference_inputs_func=reference_inputs_like_fns, + supports_autograd=False, + skips=( + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), + "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: empty_like is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('zeros_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + error_inputs_sparse_func=error_inputs_sparse_like_fns, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsc), + skips=( + )), + OpInfo('ones_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + skips=( + )), + OpInfo('randn', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.complex32), + op=lambda *args, **kwargs: wrapper_set_seed(torch.randn, *args, **kwargs), + supports_out=True, + sample_inputs_func=sample_inputs_randn, + supports_autograd=False, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + # CPU randn generates different values based on the strides of out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), + # randn fails to warn when resizing its out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('randn_like', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.complex32), + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + error_inputs_sparse_func=error_inputs_sparse_like_fns, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsc), + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('rand_like', + dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex32, torch.complex64, torch.complex128), + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('randint', + dtypes=all_types_and(torch.half, torch.bfloat16), + op=lambda *args, **kwargs: + wrapper_set_seed(torch.randint, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_randint, + supports_autograd=False, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + # CPU randint generates 
different values based on the strides of out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # randint fails to warn when resizing its out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_multiple_devices', + dtypes=[torch.float32, torch.int64], active_if=TEST_WITH_ROCM), + )), + OpInfo('randint_like', + dtypes=all_types_and(torch.half, torch.bfloat16), + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.randint_like, inp, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_randint_like, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('full_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_full_like, + supports_autograd=False, + skips=( + )), + OpInfo('new_zeros', + op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_fns, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + supports_autograd=False), + OpInfo('new_ones', + op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_fns, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + supports_autograd=False), + OpInfo('ones', + op=torch.ones, + supports_autograd=False, + supports_varargs=True, + is_factory_function=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=True, + sample_inputs_func=sample_inputs_ones_zeros, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 
'test_variant_consistency_jit'), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('zeros', + op=torch.zeros, + supports_autograd=False, + is_factory_function=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=True, + sample_inputs_func=sample_inputs_ones_zeros, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('full', + op=torch.full, + supports_autograd=False, + is_factory_function=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=True, + sample_inputs_func=sample_inputs_full, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # RuntimeError: UNSUPPORTED DTYPE: bool + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), + )), + OpInfo('new_empty', + op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_fns, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + supports_autograd=False), + OpInfo('new_empty_strided', + op=lambda x, *args, **kwargs: x.new_empty_strided(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=partial(sample_inputs_new_fns, is_strided=True), + supports_autograd=False, + skips=( + # FX failed to normalize op + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Lazy tensor failures + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestDecomp', 'test_quick'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestProxyTensorOpInfo', 'test_make_fx_exhaustive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestProxyTensorOpInfo', 'test_make_fx_fake_exhaustive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestNNCOpInfo', 'test_nnc_correctness'), + 
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('empty_strided', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.empty_strided, inp, *args, **kwargs), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.half), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_empty_strided, + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance', 'test_operator'), + # Lazy tensor failures + DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestLazyOpInfo'), + # RuntimeError: unsupported operation: more than one element of the written-to tensor refers to a single + # memory location. Please clone() the tensor before performing the operation. + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + )), + OpInfo('empty', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_empty, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + # requires_grad doesn't exist in the jit schema + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestLazyOpInfo'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('eye', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_eye, + error_inputs_func=error_inputs_eye, + supports_out=True, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TODO: same as this? + # https://github.com/pytorch/pytorch/issues/81774 + # also see: arange, new_full + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('empty_permuted', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_empty_permuted, + error_inputs_func=error_inputs_empty_permuted, + supports_out=False, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + # requires_grad doesn't exist in the jit schema + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestLazyOpInfo'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('scalar_tensor', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_scalar_tensor, + supports_autograd=False, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + )), + OpInfo('new_full', + op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_full, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + supports_autograd=False), + OpInfo('multinomial', + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.multinomial, inp, *args, **kwargs), + method_variant=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.Tensor.multinomial, inp, *args, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_multinomial, + error_inputs_func=error_inputs_multinomial, + skips=( + 
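# Illustrative sketch (not part of the OpInfo entries above), assuming wrapper_set_seed
# simply re-seeds the global RNG before each call: random ops like multinomial are wrapped
# this way so that the function and method variants can be compared value-for-value.
# call_with_fixed_seed below is a hypothetical stand-in for that helper.
import torch

def call_with_fixed_seed(fn, *args, _seed=42, **kwargs):
    torch.manual_seed(_seed)                 # reset RNG state before every invocation
    return fn(*args, **kwargs)

probs = torch.tensor([0.1, 0.2, 0.7])
s1 = call_with_fixed_seed(torch.multinomial, probs, 5, replacement=True)
s2 = call_with_fixed_seed(probs.multinomial, 5, replacement=True)
print(torch.equal(s1, s2))                   # True: identical draws under the same seed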
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Strides are not the same! + # This may not be reproducible in CI + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_autograd=False), + OpInfo('normal', + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.normal, inp, *args, **kwargs), + # The inplace variant (Tensor.normal_) is different from torch.normal + inplace_variant=None, + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + sample_inputs_func=sample_inputs_normal_tensor_first, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Tensor-likes are not close! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # Computed gradient is incorrect -- would be an exfail but gradgrad somehow passes + DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # RuntimeError: Difference from {dtype} is larger with decomposition + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick'), + # The inplace variant (Tensor.normal_) is different from torch.normal + # inplace varaint Tensor.normal_ is decomposed using randn_like() + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'))), + OpInfo('normal', + # This has its own variant b/c OpInfos assume the first arg is a Tensor but it is not here + variant_test_name='number_mean', + op=lambda std, mean, *args, **kwargs: + wrapper_set_seed(torch.normal, mean, std, *args, **kwargs), + # The inplace variant (Tensor.normal_) is different from torch.normal + inplace_variant=None, + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + sample_inputs_func=sample_inputs_normal_tensor_second, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'), + 
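# Illustrative sketch (not part of the OpInfo entries above): the separate 'number_mean'
# entry exists because torch.normal also accepts a Python-number mean with a Tensor std,
# while OpInfo assumes the first argument is a Tensor. Both call patterns are documented
# overloads of torch.normal:
import torch

torch.manual_seed(0)
t1 = torch.normal(torch.zeros(3), torch.ones(3))   # Tensor mean, Tensor std
t2 = torch.normal(0.0, torch.ones(3))              # number mean, Tensor std ("number_mean")
print(t1.shape, t2.shape)                          # both torch.Size([3])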
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestEagerFusionOpInfo'), + DecorateInfo(unittest.skip("Skipped!"), 'TestOperators'), + # AssertionError + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'), + # AssertionError + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick'), + # AssertionError in CUDA variant + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestDeviceUtils', 'test_device_mode_ops'))), + OpInfo('bernoulli', + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.bernoulli, inp, *args, **kwargs), + # The inplace variant (Tensor.bernoulli_) is different from torch.bernoulli + inplace_variant=None, + method_variant=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.Tensor.bernoulli, inp, *args, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_bernoulli, + error_inputs_func=error_inputs_bernoulli, + skips=( + # vmap: We do not yet support calling random operations inside of vmap + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Expected RuntimeError when doing an unsafe cast from a result of + # dtype torch.float32 into an out= with dtype torch.lon + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'))), + OpInfo('scatter_add', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_scatter_add, + error_inputs_func=error_inputs_scatter_and_scatter_add, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo('stack', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_stack, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # https://github.com/pytorch/pytorch/issues/77046 + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ), + ), + OpInfo('_chunk_cat', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_chunk_cat, + error_inputs_func=error_inputs_chunk_cat, + supports_autograd=False, + supports_out=True, + ), + OpInfo('hstack', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_hstack_dstack_vstack, + error_inputs_func=error_inputs_hstack_dstack_vstack, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + BinaryUfuncInfo('hypot', + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False), + OpInfo('histogram', + dtypes=floating_types(), + dtypesIfCUDA=_dispatch_dtypes(), # histogram is only implemented on CPU + sample_inputs_func=sample_inputs_histogram, + supports_autograd=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + skips=( + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + # RuntimeError: + # undefined value tensor: + # File "", line 3 + # def the_method(i0): + # return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False) + # ~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Not Implemented on XLA. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla'), + )), + OpInfo('histogramdd', + dtypes=floating_types(), + dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU + sample_inputs_func=sample_inputs_histogramdd, + error_inputs_func=error_inputs_histogramdd, + supports_autograd=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + skips=( + # Not implemented on CUDA + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('histc', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64), + sample_inputs_func=sample_inputs_histc, + supports_out=True, + supports_autograd=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + skips=( + # CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor + # "AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast + # from a result of dtype torch.float32 into an out= with dtype torch.long" + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), + )), + OpInfo('bincount', + dtypes=integral_types_and(), + sample_inputs_func=sample_inputs_bincount, + supports_out=False, + supports_autograd=False, + skips=( + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('bucketize', + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_bucketize, + reference_inputs_func=reference_inputs_bucketize, + error_inputs_func=error_inputs_bucketize, + supports_autograd=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + skips=( + # JIT tests don't work with Tensor keyword arguments + DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('searchsorted', + dtypes=all_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_searchsorted, + supports_autograd=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + ref=reference_searchsorted, + skips=( + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('cat', + ref=_cat_np, + aliases=('concat', 'concatenate'), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), + sample_inputs_func=sample_inputs_cat_concat, + reference_inputs_func=reference_inputs_cat, + error_inputs_func=error_inputs_cat, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + assert_autodiffed=True, + skips=( + # 
https://github.com/pytorch/pytorch/issues/89353 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'), + # RuntimeError: Arguments for call not valid. + # Expected a value of type 'List[Tensor]' for argument + # 'tensors' but instead found type 'Tensor (inferred)'. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + # see https://github.com/pytorch/pytorch/issues/71286 + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + # see https://github.com/pytorch/pytorch/issues/99806 + # RuntimeError: The size of tensor a (25) must match the size of tensor b (0) at non-singleton dimension 0. + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), + )), + OpInfo('unbind', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + ref=reference_unbind, + sample_inputs_func=sample_inputs_unbind, + error_inputs_func=error_inputs_unbind, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_gradgrad=True, + supports_out=False, + ), + OpInfo('vstack', + aliases=('row_stack',), + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_hstack_dstack_vstack, + error_inputs_func=error_inputs_hstack_dstack_vstack, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: _fn() Expected a value of type + # 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)), + OpInfo('dstack', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_hstack_dstack_vstack, + error_inputs_func=error_inputs_hstack_dstack_vstack, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + ), + OpInfo('unfold', + op=lambda x, *args: x.unfold(*args), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Skip operator schema test because this is a functional and not an operator + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + ), + sample_inputs_func=sample_inputs_unfold), + OpInfo('unfold_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_unfold), + OpInfo('msort', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + 
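# Illustrative sketch (not part of the OpInfo entries above): the unfold / unfold_copy
# entries exercise Tensor.unfold(dimension, size, step), which produces sliding windows
# as a view. A tiny worked example of its shape behaviour:
import torch

x = torch.arange(8)
w = x.unfold(0, 3, 2)      # windows of size 3 with stride 2 along dim 0
print(w)
# tensor([[0, 1, 2],
#         [2, 3, 4],
#         [4, 5, 6]])
print(w.shape)             # torch.Size([3, 3])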
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_msort, + skips=( + )), + OpInfo('movedim', + aliases=('moveaxis',), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_movedim_moveaxis, + reference_inputs_func=reference_movedim_moveaxis, + error_inputs_func=error_movedim_moveaxis), + OpInfo('renorm', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_renorm, + error_inputs_func=error_inputs_renorm, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: Difference from float64 is larger with decomposition + # linalg_vector_norm.default than original on output 0. + # Original max diff: 2.560596747969157e-07, + # Decomp max diff: 1.8187482915266173e-06 + DecorateInfo(unittest.skip("Inconsistent accuracy"), 'TestDecomp', 'test_comprehensive', + device_type='cpu', dtypes=(torch.float16,)), + )), + ShapeFuncInfo('repeat', + op=lambda x, dims: x.repeat(dims), + ref=np.tile, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_repeat_tile, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + )), + OpInfo('squeeze', + ref=_squeeze_ref, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_squeeze), + OpInfo('squeeze', + ref=_squeeze_ref, + variant_test_name="multiple", + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_squeeze_multiple), + UnaryUfuncInfo( + 'fill', + ref=_fill_np, + method_variant=None, + sample_kwargs=_fill_sample_kwargs, + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'value': True}), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + skips=( + # JIT has issue when op is passed as lambda + # AssertionError: JIT Test does 
not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("No fill_ op"), 'TestCudaFuserOpInfo'), + DecorateInfo(unittest.skip("No fill_ op"), 'TestNNCOpInfo'), + )), + OpInfo('resize_', + op=lambda x, shape: x.clone().resize_(shape), + method_variant=None, + inplace_variant=torch.Tensor.resize_, + # the test fails because resize_ doesn't work with imag views as expected by the test + # https://github.com/pytorch/pytorch/issues/65945 + test_neg_view=False, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + skips=( + # Cannot resize variables that require grad + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), + ), + sample_inputs_func=sample_inputs_resize_ops), + OpInfo('resize_as_', + op=lambda x, other: torch.resize_as_(x.clone(), other), + method_variant=None, + inplace_variant=torch.Tensor.resize_as_, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + skips=( + # Cannot resize variables that require grad + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), + ), + sample_inputs_func=sample_inputs_resize_ops), + OpInfo('take_along_dim', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_take_along_dim, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=( + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + ShapeFuncInfo('tile', + ref=np.tile, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_repeat_tile), + OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid' + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_trapezoid), + OpInfo('trapezoid', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_trapezoid), + OpInfo('cumulative_trapezoid', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + supports_out=False, + 
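# Illustrative sketch (not part of the OpInfo entries above): trapz / trapezoid /
# cumulative_trapezoid all perform trapezoidal-rule integration; a short worked example
# of how the cumulative form relates to the scalar result:
import torch

x = torch.linspace(0, 1, 101)
y = x ** 2
area = torch.trapezoid(y, x)                  # ~1/3, the integral of x^2 over [0, 1]
running = torch.cumulative_trapezoid(y, x)    # partial integrals; last entry equals `area`
print(float(area), float(running[-1]))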
sample_inputs_func=sample_cumulative_trapezoid,), + OpInfo('unsqueeze', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + assert_jit_shape_analysis=True, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + sample_inputs_func=sample_unsqueeze), + BinaryUfuncInfo('xlogy', + aliases=('special.xlogy',), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + promotes_int_to_float=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + # We don't test 0 as the gradient will be NaN and it'll break + rhs_make_tensor_kwargs=dict(low=0.01)), + OpInfo('zero_', + op=lambda x: torch.zero_(x.clone()), + method_variant=None, + inplace_variant=torch.Tensor.zero_, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_gradgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + sample_inputs_func=sample_inputs_zero_), + OpInfo('logsumexp', + aliases=('special.logsumexp',), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_fast_mode=False, + sample_inputs_func=sample_inputs_logsumexp, + reference_inputs_func=reference_inputs_logsumexp), + OpInfo('trace', + dtypes=all_types_and_complex(), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + error_inputs_func=error_inputs_trace, + supports_inplace_autograd=False, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_trace), + OpInfo('transpose', + ref=_numpy_ref_transpose, + aliases=('swapdims', 'swapaxes'), + assert_jit_shape_analysis=True, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_transpose_swapdims), + OpInfo('T', + op=lambda x: x.T, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_T, + error_inputs_func=error_inputs_T), + OpInfo('H', + op=lambda x: x.H, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 
'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_T), + OpInfo('mT', + op=lambda x: x.mT, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_adjoint), + OpInfo('mH', + op=lambda x: x.mH, + aliases=('adjoint',), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_adjoint), + OpInfo('tril', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_tril_triu, + sample_inputs_func=sample_inputs_tril_triu), + OpInfo('triu', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_tril_triu, + sample_inputs_func=sample_inputs_tril_triu), + OpInfo('triu_indices', + dtypes=_dispatch_dtypes((torch.int32, torch.int64)), + sample_inputs_func=sample_inputs_trilu_indices, + ref=lambda h, w, ofs=0, dtype=torch.long, device='cpu' : np.array(np.triu_indices(h, ofs, w), dtype=dtype), + supports_out=False, + supports_autograd=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + OpInfo('tril_indices', + dtypes=_dispatch_dtypes((torch.int32, torch.int64)), + sample_inputs_func=sample_inputs_trilu_indices, + ref=lambda h, w, ofs=0, dtype=torch.long, device='cpu' : np.array(np.tril_indices(h, ofs, w), dtype=dtype), + supports_out=False, + supports_autograd=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + OpInfo('kron', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - 
alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_kron, + decorators=( + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + OpInfo('inner', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_inner, + ), + OpInfo('tensordot', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_tensordot, + skips=( + # Skip operator schema test because this is a functional and not an operator. + # Reference: https://github.com/pytorch/pytorch/issues/54574 + DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + ) + ), + OpInfo('to_sparse', + op=lambda x, *args: x.to_sparse(*args), + sample_inputs_func=sample_inputs_to_sparse, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + backward_dtypes=floating_types(), + backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_sparse_csr=True, + supports_sparse_csc=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_cow_input_no_materialize=False, + skips=( + # NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend + DecorateInfo(unittest.skip(""), 'TestCommon', 'test_noncontiguous_samples'), + # TODO: FIXME: complex inputs requiring grad error in forward + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'), + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Allowed exception: sparse tensors don't have strides + DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.skip("Allowed exception"), 'TestTags', 'test_tags'), + # TODO: implement csr.to_sparse(sample_dim) where sampled_dim is 1. + DecorateInfo(unittest.skip("csr.to_sparse(1) not implemented. Skipped!"), + 'TestSparseCSR', 'test_sparse_csr_consistency'), + # Compiler issue on ROCm. 
Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + ) + ), + OpInfo('logcumsumexp', + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), + backward_dtypes=floating_and_complex_types_and(torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'), + # RuntimeError: "max_values_cpu" not implemented for 'ComplexDouble' + # Falling back to non-numerically stablized exp, causing nan in the results. + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD', dtypes=[torch.complex128]), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), + ), + sample_inputs_func=sample_inputs_logcumsumexp, + error_inputs_func=error_inputs_logcumsumexp), + UnaryUfuncInfo('sigmoid', + aliases=('special.expit', 'nn.functional.sigmoid'), + aten_backward_name='sigmoid_backward', + ref=reference_sigmoid if TEST_SCIPY else None, + decorators=(precisionOverride({torch.float16: 1e-2, + torch.complex64: 1e-1, + torch.bfloat16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/56012 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.complex64, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.chalf, torch.complex64, torch.cdouble])), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.complex32, torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + assert_autodiffed=True, + # sigmoid(z) = 1 / (1 + exp(-z)), at z = j * pi * odd_number, the denominator is zero + reference_numerics_filter=NumericsFilter( + condition=lambda x: (close_to_int(x / (math.pi * 1j)) + if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), + safe_val=0)), + UnaryUfuncInfo('digamma', + ref=scipy.special.digamma if TEST_SCIPY else None, + aliases=('special.psi', 'special.digamma',), + decorators=(precisionOverride({torch.float16: 5e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('erf', + ref=scipy.special.erf if TEST_SCIPY else None, + aliases=('special.erf', ), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + + ), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('erfc', + ref=scipy.special.erfc if TEST_SCIPY else None, + aliases=('special.erfc', ), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('erfinv', + ref=scipy.special.erfinv if TEST_SCIPY else None, + aliases=('special.erfinv', ), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2, + torch.float32: 1e-4}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + domain=(-1, 1), + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + )), + OpInfo("nn.functional.smooth_l1_loss", + ref=reference_smooth_l1_loss, + sample_inputs_func=sample_inputs_smooth_l1_loss, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + backward_dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED + # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),)), + OpInfo( + "nn.functional.l1_loss", + ref=loss_reference_reduction_wrapper(lambda input, target: np.abs(input - target)), + sample_inputs_func=sample_inputs_l1_loss, + error_inputs_func=error_inputs_l1_loss, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED + # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. 
+ DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32,), + ), + ), + ), + UnaryUfuncInfo('lgamma', + ref=reference_lgamma if TEST_SCIPY else None, + aliases=('special.gammaln', ), + decorators=(precisionOverride({torch.float16: 7e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + ), + # lgamma have multiple singularities at x <= 0 + reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), + OpInfo( + 'logdet', + dtypes=floating_and_complex_types(), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), + # `log_softmax` supports different dtypes based on whether `dtype` argument, + # is passed or not. Hence two OpInfo entries, one with dtype and other without. + OpInfo( + 'log_softmax', + aliases=('special.log_softmax', 'nn.functional.log_softmax'), + supports_out=True, + aten_backward_name='_log_softmax_backward_data', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_softmax_variant, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + assert_autodiffed=True), + OpInfo( + 'log_softmax', + variant_test_name='with_dtype', + aliases=('special.log_softmax', 'nn.functional.log_softmax'), + supports_out=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + UnaryUfuncInfo('logit', + aten_backward_name='logit_backward', + ref=scipy.special.logit if TEST_SCIPY else None, + domain=(0, 1), + aliases=('special.logit', ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 5e-1, + torch.float16: 5e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_logit), + OpInfo('where', + # Currently only the `input` is tested in gradcheck. + # If we pass `condition` first, none of the input which supports + # autograd will be tested. Hence the following lambda. 
+ op=lambda self, condition, other, **kwargs: torch.where(condition, self, other, **kwargs), + ref=lambda self, condition, other: np.where(condition, self, other), + sample_inputs_func=sample_inputs_where, + reference_inputs_func=reference_inputs_where, + error_inputs_func=error_inputs_where, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(onlyCUDA, "TestCommon", 'test_errors'),), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + ), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf)), + OpInfo('nonzero', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_nonzero, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # nonzero(): argument 'out' must be Tensor, not tuple + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # https://github.com/pytorch/pytorch/issues/67458 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # nonzero is not raising a warning when the out is resized + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # Can't find schemas for this operator for some reason + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + # Compiler issue on ROCm. Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + )), + OpInfo('nonzero_static', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_nonzero_static, + supports_out=False, + supports_autograd=False, + decorators=[onlyCPU], + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), + DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'), + DecorateInfo(unittest.expectedFailure, 'TestVmapOperatorsOpInfo', 'test_op_has_batch_rule'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + )), + # Following tests are for jiterator's python interface + # Jiterator can be used to author elementwise CUDA kernel + # jiterator._create_jit_fn returns a callable that behaves like a regular pytorch op + # See create_jit_fn in jiterator.py for more information + UnaryUfuncInfo( + 'jiterator_unary', + op=torch.cuda.jiterator._create_jit_fn("template T unary(T x) { return x * x + x; }"), + ref=lambda x: x * x + x, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=[ + onlyCUDA, + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 
'test_reference_numerics_hard'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_normal'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_small'), + ], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Skip reference_numerics tests for bool type, as the defined function doesn't work for bool + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.bool]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard', + dtypes=[torch.bool]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + dtypes=[torch.bool]), + # ROCm generates -inf+infj instead of nan+infj for complex64 for some of the results + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.complex64], active_if=TEST_WITH_ROCM), + # Expected failure: torch.jiterator_unary is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + BinaryUfuncInfo( + 'jiterator_binary', + op=torch.cuda.jiterator._create_jit_fn( + "template T binary(T x, T y, T alpha) { return x + alpha * y; }", alpha=1), + ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ + else np.add(input, np.multiply(alpha, other)), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-3.14), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + supports_rhs_python_scalar=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_binary is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + OpInfo( + 'jiterator_4inputs_with_extra_args', + op=torch.cuda.jiterator._create_jit_fn( + "template T binary(T i0, T i1, T i2, T i3, T alpha, T beta) { return alpha * i0 + beta * i1 + i2 + i3; }", + alpha=1, beta=1), + ref=lambda i0, i1, i2, i3, *, alpha=1, beta=1: alpha * i0 + beta * i1 + i2 + i3, + 
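# Illustrative sketch (not part of the OpInfo entries above): the jiterator_* entries test
# torch.cuda.jiterator, which compiles a string of elementwise CUDA code into a callable
# that behaves like a regular op. A minimal usage example, assuming a CUDA device is
# available and using the standard "template <typename T>" header for the kernel string:
import torch

if torch.cuda.is_available():
    code = "template <typename T> T plus_one(T x) { return x + T(1); }"
    fn = torch.cuda.jiterator._create_jit_fn(code)   # returns an op-like callable
    x = torch.arange(4, device='cuda', dtype=torch.float32)
    print(fn(x))                                     # tensor([1., 2., 3., 4.], device='cuda:0')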
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=4, alpha=3.14, beta=-4.20), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + BinaryUfuncInfo( + 'jiterator_binary_return_by_ref', + op=torch.cuda.jiterator._create_multi_output_jit_fn( + """ + template + void binary_return_by_ref(T i0, T i1, T& out0) { + out0 = i0 + i1; + } + """, + num_outputs=1), + ref=operator.add, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-0.42), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + supports_rhs_python_scalar=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + OpInfo( + 'jiterator_2inputs_2outputs', + op=torch.cuda.jiterator._create_multi_output_jit_fn( + """ + template + void binary_2outputs(T i0, T i1, T& out0, T& out1) { + out0 = i0 + i1; + out1 = i0 - i1; + } + """, + num_outputs=2), + ref=lambda i0, i1, *, alpha=1: (i0 + i1, i0 - i1), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support 
CompositeCompliantTensor + # The following test should be an expectedFailure, but it causes cascading failures in CUDA, so it is skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_2inputs_2outputs is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + # `torch.norm` has multiple code paths depending on the value of `p`. + # These paths have different dtype support. Also, JIT supports + # most variants but not all of them. So we split the OpInfo entries + # for `norm` based on the code paths and JIT support. + OpInfo( + "norm", + sample_inputs_func=sample_inputs_norm, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # TODO: Benchmark again with the new implementation + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Dispatches in Python to vector_norm. Not sure how to make this test happy + # Happens to pass on complex64. Also a mystery + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float32,)),) + ), + OpInfo('norm', + variant_test_name='nuc', + sample_inputs_func=sample_inputs_norm_nuc, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + check_batched_gradgrad=False, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients + # got: Could not allocate memory to change Tensor SizesAndStrides! + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_and_complex_types(), + dtypesIfCUDA=floating_and_complex_types(), + skips=( + # Dispatches in Python to matrix_norm. Not sure how to make this test happy + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.complex64, torch.float32,)),) + ), + OpInfo('norm', + variant_test_name='fro', + sample_inputs_func=sample_inputs_norm_fro, + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients + # got: Could not allocate memory to change Tensor SizesAndStrides! + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + skips=( + # MPS has some mild accuracy issues for float16. We divide the tolerances by 10 + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-4, rtol=0.01)}), + 'TestConsistency', + 'test_output_match', + + ), + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + # Dispatches in Python to vector_norm. 
Not sure how to make this test happy + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.complex64, torch.float32,)),) + ), + OpInfo( + "norm", + variant_test_name="inf", + sample_inputs_func=sample_inputs_norm_inf, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + # fast gradcheck produces NaNs + gradcheck_fast_mode=False, + skips=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-3, rtol=1e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda', + ), + # Dispatches in Python to vector_norm. Not sure how to make this test happy + # Happens to pass on complex64. Also a mystery + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float32,)) + ), + ), + OpInfo('t', + sample_inputs_func=sample_inputs_t, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + assert_autodiffed=True, + error_inputs_func=error_inputs_t), + OpInfo( + "nn.functional.dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Probably because we have used lambda for the op here + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # inplace variant dispatches to dropout kernel, while on CUDA + # the op dispatches to _fused_dropout (with a few more conditions) + # hence, different values and this skip here + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + supports_out=False, + sample_inputs_func=sample_inputs_dropout, + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)), + OpInfo( + "native_dropout_backward", + op=torch.ops.aten.native_dropout_backward.default, + aten_name="native_dropout_backward", + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_dropout_backward, + skips=( + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + # Lazy tensor failures + DecorateInfo(unittest.skip('Skipped!'), 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + # These tests fail only when built with ASAN + DecorateInfo(unittest.skip("Fails with ASAN"), 'TestLazyOpInfo', 
'test_correctness', active_if=TEST_WITH_ASAN), + DecorateInfo( + unittest.skip("Fails with ASAN"), + 'TestLazyOpInfo', + 'test_correctness_with_reusing_ir', + active_if=TEST_WITH_ASAN + ), + ), + ), + OpInfo( + "nn.functional.dropout2d", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + check_batched_forward_grad=False, + # As per the docs, valid input dims are (3, 4) + sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(3, 4)), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.dropout3d", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + check_batched_forward_grad=False, + # As per the docs, valid input dims are (4, 5) + sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(4, 5)), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.alpha_dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.alpha_dropout, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_wrapper=wrapper_set_seed, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=sample_inputs_dropout, + check_batched_forward_grad=False, + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.alpha_dropout, input, *args, **kwargs, inplace=True), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # AssertionError: Tensor-likes are not close! 
+ # Fails in cuda11.7 + # Error Log: https://github.com/pytorch/pytorch/actions/runs/3440108478/jobs/5738475757 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),), + # In training mode, feature_alpha_dropout currently doesn't support inputs of complex dtype + # unlike when `train=False`, it supports complex inputs, hence 2 OpInfos to cover all cases + OpInfo( + "nn.functional.feature_alpha_dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), + variant_test_name="with_train", + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: + # vmap: We do not yet support calling random operations inside of vmap. + # Please perform random operations outside of vmap as a workaround + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', "test_forward_mode_AD"), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', "test_inplace_forward_mode_AD"), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + # As per the docs, valid input dims are (4, 5) + sample_inputs_func=partial(sample_inputs_dropout, train=True, valid_input_dim=(4, 5)), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.feature_alpha_dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), + variant_test_name="without_train", + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),), + gradcheck_wrapper=wrapper_set_seed, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=partial(sample_inputs_dropout, train=False), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.one_hot", + ref=reference_one_hot, + supports_out=False, + dtypes=_dispatch_dtypes((torch.int64,)), + sample_inputs_func=sample_inputs_one_hot, + ), + OpInfo( + "nn.functional.embedding", + aten_backward_name="embedding_dense_backward", + # We use lambda to reshuffle the positional arguments. + # This is because currently only the `input` field of SampleInput + # is tested in gradient tests. 
+ op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_embedding, + error_inputs_func=error_inputs_embedding, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Fails on CI https://github.com/pytorch/pytorch/issues/85377 + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), + # Reference: https://github.com/pytorch/pytorch/issues/67084 + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'), + # Not a problem: embedding does weird stuff to its input (it renormalizes) + DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), + ), + supports_expanded_weight=True, + supports_out=False, + ), + OpInfo( + "nn.functional.embedding_bag", + # We use lambda to reshuffle the positional arguments. + # This is because currently only the `input` field of SampleInput + # is tested in gradient tests. + op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + # backward is not supported for mode `max` and dtype `bfloat16` + backward_dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_embedding_bag, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Not a problem: embedding_bag does weird stuff to its input (it renormalizes) + DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False, + supports_gradgrad=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + ), + OpInfo( + "nn.functional.multi_head_attention_forward", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.multi_head_attention_forward, input, *args, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_multi_head_attention_forward, + skips=( + # Tensor-likes are not close + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples', dtypes=(torch.float32,)), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_comprehensive'), + + # TODO skip this for now since we can't skip on runtime arch support (taken from scaled_dot_product_attention) + DecorateInfo(unittest.skip("Skipped!"), 'TestInductorOpInfo', 'test_comprehensive'), + # randomness + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # lambda impl + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # tests running 
very slowly break slow tests, so we skip them instead of using `slowTest`. + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), + DecorateInfo( + unittest.skip("Skipped - baddbmm decomp does not have enough precision for 16-bit float"), + 'TestDecomp', + 'test_comprehensive', + dtypes=(torch.bfloat16, torch.float16), + ), + DecorateInfo( + unittest.skip("Skipped - baddbmm decomp does not have enough precision for 16-bit float"), + 'TestDecomp', + 'test_quick', + dtypes=(torch.bfloat16, torch.float16))), + supports_out=False, + supports_gradgrad=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + ), + UnaryUfuncInfo( + "nn.functional.softplus", + aten_backward_name='softplus_backward', + ref=reference_softplus, + sample_kwargs=lambda device, dtype, input: ({'beta': 3, 'threshold': .2}, {'beta': 3, 'threshold': .2}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'beta': 3, 'threshold': .2}), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + decorators=( + DecorateInfo( + toleranceOverride + ({ + torch.half: tol(atol=1e-2, rtol=1e-2), + torch.bfloat16: tol(atol=1e-2, rtol=1e-2), + }), + 'TestUnaryUfuncs'), + ), + ), + OpInfo( + "nn.functional.mse_loss", + aten_backward_name='mse_loss_backward', + ref=loss_reference_reduction_wrapper(lambda input, target: (input - target) ** 2), + sample_inputs_func=sample_inputs_loss, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.float16), + backward_dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + backward_dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + skips=( + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ), + ), + OpInfo( + "nn.functional.grid_sample", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_grid_sample, + reference_inputs_func=reference_inputs_grid_sample, + supports_gradgrad=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + gradcheck_nondet_tol=1e-15), + # TODO: delete this OpInfo once we add meta support for grid_sampler_3d + OpInfo( + "grid_sampler_2d", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_grid_sampler_2d, + supports_gradgrad=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + gradcheck_nondet_tol=1e-15), + OpInfo( + "argwhere", + ref=np.argwhere, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_argwhere, + skips=( + # Compiler issue on ROCm. 
Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + ), + ), + ReductionOpInfo( + 'all', + identity=True, + supports_autograd=False, + result_dtype=torch.bool, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.all), + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), + ), + ), + ReductionOpInfo( + 'any', + identity=False, + supports_autograd=False, + result_dtype=torch.bool, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.any), + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), + ), + ), + ReductionOpInfo( + 'amax', + nan_policy='propagate', + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + ref=reference_reduction_numpy(np.amax), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + error_inputs_func=error_inputs_aminmax_amax_amin, + ), + ReductionOpInfo( + 'amin', + nan_policy='propagate', + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + ref=reference_reduction_numpy(np.amin), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + error_inputs_func=error_inputs_aminmax_amax_amin, + ), + ReductionOpInfo( + 'argmax', + supports_multiple_dims=False, + supports_autograd=False, + assert_jit_shape_analysis=True, + result_dtype=torch.int64, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), + ), + ReductionOpInfo( + 'argmin', + supports_multiple_dims=False, + supports_autograd=False, + result_dtype=torch.int64, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), + ), + ReductionOpInfo( + 'count_nonzero', + identity=0, + supports_out=False, + supports_autograd=False, + result_dtype=torch.int64, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_reduction_count_nonzero, + ref=reference_reduction_numpy(np.count_nonzero), + skips=( + # FIXME: count_nonzero does not accept keepdim kwarg + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 
'TestReductions', 'test_dim_offbounds_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + ), + ), + ReductionOpInfo( + 'mean', + nan_policy='propagate', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # FIXME: mean needs 'dim' parameter when using the 'out' overload. + # Adding it with 'generate_args_kwargs' does not work, since these also get passed + # onto the reference implementations. + supports_out=False, + assert_autodiffed=True, + assert_jit_shape_analysis=True, + promotes_int_to_float=True, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.mean), + error_inputs_func=error_inputs_mean, + skips=( + # FIXME: mean does not support passing keepdim without passing dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: mean reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', + device_type='cuda', dtypes=[torch.complex64]), + ), + ), + ReductionOpInfo( + 'nanmean', + nan_policy='omit', + assert_autodiffed=True, + promotes_int_to_float=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True), + ref=reference_reduction_numpy(np.nanmean), + skips=( + # AssertionError: False is not true : + # Failure in testing nodes' autodifferentiation. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # FIXME: prod reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + device_type='cuda', dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', + device_type='cuda', dtypes=[torch.complex64]), + ), + ), + ReductionOpInfo( + 'std', + nan_policy='propagate', + supports_out=True, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + promotes_int_to_float=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + ref=reference_std_var(np.std), + generate_args_kwargs=generate_std_var_kwargs, + skips=( + # FIXME: cannot specify keepdim without dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=(torch.float16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + dtypes=(torch.float16,)), + ), + ), + ReductionOpInfo( + 'std', + variant_test_name='unbiased', + nan_policy='propagate', + supports_out=False, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + promotes_int_to_float=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + skips=( + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionOpInfo( + 'var', + nan_policy='propagate', + supports_out=True, + assert_autodiffed=True, + promotes_int_to_float=True, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + ref=reference_std_var(np.var), + generate_args_kwargs=generate_std_var_kwargs, + skips=( + # FIXME: cannot specify keepdim without dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 
'test_ref_small_input'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'), + # NumPy is giving NaN for this + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'), + ), + ), + ReductionOpInfo( + 'var', + variant_test_name='unbiased', + nan_policy='propagate', + supports_out=False, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + promotes_int_to_float=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + skips=( + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionOpInfo( + 'prod', + identity=1, + nan_policy='propagate', + supports_multiple_dims=False, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_int64=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_prod, + ref=prod_numpy, + skips=( + # FIXME: prod does not support passing keepdim without passing dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: prod reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: prod does not support passing None to dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16, torch.complex64]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + dtypes=[torch.uint8, torch.float16, torch.complex64]), + # FIXME: ValueError: The data in MaskedTensor a and Tensor b do not match + DecorateInfo(unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', + dtypes=[torch.float16]), + ), + ), + ReductionOpInfo( + 'sum', + identity=0, + nan_policy='propagate', + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_int64=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + ref=reference_reduction_numpy(np.sum), + error_inputs_sparse_func=error_inputs_sparse_reduction_sum, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_reduction_sum, 
layout=torch.sparse_bsc), + skips=( + # FIXME: sum does not support passing keepdim without passing dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', + dtypes=[torch.float32]), + ), + ), + ReductionOpInfo( + 'nansum', + identity=0, + nan_policy='omit', + supports_out=True, + promotes_int_to_int64=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True), + ref=reference_reduction_numpy(np.nansum), + skips=( + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # FIXME: nansum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: flaky test so skipped instead of xfailed + # possibly bad low precision reference in numpy + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + ), + ), + OpInfo( + "nn.functional.ctc_loss", + dtypes=floating_types(), + supports_out=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_ctc_loss, + skips=( + # https://github.com/pytorch/pytorch/issues/67462 + # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0 + DecorateInfo( + unittest.expectedFailure, + "TestBwdGradients", + "test_fn_grad", + dtypes=(torch.float64,), + ), + # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented + DecorateInfo( + unittest.expectedFailure, + "TestBwdGradients", + "test_fn_gradgrad", + dtypes=(torch.float64,), + ), + # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32,), + ), + # Ref: https://github.com/pytorch/pytorch/issues/85231 + DecorateInfo(unittest.skip("Fails with ASAN"), + 'TestProxyTensorOpInfo', + 'test_make_fx_fake_exhaustive', active_if=TEST_WITH_ASAN), + ), + ), + OpInfo( + "nn.functional.cosine_embedding_loss", + dtypes=all_types_and(torch.half, torch.bfloat16, torch.bool), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_cosine_embedding_loss, + ), + OpInfo( + "nn.functional.nll_loss", + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_nll_loss, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # TODO: Avoid COW materialize + 
supports_cow_input_no_materialize=False, + skips=( + # RuntimeError: + # undefined value tensor: + # File "<string>", line 3 + # def the_method(i0, i1): + # return torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32)) + # ~~~~~~ <--- HERE + DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + # Fails for unknown reason: https://github.com/pytorch/pytorch/issues/120782 + DecorateInfo( + unittest.skip("Skipped!"), + "TestCompositeCompliance", + "test_cow_input", + device_type='cuda', + ), + ), + ), + OpInfo( + "nn.functional.gaussian_nll_loss", + dtypes=floating_types_and(torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_gaussian_nll_loss, + error_inputs_func=error_inputs_gaussian_nll_loss, + skips=( + # Pre-existing condition (calls .item); needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + # Pre-existing condition (calls .item); needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ), + ), + OpInfo( + "nn.functional.hinge_embedding_loss", + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_hinge_embedding_loss, + error_inputs_func=error_inputs_hinge_embedding_loss, + reference_inputs_func=reference_inputs_hinge_embedding_loss, + ), + OpInfo( + "nn.functional.huber_loss", + aten_backward_name='huber_loss_backward', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + sample_inputs_func=sample_inputs_huber_loss, + error_inputs_func=error_inputs_huber_loss, + skips=( + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ) + ), + OpInfo( + "nn.functional.pdist", + ref=reference_pdist, + sample_inputs_func=sample_inputs_pdist, + dtypes=floating_types(), + supports_out=False, + supports_gradgrad=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + skips=( + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + ) + ), + OpInfo( + "nn.functional.poisson_nll_loss", + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_poisson_nll_loss, + error_inputs_func=error_inputs_poisson_nll_loss, + ), + OpInfo( + "argsort", + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_sort, + supports_out=False, + supports_autograd=False, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32,), + ), + ), + ), + OpInfo( + "repeat_interleave", + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_repeat_interleave, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.pairwise_distance", + ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: ( + np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p) + ), + sample_inputs_func=sample_inputs_pairwise_distance, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.pixel_shuffle", + sample_inputs_func=sample_inputs_pixel_shuffle, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.pixel_unshuffle", + sample_inputs_func=sample_inputs_pixel_unshuffle, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.kl_div", + sample_inputs_func=sample_inputs_kl_div, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo( + "diagflat", + ref=lambda 
input, offset=0: np.diagflat(input, k=offset), + sample_inputs_func=sample_inputs_diagflat, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='sum', + # complex not added to dtypes as complex gradients are not properly handled + # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='prod', + # complex not added to dtypes as complex gradients are not properly handled + # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_scatter_reduce, + skips=( + # Not implemented + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + ), + ), + OpInfo( + 'scatter_reduce', + variant_test_name='mean', + # complex not added to dtypes as complex gradients are not properly handled + # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='amin', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='amax', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + '_segment_reduce', + aten_name='segment_reduce', + variant_test_name='lengths', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + # RuntimeError: derivative for aten::_segment_reduce_backward is not implemented + supports_gradgrad=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=sample_inputs_segment_reduce, + skips=( + # FIXME: CUDA driver API confirmed a leak in + # __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32 + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), + OpInfo( + '_segment_reduce', + aten_name='segment_reduce', + variant_test_name='offsets', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + # RuntimeError: derivative for 
aten::_segment_reduce_backward is not implemented + supports_gradgrad=False, + # TODO: Avoid COW materialize + supports_cow_input_no_materialize=False, + sample_inputs_func=partial(sample_inputs_segment_reduce, mode='offsets'), + skips=( + # FIXME: CUDA driver API confirmed a leak in + # __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32 + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), +] +op_db += opinfo.definitions.op_db + + +# Separate registry for experimental Python Reference OpInfos. +python_ref_db = [ + # + # Elementwise Unary OpInfos + # + ElementwiseUnaryPythonRefInfo( + "_refs.abs", + torch_opinfo_name="abs", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/49224 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + dtypes=[torch.int8], active_if=TEST_WITH_ASAN), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.acos", + torch_opinfo_name="acos", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.acosh", + torch_opinfo_name="acosh", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.asin", 
+ torch_opinfo_name="asin", + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}), + 'TestUnaryUfuncs', device_type='cuda'), + precisionOverride({torch.bfloat16: 1e-2}), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.asinh", + torch_opinfo_name="asinh", + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + PythonRefInfo( + "_refs.lerp", + torch_opinfo_name="lerp", + ), + PythonRefInfo( + "_refs.ones", + torch_opinfo_name="ones", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.zeros", + torch_opinfo_name="zeros", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.cauchy", + torch_opinfo_name="cauchy", + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.exponential", + torch_opinfo_name="exponential", + supports_out=True, + decorators=( + # dtypes that do not support check_uniform_bounds of rand_like + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), + + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: exponential is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: exponential is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip("Expected: exponential is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.geometric", + torch_opinfo_name="geometric", + supports_out=True, + decorators=( + # dtypes that do not support check_uniform_bounds of rand_like + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), + + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_python_ref_executor', device_type='cuda'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.log_normal", + torch_opinfo_name="log_normal", + supports_out=True, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_python_ref_executor', device_type='cuda'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.normal", + torch_opinfo_name="normal", + supports_out=True, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + PythonRefInfo( + "_refs.normal", + torch_opinfo_name="normal", + torch_opinfo_variant_name="number_mean", + supports_out=True, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + PythonRefInfo( + "_refs.normal_", + op=torch.Tensor.normal_, + torch_opinfo_name="normal", + torch_opinfo_variant_name="in_place", + supports_out=False, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + PythonRefInfo( + "_refs.arange", + torch_opinfo_name="arange", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.linspace", + torch_opinfo_name="linspace", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # cpu implementation is wrong on some integral types + # https://github.com/pytorch/pytorch/issues/81996 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + + # cuda implementation is off-by-one on some inputs due to precision issues + # https://github.com/pytorch/pytorch/issues/82230 + 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.linspace", + torch_opinfo_name="linspace", + torch_opinfo_variant_name="tensor_overload", + skips=( + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + + # cpu implementation is wrong on some integral types + # https://github.com/pytorch/pytorch/issues/81996 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + + # cuda implementation is off-by-one on some inputs due to precision issues + # https://github.com/pytorch/pytorch/issues/82230 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.logspace", + torch_opinfo_name="logspace", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.logspace", + torch_opinfo_name="logspace", + torch_opinfo_variant_name="tensor_overload", + skips=( + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int16, 
torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.meshgrid", + torch_opinfo_name="meshgrid", + torch_opinfo_variant_name="variadic_tensors", + ), + PythonRefInfo( + "_refs.take_along_dim", + torch_opinfo_name="take_along_dim", + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.to", + torch_opinfo_name="to", + ), + PythonRefInfo( + "_refs.triu", + torch_opinfo_name="triu", + ), + PythonRefInfo( + "_refs.tril", + torch_opinfo_name="tril", + ), + PythonRefInfo( + "_refs.triu_indices", + torch_opinfo_name="triu_indices", + # the implementation uses torch.stack that violates view consistency + validate_view_consistency=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + PythonRefInfo( + "_refs.tril_indices", + torch_opinfo_name="tril_indices", + # the implementation uses torch.stack that violates view consistency + validate_view_consistency=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + PythonRefInfo( + "_refs.meshgrid", + torch_opinfo_name="meshgrid", + torch_opinfo_variant_name="list_of_tensors", + ), + PythonRefInfo( + "_refs.movedim", + aliases=('moveaxis',), + torch_opinfo_name="movedim", + ), + PythonRefInfo( + "_refs.bucketize", + torch_opinfo_name="bucketize", + skips=( + # RuntimeError: It appears that you're trying to get value out of a tracing tensor with + # aten._local_scalar_dense.default - erroring out! [...] 
+ # triggered by mid_val = boundaries[mid] + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref_executor"), + ) + ), + PythonRefInfo( + "_refs.equal", + torch_opinfo_name="equal", + skips=( + # RuntimeError: Cannot cast FakeTensor to number + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta',), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.atan", + torch_opinfo_name="atan", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + active_if=TEST_WITH_ROCM, device_type='cuda', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + active_if=TEST_WITH_ROCM, device_type='cuda', + dtypes=[torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.atanh", + torch_opinfo_name="atanh", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + active_if=TEST_WITH_ROCM, device_type='cuda', + dtypes=[torch.complex128]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.bitwise_not", + torch_opinfo_name="bitwise_not", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.ceil", + torch_opinfo_name="ceil", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + ), + PythonRefInfo( + "_refs.item", + torch_opinfo_name="item", + skips=( + # RuntimeError: Cannot cast FakeTensor(FakeTensor(..., device='meta', size=()), cpu) to number + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta'), + # ValueError: Can't convert a tensor with 10 elements to a number! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.conj_physical", + torch_opinfo_name="conj_physical", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.cos", + torch_opinfo_name="cos", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', + active_if=IS_WINDOWS), + # This fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! + # Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.cosh", + torch_opinfo_name="cosh", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (6000,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.digamma", + torch_opinfo_name="digamma", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.erf", + torch_opinfo_name="erf", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.erfinv", + torch_opinfo_name="erfinv", + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2, + torch.float32: 1e-4}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611 + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.erfc", + torch_opinfo_name="erfc", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.exp", + torch_opinfo_name="exp", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.expm1", + torch_opinfo_name="expm1", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.exp2", + torch_opinfo_name="exp2", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.fill", + torch_opinfo_name="fill", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.floor", + torch_opinfo_name="floor", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + ), + ElementwiseUnaryPythonRefInfo( + "_refs.frexp", + torch_opinfo_name="frexp", + # Skipped due to numerical failures on Windows CI. + # This is also skipped in frexp earlier in the file. 
+ skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.frac", + torch_opinfo_name="frac", + skips=( + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.imag", + torch_opinfo_name="imag", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isfinite", + torch_opinfo_name="isfinite", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isinf", + torch_opinfo_name="isinf", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isposinf", + torch_opinfo_name="isposinf", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isneginf", + torch_opinfo_name="isneginf", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isnan", + torch_opinfo_name="isnan", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isreal", + torch_opinfo_name="isreal", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.i0", + torch_opinfo_name="i0", + decorators=(precisionOverride({torch.bfloat16: 3e-1, + torch.float16: 5e-1}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.int8,)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.lgamma", + torch_opinfo_name="lgamma", + decorators=(precisionOverride({torch.float16: 7e-1}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.multigammaln", + torch_opinfo_name="mvlgamma", + torch_opinfo_variant_name="mvlgamma_p_1", + skips=skips_mvlgamma(), + decorators=( + DecorateInfo(torch.testing._internal.common_utils.markDynamoStrictTest, 'TestUnaryUfuncs', + 'test_reference_numerics_large'), + DecorateInfo(torch.testing._internal.common_utils.xfailIfTorchDynamo, 'TestUnaryUfuncs', + 'test_reference_numerics_large'), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.multigammaln", + torch_opinfo_name="mvlgamma", + torch_opinfo_variant_name="mvlgamma_p_3", + skips=skips_mvlgamma(), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.multigammaln", + torch_opinfo_name="mvlgamma", + torch_opinfo_variant_name="mvlgamma_p_5", + skips=skips_mvlgamma(), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log", + torch_opinfo_name="log", + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log1p", + torch_opinfo_name="log1p", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log10", + torch_opinfo_name="log10", + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), 
+ ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log2", + torch_opinfo_name="log2", + decorators=(precisionOverride({torch.bfloat16: 1e-1}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + PythonRefInfo( + "_refs.logsumexp", + torch_opinfo_name="logsumexp", + # When keepdim=False logsumexp function uses squeeze operation + # that is not yet exposed in nvFuser's Python API. + ), + PythonRefInfo( + "_refs.log_softmax", + torch_opinfo_name="log_softmax", + torch_opinfo_variant_name="with_dtype", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nan_to_num", + torch_opinfo_name="nan_to_num", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.neg", + torch_opinfo_name="neg", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.positive", + torch_opinfo_name="positive", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.real", + torch_opinfo_name="real", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.reciprocal", + torch_opinfo_name="reciprocal", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/45690 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.round", + torch_opinfo_name="round", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + skips=( + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_extremal", + device_type="cuda"), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_normal", + device_type="cuda"), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.rsqrt", + torch_opinfo_name="rsqrt", + decorators=(precisionOverride({torch.half: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble)), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (700,) (up to 0.01 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.chalf,)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sigmoid", + torch_opinfo_name="sigmoid", + aliases=('_refs.special.expit',), + # Reference: https://github.com/pytorch/pytorch/issues/56012 + handles_complex_extremal_values=False, + handles_large_floats=False, + decorators=(precisionOverride({torch.float16: 1e-2, + torch.complex64: 1e-1, + torch.bfloat16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/56012 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.complex64, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.chalf, torch.complex64, torch.cdouble]) + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sign", + torch_opinfo_name="sign", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, + torch.float64]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sgn", + torch_opinfo_name="sgn", + # This is an issue with the vectorised abs on CPU + handles_complex_extremal_values=False, + handles_large_floats=False, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, + torch.float64]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.signbit", + torch_opinfo_name="signbit", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sin", + torch_opinfo_name="sin", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + # Fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sinc", + torch_opinfo_name="sinc", + decorators=(precisionOverride({torch.bfloat16: 1e-2, + torch.float16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/49133 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + dtypes=[torch.cfloat]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sinh", + torch_opinfo_name="sinh", + decorators=(precisionOverride({torch.float16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 
'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cdouble,)), + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + ), + ), + PythonRefInfo( + "_refs.softmax", + torch_opinfo_name="softmax", + torch_opinfo_variant_name="with_dtype", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sqrt", + torch_opinfo_name="sqrt", + decorators=( + precisionOverride({torch.bfloat16: 7e-2}), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestUnaryUfuncs', 'test_reference_numerics_large'), + ), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/47358 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=(torch.cfloat, torch.cdouble), + active_if=IS_MACOS), + # Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.bfloat16,)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.square", + torch_opinfo_name="square", + decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),), + skips=( + # AssertionError: Reference result was farther (2.2417024338305655e-07) from the precise computation + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor', dtypes=(torch.complex64,)), + # Reference: https://github.com/pytorch/pytorch/issues/52549 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.tan", + torch_opinfo_name="tan", + decorators=[ + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=1e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.tanh", + torch_opinfo_name="tanh", + decorators=[ + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=2e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.trunc", + torch_opinfo_name="trunc", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + ), + PythonRefInfo( + "_refs.special.log_softmax", + torch_opinfo_name="log_softmax", # alias + torch_opinfo_variant_name="with_dtype", + 
supports_out=False, + ), + PythonRefInfo( + "_refs.special.softmax", + torch_opinfo_name="softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + # + # Elementwise Unary Special OpInfos + # + ElementwiseUnaryPythonRefInfo( + "_refs.special.logit", + torch_opinfo_name="logit", + ), + # + # Elementwise Unary nn.functional OpInfos + # + PythonRefInfo( + "_refs.nn.functional.alpha_dropout", + torch_opinfo_name="nn.functional.alpha_dropout", + decorators=( + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref_executor', device_type='cuda'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.celu", + torch_opinfo_name="nn.functional.celu", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.threshold", + torch_opinfo_name="nn.functional.threshold", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.dropout", + torch_opinfo_name="nn.functional.dropout", + decorators=( + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # dropout is not comparable + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.elu", + torch_opinfo_name="nn.functional.elu", + supports_out=True, + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.hardtanh", + torch_opinfo_name="nn.functional.hardtanh", + supports_out=True, + ), + PythonRefInfo( # TODO: Port this to an UnaryOpInfo + "_refs.nn.functional.gelu", + torch_opinfo_name="nn.functional.gelu", + ), + PythonRefInfo( + "_refs.nn.functional.layer_norm", + torch_opinfo_name="nn.functional.layer_norm", + skips=( + # Reference result was farther (3.5762786809723224e-07) from the precise computation + # than the torch result was (2.5068410824946596e-07)! 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.float32,), device_type='cpu'), + ), + ), + PythonRefInfo( + "_refs.nn.functional.glu", + torch_opinfo_name="nn.functional.glu", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.pairwise_distance", + torch_opinfo_name="nn.functional.pairwise_distance", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.pdist", + torch_opinfo_name="nn.functional.pdist", + supports_out=True, + skips=( + # RunTimeError: no _refs support for torch.Tensor.index_select + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + )), + PythonRefInfo( + "_refs.nn.functional.leaky_relu", + torch_opinfo_name="nn.functional.leaky_relu", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.log_softmax", + torch_opinfo_name="log_softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + PythonRefInfo( + "_refs.nn.functional.pixel_shuffle", + torch_opinfo_name="nn.functional.pixel_shuffle", + ), + PythonRefInfo( + "_refs.nn.functional.pixel_unshuffle", + torch_opinfo_name="nn.functional.pixel_unshuffle", + ), + PythonRefInfo( + "_refs.nn.functional.poisson_nll_loss", + torch_opinfo_name="nn.functional.poisson_nll_loss", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.prelu", + torch_opinfo_name="nn.functional.prelu", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.relu", + torch_opinfo_name="nn.functional.relu", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.relu6", + torch_opinfo_name="nn.functional.relu6", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.mish", + torch_opinfo_name="nn.functional.mish", + supports_out=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), + 'TestUnaryUfuncs',), ], + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.selu", + torch_opinfo_name="nn.functional.selu", + supports_out=True, + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-2, rtol=1.8e-2), + torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + PythonRefInfo( + "_refs.nn.functional.softmax", + torch_opinfo_name="softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + PythonRefInfo( + "_refs.nn.functional.softmin", + torch_opinfo_name="nn.functional.softmin", + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.softplus", + torch_opinfo_name="nn.functional.softplus", + ), + PythonRefInfo( + "_refs.nn.functional.l1_loss", + torch_opinfo_name="nn.functional.l1_loss", + ), + PythonRefInfo( + "_refs.nn.functional.margin_ranking_loss", + torch_opinfo_name="nn.functional.margin_ranking_loss", + ), + PythonRefInfo( + "_refs.nn.functional.mse_loss", + torch_opinfo_name="nn.functional.mse_loss", + ), + PythonRefInfo( + "_refs.nn.functional.smooth_l1_loss", + torch_opinfo_name="nn.functional.smooth_l1_loss", + ), + PythonRefInfo( + "_refs.nn.functional.hinge_embedding_loss", + torch_opinfo_name="nn.functional.hinge_embedding_loss", + ), + PythonRefInfo( + "_refs.nn.functional.nll_loss", + torch_opinfo_name="nn.functional.nll_loss", + # The corresponding PyTorch op doesn't support out. But the ref is + # registered as a decomp and ATen has an out variant. 
+ supports_out=True, + # For simpler indexing, we flatten target indices, then reshape the result tensor. + # This creates inconsistent view state with reference impl. + validate_view_consistency=False, + skips=( + # RuntimeError: It appears that you're trying to get value out of a tracing tensor - erroring out! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', device_type="cuda" + ), + ), + ), + PythonRefInfo( + "_refs.nn.functional.huber_loss", + torch_opinfo_name="nn.functional.huber_loss", + # The corresponding PyTorch op doesn't support out. But the ref is + # registered as a decomp and ATen has an out variant. + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.tanhshrink", + torch_opinfo_name="nn.functional.tanhshrink", + decorators=[ + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo( + toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02), + torch.complex64: tol(atol=6e-04, rtol=1e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + # in each case, pytorch will produce a nan while numpy will not + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_large", + dtypes=(torch.complex64, torch.complex128), + active_if=(IS_MACOS)), + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_extremal", + dtypes=(torch.complex64, torch.complex128), + device_type='cpu', + active_if=(IS_MACOS or IS_WINDOWS)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.hardshrink", + torch_opinfo_name="nn.functional.hardshrink", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.softshrink", + torch_opinfo_name="nn.functional.softshrink", + ), + # + # Elementwise Binary Reference OpInfos + # + ElementwiseBinaryPythonRefInfo( + "_refs.add", + torch_opinfo_name="add", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + ), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.complex64, torch.complex128)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.atan2", + torch_opinfo_name="atan2", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_and", + torch_opinfo_name="bitwise_and", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_left_shift", + torch_opinfo_name="bitwise_left_shift", + skips=( + # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_right_shift", + torch_opinfo_name="bitwise_right_shift", + skips=( + # # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Skipped some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_or", + torch_opinfo_name="bitwise_or", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_xor", + torch_opinfo_name="bitwise_xor", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.copysign", + 
torch_opinfo_name="copysign", + skips=( + # RuntimeError: Expected divisor (b) to be on the same device (cuda:0) as dividend (a), but it is found on cpu! + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # FIXME output 0: meta disagrees with real impl + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs.div", + torch_opinfo_name="div", + torch_opinfo_variant_name="no_rounding_mode", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + skips=( + # NotImplementedError: argument of type: + DecorateInfo( + unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32, torch.complex64, torch.complex128,) + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type="cuda" + ), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.div", + torch_opinfo_name="div", + torch_opinfo_variant_name="trunc_rounding", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.div", + torch_opinfo_name="div", + torch_opinfo_variant_name="floor_rounding", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.eq", + torch_opinfo_name="eq", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.float_power", + torch_opinfo_name="float_power", + skips=( + # Test doesn't account for float -> double type promotion + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + # Complex values error with: Greatest absolute difference: nan at index + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_large_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=[torch.complex64, torch.complex128]), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logaddexp", + torch_opinfo_name="logaddexp", + skips=( + # failure due to mismatch in edge cases, which boils down to what torch.exp(inf + infj) should be + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', device_type='cpu', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', device_type='cpu', + dtypes=(torch.complex64, torch.complex128)), + ), 
+ ), + PythonRefInfo( + "_refs.logaddexp2", + torch_opinfo_name="logaddexp2", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.floor_divide", + torch_opinfo_name="floor_divide", + rhs_make_tensor_kwargs=dict(exclude_zero=True), + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + # bfloat16 floor_divide compared with a float32 reference works inconsistently + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.bfloat16,)), + # bfloat16 floor_divide compared with a float32 reference works inconsistently + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + dtypes=(torch.bfloat16,)), + # int8 floor divide has different results for -128 // -1 vs. NumPy + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.int8,)), + # The following tests fails on some jobs + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.float16,)), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=5e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + # FIXME output 0: meta disagrees with real impl + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.fmax", + torch_opinfo_name="fmax", + supports_rhs_python_scalar=False, + ), + ElementwiseBinaryPythonRefInfo( + "_refs.fmin", + torch_opinfo_name="fmin", + supports_rhs_python_scalar=False, + ), + ElementwiseBinaryPythonRefInfo( + "_refs.fmod", + torch_opinfo_name="fmod", + rhs_make_tensor_kwargs={'exclude_zero': True}, + supports_rhs_python_scalar=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_contig_vs_every_other', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_non_contig', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.gcd", + torch_opinfo_name="gcd", + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.int8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.ge", + torch_opinfo_name="ge", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.gt", + torch_opinfo_name="gt", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.heaviside", + torch_opinfo_name="heaviside", + supports_rhs_python_scalar=False, + skips=( + # PyTorch's heaviside does not appear to propagate NaNs + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.hypot", + torch_opinfo_name="hypot", + supports_rhs_python_scalar=False, + ), + ElementwiseBinaryPythonRefInfo( + "_refs.igamma", + torch_opinfo_name="igamma", + 
), + ElementwiseBinaryPythonRefInfo( + "_refs.igammac", + torch_opinfo_name="igammac", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.isclose", + torch_opinfo_name="isclose", + skips=( + # Intentional xfail -- isclose does not type promote + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.lcm", + torch_opinfo_name="lcm", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.le", + torch_opinfo_name="le", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logical_and", + torch_opinfo_name="logical_and", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.logical_not", + torch_opinfo_name="logical_not", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logical_or", + torch_opinfo_name="logical_or", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logical_xor", + torch_opinfo_name="logical_xor", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.lt", + torch_opinfo_name="lt", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.maximum", + torch_opinfo_name="maximum", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.minimum", + torch_opinfo_name="minimum", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.mul", + torch_opinfo_name="mul", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + skips=( + # Reference result was farther (0.0) from the precise computation + # than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32,), + ), + # Reference result was farther (0.0) from the precise computation + # than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type='cuda' + ), + # Reference result was farther (0.0) from the precise computation + # than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type='cuda' + ), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs.ne", + torch_opinfo_name="ne", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.nextafter", + torch_opinfo_name="nextafter", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.pow", + torch_opinfo_name="pow", + decorators=( + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), + torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_scalar_support'), + ), + skips=( + # Reference result was farther (inf) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32,), + ), + # Reference result was farther (inf) from the precise + # computation than the torch result was (nan)! 
+ DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Reference result was farther (inf) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Skipping integers because they are being raised to negative powers causing an error + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]), + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', + 'test_reference_numerics_large_values', + dtypes=[torch.int16, torch.int32, torch.int64]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_large_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.remainder", + torch_opinfo_name="remainder", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.rsub", + torch_opinfo_name="rsub", + # https://github.com/pytorch/pytorch/issues/76944 + skips=( + # Reference result was farther (nan) from the precise computation than + # the torch result was (nan)! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.chalf,), device_type='cpu'), + # Reference result was farther (nan) from the precise computation than + # the torch result was (nan)! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.chalf,), device_type='cpu'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.sub", + torch_opinfo_name="sub", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0), + torch.bfloat16: tol(atol=1e-5, rtol=5e-3), + torch.complex32: tol(atol=1e-5, rtol=1e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_comprehensive', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_quick', device_type='cpu'), + ), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.uint8,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.true_divide", + torch_opinfo_name="true_divide", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + skips=( + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32,), + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type="cuda" + ), + ), + ), + # + # Elementwise Ternary Reference OpInfos + # + PythonRefInfo( + "_refs.addcdiv", + torch_opinfo_name="addcdiv", + ), + PythonRefInfo( + "_refs.addcmul", + torch_opinfo_name="addcmul", + skips=( + # Reference result was farther (1.3343989849090576e-05) + # from the precise computation than the torch result + # was (9.592622518539429e-06)! 
+ # FIXME: enable dtype-based tolerances in test_ops.py:TestCommon._ref_test_helper + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.float16,), device_type="cpu"), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.float16,), device_type="cpu"), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.clamp_min", + torch_opinfo_name="clamp_min", + skips=( + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.clamp_max", + torch_opinfo_name="clamp_max", + skips=( + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.clamp", + torch_opinfo_name="clamp", + ), + PythonRefInfo( + "_refs.nn.functional.triplet_margin_loss", + torch_opinfo_name="nn.functional.triplet_margin_loss", + supports_out=False, + # TODO: Uses minimum and clamp + skips=( + # AssertionError: Tensor-likes are not close! + # Greatest absolute difference: 6.103515625e-05 at index (4,) (up to 1e-05 allowed) + # Greatest relative difference: 8.519846983548175e-06 at index (4,) (up to 1.3e-06 allowed) + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.uint8,), device_type="cpu"), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs.xlogy", + torch_opinfo_name="xlogy", + supports_one_python_scalar=True, + ), + # + # Elementwise Binary Special OpInfos + # + ElementwiseBinaryPythonRefInfo( + "_refs.special.xlog1py", + torch_opinfo_name="special.xlog1py", + supports_one_python_scalar=True, + ), + # + # Data Conversion & Data Movement Opinfos + # + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.bfloat16", + torch_opinfo_name="bfloat16", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.bool", + torch_opinfo_name="bool", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.byte", + torch_opinfo_name="byte", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.char", + torch_opinfo_name="char", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. 
+ # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs._conversions.complex", + torch_opinfo_name="complex", + error_inputs_func=partial(error_inputs_complex, is_ref=True), + skips=( + # Tests don't account for complex's type promotion semantics + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs._conversions.polar", + torch_opinfo_name="polar", + skips=( + # Tests don't account for complex's type promotion semantics + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.double", + torch_opinfo_name="double", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.float", + torch_opinfo_name="float", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.half", + torch_opinfo_name="half", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.int", + torch_opinfo_name="int", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.long", + torch_opinfo_name="long", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.short", + torch_opinfo_name="short", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.chalf", + torch_opinfo_name="chalf", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. 
+ # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.cfloat", + torch_opinfo_name="cfloat", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.cdouble", + torch_opinfo_name="cdouble", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.clone", + torch_opinfo_name="clone", + ), + # + # View & Shape OpInfos + # + PythonRefInfo( + "_refs.atleast_1d", + torch_opinfo_name="atleast_1d", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.atleast_2d", + torch_opinfo_name="atleast_2d", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.atleast_3d", + torch_opinfo_name="atleast_3d", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.as_strided", + torch_opinfo_name="as_strided", + # FIXME: doesn't support chalf + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.as_strided", + torch_opinfo_name="as_strided", + torch_opinfo_variant_name="partial_views", + # FIXME: doesn't support chalf + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.as_strided_scatter", + torch_opinfo_name="as_strided_scatter", + # returns a view of an intermediate tensor (as_strided) + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.block_diag", + torch_opinfo_name="block_diag", + ), + PythonRefInfo( + "_refs.broadcast_shapes", + torch_opinfo_name="broadcast_shapes", + ), + PythonRefInfo( + "_refs.broadcast_tensors", + torch_opinfo_name="broadcast_tensors", + ), + PythonRefInfo( + "_refs.broadcast_to", + torch_opinfo_name="broadcast_to", + ), + PythonRefInfo( + "_refs.cat", + torch_opinfo_name="cat", + skips=( + # FIXME: AssertionError: RuntimeError not raised + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.chunk", + torch_opinfo_name="chunk", + ), + PythonRefInfo( + "_refs.column_stack", + torch_opinfo_name="column_stack", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.conj", + torch_opinfo_name="conj", + ), + PythonRefInfo( + "_refs.constant_pad_nd", + torch_opinfo_name="constant_pad_nd", + ), + PythonRefInfo( + "_refs.contiguous", + 
torch_opinfo_name="contiguous", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.deg2rad", + torch_opinfo_name="deg2rad", + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + ), + PythonRefInfo( + "_refs.dsplit", + torch_opinfo_name="dsplit", + ), + PythonRefInfo( + "_refs.diag", + torch_opinfo_name="diag", + ), + PythonRefInfo( + "_refs.diagonal", + torch_opinfo_name="diagonal", + ), + PythonRefInfo( + "_refs.diagonal_copy", + torch_opinfo_name="diagonal_copy", + ), + PythonRefInfo( + "_refs.diagonal_scatter", + torch_opinfo_name="diagonal_scatter", + supports_out=True, + # returns a view of an intermediate tensor (as_strided) + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.diag_embed", + torch_opinfo_name="diag_embed", + supports_out=True, + ), + PythonRefInfo( + "_refs.dstack", + torch_opinfo_name="dstack", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.expand", + torch_opinfo_name="expand", + ), + PythonRefInfo( + "_refs.expand_as", + torch_opinfo_name="expand_as", + ), + PythonRefInfo( + "_refs.flatten", + torch_opinfo_name="flatten", + ), + PythonRefInfo( + "_refs.flip", + torch_opinfo_name="flip", + ), + PythonRefInfo( + "_refs.fliplr", + torch_opinfo_name="fliplr", + ), + PythonRefInfo( + "_refs.flipud", + torch_opinfo_name="flipud", + ), + PythonRefInfo( + "_refs.hstack", + torch_opinfo_name="hstack", + skips=( + # https://github.com/pytorch/pytorch/issues/78613 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.narrow", + torch_opinfo_name="narrow", + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=True, is_ref=True), + ), + PythonRefInfo( + "_refs.narrow_copy", + torch_opinfo_name="narrow_copy", + supports_out=True, + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=False, is_ref=True), + ), + PythonRefInfo( + "_refs.nn.functional.group_norm", + torch_opinfo_name="nn.functional.group_norm", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.native_layer_norm", + torch_opinfo_name="native_layer_norm", + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_python_ref", + device_type="cpu", dtypes=(torch.float32,)), + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_python_ref_torch_fallback", + device_type="cpu", dtypes=(torch.float32,)), + ), + ), + PythonRefInfo( + "_refs.permute", + torch_opinfo_name="permute", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.rad2deg", + torch_opinfo_name="rad2deg", + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + ), + PythonRefInfo( + "_refs.ravel", + torch_opinfo_name="ravel", + ), + PythonRefInfo( + "_refs.renorm", + torch_opinfo_name="renorm", + ), + PythonRefInfo( + "_refs.repeat", + torch_opinfo_name="repeat", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.reshape", + torch_opinfo_name="reshape", + ), + PythonRefInfo( + "_refs.reshape_as", + torch_opinfo_name="reshape_as", + ), + PythonRefInfo( + "_refs.roll", + torch_opinfo_name="roll", + validate_view_consistency=False, + skips=( + # RuntimeError: no _refs support for torch.Tensor.__getitem__ + # Leaving it as a ref because fftshift uses it + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.rot90", + torch_opinfo_name="rot90", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.stack", + 
torch_opinfo_name="stack", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.squeeze", + torch_opinfo_name="squeeze", + ), + PythonRefInfo( + "_refs.squeeze", + torch_opinfo_name="squeeze", + torch_opinfo_variant_name="multiple", + ), + PythonRefInfo( + "_refs.tensor_split", + torch_opinfo_name="tensor_split", + skips=( + # TensorMeta doesn't support tolist + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta'), + # RuntimeError: no _refs support for torch.Tensor.tolist + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.hsplit", + torch_opinfo_name="hsplit", + ), + PythonRefInfo( + "_refs.vsplit", + torch_opinfo_name="vsplit", + ), + PythonRefInfo( + "_refs.dot", + torch_opinfo_name="dot", + error_inputs_func=partial(error_inputs_dot_vdot, is_ref=True), + # .conj() does not set ._is_view() correctly in ATen + validate_view_consistency=False, + skips=( + # RuntimeError: no _refs support for torch.Tensor.is_conj + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=[torch.complex64, torch.complex128]), + ), + ), + PythonRefInfo( + "_refs.vdot", + torch_opinfo_name="vdot", + error_inputs_func=partial(error_inputs_dot_vdot, is_ref=True), + # .conj() does not set ._is_view() correctly in ATen + validate_view_consistency=False, + skips=( + # RuntimeError: no _refs support for torch.Tensor.is_conj + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=[torch.complex64, torch.complex128]), + ), + ), + PythonRefInfo( + "_refs.transpose", + torch_opinfo_name="transpose", + ), + PythonRefInfo( + "_refs.t", + torch_opinfo_name="t", + ), + PythonRefInfo( + "_refs.T", + torch_opinfo_name="T", + error_inputs_func=partial(error_inputs_T, has_ndims_error=True), + ), + PythonRefInfo( + "_refs.unfold", + torch_opinfo_name="unfold", + ), + PythonRefInfo( + "_refs.unfold_copy", + torch_opinfo_name="unfold_copy", + supports_out=True, + ), + PythonRefInfo( + "_refs.unsqueeze", + torch_opinfo_name="unsqueeze", + ), + PythonRefInfo( + "_refs.view", + torch_opinfo_name="view", + ), + PythonRefInfo( + "_refs.view_as", + torch_opinfo_name="view_as", + ), + PythonRefInfo( + "_refs.vstack", + torch_opinfo_name="vstack", + skips=( + # https://github.com/pytorch/pytorch/issues/78613 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.unflatten", + torch_opinfo_name="unflatten", + ), + PythonRefInfo( + "_refs.unbind", + torch_opinfo_name="unbind", + ), + # + # Reduction Reference OpInfos + # + ReductionPythonRefInfo( + "_refs.all", + torch_opinfo_name="all", + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_result_dtype', + dtypes=[torch.uint8]), + ), + ), + ReductionPythonRefInfo( + "_refs.amax", + torch_opinfo_name="amax", + error_inputs_func=partial(error_inputs_aminmax_amax_amin, is_ref=True), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionPythonRefInfo( + "_refs.amin", + torch_opinfo_name="amin", + error_inputs_func=partial(error_inputs_aminmax_amax_amin, is_ref=True), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + 
unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionPythonRefInfo( + "_refs.any", + torch_opinfo_name="any", + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_result_dtype', + dtypes=[torch.uint8]), + ), + ), + ReductionPythonRefInfo( + "_refs.count_nonzero", + torch_opinfo_name="count_nonzero", + skips=( + # FIXME: count_nonzero does not accept keepdim kwarg + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_dim_default_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_dim_multi_unsorted_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + ), + ), + ReductionPythonRefInfo( + "_refs.mean", + torch_opinfo_name="mean", + supports_out=True, + error_inputs_func=partial(error_inputs_mean, is_ref=True), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionPythonRefInfo( + "_refs.std", + torch_opinfo_name="std", + supports_out=True, + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=(torch.float16,)), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_ref_duplicate_values', + dtypes=(torch.float16,)), + ), + ), + # std_mean and var_mean are not ReductionInfos + PythonRefInfo( + "_refs.std_mean", + torch_opinfo_name="std_mean", + ), + ReductionPythonRefInfo( + "_refs.sum", + torch_opinfo_name="sum", + supports_out=True, + skips=( + # FIXME: doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # FIXME: mean reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_ref_duplicate_values', + dtypes=[torch.float16]), + DecorateInfo( + unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', + dtypes=[torch.float32]), + ), + ), + PythonRefInfo( + "_refs.cumsum", + torch_opinfo_name="cumsum", + supports_out=True, + skips=( + # doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + ), + PythonRefInfo( + "_refs.cumprod", + torch_opinfo_name="cumprod", + supports_out=True, + skips=( + # doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 
'test_out'), + ), + ), + PythonRefInfo( + "_refs.sum_to_size", + torch_opinfo_name="sum_to_size", + validate_view_consistency=False, + ), + ReductionPythonRefInfo( + "_refs.prod", + torch_opinfo_name="prod", + supports_out=True, + supports_multiple_dims=True, + skips=( + # FIXME: doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16, torch.complex64]), + ), + ), + ReductionPythonRefInfo( + "_refs.var", + torch_opinfo_name="var", + supports_out=True, + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'), + ), + ), + PythonRefInfo( + "_refs.var_mean", + torch_opinfo_name="var_mean", + validate_view_consistency=False, + ), + # + # Linear Algebra Operators + # + PythonRefInfo( + "_refs.addr", + torch_opinfo_name="addr", + decorators=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',), + ), + ), + PythonRefInfo( + "_refs.trace", + torch_opinfo_name="trace", + ), + PythonRefInfo( + "_refs.norm", + torch_opinfo_name="norm", + supports_out=True, + # Uses vector_norm inside and vector_norm is affected by + # https://github.com/pytorch/pytorch/issues/77216 + validate_view_consistency=False, + ), + # + # Tensor Creation Reference OpInfos + # + PythonRefInfo( + "_refs.empty", + torch_opinfo_name="empty", + skips=( + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # FIXME: shouldn't check empty results + DecorateInfo(unittest.skip("Can't check result for empty"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.empty_like", + torch_opinfo_name="empty_like", + skips=( + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 
'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # FIXME: should not compare results of empty_like + DecorateInfo(unittest.skip("Can't check result for empty_like"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.randn", + torch_opinfo_name="randn", + op=lambda *args, **kwargs: wrapper_set_seed(refs.randn, *args, **kwargs), + skips=( + # see https://github.com/pytorch/pytorch/issues/85121 + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), + 'TestCommon', + 'test_python_ref_executor'), + # These tests expect the input to be a tensor or a sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.eye", + torch_opinfo_name="eye", + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + ), + ), + PythonRefInfo( + "_refs.new_empty", + torch_opinfo_name="new_empty", + skips=( + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # FIXME: should not compare results of empty_like + DecorateInfo(unittest.skip("Can't check result for new_empty"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.new_empty_strided", + torch_opinfo_name="new_empty_strided", + skips=( + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', 
+ 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + ), + ), + PythonRefInfo( + "_refs.empty_strided", + torch_opinfo_name="empty_strided", + skips=( + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.new_full", + torch_opinfo_name="new_full", + ), + PythonRefInfo( + "_refs.new_ones", + torch_opinfo_name="new_ones", + ), + PythonRefInfo( + "_refs.new_zeros", + torch_opinfo_name="new_zeros", + ), + # + # Conditional Reference OpInfos + # + PythonRefInfo( + "_refs.masked_fill", + torch_opinfo_name="masked_fill", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.where", + torch_opinfo_name="where", + op=lambda self, condition, other: refs.where(condition, self, other), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors', device_type='cuda'), + ), + ), + PythonRefInfo( + "_refs.index_select", + torch_opinfo_name="index_select", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + # Sample out= with a stride of zero. 
This _out operation checks that the input has no + # inner overlap + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),) + ), + PythonRefInfo( + "_refs.index_copy", + torch_opinfo_name="index_copy", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.index_add", + torch_opinfo_name="index_add", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.index_fill", + torch_opinfo_name="index_fill", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),) + ), + # + # Test-related functions + # + PythonRefInfo( + "_refs.allclose", + torch_opinfo_name="allclose", + ), + # + # Misc functions + # + PythonRefInfo( + "_refs.stft", + torch_opinfo_name="stft", + skips=[ + # RuntimeError: no _refs support for aten.pad + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref' + ), + ], + ), + PythonRefInfo( + "_refs.istft", + torch_opinfo_name="istft", + skips=[ + # RuntimeError: no _refs support for aten.unfold_backward + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref' + ), + ], + ), + PythonRefInfo( + "_refs.view_as_complex", + torch_opinfo_name="view_as_complex", + ), +] +python_ref_db += opinfo.definitions.python_ref_db + +# Common operator groupings +ops_and_refs = op_db + python_ref_db +unary_ufuncs = [op for op in ops_and_refs if isinstance(op, UnaryUfuncInfo)] +binary_ufuncs = [op for op in ops_and_refs if isinstance(op, BinaryUfuncInfo)] +binary_ufuncs_and_refs = tuple(op for op in ops_and_refs if isinstance(op, BinaryUfuncInfo)) +spectral_funcs = [op for op in ops_and_refs if isinstance(op, SpectralFuncInfo)] +sparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse] +sparse_csr_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse_csr] +sparse_reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo) and op.supports_sparse] +shape_funcs = [op for op in ops_and_refs if isinstance(op, ShapeFuncInfo)] +reduction_ops = [op for op in ops_and_refs if isinstance(op, ReductionOpInfo)] +reference_filtered_ops = [op for op in reduction_ops if op.ref is not None] +reference_masked_ops = [op for op in reference_filtered_ops if op.name.startswith('masked.')] +sparse_masked_reduction_ops = [op for op in sparse_reduction_ops if op.name.startswith('masked.')] + +# TODO: review porting these to make_tensor +def index_variable(shape, max_indices, device=torch.device('cpu')): + if not isinstance(shape, tuple): + shape = (shape,) + index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long() + return index + +def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')): + assert len(shape) == 2 + assert index_dim < 2 + batch_dim = 1 - index_dim + index = torch.zeros(*shape, dtype=torch.long, device=device) + for i in range(shape[index_dim]): + index.select(index_dim, i).copy_( + torch.randperm(max_indices, device=device)[:shape[batch_dim]]) + if duplicate: + index.select(batch_dim, 0).copy_(index.select(batch_dim, 1)) + return index + +def 
bernoulli_scalar(): + return torch.tensor(0, dtype=torch.bool).bernoulli_() + +def mask_not_all_zeros(shape): + assert len(shape) > 0 + while True: + result = torch.randn(shape).gt(0) + if result.sum() > 0: + return result + +# Copied from functorch +def xfail(op_name, variant_name='', *, device_type=None, dtypes=None): + return (op_name, variant_name, device_type, dtypes, True) + + +def skip(op_name, variant_name='', *, device_type=None, dtypes=None): + return (op_name, variant_name, device_type, dtypes, False) + + +def skipOps(test_case_name, base_test_name, to_skip): + all_opinfos = op_db + for xfail in to_skip: + op_name, variant_name, device_type, dtypes, expected_failure = xfail + matching_opinfos = [o for o in all_opinfos + if o.name == op_name and o.variant_test_name == variant_name] + assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {xfail}" + for op in matching_opinfos: + decorators = list(op.decorators) + if expected_failure: + decorator = DecorateInfo(unittest.expectedFailure, + test_case_name, base_test_name, + device_type=device_type, dtypes=dtypes) + decorators.append(decorator) + else: + decorator = DecorateInfo(unittest.skip("Skipped!"), + test_case_name, base_test_name, + device_type=device_type, dtypes=dtypes) + decorators.append(decorator) + op.decorators = tuple(decorators) + + # This decorator doesn't modify fn in any way + def wrapped(fn): + return fn + return wrapped diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_mkldnn.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_mkldnn.py new file mode 100644 index 0000000000000000000000000000000000000000..4a9d01cf9cde74e5252c8e796659a46af93de1f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_mkldnn.py @@ -0,0 +1,78 @@ +# mypy: ignore-errors + +import contextlib +import functools +import inspect + +import torch + + +# Test whether hardware BF32 math mode is enabled. It is enabled only when: +# - MKLDNN is available +# - BF16 is supported by MKLDNN +def bf32_is_not_fp32(): + if not torch.backends.mkldnn.is_available(): + return False + if not torch.ops.mkldnn._is_mkldnn_bf16_supported(): + return False + return True + + +@contextlib.contextmanager +def bf32_off(): + old_matmul_precision = torch.get_float32_matmul_precision() + try: + torch.set_float32_matmul_precision("highest") + yield + finally: + torch.set_float32_matmul_precision(old_matmul_precision) + + +@contextlib.contextmanager +def bf32_on(self, bf32_precision=1e-5): + old_matmul_precision = torch.get_float32_matmul_precision() + old_precision = self.precision + try: + torch.set_float32_matmul_precision("medium") + self.precision = bf32_precision + yield + finally: + torch.set_float32_matmul_precision(old_matmul_precision) + self.precision = old_precision + + +# This is a wrapper that runs a test twice: once with +# allow_bf32=True and once with allow_bf32=False.
When running with +# allow_bf32=True, it will use reduced precision as specified by the +# argument +def bf32_on_and_off(bf32_precision=1e-5): + def with_bf32_disabled(self, function_call): + with bf32_off(): + function_call() + + def with_bf32_enabled(self, function_call): + with bf32_on(self, bf32_precision): + function_call() + + def wrapper(f): + params = inspect.signature(f).parameters + arg_names = tuple(params.keys()) + + @functools.wraps(f) + def wrapped(*args, **kwargs): + for k, v in zip(arg_names, args): + kwargs[k] = v + cond = bf32_is_not_fp32() + if "device" in kwargs: + cond = cond and (torch.device(kwargs["device"]).type == "cpu") + if "dtype" in kwargs: + cond = cond and (kwargs["dtype"] == torch.float) + if cond: + with_bf32_disabled(kwargs["self"], lambda: f(**kwargs)) + with_bf32_enabled(kwargs["self"], lambda: f(**kwargs)) + else: + f(**kwargs) + + return wrapped + + return wrapper diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_nn.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_nn.py new file mode 100644 index 0000000000000000000000000000000000000000..2dc7090c02e115fdd474d4457e02cc4b9d4b6537 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_nn.py @@ -0,0 +1,3987 @@ +# mypy: ignore-errors + +from abc import abstractmethod +import tempfile +import unittest + +from copy import deepcopy +from functools import reduce, partial +from itertools import product +from operator import mul + + +import torch +import torch.cuda +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import _reduction as _Reduction +from torch.testing._internal.common_utils import TestCase, to_gpu, freeze_rng_state, is_iterable, \ + gradcheck, gradgradcheck, set_default_dtype, skipIfTorchDynamo +from torch.testing._internal.common_cuda import TEST_CUDA, SM90OrLater +from torch.autograd.gradcheck import _get_numerical_jacobian, _iter_tensors +from torch.autograd import Variable +from torch.types import _TensorOrTensors +import torch.backends.cudnn + +from typing import Dict, Callable, Tuple, List, Sequence, Union, Any + +TemporaryFile = tempfile.TemporaryFile +PRECISION = 1e-5 + + +def get_reduction(m): + result = getattr(m, 'reduction', None) + if result is None: + result = _Reduction.legacy_get_string(getattr(m, 'sizeAverage', None), True, emit_warning=False) + assert result is not None + return result + + +def get_weight(m): + result = getattr(m, 'weight', None) + if result is not None: + return result + return getattr(m, 'weights', None) + +# NOTE [How to check NN module / functional API parity between Python and C++ frontends] +# +# The way to check API parity is to add parity tests for the NN module / functional of interest. +# Here are the detailed steps: +# +# For NN module: +# 1. Make sure you already have a test dict with the module configuration you want to test. +# 2. Add `cpp_constructor_args` entry to the test dict, with its value exactly matching +# the Python module constructor arguments. For example, if in the test dict we pass +# `(10, 8)` to `torch.nn.Linear` constructor, then we should pass `torch::nn::LinearOptions(10, 8)` +# as the corresponding C++ constructor argument to `torch::nn::Linear`. +# 3. If in the process of performing the above step you referenced any variables +# in the `cpp_constructor_args` entry, you must add `cpp_var_map` entry +# to the test dict to make sure that those variables are populated with the right Python values. 
+# For example, if the Python constructor call is +# `torch.nn.FractionalMaxPool2d(2, output_ratio=0.5, _random_samples=random_samples)`, +# the corresponding C++ constructor argument is +# `torch::nn::FractionalMaxPool2dOptions(2).output_ratio(0.5)._random_samples(random_samples)`, +# and the `cpp_var_map` entry must be +# `{'random_samples': random_samples}` in order to populate the C++ variable `random_samples` +# used in the C++ constructor argument with the Python tensor value `random_samples`. +# +# For NN functional: +# 1. Make sure you already have a test dict with the functional configuration you want to test. +# 2. If the test dict's `constructor` entry looks like `wrap_functional(F.some_functional_name, ...)`, +# then you must add `cpp_options_args` entry to the test dict, with its value exactly matching the Python +# functional optional arguments. For example, if the test dict's `constructor` entry is +# `wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest')`, +# then the `cpp_options_args` entry should be +# "F::InterpolateFuncOptions().size(std::vector({12})).scale_factor(c10::nullopt).mode(torch::kNearest)". +# 3. Otherwise, if the test dict's `constructor` entry looks like +# `wrap_functional(lambda i: F.some_functional_name(...))`, +# then you must add `cpp_function_call` entry to the test dict, with its value exactly matching the Python +# functional function call. For example, if the test dict's `constructor` entry is +# `wrap_functional(lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none'))`, +# then the `cpp_function_call` entry should be +# "F::poisson_nll_loss(i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))". +# 4. If in the process of performing the above two steps you referenced any variables +# in the `cpp_options_args` or `cpp_function_call` entry, you must +# add `cpp_var_map` entry to the test dict to make sure that those variables +# are populated with the right Python values. For example, if the test dict's `constructor` entry is +# `wrap_functional(lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none'))`, +# then the `cpp_function_call` entry should be +# "F::poisson_nll_loss(i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))". +# Notice that there are two variables `i` and `t` that need to have their values provided, +# and the way to do so is to add a `cpp_var_map` entry: `cpp_var_map={'i': '_get_input()', 't': t}`. +# (Note that for `i`, since we want it to take the Python input value, we pass '_get_input()' string as value +# and the C++ parity test mechanism will populate `i` with the Python input value correctly.) +# +# There are also a few optional flags in the test dict to control the C++ parity test behavior: +# +# - `test_cpp_api_parity`: if `False`, skips the C++ parity test for this test dict. Default: True. +# - `has_parity`: if `False`, expects this test dict to fail the C++ parity test. Default: True. 
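# [Illustrative sketch; not part of the vendored file] To make the NOTE above
# concrete, the hypothetical test dict below combines `constructor`,
# `cpp_constructor_args`, and `cpp_var_map`, following the FractionalMaxPool2d
# example described in step 3 of the NN-module instructions. The input size and
# the `random_samples` tensor are assumptions chosen purely for illustration,
# and the snippet reuses the `torch` / `nn` imports above.
random_samples = torch.rand(1, 3, 2, dtype=torch.double)  # (N, C, 2) samples for FractionalMaxPool2d
example_parity_test_dict = dict(
    fullname='FractionalMaxPool2d_parity_example',
    # Python-side construction of the module under test.
    constructor=lambda: nn.FractionalMaxPool2d(
        2, output_ratio=0.5, _random_samples=random_samples),
    # C++ constructor options that exactly mirror the Python constructor call.
    cpp_constructor_args='torch::nn::FractionalMaxPool2dOptions(2)'
                         '.output_ratio(0.5)._random_samples(random_samples)',
    # Populates the C++ variable `random_samples` referenced in the options
    # string with the Python tensor of the same name.
    cpp_var_map={'random_samples': random_samples},
    input_size=(1, 3, 5, 7),
    pickle=False,
)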
+ + +module_tests = [ + dict( + module_name='Linear', + constructor_args=(10, 8), + cpp_constructor_args='torch::nn::LinearOptions(10, 8)', + input_size=(4, 10), + reference_fn=lambda i, p, _: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Linear', + constructor_args=(10, 8, False), + cpp_constructor_args='torch::nn::LinearOptions(10, 8).bias(false)', + input_size=(4, 10), + desc='no_bias', + reference_fn=lambda i, p, _: torch.mm(i, p[0].t()), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='RReLU', + input_size=(1, 2, 2), + test_cuda=False, + default_dtype=torch.double, + ), + dict( + module_name='RReLU', + constructor_args=(0.1, 0.9), + cpp_constructor_args='torch::nn::RReLUOptions().lower(0.1).upper(0.9)', + input_size=(4, 4, 5), + desc='with_up_down', + test_cuda=False, + default_dtype=torch.double, + ), + dict( + module_name='Flatten', + input_size=(2, 3, 4, 5), + reference_fn=lambda i, *_: torch.flatten(i, 1), + default_dtype=torch.double, + ), + # TODO: reference function + dict( + module_name='CrossMapLRN2d', + constructor_args=(5, 5e-3, 1e-3, 2), + cpp_constructor_args='torch::nn::CrossMapLRN2dOptions(5).alpha(5e-3).beta(1e-3).k(2)', + input_size=(2, 3, 6, 6), + check_gradgrad=False, + # TODO(#50743): Figure out the error. "RuntimeError: Unrecognized tensor type ID: Batched" + check_batched_grad=False, + default_dtype=torch.double, + ), +] + + +# Generates rand tensor with non-equal values. This ensures that duplicate +# values won't be causing test failure for modules like MaxPooling. +# size should be small, otherwise randperm fails / long overflows. +def _rand_tensor_non_equal(*size): + total = reduce(mul, size, 1) + return torch.randperm(total).view(*size).double() + + +def wrap_functional(fn, **kwargs): + class FunctionalModule(nn.Module): + def forward(self, *args): + return fn(*args, **kwargs) + return FunctionalModule + + +def poissonnllloss_no_reduce_test(): + t = torch.randn(10, 10) + return dict( + fullname='PoissonNLLLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='F::poisson_nll_loss(' + 'i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.rand(10, 10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: i.exp() - t.mul(i), + pickle=False, + default_dtype=torch.double) + + +def bceloss_no_reduce_test(): + t = Variable(torch.randn(15, 10).gt(0).to(torch.double)) + return dict( + fullname='BCELoss_no_reduce', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy(i, t.type_as(i), reduction='none')), + cpp_function_call='F::binary_cross_entropy(' + 'i, t.to(i.options()), F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()), + pickle=False, + precision=7e-4, + default_dtype=torch.double) + + +def bceloss_no_reduce_scalar_test(): + t = torch.randn(()).gt(0).to(torch.double) + return dict( + fullname='BCELoss_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy(i, t.type_as(i), reduction='none')), + cpp_function_call='F::binary_cross_entropy(' + 'i, t.to(i.options()), F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))', 
+ input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()), + pickle=False, + default_dtype=torch.double) + + +def bceloss_weights_no_reduce_test(): + t = Variable(torch.randn(15, 10, dtype=torch.double).gt(0).to(torch.double)) + weights = torch.rand(10, dtype=torch.double) + return dict( + fullname='BCELoss_weights_no_reduce', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy(i, t.type_as(i), + weight=weights.type_as(i), reduction='none')), + cpp_function_call='F::binary_cross_entropy(' + 'i, t.to(i.options()), ' + 'F::BinaryCrossEntropyFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))', + input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), + cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights}, + reference_fn=lambda i, p, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights, + pickle=False, + precision=3e-4, + default_dtype=torch.double, + ) + + +def bceloss_weights_no_reduce_scalar_test(): + t = torch.randn(()).gt(0).to(torch.double) + weights = torch.rand((), dtype=torch.double) + return dict( + fullname='BCELoss_weights_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy(i, t.type_as(i), + weight=weights.type_as(i), reduction='none')), + cpp_function_call='''F::binary_cross_entropy( + i, t.to(i.options()), + F::BinaryCrossEntropyFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))''', + cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights}, + input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2), + reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()) * weights, + pickle=False, + default_dtype=torch.double, + ) + + +def bce_with_logistic_legacy_enum_test(): + t = Variable(torch.randn(15, 10).gt(0).to(torch.double)) + sigmoid = nn.Sigmoid() + return dict( + fullname='BCEWithLogitsLoss_legacy_enum', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduce=False)), + cpp_function_call='''F::binary_cross_entropy_with_logits( + i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()), + check_gradgrad=False, + pickle=False, + default_dtype=torch.double, + ) + + +def bce_with_logistic_no_reduce_test(): + t = Variable(torch.randn(15, 10).gt(0).to(torch.double)) + sigmoid = nn.Sigmoid() + return dict( + fullname='BCEWithLogitsLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduction='none')), + cpp_function_call='''F::binary_cross_entropy_with_logits( + i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()), + check_gradgrad=False, + pickle=False, + default_dtype=torch.double, + ) + + +def bce_with_logistic_no_reduce_scalar_test(): + t = torch.randn(()).gt(0).to(torch.double) + sigmoid = nn.Sigmoid() + return dict( + fullname='BCEWithLogitsLoss_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), 
reduction='none')), + cpp_function_call='''F::binary_cross_entropy_with_logits( + i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()), + check_gradgrad=False, + pickle=False, + default_dtype=torch.double, + ) + + +def kldivloss_with_target_no_reduce_test(): + t = torch.rand(10, 10, dtype=torch.double) + return dict( + fullname='KLDivLoss_with_target_no_reduce', + constructor=wrap_functional( + lambda i: F.kl_div(i, t.type_as(i), reduction='none')), + cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.rand(10, 10).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def kldivloss_no_reduce_test(): + t = torch.rand(10, 10, dtype=torch.double) + return dict( + fullname='KLDivLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.kl_div(i, t.type_as(i), reduction='none')), + cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.rand(10, 10).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double, + ) + + +def kldivloss_no_reduce_scalar_test(): + t = torch.rand((), dtype=torch.double) + return dict( + fullname='KLDivLoss_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.kl_div(i, t.type_as(i), reduction='none')), + cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.rand(()).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def kldivloss_with_log_target_no_reduce_test(): + t = torch.rand(10, 10, dtype=torch.double).log() + return dict( + fullname='KLDivLoss_with_log_target_no_reduce', + constructor=wrap_functional( + lambda i: F.kl_div(i, t.type_as(i), reduction='none', log_target=True)), + cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone).log_target(true))', + input_fn=lambda: torch.rand(10, 10).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['KLDivLoss_log_target'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def kldivloss_no_reduce_log_target_test(): + t = torch.rand(10, 10, dtype=torch.double).log() + return dict( + fullname='KLDivLoss_no_reduce_log_target', + constructor=wrap_functional( + lambda i: F.kl_div(i, t.type_as(i), reduction='none', log_target=True)), + cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone).log_target(true))', + input_fn=lambda: torch.rand(10, 10).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['KLDivLoss_log_target'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + 
default_dtype=torch.double, + ) + + +def kldivloss_no_reduce_scalar_log_target_test(): + t = torch.rand((), dtype=torch.double).log() + return dict( + fullname='KLDivLoss_no_reduce_scalar_log_target', + constructor=wrap_functional( + lambda i: F.kl_div(i, t.type_as(i), reduction='none', log_target=True)), + cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone).log_target(true))', + input_fn=lambda: torch.rand(()).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['KLDivLoss_log_target'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def l1loss_no_reduce_test(): + t = torch.randn(2, 3, 4, dtype=torch.double) + return dict( + fullname='L1Loss_no_reduce', + constructor=wrap_functional( + lambda i: F.l1_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.randn(2, 3, 4), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: (i - t.type_as(i)).abs(), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def l1loss_no_reduce_complex_test(): + t = torch.randn(2, 3, 4, dtype=torch.cdouble) + return dict( + fullname='L1Loss_no_reduce_complex', + constructor=wrap_functional( + lambda i: F.l1_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.randn(2, 3, 4, dtype=torch.cdouble), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: (i - t.type_as(i)).abs(), + supports_forward_ad=True, + pickle=False) + + +def l1loss_no_reduce_scalar_test(): + t = torch.randn((), dtype=torch.double) + return dict( + fullname='L1Loss_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.l1_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.randn(()), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: (i - t.type_as(i)).abs(), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def mseloss_no_reduce_test(): + input_size = (2, 3, 4, 5) + target = torch.randn(*input_size, dtype=torch.double) + return dict( + fullname='MSELoss_no_reduce', + constructor=wrap_functional( + lambda i: F.mse_loss(i, target.type_as(i), reduction='none')), + cpp_function_call='F::mse_loss(i, target.to(i.options()), F::MSELossFuncOptions().reduction(torch::kNone))', + input_size=input_size, + cpp_var_map={'i': '_get_input()', 'target': target}, + reference_fn=lambda i, *_: (i - target).pow(2), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def mseloss_no_reduce_scalar_test(): + input_size = () + target = torch.randn(input_size, dtype=torch.double) + return dict( + fullname='MSELoss_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.mse_loss(i, target.type_as(i), reduction='none')), + cpp_function_call='F::mse_loss(i, target.to(i.options()), F::MSELossFuncOptions().reduction(torch::kNone))', + input_size=input_size, + cpp_var_map={'i': '_get_input()', 'target': target}, + reference_fn=lambda i, *_: (i - target).pow(2), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def nllloss_no_reduce_test(): + t = 
Variable(torch.empty(15).uniform_().mul(10).floor().long()) + kwargs = {'reduction': 'none'} + return dict( + fullname='NLLLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), reduction=kwargs['reduction'])), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.rand(15, 10).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs), + pickle=False, + default_dtype=torch.double) + + +def nllloss_no_reduce_ignore_index_test(): + t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) + kwargs: Dict[str, Union[int, str]] = {'ignore_index': 2, 'reduction': 'none'} + return dict( + fullname='NLLLoss_no_reduce_ignore_index', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), ignore_index=int(kwargs['ignore_index']), + reduction=str(kwargs['reduction']))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(2).reduction(torch::kNone))''', + input_fn=lambda: torch.rand(15, 10).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs), + pickle=False, + default_dtype=torch.double) + + +def nllloss_no_reduce_weights_test(): + t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) + weight = torch.rand(10) + + def kwargs(i): + return {'weight': weight.type_as(i), 'reduction': 'none'} + + return dict( + fullname='NLLLoss_no_reduce_weights', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), + F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))''', + input_fn=lambda: torch.rand(15, 10).add(1e-2).log(), + cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)), + pickle=False, + default_dtype=torch.double) + + +def nllloss_no_reduce_weights_ignore_index_test(): + t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) + weight = torch.rand(10) + + def kwargs(i): + return {'weight': weight.type_as(i), 'reduction': 'none', + 'ignore_index': 2} + + return dict( + fullname='NLLLoss_no_reduce_weights_ignore_index', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i.data))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), + F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone).ignore_index(2))''', + input_fn=lambda: torch.rand(15, 10).add(1e-2).log(), + cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)), + pickle=False, + default_dtype=torch.double) + + +def nllloss_no_reduce_weights_ignore_index_neg_test(): + t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) + weight = torch.rand(10) + + def kwargs(i): + return {'weight': weight.type_as(i), 'reduction': 'none', + 'ignore_index': -1} + + return dict( + fullname='NLLLoss_no_reduce_weights_ignore_index_neg', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), + 
F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone).ignore_index(-1))''', + input=torch.rand(15, 10, dtype=torch.double).add(1e-2).log(), + cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)), + pickle=False, + default_dtype=torch.double) + + +def nllloss2d_no_reduce_test(): + t = Variable(torch.rand(2, 5, 5).mul(3).floor().long()) + kwargs = {'reduction': 'none'} + return dict( + fullname='NLLLoss2d_no_reduce', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), reduction=kwargs['reduction'])), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.rand(2, 3, 5, 5).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs), + pickle=False, + default_dtype=torch.double) + + +def nllloss2d_no_reduce_ignore_index_test(): + t = Variable(torch.rand(2, 5, 5).mul(3).floor().long()) + kwargs: Dict[str, Union[int, str]] = {'ignore_index': 1, 'reduction': 'none'} + return dict( + fullname='NLLLoss2d_no_reduce_ignore_index', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), ignore_index=int(kwargs['ignore_index']), + reduction=str(kwargs['reduction']))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(1).reduction(torch::kNone))''', + input_fn=lambda: torch.rand(2, 3, 5, 5).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs), + pickle=False, + default_dtype=torch.double) + + +def nllloss2d_no_reduce_weights_test(): + t = Variable(torch.rand(2, 5, 5).mul(3).floor().long()) + weight = torch.rand(3) + + def kwargs(i): + return {'weight': weight.type_as(i), 'reduction': 'none'} + + return dict( + fullname='NLLLoss2d_no_reduce_weights', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), + F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))''', + input_fn=lambda: torch.rand(2, 3, 5, 5).log(), + cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs(i)), + pickle=False, + default_dtype=torch.double) + + +def nlllossNd_no_reduce_test(): + t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long()) + kwargs = {'reduction': 'none'} + return dict( + fullname='NLLLossNd_no_reduce', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), reduction=kwargs['reduction'])), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs), + pickle=False, + default_dtype=torch.double) + + +def nlllossNd_no_reduce_ignore_index_test(): + t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long()) + kwargs: Dict[str, Union[int, str]] = {'ignore_index': 1, 'reduction': 'none'} + return dict( + fullname='NLLLossNd_no_reduce_ignore_index', + 
constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), ignore_index=int(kwargs['ignore_index']), + reduction=str(kwargs['reduction']))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(1).reduction(torch::kNone))''', + input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs), + pickle=False, + default_dtype=torch.double) + + +def nlllossNd_no_reduce_weights_test(): + t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long()) + weight = torch.rand(3) + + def kwargs(i): + return {'weight': weight.type_as(i), 'reduction': 'none'} + + return dict( + fullname='NLLLossNd_no_reduce_weights', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), + F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))''', + input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(), + cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs(i)), + pickle=False, + default_dtype=torch.double) + + +def smoothl1loss_no_reduce_test(): + t = torch.randn(2, 3, 4, dtype=torch.double) + return dict( + fullname='SmoothL1Loss_no_reduce', + constructor=wrap_functional( + lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='''F::smooth_l1_loss( + i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(2, 3, 4), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def smoothl1loss_no_reduce_scalar_test(): + t = torch.randn((), dtype=torch.double) + return dict( + fullname='SmoothL1Loss_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='''F::smooth_l1_loss( + i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(()), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def smoothl1loss_beta_test(): + t = torch.randn(2, 3, 4, dtype=torch.double) + return dict( + fullname='SmoothL1Loss_beta', + constructor=wrap_functional( + lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none', beta=0.5)), + cpp_function_call='''F::smooth_l1_loss( + i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone), 0.5)''', + input_fn=lambda: torch.randn(2, 3, 4), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none', beta=0.5), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def smoothl1loss_zero_beta_test(): + t = torch.randn(2, 3, 4, dtype=torch.double) + return dict( + fullname='SmoothL1Loss_zero_beta', + constructor=wrap_functional( + lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none', beta=0)), + cpp_function_call='''F::smooth_l1_loss( + i, t.to(i.options()), 
F::SmoothL1LossFuncOptions().reduction(torch::kNone), 0)''', + input_fn=lambda: torch.randn(2, 3, 4), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none', beta=0), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def huberloss_delta_test(): + t = torch.randn(2, 3, 4) + return dict( + fullname='HuberLoss_delta', + constructor=wrap_functional( + lambda i: F.huber_loss(i, t.type_as(i), reduction='none', delta=0.5)), + cpp_function_call='''F::huber_loss( + i, t.to(i.options()), F::HuberLossFuncOptions().reduction(torch::kNone).delta(0.5))''', + input_fn=lambda: torch.randn(2, 3, 4), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['HuberLoss'](i, t.type_as(i), reduction='none', delta=0.5), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def multilabelmarginloss_0d_no_reduce_test(): + t = torch.zeros(()).long() + return dict( + fullname='MultiLabelMarginLoss_0d_no_reduce', + constructor=wrap_functional( + lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multilabel_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(()), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False) + + +def multilabelmarginloss_1d_no_reduce_test(): + t = Variable(torch.rand(10).mul(10).floor().long()) + return dict( + fullname='MultiLabelMarginLoss_1d_no_reduce', + constructor=wrap_functional( + lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multilabel_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multilabelmarginloss_index_neg_test(): + t = Variable(torch.clamp(torch.rand(5, 10).add(-.5).mul(20).floor().long(), min=-1)) + return dict( + fullname='MultiLabelMarginLoss_index_neg', + constructor=wrap_functional( + lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multilabel_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multilabelmarginloss_no_reduce_test(): + t = Variable(torch.rand(5, 10).mul(10).floor().long()) + return dict( + fullname='MultiLabelMarginLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multilabel_margin_loss( + i, t.to(i.options()).to(torch::kLong), 
F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def hingeembeddingloss_no_reduce_test(): + t = Variable(torch.randn(10).gt(0).to(torch.double).mul_(2).sub(1)) + return dict( + fullname='HingeEmbeddingLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.hinge_embedding_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='''F::hinge_embedding_loss( + i, t.to(i.options()), F::HingeEmbeddingLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['HingeEmbeddingLoss'](i, t.type_as(i), reduction='none'), + check_sum_reduction=True, + pickle=False, + default_dtype=torch.double) + + +def hingeembeddingloss_margin_no_reduce_test(): + t = Variable(torch.randn(10).gt(0).to(torch.double).mul_(2).sub(1)) + return dict( + fullname='HingeEmbeddingLoss_margin_no_reduce', + constructor=wrap_functional( + lambda i: F.hinge_embedding_loss(i, t.type_as(i), margin=0.5, reduction='none')), + cpp_function_call='''F::hinge_embedding_loss( + i, t.to(i.options()), F::HingeEmbeddingLossFuncOptions().margin(0.5).reduction(torch::kNone))''', + input_fn=lambda: torch.randn(10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['HingeEmbeddingLoss'](i, t.type_as(i), margin=0.5, reduction='none'), + check_sum_reduction=True, + pickle=False, + default_dtype=torch.double) + + +def softmarginloss_no_reduce_test(): + t = torch.randn(5, 5, dtype=torch.double) + return dict( + fullname='SoftMarginLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.soft_margin_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='''F::soft_margin_loss( + i, t.to(i.options()), F::SoftMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 5), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['SoftMarginLoss'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def multilabelsoftmarginloss_no_reduce_test(): + t = torch.rand(5, 10).mul(2).floor() + return dict( + fullname='MultiLabelSoftMarginLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.multilabel_soft_margin_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='''F::multilabel_soft_margin_loss( + i, t.to(i.options()), F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + (-(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log())).sum(dim=1) / i.size(1), + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multilabelsoftmarginloss_weights_no_reduce_test(): + t = torch.rand(5, 10).mul(2).floor() + weights = torch.rand(10) + return dict( + fullname='MultiLabelSoftMarginLoss_weights_no_reduce', + constructor=wrap_functional( + lambda i: F.multilabel_soft_margin_loss(i, t.type_as(i), + weight=weights.type_as(i), reduction='none')), + cpp_function_call='''F::multilabel_soft_margin_loss( + i, t.to(i.options()), + 
F::MultilabelSoftMarginLossFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights}, + reference_fn=lambda i, *_: + (-(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()) * weights).sum(dim=1) / i.size(1), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multimarginloss_no_reduce_test(): + t = torch.rand(5).mul(8).floor().long() + return dict( + fullname='MultiMarginLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multi_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multimarginloss_1d_no_reduce_test(): + t = torch.rand(1).mul(8).floor().long() + return dict( + fullname='MultiMarginLoss_1d_no_reduce', + constructor=wrap_functional( + lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multi_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multimarginloss_1d_input_0d_target_no_reduce_test(): + t = torch.rand(()).mul(8).floor().long() + return dict( + fullname='multimarginloss_1d_input_0d_target_no_reduce', + constructor=wrap_functional( + lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multi_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multimarginloss_p_no_reduce_test(): + t = torch.rand(5).mul(8).floor().long() + return dict( + fullname='MultiMarginLoss_p_no_reduce', + constructor=wrap_functional( + lambda i: F.multi_margin_loss(i, t.type_as(i).long(), p=2, reduction='none')), + cpp_function_call='''F::multi_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().p(2).reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10).clamp_(1e-2, 1 - 1e-2), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), p=2, reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multimarginloss_margin_no_reduce_test(): + t = torch.rand(5).mul(8).floor().long() + return dict( + fullname='MultiMarginLoss_margin_no_reduce', + constructor=wrap_functional( + lambda i: F.multi_margin_loss(i, t.type_as(i).long(), margin=0.5, reduction='none')), 
+ cpp_function_call='''F::multi_margin_loss( + i, t.to(i.options()).to(torch::kLong), + F::MultiMarginLossFuncOptions().margin(0.5).reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), + margin=0.5, reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multimarginloss_weights_no_reduce_test(): + t = torch.rand(5).mul(8).floor().long() + weights = torch.rand(10, dtype=torch.double) + return dict( + fullname='MultiMarginLoss_weights_no_reduce', + constructor=wrap_functional( + lambda i: F.multi_margin_loss(i, t.type_as(i).long(), weight=weights.type_as(i), + reduction='none')), + cpp_function_call='''F::multi_margin_loss( + i, t.to(i.options()).to(torch::kLong), + F::MultiMarginLossFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), + weight=weights, reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def single_batch_reference_fn(input, parameters, module): + """Reference function for modules supporting no batch dimensions. + + The module is passed the input and target in batched form with a single item. + The output is squeezed to compare with the no-batch input. + """ + def unsqueeze_inp(inp): + if isinstance(inp, (list, tuple)): + return [t.unsqueeze(0) for t in inp] + return inp.unsqueeze(0) + + single_batch_input = unsqueeze_inp(input) + single_batch_input = [single_batch_input] if isinstance(single_batch_input, torch.Tensor) else single_batch_input + with freeze_rng_state(): + return module(*single_batch_input).squeeze(0) + + +new_module_tests = [ + poissonnllloss_no_reduce_test(), + bceloss_no_reduce_test(), + bceloss_weights_no_reduce_test(), + bce_with_logistic_legacy_enum_test(), + bce_with_logistic_no_reduce_test(), + bceloss_no_reduce_scalar_test(), + bceloss_weights_no_reduce_scalar_test(), + bce_with_logistic_no_reduce_scalar_test(), + kldivloss_with_target_no_reduce_test(), + kldivloss_no_reduce_test(), + kldivloss_no_reduce_scalar_test(), + kldivloss_with_log_target_no_reduce_test(), + kldivloss_no_reduce_log_target_test(), + kldivloss_no_reduce_scalar_log_target_test(), + l1loss_no_reduce_test(), + l1loss_no_reduce_complex_test(), + l1loss_no_reduce_scalar_test(), + mseloss_no_reduce_test(), + mseloss_no_reduce_scalar_test(), + nllloss_no_reduce_test(), + nllloss_no_reduce_ignore_index_test(), + nllloss_no_reduce_weights_test(), + nllloss_no_reduce_weights_ignore_index_test(), + nllloss_no_reduce_weights_ignore_index_neg_test(), + nllloss2d_no_reduce_test(), + nllloss2d_no_reduce_weights_test(), + nllloss2d_no_reduce_ignore_index_test(), + nlllossNd_no_reduce_test(), + nlllossNd_no_reduce_weights_test(), + nlllossNd_no_reduce_ignore_index_test(), + smoothl1loss_no_reduce_test(), + smoothl1loss_no_reduce_scalar_test(), + smoothl1loss_beta_test(), + smoothl1loss_zero_beta_test(), + huberloss_delta_test(), + multilabelmarginloss_0d_no_reduce_test(), + multilabelmarginloss_1d_no_reduce_test(), + multilabelmarginloss_index_neg_test(), + multilabelmarginloss_no_reduce_test(), + hingeembeddingloss_no_reduce_test(), + hingeembeddingloss_margin_no_reduce_test(), + 
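+ # NOTE (rough summary): each entry in `new_module_tests`, whether returned by one of the helper functions above or written as an inline dict below, is a declarative test case for the common NN test harness.
+ # `constructor`/`constructor_args` (or a `wrap_functional` lambda) build the module or functional under test, the `cpp_*` strings drive the C++ API parity checks,
+ # `input_size`/`input_fn` produce the test input, and `reference_fn`, when present, computes the expected output.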
softmarginloss_no_reduce_test(), + multilabelsoftmarginloss_no_reduce_test(), + multilabelsoftmarginloss_weights_no_reduce_test(), + multimarginloss_no_reduce_test(), + multimarginloss_1d_no_reduce_test(), + multimarginloss_1d_input_0d_target_no_reduce_test(), + multimarginloss_p_no_reduce_test(), + multimarginloss_margin_no_reduce_test(), + multimarginloss_weights_no_reduce_test(), + dict( + module_name='Conv1d', + constructor_args=(4, 5, 3), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3)', + input_size=(2, 4, 10), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv1d', + constructor_args=(4, 5, 3, 2), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).stride(2)', + input_size=(2, 4, 10), + cudnn=True, + desc='stride', + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv1d', + constructor_args=(4, 5, 3, 1, 1), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).stride(1).padding(1)', + input_size=(2, 4, 10), + cudnn=True, + desc='pad1', + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + module_name='Conv1d', + constructor_args=(4, 5, 5, 1, 2), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 5).stride(1).padding(2)', + input_size=(2, 4, 10), + cudnn=True, + desc='pad2', + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv1d', + constructor_args=(4, 4, 3, 1, 1), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 4, 3).stride(1).padding(1)', + input_size=(1, 4, 1), + cudnn=True, + desc='pad1size1', + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv1d', + constructor_args=(4, 4, 5, 1, 2), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 4, 5).stride(1).padding(2)', + input_size=(1, 4, 1), + cudnn=True, + desc='pad2size1', + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv1d', + constructor_args=(4, 5, 3), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3)', + input_size=(0, 4, 10), + cudnn=True, + desc='zero_batch', + with_tf32=True, + tf32_precision=0.005, + ), + dict( + fullname='Conv1d_dilated', + constructor=lambda: nn.Conv1d(4, 5, kernel_size=3, dilation=2), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).dilation(2)', + input_size=(2, 4, 10), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv1d_groups', + constructor=lambda: nn.Conv1d(4, 6, kernel_size=3, groups=2), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 6, 3).groups(2)', + input_size=(2, 4, 6), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv1d_pad_valid', + constructor=lambda: nn.Conv1d(4, 5, 3, padding="valid"), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).padding(torch::kValid)', + input_size=(2, 4, 10), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv1d_pad_same', + constructor=lambda: nn.Conv1d(4, 5, 3, padding="same"), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).padding(torch::kSame)', + input_size=(2, 4, 10), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv1d_pad_same2', + constructor=lambda: nn.Conv1d(4, 5, 4, padding="same"), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 
4).padding(torch::kSame)', + input_size=(2, 4, 10), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv1d_pad_same_dilated', + constructor=lambda: nn.Conv1d(4, 5, 4, padding="same", dilation=2), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 4).padding(torch::kSame).dilation(2)', + input_size=(2, 4, 10), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='ConvTranspose1d', + constructor=lambda: nn.ConvTranspose1d(3, 4, kernel_size=3, stride=(3,), padding=1, output_padding=(1,)), + cpp_constructor_args='torch::nn::ConvTranspose1dOptions(3, 4, 3).stride(3).padding(1).output_padding(1)', + cudnn=True, + input_size=(1, 3, 7), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose1d', + constructor_args=(3, 4, 3, 2, 1, 1, 1, False), + cpp_constructor_args='''torch::nn::ConvTranspose1dOptions(3, 4, 3) + .stride(2).padding(1).output_padding(1).groups(1).bias(false)''', + input_size=(1, 3, 6), + cudnn=True, + desc='no_bias', + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose1d', + constructor_args=(3, 4, 3, 2, 1, 1, 1, True, 2), + cpp_constructor_args='''torch::nn::ConvTranspose1dOptions(3, 4, 3) + .stride(2).padding(1).output_padding(1).groups(1).bias(true).dilation(2)''', + input_size=(1, 3, 6), + cudnn=True, + desc='dilated', + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='ConvTranspose1d_groups', + constructor=lambda: nn.ConvTranspose1d(4, 6, 3, stride=(3,), padding=1, output_padding=(1,), groups=2), + cpp_constructor_args='''torch::nn::ConvTranspose1dOptions(4, 6, 3) + .stride(3).padding(1).output_padding(1).groups(2)''', + cudnn=True, + input_size=(2, 4, 7), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv2d', + constructor_args=(3, 4, (3, 2)), + cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 2})', + input_size=(2, 3, 7, 5), + cudnn=True, + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv2d', + constructor_args=(3, 4, (3, 3), (2, 2)), + cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 3}).stride({2, 2})', + input_size=(2, 3, 6, 6), + cudnn=True, + desc='strided', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv2d', + constructor_args=(3, 4, (3, 3), (2, 2), (1, 1)), + cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 3}).stride({2, 2}).padding({1, 1})', + input_size=(2, 3, 6, 6), + cudnn=True, + desc='padding', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv2d', + constructor_args=(3, 2, (3, 3), (2, 2), (1, 1), (2, 2)), + cpp_constructor_args='torch::nn::Conv2dOptions(3, 2, {3, 3}).stride({2, 2}).padding({1, 1}).dilation({2, 2})', + input_size=(2, 3, 8, 8), + cudnn=True, + desc='dilated', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv2d', + constructor_args=(3, 4, (3, 2), 1, 0, 1, 1, False), + cpp_constructor_args='''torch::nn::Conv2dOptions(3, 4, {3, 2}) + .stride(1).padding(0).dilation(1).groups(1).bias(false)''', + input_size=(2, 3, 6, 5), + cudnn=True, + desc='no_bias',
+ check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.015, + default_dtype=torch.double, + ), + dict( + module_name='Conv2d', + constructor_args=(3, 4, (3, 2)), + cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 2})', + input_size=(0, 3, 7, 5), + cudnn=True, + desc='zero_batch', + check_with_long_tensor=True, + with_tf32=True, + ), + dict( + fullname='Conv2d_groups', + constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 6, {3, 2}).groups(2)', + input_size=(2, 4, 6, 5), + cudnn=True, + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.015, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_groups_thnn', + constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 6, {3, 2}).groups(2)', + input_size=(2, 4, 6, 5), + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.015, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_pad_valid', + constructor=lambda: nn.Conv2d(2, 4, (3, 4), padding="valid"), + cpp_constructor_args='torch::nn::Conv2dOptions(2, 4, {3, 4}).padding(torch::kValid)', + input_size=(2, 2, 6, 5), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_pad_same', + constructor=lambda: nn.Conv2d(2, 4, (3, 4), padding="same"), + cpp_constructor_args='torch::nn::Conv2dOptions(2, 4, {3, 4}).padding(torch::kSame)', + input_size=(2, 2, 6, 5), + cudnn=True, + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_pad_same_dilated', + constructor=lambda: nn.Conv2d(2, 4, (3, 4), padding="same", dilation=2), + cpp_constructor_args='torch::nn::Conv2dOptions(2, 4, {3, 4}).padding(torch::kSame).dilation(2)', + input_size=(2, 2, 6, 5), + cudnn=True, + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose2d', + constructor_args=(3, 4, 3, (3, 2), 1, (1, 1)), + cpp_constructor_args='''torch::nn::ConvTranspose2dOptions(3, 4, 3) + .stride({3, 2}).padding(1).output_padding({1, 1})''', + cudnn=True, + input_size=(1, 3, 7, 6), + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose2d', + constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False, (2, 2)), + cpp_constructor_args='''torch::nn::ConvTranspose2dOptions(3, 4, 3) + .stride({2, 3}) + .padding(1) + .output_padding({1, 1}) + .groups(1) + .bias(false) + .dilation({2, 2})''', + input_size=(1, 3, 6, 7), + cudnn=True, + desc='dilated', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose2d', + constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False), + cpp_constructor_args='''torch::nn::ConvTranspose2dOptions(3, 4, 3) + .stride({2, 3}).padding(1).output_padding({1, 1}).groups(1).bias(false)''', + input_size=(1, 3, 6, 7), + cudnn=True, + desc='no_bias', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + fullname='ConvTranspose2d_groups', + constructor=lambda: nn.ConvTranspose2d(2, 4, (2, 3), groups=2), + cpp_constructor_args='torch::nn::ConvTranspose2dOptions(2, 4, {2, 3}).groups(2)', + input_size=(1, 2, 4, 5), + cudnn=True, + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_depthwise', + 
constructor=lambda: nn.Conv2d(4, 4, (3, 3), groups=4), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {3, 3}).groups(4)', + input_size=(2, 4, 6, 6), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_depthwise_with_multiplier', + constructor=lambda: nn.Conv2d(4, 8, (3, 3), groups=4), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 8, {3, 3}).groups(4)', + input_size=(2, 4, 6, 6), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_depthwise_strided', + constructor=lambda: nn.Conv2d(4, 4, (3, 3), stride=(2, 2), groups=4), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {3, 3}).stride({2, 2}).groups(4)', + input_size=(2, 4, 6, 6), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_depthwise_padded', + constructor=lambda: nn.Conv2d(4, 4, (3, 3), padding=(1, 1), groups=4), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {3, 3}).padding({1, 1}).groups(4)', + input_size=(2, 4, 6, 6), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_depthwise_dilated', + constructor=lambda: nn.Conv2d(4, 4, (2, 2), dilation=(2, 2), groups=4), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {2, 2}).dilation({2, 2}).groups(4)', + input_size=(2, 4, 5, 5), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv3d', + constructor_args=(2, 3, (2, 3, 2)), + cpp_constructor_args='torch::nn::Conv3dOptions(2, 3, {2, 3, 2})', + input_size=(1, 2, 4, 5, 4), + cudnn=True, + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='Conv3d', + constructor_args=(2, 3, (2, 3, 4), 1, 0, 1, 1, False), + cpp_constructor_args='''torch::nn::Conv3dOptions(2, 3, {2, 3, 4}) + .stride(1).padding(0).dilation(1).groups(1).bias(false)''', + input_size=(1, 2, 3, 4, 5), + cudnn=True, + desc='no_bias', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='Conv3d', + constructor_args=(2, 3, (1, 1, 1), 1, 0, 1, 1, False), + cpp_constructor_args='''torch::nn::Conv3dOptions(2, 3, {1, 1, 1}) + .stride(1).padding(0).dilation(1).groups(1).bias(false)''', + input_size=(1, 2, 3, 4, 5), + cudnn=True, + desc='1x1x1_no_bias', + check_with_long_tensor=False, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='Conv3d', + constructor_args=(3, 4, 2, 2), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).stride(2)', + input_size=(2, 3, 5, 5, 5), + cudnn=True, + desc='stride', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='Conv3d', + constructor_args=(3, 4, 2, 2, 1), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).stride(2).padding(1)', + input_size=(2, 3, 5, 5, 5), + cudnn=True, + desc='stride_padding', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='Conv3d', + constructor_args=(3, 4, (2, 3, 4)), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4})', + input_size=(0, 3, 3, 4, 5), + cudnn=True, + check_with_long_tensor=True, + desc='zero_batch', + with_tf32=True, + ), + dict( + fullname='Conv3d_groups', + constructor=lambda: nn.Conv3d(2, 4, kernel_size=3, groups=2), +
cpp_constructor_args='torch::nn::Conv3dOptions(2, 4, 3).groups(2)', + input_size=(1, 2, 4, 5, 4), + cudnn=True, + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv3d_dilated', + constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).dilation(2)', + input_size=(2, 3, 5, 5, 5), + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + fullname='Conv3d_dilated_strided', + constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2, stride=2), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).dilation(2).stride(2)', + input_size=(2, 3, 5, 5, 5), + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + fullname='Conv3d_pad_valid', + constructor=lambda: nn.Conv3d(3, 4, (2, 3, 4), padding="valid"), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4}).padding(torch::kValid)', + input_size=(2, 3, 6, 5, 4), + cudnn=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + fullname='Conv3d_pad_same', + constructor=lambda: nn.Conv3d(3, 4, (2, 3, 4), padding="same"), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4}).padding(torch::kSame)', + input_size=(2, 3, 6, 5, 4), + cudnn=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + fullname='Conv3d_pad_same_dilated', + constructor=lambda: nn.Conv3d(3, 4, (2, 3, 4), padding="same", dilation=2), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4}).padding(torch::kSame).dilation(2)', + input_size=(2, 3, 6, 5, 4), + cudnn=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose3d', + constructor_args=(2, 3, (2, 3, 2)), + cpp_constructor_args='torch::nn::ConvTranspose3dOptions(2, 3, {2, 3, 2})', + cudnn=True, + input_size=(1, 2, 4, 5, 4), + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose3d', + constructor_args=(2, 3, (2, 3, 2), 1, 0, 0, 1, True, (2, 2, 2)), + cpp_constructor_args='''torch::nn::ConvTranspose3dOptions(2, 3, {2, 3, 2}) + .stride(1).padding(0).output_padding(0).groups(1).bias(true).dilation({2, 2, 2})''', + cudnn=True, + input_size=(1, 2, 4, 5, 4), + desc='dilated', + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='ReplicationPad3d', + constructor_args=((1, 2, 3, 3, 2, 1),), + cpp_constructor_args='torch::nn::ReplicationPad3dOptions({1, 2, 3, 3, 2, 1})', + input_size=(2, 3, 2, 2, 2), + default_dtype=torch.double, + ), + dict( + module_name='ReplicationPad3d', + constructor_args=((1, 2, 3, 3, 2, 1),), + cpp_constructor_args='torch::nn::ReplicationPad3dOptions({1, 2, 3, 3, 2, 1})', + input_size=(3, 2, 2, 2), + reference_fn=single_batch_reference_fn, + desc='no_batch_dim', + default_dtype=torch.double, + ), + dict( + module_name='ReplicationPad3d', + constructor_args=((1, 2, 3, 3, 2, 1),), + cpp_constructor_args='torch::nn::ReplicationPad3dOptions({1, 2, 3, 3, 2, 1})', + input_fn=lambda: torch.rand(2, 3, 2, 2, 2, dtype=torch.complex128, requires_grad=True), + skip_half=True, + desc='complex' + ), + dict( + module_name='Embedding', + constructor_args=(4, 3), + cpp_constructor_args='torch::nn::EmbeddingOptions(4, 3)', + input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4), + check_gradgrad=False, + default_dtype=torch.double, + 
decorator=skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/117971") + ), + dict( + module_name='Embedding', + constructor_args=(4, 3), + cpp_constructor_args='torch::nn::EmbeddingOptions(4, 3)', + input_fn=lambda: torch.empty(1, 512, dtype=torch.long).random_(4).expand(7, 512), + check_gradgrad=False, + desc='discontiguous', + default_dtype=torch.double, + decorator=skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/117971") + ), + dict( + module_name='EmbeddingBag', + constructor_args=(4, 3), + cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3)', + input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4), + check_gradgrad=False, + desc='mean', + default_dtype=torch.double, + ), + dict( + module_name='EmbeddingBag', + constructor_args=(4, 3), + cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3)', + input_fn=lambda: torch.empty(1, 512, dtype=torch.long).random_(4).expand(7, 512), + check_gradgrad=False, + desc='discontiguous', + default_dtype=torch.double, + ), + dict( + module_name='EmbeddingBag', + constructor_args=(4, 3, None, 2., False, 'sum'), + cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) + .max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kSum)''', + input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4), + check_gradgrad=False, + desc='sum', + default_dtype=torch.double, + ), + dict( + module_name='EmbeddingBag', + constructor_args=(4, 3, None, 2., False, 'max'), + cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) + .max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kMax)''', + input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4), + check_gradgrad=False, + desc='max', + default_dtype=torch.double, + ), + dict( + fullname='EmbeddingBag_mean_padding_idx', + constructor=lambda: nn.EmbeddingBag(4, 3, padding_idx=1), + cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3).padding_idx(1)', + input_fn=lambda: torch.stack([torch.randperm(3), torch.randperm(3)]), + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + fullname='EmbeddingBag_sum_padding_idx', + constructor=lambda: nn.EmbeddingBag(4, 3, None, 2., False, 'sum', padding_idx=1), + cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) + .max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kSum).padding_idx(1)''', + input_fn=lambda: torch.stack([torch.randperm(3), torch.randperm(3)]), + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + fullname='EmbeddingBag_max_padding_idx', + constructor=lambda: nn.EmbeddingBag(4, 3, None, 2., False, 'max', padding_idx=1), + cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) + .max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kMax).padding_idx(1)''', + input_fn=lambda: torch.stack([torch.randperm(3), torch.randperm(3)]), + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + fullname='EmbeddingBag_sparse', + constructor=lambda: nn.EmbeddingBag(4, 3, sparse=True, dtype=torch.double), + cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3).sparse(true)._weight(torch::rand({4, 3}).to(torch::kFloat64))', + input_fn=lambda: torch.randperm(2).repeat(1, 2), + check_gradgrad=False, + has_sparse_gradients=True, + ), + dict( + constructor=lambda: nn.Embedding(4, 3, dtype=torch.double, sparse=True), + cpp_constructor_args='torch::nn::EmbeddingOptions(4, 3).sparse(true)._weight(torch::rand({4, 3}).to(torch::kFloat64))', + input_fn=lambda: 
torch.randperm(2).repeat(1, 2), + fullname='Embedding_sparse', + check_gradgrad=False, + has_sparse_gradients=True, + ), + dict( + module_name='PixelShuffle', + constructor_args=(3,), + cpp_constructor_args='torch::nn::PixelShuffleOptions(3)', + input_size=(1, 9, 4, 4), + default_dtype=torch.double, + ), + dict( + module_name='PixelUnshuffle', + constructor_args=(3,), + cpp_constructor_args='torch::nn::PixelUnshuffleOptions(3)', + input_size=(1, 1, 12, 12), + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12})).scale_factor(c10::nullopt).mode(torch::kNearest)''', + input_size=(1, 2, 4), + fullname='interpolate_nearest_1d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12})).scale_factor(c10::nullopt).mode(torch::kNearest)''', + input_size=(0, 2, 4), + fullname='interpolate_nearest_1d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(12, ), scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12})).scale_factor(c10::nullopt).mode(torch::kNearest)''', + input_size=(1, 2, 3), + fullname='interpolate_nearest_tuple_1d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt).scale_factor(std::vector({4.})).mode(torch::kNearest)''', + input_size=(1, 2, 4), + fullname='interpolate_nearest_scale_1d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12})) + .scale_factor(c10::nullopt) + .mode(torch::kLinear) + .align_corners(false)''', + input_size=(1, 2, 4), + fullname='interpolate_linear_1d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, ), scale_factor=None, mode='linear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4})) + .scale_factor(c10::nullopt) + .mode(torch::kLinear) + .align_corners(false)''', + input_size=(1, 2, 3), + fullname='interpolate_linear_tuple_1d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='linear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({4.})) + .mode(torch::kLinear) + .align_corners(false)''', + input_size=(1, 2, 4), + fullname='interpolate_linear_scale_1d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12})) + .scale_factor(c10::nullopt) + .mode(torch::kLinear) + .align_corners(false)''', + input_size=(0, 2, 4), + fullname='interpolate_linear_1d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=True), + 
cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12})) + .scale_factor(c10::nullopt) + .mode(torch::kLinear) + .align_corners(true)''', + input_size=(1, 2, 4), + fullname='interpolate_linear_1d_align_corners', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='linear', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({4.})) + .mode(torch::kLinear) + .align_corners(true)''', + input_size=(1, 2, 4), + fullname='interpolate_linear_scale_1d_align_corners', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=2, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({2, 2})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(1, 128, 1, 1), + fullname='interpolate_nearest_2d_launch_configs', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_nearest_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(12, 16), scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 16})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(1, 2, 3, 4), + fullname='interpolate_nearest_tuple_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({4., 4.})) + .mode(torch::kNearest)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_nearest_scale_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(0, 2, 4, 4), + fullname='interpolate_nearest_2d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kBilinear) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bilinear_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kBilinear) + .align_corners(false)''', + input_size=(0, 2, 4, 4), + fullname='interpolate_bilinear_2d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, + mode='bilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4, 6})) + .scale_factor(c10::nullopt) + .mode(torch::kBilinear) + .align_corners(false)''', + input_size=(1, 2, 
2, 3), + fullname='interpolate_bilinear_tuple_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., + mode='bilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({4., 4.})) + .mode(torch::kBilinear) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bilinear_scale_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 2.), + mode='bilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({2., 2.})) + .mode(torch::kBilinear) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bilinear_scale_tuple_shared_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.), + mode='bilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({2., 1.})) + .mode(torch::kBilinear) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bilinear_scale_tuple_skewed_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, mode='bilinear', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4, 6})) + .scale_factor(c10::nullopt) + .mode(torch::kBilinear) + .align_corners(true)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bilinear_tuple_2d_align_corners', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.), + mode='bilinear', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({2., 1.})) + .mode(torch::kBilinear) + .align_corners(true)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bilinear_scale_tuple_skewed_2d_align_corners', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bicubic', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kBicubic) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bicubic_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bicubic', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kBicubic) + .align_corners(false)''', + input_size=(0, 2, 4, 4), + fullname='interpolate_bicubic_2d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, + mode='bicubic', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4, 6})) + .scale_factor(c10::nullopt) + .mode(torch::kBicubic) + .align_corners(false)''', + input_size=(1, 2, 2, 3), + fullname='interpolate_bicubic_tuple_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='bicubic', align_corners=False), + 
cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({4., 4.})) + .mode(torch::kBicubic) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bicubic_scale_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 2.), + mode='bicubic', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({2., 2.})) + .mode(torch::kBicubic) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bicubic_scale_tuple_shared_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.), + mode='bicubic', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({2., 1.})) + .mode(torch::kBicubic) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bicubic_scale_tuple_skewed_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, mode='bicubic', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4, 6})) + .scale_factor(c10::nullopt) + .mode(torch::kBicubic) + .align_corners(true)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bicubic_tuple_2d_align_corners', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.), + mode='bicubic', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({2., 1.})) + .mode(torch::kBicubic) + .align_corners(true)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bicubic_scale_tuple_skewed_2d_align_corners', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(1, 2, 4, 4, 4), + fullname='interpolate_nearest_3d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(0, 2, 4, 4, 4), + fullname='interpolate_nearest_3d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(12, 16, 16), scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 16, 16})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(1, 2, 3, 4, 4), + fullname='interpolate_nearest_tuple_3d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({4., 4., 4.})) + .mode(torch::kNearest)''', + input_size=(1, 2, 4, 4, 4), + fullname='interpolate_nearest_scale_3d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='trilinear', align_corners=False), + 
cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kTrilinear) + .align_corners(false)''', + input_size=(1, 2, 4, 4, 4), + fullname='interpolate_trilinear_3d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='trilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kTrilinear) + .align_corners(false)''', + input_size=(0, 2, 4, 4, 4), + fullname='interpolate_trilinear_3d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, 6, 6), + scale_factor=None, mode='trilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4, 6, 6})) + .scale_factor(c10::nullopt) + .mode(torch::kTrilinear) + .align_corners(false)''', + input_size=(1, 2, 2, 3, 3), + fullname='interpolate_trilinear_tuple_3d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=3., mode='trilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({3., 3., 3.})) + .mode(torch::kTrilinear) + .align_corners(false)''', + input_size=(1, 2, 3, 4, 5), + fullname='interpolate_trilinear_scale_3d', + # See https://github.com/pytorch/pytorch/issues/5006 + precision=3e-4, + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, 6, 6), scale_factor=None, + mode='trilinear', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4, 6, 6})) + .scale_factor(c10::nullopt) + .mode(torch::kTrilinear) + .align_corners(true)''', + input_size=(1, 2, 2, 3, 3), + fullname='interpolate_trilinear_tuple_3d_align_corners', + pickle=False, + default_dtype=torch.double + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=3., mode='trilinear', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({3., 3., 3.})) + .mode(torch::kTrilinear) + .align_corners(true)''', + input_size=(1, 2, 3, 4, 4), + fullname='interpolate_trilinear_scale_3d_align_corners', + # See https://github.com/pytorch/pytorch/issues/5006 + precision=3e-4, + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=-1), + cpp_options_args='F::SoftmaxFuncOptions(-1)', + input_size=(2, 128), # trigger the last-dim algo in CUDA + fullname='softmax_lastdim', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=1, dtype=torch.float64), + cpp_options_args='F::SoftmaxFuncOptions(1).dtype(torch::kFloat64)', + input_size=(2, 128), + fullname='softmax_lastdim_dtype', + pickle=False, + test_cuda=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=1), + cpp_options_args='F::SoftmaxFuncOptions(1)', + input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo + fullname='softmax_spatial_special', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=1), + cpp_options_args='F::SoftmaxFuncOptions(1)', + input_size=(2, 2, 4, 4), # regular spatial algorithm + fullname='softmax_spatial', + pickle=False, + default_dtype=torch.double, + 
), + dict( + constructor=wrap_functional(F.softmax, dim=1, dtype=torch.float64), + cpp_options_args='F::SoftmaxFuncOptions(1).dtype(torch::kFloat64)', + input_size=(2, 2, 4, 4), # regular spatial algorithm + fullname='softmax_spatial_dtype', + pickle=False, + test_cuda=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=0), + cpp_options_args='F::SoftmaxFuncOptions(0)', + input_size=(2, 3, 4, 5), + fullname='softmax_functional_dim0', + test_cuda=False, + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=3), + cpp_options_args='F::SoftmaxFuncOptions(3)', + input_size=(2, 3, 4, 5), + fullname='softmax_functional_dim3', + test_cuda=False, + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=-1), + cpp_options_args='F::SoftmaxFuncOptions(-1)', + input_size=(), + fullname='softmax_functional_scalar', + test_cuda=False, + pickle=False, + ), + dict( + constructor=wrap_functional(F.log_softmax, dim=-1), + cpp_options_args='F::LogSoftmaxFuncOptions(-1)', + input_size=(2, 128), # trigger the last-dim algo in CUDA + fullname='log_softmax_lastdim', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.log_softmax, dim=1), + cpp_options_args='F::LogSoftmaxFuncOptions(1)', + input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo + fullname='log_softmax_spatial_special', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.log_softmax, dim=1), + cpp_options_args='F::LogSoftmaxFuncOptions(1)', + input_size=(2, 2, 4, 4), # regular spatial algorithm + fullname='log_softmax_spatial', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.log_softmax, dim=0), + cpp_options_args='F::LogSoftmaxFuncOptions(0)', + input_size=(2, 3, 4, 5), + fullname='log_softmax_dim0', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.log_softmax, dim=3), + cpp_options_args='F::LogSoftmaxFuncOptions(3)', + input_size=(2, 3, 4, 5), + fullname='log_softmax_dim3', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.log_softmax, dim=0), + cpp_options_args='F::LogSoftmaxFuncOptions(0)', + input_size=(), + fullname='log_softmax_scalar', + pickle=False, + ), + dict( + fullname='Unfold', + constructor=lambda: nn.Unfold((2, 2), (1, 1), (0, 0), (1, 1)), + cpp_constructor_args='torch::nn::UnfoldOptions({2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})', + input_size=(2, 4, 3, 3), + check_gradgrad=False, + test_cuda=True, + default_dtype=torch.double, + ), + dict( + fullname='Fold', + constructor=lambda: nn.Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1)), + cpp_constructor_args='torch::nn::FoldOptions({3, 3}, {2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})', + input_size=(2, 16, 4), + check_gradgrad=False, + test_cuda=True, + default_dtype=torch.double, + ), + dict( + fullname='Fold_no_batch_dim_input', + constructor=lambda: nn.Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1)), + cpp_constructor_args='torch::nn::FoldOptions({3, 3}, {2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})', + input_size=(16, 4), + check_gradgrad=False, + ref=single_batch_reference_fn, + test_cuda=True, + default_dtype=torch.double, + ), + dict( + fullname='Unfold_int_input', + constructor=lambda: nn.Unfold(2, 1, 0, 1), + cpp_constructor_args='torch::nn::UnfoldOptions(2).dilation(1).padding(0).stride(1)', + 
input_size=(2, 4, 3, 3), + check_gradgrad=False, + test_cuda=True, + default_dtype=torch.double, + ), + dict( + fullname='Fold_int_input', + constructor=lambda: nn.Fold(3, 2, 1, 0, 1), + cpp_constructor_args='torch::nn::FoldOptions(3, 2).dilation(1).padding(0).stride(1)', + input_size=(2, 16, 4), + check_gradgrad=False, + test_cuda=True, + default_dtype=torch.double, + ), + dict( + fullname='Fold_no_batch_dim_int_input', + constructor=lambda: nn.Fold(3, 2, 1, 0, 1), + cpp_constructor_args='torch::nn::FoldOptions(3, 2).dilation(1).padding(0).stride(1)', + input_size=(16, 4), + ref=single_batch_reference_fn, + check_gradgrad=False, + test_cuda=True, + default_dtype=torch.double, + ), + dict( + module_name='RReLU', + constructor_args=(0.1, 0.9), + cpp_constructor_args='torch::nn::RReLUOptions().lower(0.1).upper(0.9)', + input_size=(), + desc='with_up_down_scalar', + test_cuda=False, + default_dtype=torch.double, + ), + dict( + module_name='PairwiseDistance', + input_fn=lambda: (torch.randn(10, 8), torch.randn(10, 8)), + default_dtype=torch.double, + ), + dict( + module_name='PairwiseDistance', + input_fn=lambda: (torch.randn(10, 1), torch.randn(10, 8)), + desc='broadcast_lhs', + default_dtype=torch.double, + ), + dict( + module_name='PairwiseDistance', + input_fn=lambda: (torch.randn(10, 8), torch.randn(1, 8)), + desc='broadcast_rhs', + default_dtype=torch.double, + ), + dict( + module_name='PairwiseDistance', + constructor_args=(1.5, 1e-05, True), + cpp_constructor_args='torch::nn::PairwiseDistanceOptions().p(1.5).eps(1e-05).keepdim(true)', + input_fn=lambda: (torch.randn(10, 8), torch.randn(10, 8)), + desc='with_non_default_args', + default_dtype=torch.double, + ), + dict( + module_name='PairwiseDistance', + input_fn=lambda: (torch.randn(8), torch.randn(8)), + reference_fn=single_batch_reference_fn, + desc='no_batch_dim', + default_dtype=torch.double, + ), + dict( + module_name='TransformerEncoderLayer', + constructor_args=(4, 2, 16, 0.0), + cpp_constructor_args='''torch::nn::TransformerEncoderLayerOptions(4, 2) + .dim_feedforward(16) + .dropout(0.0)''', + input_size=(2, 3, 4), + desc='relu_activation', + with_tf32=True, + tf32_precision=0.1, + # TODO(#50743): figure out the error + # RuntimeError: The size of tensor a (6) must match the size of tensor b (4) + # at non-singleton dimension 2 + check_batched_grad=False, + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + module_name='TransformerEncoderLayer', + constructor_args=(4, 2, 8, 0.0, F.gelu), + cpp_constructor_args='''torch::nn::TransformerEncoderLayerOptions(4, 2) + .dim_feedforward(8) + .dropout(0.0) + .activation(torch::kGELU)''', + input_size=(2, 3, 4), + check_gradgrad=False, + desc='gelu_activation', + with_tf32=True, + tf32_precision=0.08 if SM90OrLater else 0.05, + default_dtype=torch.double, + ), + dict( + module_name='TransformerDecoderLayer', + constructor_args=(4, 2, 8, 0.0), + cpp_constructor_args='''torch::nn::TransformerDecoderLayerOptions(4, 2) + .dim_feedforward(8) + .dropout(0.0)''', + input_fn=lambda: (torch.rand(3, 3, 4), torch.rand(2, 3, 4)), + check_gradgrad=False, + desc='relu_activation', + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='TransformerDecoderLayer', + constructor_args=(4, 2, 8, 0.0, F.gelu), + cpp_constructor_args='''torch::nn::TransformerDecoderLayerOptions(4, 2) + .dim_feedforward(8) + .dropout(0.0) + .activation(torch::kGELU)''', + input_fn=lambda: (torch.rand(3, 3, 4), torch.rand(2, 3, 4)), + check_gradgrad=False, + 
desc='gelu_activation', + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='Transformer', + constructor_args=(4, 2, 2, 2, 8, 0.0, F.relu), + cpp_constructor_args='''torch::nn::TransformerOptions() + .d_model(4) + .nhead(2) + .num_encoder_layers(2) + .num_decoder_layers(2) + .dim_feedforward(8) + .dropout(0.0) + .activation(torch::kReLU)''', + input_fn=lambda: (torch.rand(3, 3, 4), torch.rand(2, 3, 4), torch.rand(3, 3)), + check_gradgrad=False, + desc='multilayer_coder', + with_tf32=True, + tf32_precision=0.05 if SM90OrLater else 0.03, + default_dtype=torch.double, + ), + dict( + module_name='Linear', + constructor_args=(3, 5), + cpp_constructor_args='torch::nn::LinearOptions(3, 5)', + input_fn=lambda: torch.rand(3), + reference_fn=lambda i, p, _: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1], + desc="no_batch_dim", + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Flatten', + cpp_constructor_args='torch::nn::FlattenOptions().start_dim(-3).end_dim(-1)', + constructor_args=(-3, -1), + input_size=(3, 4, 5), + reference_fn=single_batch_reference_fn, + desc="no_batch_dim", + default_dtype=torch.double, + ), + dict( + module_name='Unflatten', + cpp_constructor_args='torch::nn::UnflattenOptions(-2, {2, 2})', + constructor_args=(-2, torch.Size([2, 2])), + input_size=(3, 4, 5), + reference_fn=single_batch_reference_fn, + desc="no_batch_dim", + default_dtype=torch.double, + ), + dict( + module_name='LayerNorm', + constructor_args=([56, 56, 56], 1e-5, False), + cpp_constructor_args='torch::nn::LayerNormOptions({56, 56, 56}).eps(1e-5).elementwise_affine(false)', + input_size=(4, 56, 56, 56), + cudnn=True, + check_eval=True, + gradcheck_fast_mode=True, + check_half=True, + desc='3d_no_affine_large_feature', + ), +] + +# add conv padding mode tests: +for padding_mode, cpp_padding_mode in zip( + ['reflect', 'circular', 'replicate', 'zeros'], + ['torch::kReflect', 'torch::kCircular', 'torch::kReplicate', 'torch::kZeros']): + # conv signature: + # in_channels, out_channels, kernel_size, stride=1, + # padding=0, dilation=1, groups=1, + # bias=True, padding_mode='zeros' + for d in (1, 2, 3): + if d == 3 and padding_mode == 'reflect': + # FIXME: remove after implementing reflection pad 3d + # https://github.com/pytorch/pytorch/issues/27655 + continue + padding = tuple(range(1, d + 1)) + cpp_padding = '{' + ', '.join(map(str, padding)) + '}' + input_size = (2, 2) + (4,) * d + output_size = (2, 3) + tuple(p + 1 for p in padding) # simplified from `(4 + 2 * p - 3) // 2 + 1` + new_module_tests.append( + dict( + module_name=f'Conv{d}d', + constructor_args=(2, 3, 3, 2, padding, 1, 1, True, padding_mode), + cpp_constructor_args=f'''torch::nn::Conv{d}dOptions(2, 3, 3) + .stride(2) + .padding({cpp_padding}) + .dilation(1) + .groups(1) + .bias(true) + .padding_mode({cpp_padding_mode})''', + input_size=input_size, + output_size=output_size, + cudnn=True, + desc=f'{padding_mode}_stride2_pad2', + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + ) + +# Check that non linear activations work with no batch dimensions +non_linear_activations_no_batch = [ + 'ELU', 'Hardshrink', 'Hardsigmoid', 'Hardtanh', 'Hardswish', 'LeakyReLU', + 'LogSigmoid', 'PReLU', 'ReLU', 'ReLU6', 'RReLU', 'SELU', 'CELU', 'GELU', 'GLU', + 'Sigmoid', 'SiLU', 'Mish', 'Softplus', 'Softshrink', 'Softsign', 'Tanh', + 'Tanhshrink', 'Threshold' +] +non_linear_activations_extra_info: Dict[str, dict] = { + 'CELU': {'constructor_args': 
(2.,), 'default_dtype': torch.double}, + 'Threshold': {'constructor_args': (2., 1.)}, + 'Hardsigmoid': {'check_gradgrad': False, 'check_jit': False, 'default_dtype': torch.double}, + 'Hardswish': {'check_gradgrad': False, 'check_jit': False, 'default_dtype': torch.double}, + # For RReLU, tests that compare CPU and GPU results fail because the RNG + # is different between CPU and GPU + 'RReLU': {'test_cuda': False, 'default_dtype': torch.double}, + 'ELU': {'default_dtype': torch.double}, + 'GELU': {'default_dtype': torch.double}, + 'GLU': {'default_dtype': torch.double}, + 'Hardshrink': {'default_dtype': torch.double}, + 'Hardtanh': {'default_dtype': torch.double}, + 'LeakyReLU': {'default_dtype': torch.double}, + 'LogSigmoid': {'default_dtype': torch.double}, + 'Mish': {'default_dtype': torch.double}, + 'PReLU': {'default_dtype': torch.double}, + 'ReLU6': {'default_dtype': torch.double}, + 'ReLU': {'default_dtype': torch.double}, + 'SELU': {'default_dtype': torch.double}, + 'SiLU': {'default_dtype': torch.double}, + 'Sigmoid': {'default_dtype': torch.double}, + 'Softplus': {'default_dtype': torch.double}, + 'Softshrink': {'default_dtype': torch.double}, + 'Softsign': {'default_dtype': torch.double}, + 'Tanh': {'default_dtype': torch.double}, + 'Tanhshrink': {'default_dtype': torch.double}, +} +for non_linear_activation in non_linear_activations_no_batch: + activation_test_info = dict( + module_name=non_linear_activation, + input_size=(4,), + reference_fn=single_batch_reference_fn, + desc='no_batch_dim', + test_cpp_api_parity=False, + ) + extra_info = non_linear_activations_extra_info.get(non_linear_activation, {}) + activation_test_info.update(extra_info) + new_module_tests.append(activation_test_info) + + +def kldivloss_reference(input, target, reduction='mean', log_target=False): + if log_target: + result = torch.exp(target) * (target - input) + else: + result = target * (target.log() - input) + if reduction == 'mean': + return result.mean() + elif reduction == 'sum': + return result.sum() + elif reduction == 'batchmean' and result.dim() != 0: + return result.sum() / result.size(0) + return result + + +def nlllossNd_reference(input, target, weight=None, ignore_index=-100, + reduction='mean'): + assert input.dim() >= 3 + N = input.size(0) + C = input.size(1) + out_size = (N,) + input.size()[2:] + output = torch.zeros(out_size).type_as(input) + + if weight is None: + weight = torch.ones(C).type_as(input) + total_weight = 0 + for tup in product(*[range(size) for size in out_size]): + t_nx = target[tup] + norm = 0. 
if ignore_index == t_nx else weight[t_nx].item() + input_index = list(tup) + input_index.insert(1, t_nx) + output[tup] = -input[tuple(input_index)] * norm + total_weight += norm + + if reduction == 'mean': + return output.sum() / total_weight + elif reduction == 'sum': + return output.sum() + return output + + +def cross_entropy_loss_prob_target_reference(input, target, weight=None, reduction='mean', + label_smoothing=0.0): + assert input.dim() >= 2 + + input = torch.log_softmax(input, 1) + C = input.size(1) + if weight is None: + weight = torch.ones(C).type_as(input) + weight = weight.view(1, C, *(1 for _ in input.shape[2:])) + + if label_smoothing > 0.0: + assert label_smoothing <= 1.0 + target = (target * (1 - label_smoothing) + label_smoothing / C) + + output = -(input * target * weight).sum(dim=1) + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def cross_entropy_loss_indices_target_reference(input, target, weight=None, ignore_index=-100, + reduction='mean', label_smoothing=0.0): + log_softmax_input = torch.log_softmax(input, 1) + nllloss = F.nll_loss( + log_softmax_input, + target, + weight, + ignore_index=ignore_index, + reduction=reduction) + + if label_smoothing == 0.0: + return nllloss + + assert 0.0 < label_smoothing <= 1.0 + + input = torch.log_softmax(input, 1) + C = input.size(1) + if weight is not None: + input = input * weight.view(1, C, *(1 for _ in input.shape[2:])) + + smooth_loss = -torch.sum(input, 1) + + ignore_mask = target == ignore_index + smooth_loss.masked_fill_(ignore_mask, 0.0) + + if reduction == 'mean': + if weight is not None: + # TODO: This code can path can be removed if #61309 is resolved + # loss is normalized by the weights to be consistent with nll_loss_nd + ret = torch.sum(smooth_loss) / weight.gather(0, target.masked_select(ignore_mask.logical_not()).flatten()).sum() + else: + ret = torch.mean(smooth_loss.masked_select(ignore_mask.logical_not())) + elif reduction == 'sum': + ret = torch.sum(smooth_loss) + else: + ret = smooth_loss + + return (1 - label_smoothing) * nllloss + ret * (label_smoothing / C) + + +def cross_entropy_loss_reference(input, target, weight=None, ignore_index=-100, reduction='mean', + label_smoothing=0.0): + if input.shape == target.shape: + return cross_entropy_loss_prob_target_reference( + input, + target, + weight=weight, + reduction=reduction, + label_smoothing=label_smoothing) + else: + return cross_entropy_loss_indices_target_reference( + input, target, weight=weight, reduction=reduction, + ignore_index=ignore_index, label_smoothing=label_smoothing + ) + + +def nllloss_reference(input, target, weight=None, ignore_index=-100, + reduction='mean'): + + def nll_loss_helper(input, target, weight, ignore_index): + if target == ignore_index: + return (0, 0) + norm = 1 if weight is None else weight[target] + result = -input[target] * norm + return (result, norm) + + losses_and_weights = [nll_loss_helper(i, t, weight, ignore_index) + for i, t in zip(input, target)] + losses, weights = zip(*losses_and_weights) + losses_tensor = input.new_tensor(losses) + if reduction == 'mean': + return sum(losses_tensor) / sum(weights) + elif reduction == 'sum': + return sum(losses_tensor) + else: + return losses_tensor + + +def smoothl1loss_reference(input, target, reduction='mean', beta=1.0): + abs_diff = (input - target).abs() + ge_beta_mask = (abs_diff >= beta).type_as(abs_diff) + lt_beta_mask = (abs_diff < beta).type_as(abs_diff) + # when beta <= 0 we should just use l1_loss 
+ if beta == 0: + output = abs_diff + else: + output = ge_beta_mask * (abs_diff - 0.5 * beta) + lt_beta_mask * 0.5 * (abs_diff ** 2) / beta + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def huberloss_reference(input, target, reduction='mean', delta=1.0): + abs_diff = (input - target).abs() + ge_delta_mask = (abs_diff >= delta) + lt_delta_mask = (abs_diff < delta) + output = ge_delta_mask * delta * (abs_diff - 0.5 * delta) + lt_delta_mask * 0.5 * (abs_diff ** 2) + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def _multilabelmarginloss_reference(input, target): + targets = [] + for target_index in target: + if target_index < 0: + break + targets.append(target_index) + + sum = 0 + for target_index in targets: + for i in range(0, len(input)): + if i not in targets: + sum += max(0, 1 - input[target_index] + input[i]) + + return sum + + +def multilabelmarginloss_reference(input, target, reduction='mean'): + # make everything 2-dimensional + input_dim = input.dim() + if input.dim() < 2: + assert target.dim() < 2 + input = input.unsqueeze(0) if input.dim() == 1 else input.unsqueeze(0).unsqueeze(0) + target = target.unsqueeze(0) if target.dim() == 1 else target.unsqueeze(0).unsqueeze(0) + + n = input.size(0) + dim = input.size(1) + output = input.new(n).zero_() + for i in range(0, n): + output[i] = _multilabelmarginloss_reference(input[i], target[i]) + + if reduction == 'mean': + return output.mean() / dim + elif reduction == 'sum': + return output.sum() / dim + elif input_dim < 2: + # we know we have (1, C) X (1, C) -> (1,), so squeeze will get us + # back to correct dimensionality + return output.squeeze() / dim + else: + return output / dim + + +def hingeembeddingloss_reference(input, target, margin=1.0, reduction='mean'): + margin_clamp = (margin - input).clamp(min=0).type_as(input) + output = torch.where(target == 1, input, margin_clamp) + + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def softmarginloss_reference(input, target, reduction='mean'): + output = (1 + (-input * target).exp()).log() + + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def _multimarginloss_reference(input, target_idx, p, margin, weight): + if weight is None: + weight = input.new(len(input)).fill_(1) + + output = 0 + for i in range(0, len(input)): + if i != target_idx: + output += weight[target_idx] * (max(0, (margin - input[target_idx] + input[i])) ** p) + return output + + +def multimarginloss_reference(input, target, p=1, margin=1, weight=None, reduction='mean'): + if input.dim() < 2: + input = input.unsqueeze(0) if input.dim() == 1 else input.unsqueeze(0).unsqueeze(0) + + target_dim = target.dim() + if target.dim() == 0: + target = target.unsqueeze(0) + + n = input.size(0) + dim = input.size(1) + output = input.new(n) + for x in range(0, n): + output[x] = _multimarginloss_reference(input[x], target[x], p, margin, weight) + + if reduction == 'mean': + return output.mean() / dim + elif reduction == 'sum': + return output.sum() / dim + elif target_dim == 0: + return output.squeeze(0) / dim + return output / dim + + +def cosineembeddingloss_reference(input1, input2, target, margin=0, reduction='mean'): + def _cos(a, b): + cos = a.new(a.size(0)) + for i in range(0, a.size(0)): + cos[i] = (a[i] * b[i]).sum() / ((((a[i] * a[i]).sum() + 1e-12) 
* ((b[i] * b[i]).sum() + 1e-12)) ** 0.5) + return cos + + output = torch.where(target == 1, 1 - _cos(input1, input2), (_cos(input1, input2) - margin).clamp(min=0)) + + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def tripletmarginloss_reference(anchor, positive, negative, margin=1.0, p=2, eps=1e-6, swap=False, + reduction='mean'): + d_p = torch.pairwise_distance(anchor, positive, p, eps) + d_n = torch.pairwise_distance(anchor, negative, p, eps) + if swap: + d_s = torch.pairwise_distance(positive, negative, p, eps) + d_n = torch.min(d_n, d_s) + + output = torch.clamp(margin + d_p - d_n, min=0.0) + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def marginrankingloss_reference(input1, input2, target, margin=0, reduction='mean'): + output = (-target * (input1 - input2) + margin).clamp(min=0) + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +# this directly follows Graves et al's paper, in contrast to the production implementation, it does not use log-space +def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean'): + input_lengths = torch.as_tensor(input_lengths, dtype=torch.long) + target_lengths = torch.as_tensor(target_lengths, dtype=torch.long) + dt = log_probs.dtype + log_probs = log_probs.double() # we need the accuracy as we are not in logspace + targets = targets.long() + cum_target_lengths = target_lengths.cumsum(0) + losses = [] + for i in range(log_probs.size(1)): + input_length = input_lengths[i].item() + target_length = target_lengths[i].item() + cum_target_length = cum_target_lengths[i].item() + targets_prime = targets.new_full((2 * target_length + 1,), blank) + if targets.dim() == 2: + targets_prime[1::2] = targets[i, :target_length] + else: + targets_prime[1::2] = targets[cum_target_length - target_length:cum_target_length] + probs = log_probs[:input_length, i].exp() + alpha = log_probs.new_zeros((target_length * 2 + 1,)) + alpha[0] = probs[0, blank] + alpha[1] = probs[0, targets_prime[1]] + mask_third = (targets_prime[:-2] != targets_prime[2:]) + for t in range(1, input_length): + alpha_next = alpha.clone() + alpha_next[1:] += alpha[:-1] + alpha_next[2:] += torch.where(mask_third, alpha[:-2], alpha.new_zeros(1)) + alpha = probs[t, targets_prime] * alpha_next + losses.append(-alpha[-2:].sum().log()[None]) + output = torch.cat(losses, 0) + if reduction == 'mean': + output = (output / target_lengths.to(dtype=output.dtype, device=output.device)).mean() + elif reduction == 'sum': + output = output.sum() + output = output.to(dt) + return output + + +loss_reference_fns: Dict['str', Callable] = { + 'KLDivLoss': kldivloss_reference, + 'KLDivLoss_log_target': partial(kldivloss_reference, log_target=True), + 'NLLLoss': nllloss_reference, + 'NLLLossNd': nlllossNd_reference, + 'SmoothL1Loss': smoothl1loss_reference, + 'HuberLoss': huberloss_reference, + 'MultiLabelMarginLoss': multilabelmarginloss_reference, + 'HingeEmbeddingLoss': hingeembeddingloss_reference, + 'SoftMarginLoss': softmarginloss_reference, + 'MultiMarginLoss': multimarginloss_reference, + 'CosineEmbeddingLoss': cosineembeddingloss_reference, + 'TripletMarginLoss': tripletmarginloss_reference, + 'MarginRankingLoss': marginrankingloss_reference, + 'CTCLoss': ctcloss_reference, + 'CrossEntropyLoss': cross_entropy_loss_reference +} + + +criterion_tests = [] + + +def 
single_batch_reference_criterion_fn(*args): + """Reference function for criterion supporting no batch dimensions. + + The criterion is passed the input and target in batched form with a single item. + The output is squeezed to compare with the no-batch input. + """ + criterion = args[-1] + + def unsqueeze_inp(inp): + if isinstance(inp, (list, tuple)): + return [t.unsqueeze(0) for t in inp] + return inp.unsqueeze(0) + + def flatten(xs): + result = [] + if isinstance(xs, (list, tuple)): + for x in xs: + result.extend(flatten(x)) + else: + result.append(xs) + return result + + single_batch_input_args = flatten([unsqueeze_inp(input) for input in args[:-1]]) + + output = criterion(*single_batch_input_args) + reduction = get_reduction(criterion) + + if reduction == 'none': + return output.squeeze(0) + # reduction is 'sum' or 'mean' which results in a scalar + return output + + +# Check that regression criterion work with no batch dimensions +regression_criterion_no_batch = [ + 'L1Loss', 'MSELoss', 'PoissonNLLLoss', 'HuberLoss', 'SmoothL1Loss' +] +reductions = ['none', 'mean', 'sum'] +for name, reduction in product(regression_criterion_no_batch, reductions): + regression_test_info = dict( + fullname=f"{name}_no_batch_dim_{reduction}", + constructor=lambda *args, name=name: getattr(nn, name)(reduction=reduction), + input_size=(3, ), + target_size=(3, ), + reference_fn=single_batch_reference_criterion_fn, + test_cpp_api_parity=False, + default_dtype=torch.double, + ) + criterion_tests.append(regression_test_info) + + +for reduction in reductions: + regression_test_info = dict( + fullname=f"KLDivLoss_no_batch_dim_{reduction}", + constructor=lambda: nn.KLDivLoss(reduction=reduction), + input_fn=lambda: torch.rand((3,)).log(), + target_fn=lambda: torch.rand((3,)), + reference_fn=single_batch_reference_criterion_fn, + test_cpp_api_parity=False, + default_dtype=torch.double, + ) + criterion_tests.append(regression_test_info) + + +# Check that classification criterion work with no batch dimensions +# List of tuples of (name, input_fn, target_fn) +classification_criterion_no_batch = [ + ( + 'BCELoss', + lambda: torch.sigmoid(torch.randn(9, dtype=torch.double)), + lambda: torch.randn(9, dtype=torch.double).gt(0).to(torch.double) + ), + ('BCEWithLogitsLoss', lambda: torch.randn(9, dtype=torch.double), lambda: torch.randn(9, dtype=torch.double)), + ('HingeEmbeddingLoss', lambda: torch.randn(9, dtype=torch.double), lambda: torch.tensor([-1, 1, 1] * 3)), + ('MultiLabelMarginLoss', lambda: torch.randn(4, dtype=torch.double), lambda: torch.tensor([3, 0, -1, 1])), + ('SoftMarginLoss', lambda: torch.randn(9, dtype=torch.double), lambda: torch.tensor([-1, 1, 1] * 3)), + ('NLLLoss', lambda: F.log_softmax(torch.randn(3, dtype=torch.double), dim=0), lambda: torch.tensor(1)), + ( + 'CosineEmbeddingLoss', + lambda: (torch.randn(9, dtype=torch.double), torch.randn(9, dtype=torch.double)), + lambda: torch.tensor(1, dtype=torch.double) + ), + # For MarginRankingLoss, input_fn : (x1, x2) and target_fn : target + ('MarginRankingLoss', lambda: (torch.randn(()), torch.randn(())), lambda: torch.randn(()).sign()), + # For TripletMarginLoss, input_fn : (anchor, positive) and target_fn : negative + ( + 'TripletMarginLoss', + lambda: (torch.randn(9, dtype=torch.double), torch.randn(9, dtype=torch.double)), + lambda: torch.randn(9, dtype=torch.double) + ), + ('MultiLabelSoftMarginLoss', lambda: torch.randn(9, dtype=torch.double), lambda: torch.randn(9)), +] +classification_criterion_no_batch_extra_info: Dict[str, dict] = { + 
'MultiLabelMarginLoss': {'check_gradgrad': False}, +} +# TODO : Fix these discrepancies +classification_cpp_parity = { + 'BCELoss': False, + 'BCEWithLogitsLoss': False, + 'HingeEmbeddingLoss': False, + 'NLLLoss': False, + 'SoftMarginLoss': False, +} +reductions = ['none', 'mean', 'sum'] +for (name, input_fn, target_fn), reduction in product(classification_criterion_no_batch, + reductions): + classification_test_info = dict( + fullname=f"{name}_no_batch_dim_{reduction}", + constructor=lambda *args, name=name: getattr(nn, name)(reduction=reduction), + input_fn=lambda f=input_fn: f(), + target_fn=lambda f=target_fn: f(), + reference_fn=single_batch_reference_criterion_fn, + test_cpp_api_parity=True, + has_parity=classification_cpp_parity.get(name, True) + ) + extra_info = classification_criterion_no_batch_extra_info.get(name, {}) + classification_test_info.update(extra_info) + criterion_tests.append(classification_test_info) + + +class NNTestCase(TestCase): + + # _forward is defined in classes inheriting from NNTestCase + @abstractmethod + def _forward(self, *args, **kwargs): + raise NotImplementedError + + @abstractmethod + def _get_parameters(self, module: nn.Module) -> Tuple[List[nn.Parameter], List[nn.Parameter]]: + raise NotImplementedError + + @abstractmethod + def _zero_grad_parameters(self, module: nn.Module) -> None: + raise NotImplementedError + + @abstractmethod + def _backward(self, module: nn.Module, + input: _TensorOrTensors, output: torch.Tensor, + grad_output: Union[torch.Tensor, Sequence[torch.Tensor]], + create_graph: bool = False): + raise NotImplementedError + + def _jacobian(self, input, num_out): + if isinstance(input, tuple): + return tuple(self._jacobian(elem, num_out) for elem in input) + elif isinstance(input, list): + return [self._jacobian(elem, num_out) for elem in input] + else: + return torch.zeros(input.nelement(), num_out) + + def _flatten_tensors(self, x): + if isinstance(x, torch.Tensor): + if x.is_sparse: + return x.to_dense().view(-1) + else: + return x.view(-1) + else: + return tuple(self._flatten_tensors(a) for a in x) + + def _zero_grad_input(self, input): + if isinstance(input, torch.Tensor): + if input.requires_grad and input.grad is not None: + input.grad.zero_() + input.grad.detach_() + else: + for i in input: + self._zero_grad_input(i) + + def _analytical_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True, jacobian_parameters=True): + output = self._forward(module, input) + output_size = output.nelement() + + if jacobian_input: + jacobian_inp = self._jacobian(input, output_size) + flat_jacobian_input = list(_iter_tensors(jacobian_inp)) + + if jacobian_parameters: + num_param = sum(p.numel() for p in self._get_parameters(module)[0]) + jacobian_param = torch.zeros(num_param, output_size) + + for i in range(output_size): + param, d_param = self._get_parameters(module) + # make non grad zeros + d_param = [torch.zeros_like(p) if d is None else d for (p, d) in zip(param, d_param)] + + d_out = torch.zeros_like(output) + flat_d_out = d_out.view(-1) + flat_d_out[i] = 1 + + if jacobian_parameters: + self._zero_grad_parameters(module) + # Tensors will accumulate gradient from multiple steps + if jacobian_input: + self._zero_grad_input(input) + d_input = self._backward(module, input, output, d_out) + + if jacobian_input: + for jacobian_x, d_x in zip(flat_jacobian_input, _iter_tensors(d_input)): + jacobian_x[:, i] = d_x.contiguous().view(-1) + if jacobian_parameters: + jacobian_param[:, i] = torch.cat(self._flatten_tensors(d_param), 0) + + res: 
Tuple[torch.Tensor, ...] = tuple() + if jacobian_input: + res += jacobian_inp, + if jacobian_parameters: + res += jacobian_param, + + return res + + def _numerical_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True, jacobian_parameters=True): + def fw(*input): + return self._forward(module, input).detach() + + res: Tuple[torch.Tensor, ...] = tuple() + if jacobian_input: + res += _get_numerical_jacobian(fw, input, eps=1e-6), + if jacobian_parameters: + param, _ = self._get_parameters(module) + to_cat = [] + for p in param: + jacobian = _get_numerical_jacobian(fw, input, target=p, eps=1e-6) + # get_numerical_jacobian returns a list of tuples but we require a tensor + to_cat.append(jacobian[0][0]) + res += (torch.cat(to_cat, 0),) + return res + + def check_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True): + jacobian_parameters = bool(self._get_parameters(module)[0]) + analytical = self._analytical_jacobian(module, input, jacobian_input, jacobian_parameters) + numerical = self._numerical_jacobian(module, input, jacobian_input, jacobian_parameters) + analytical_t = list(_iter_tensors(analytical)) + numerical_t = list(_iter_tensors(numerical)) + + differences = [] + for a, n in zip(analytical_t, numerical_t): + if a.numel() != 0: + differences.append(a.add(n, alpha=-1).abs().max()) + # TODO: compare structure (ensure analytic jacobian has correct shape) + if len(differences) > 0: + self.assertLessEqual(max(differences), PRECISION) # type: ignore[type-var] + + +class TestBase: + + _required_arg_names = {'constructor_args', 'input', 'extra_args'} + + def __init__(self, constructor, desc='', reference_fn=None, fullname=None, **kwargs): + self.desc = desc + self.fullname = fullname + self.constructor = constructor + self.reference_fn = reference_fn + for name in self._required_arg_names: + if name not in kwargs and name + '_fn' not in kwargs and name + '_size' not in kwargs: + if name in {'constructor_args', 'extra_args'}: + kwargs[name] = tuple() + else: + raise ValueError("{}: Specify {} by a value, a function to generate it, or its size!" 
+ .format(self.get_name(), name)) + self._extra_kwargs = kwargs + self._arg_cache = {} + + def get_name(self): + if self.fullname is not None: + return 'test_' + self.fullname + + test_name = 'test_' + self.constructor.__name__ + if self.desc: + test_name += '_' + self.desc + return test_name + + def _unpack(self, value): + if isinstance(value, torch.Tensor): + return value + elif is_iterable(value): + return type(value)(self._unpack(v) for v in value) + else: + return value + + @property + def constructor_args(self): + return self._get_arg('constructor_args', True) + + @property + def extra_args(self): + return self._get_arg('extra_args', True) + + def _get_arg(self, name, unpack): + assert name in self._required_arg_names + + if name not in self._arg_cache: + fn_name = name + '_fn' + size_name = name + '_size' + + if name in self._extra_kwargs: + self._arg_cache[name] = self._extra_kwargs[name] + elif fn_name in self._extra_kwargs: + self._arg_cache[name] = self._extra_kwargs[fn_name]() + else: + assert size_name in self._extra_kwargs, \ + f"Missing `{name}`, `{size_name}` or `{fn_name}` for {self.get_name()}" + + def map_tensor_sizes(sizes): + if isinstance(sizes, list): + return [map_tensor_sizes(s) for s in sizes] + elif isinstance(sizes, torch.Tensor): + return sizes.double() + else: + return torch.randn(sizes) + + self._arg_cache[name] = map_tensor_sizes(self._extra_kwargs[size_name]) + + return self._unpack(self._arg_cache[name]) if unpack else self._arg_cache[name] + + def _get_input(self, unpack=True): + return self._get_arg('input', unpack) + + def __call__(self, test_case): + raise NotImplementedError + + +class ModuleTest(TestBase): + + @abstractmethod + def _do_test(self, test_case: Any, module: nn.Module, input: Any) -> Any: + raise NotImplementedError + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.jacobian_input = kwargs.get('jacobian_input', True) + self.should_test_cuda = kwargs.get('test_cuda', True) + self.should_test_pickle = kwargs.get('pickle', True) + self.check_gradgrad = kwargs.get('check_gradgrad', True) + self.FIXME_no_cuda_gradgrad_comparison = \ + kwargs.get('FIXME_no_cuda_gradgrad_comparison', False) + self.precision = kwargs.get('precision', 2e-4) + self.check_forward_only = kwargs.get('check_forward_only', False) + self.default_dtype = kwargs.get('default_dtype', None) + if self.default_dtype is None: + self.default_dtype = torch.get_default_dtype() + + def __call__(self, test_case): + with set_default_dtype(self.default_dtype): + module = self.constructor(*self.constructor_args) + input = self._get_input() + + if self.reference_fn is not None: + out = test_case._forward(module, input) + ref_input = deepcopy(input) + ref_module = deepcopy(module) + expected_out = self.reference_fn(ref_input, test_case._get_parameters(module)[0], ref_module) + test_case.assertEqual(out, expected_out, exact_dtype=False) + if self.check_forward_only: + return + self.test_noncontig(test_case, module, input) + + if self.should_test_pickle: + # TODO: do this with in-memory files as soon as torch.save will support it + with tempfile.TemporaryFile() as f: + test_case._forward(module, input) + torch.save(module, f) + f.seek(0) + module_copy = torch.load(f) + test_case.assertEqual(test_case._forward(module, input), test_case._forward(module_copy, input)) + + self._do_test(test_case, module, input) + + def noncontiguize(self, obj): + if isinstance(obj, list): + return [self.noncontiguize(o) for o in obj] + elif isinstance(obj, tuple): + return 
tuple(self.noncontiguize(o) for o in obj) + tensor = obj + ndim = tensor.dim() + # Always making only the last dimension noncontiguous is easy to hide + # bugs because .view(-1) will still work. So try to find a dim with size + # > 1 and make that non-contiguous, i.e., stack + select on the + # dimension directly after that. + dim = ndim + for d in range(ndim): + if tensor.size(d) > 1: + dim = d + 1 + break + noncontig = torch.stack([torch.empty_like(tensor), tensor], dim).select(dim, 1).detach() + assert noncontig.numel() == 1 or noncontig.numel() == 0 or not noncontig.is_contiguous() + noncontig.requires_grad = tensor.requires_grad + return noncontig + + def test_noncontig(self, test_case, module, input): + # check no scalars, can't make non-contig + if isinstance(input, torch.Tensor) and input.dim() == 0: + return + if any(i.dim() == 0 for i in input if isinstance(i, torch.Tensor)): + return + + test_case._zero_grad_parameters(module) + test_case._zero_grad_input(input) + with freeze_rng_state(): + output = test_case._forward(module, input) + if getattr(module, "return_indices", False): + output = output[0] + grad_output = output.new(output.shape).normal_() + output = output.clone() + d_input = deepcopy(test_case._backward(module, input, output, grad_output)) + d_param = deepcopy(test_case._get_parameters(module)[1]) + + nc_input = self.noncontiguize(input) + nc_grad_output = self.noncontiguize(grad_output) + for contig_i, contig_g in product((True, False), repeat=2): + i = input if contig_i else nc_input + # Some ops, e.g., nn.Flatten, return gradient that shares + # storage with the grad_output. Hence we copy here. + go = deepcopy(grad_output if contig_g else nc_grad_output) + test_case._zero_grad_parameters(module) + test_case._zero_grad_input(i) + with freeze_rng_state(): + out = test_case._forward(module, i) + if getattr(module, "return_indices", False): + out = out[0] + grad = test_case._backward(module, i, out, go) + + test_case.assertEqual(out, output) + test_case.assertEqual(grad, d_input, atol=1e-4, rtol=0) + test_case.assertEqual(test_case._get_parameters(module)[1], d_param) + + def test_cuda(self, test_case): + if not TEST_CUDA or not self.should_test_cuda: + raise unittest.SkipTest('Excluded from CUDA tests') + + with set_default_dtype(self.default_dtype): + cpu_input = self._get_input() + + type_map = {torch.double: torch.float} + cpu_input_tuple = cpu_input if isinstance(cpu_input, tuple) else (cpu_input,) + + is_any_input_complex = any(isinstance(t, torch.Tensor) and t.dtype.is_complex for t in cpu_input_tuple) + + gpu_input_tuple = to_gpu(cpu_input_tuple, type_map=type_map) + + cpu_module = self.constructor(*self.constructor_args) + gpu_module = self.constructor(*self.constructor_args).float().cuda() + cpu_param = test_case._get_parameters(cpu_module) + gpu_param = test_case._get_parameters(gpu_module) + for cpu_p, gpu_p in zip(cpu_param[0], gpu_param[0]): + gpu_p.data.copy_(cpu_p) + + test_case._zero_grad_input(cpu_input_tuple) + test_case._zero_grad_input(gpu_input_tuple) + test_case._zero_grad_parameters(cpu_module) + test_case._zero_grad_parameters(gpu_module) + cpu_output = test_case._forward(cpu_module, cpu_input_tuple) + gpu_output = test_case._forward(gpu_module, gpu_input_tuple) + if getattr(cpu_module, "return_indices", False): + cpu_output = cpu_output[0] + gpu_output = gpu_output[0] + test_case.assertEqual(cpu_output, gpu_output, atol=self.precision, rtol=0, exact_dtype=False) + + # Run backwards on CPU and GPU and compare results + for _ in range(5): + 
cpu_gradOutput = cpu_output.clone().normal_() + gpu_gradOutput = cpu_gradOutput.type_as(gpu_output) + cpu_gradInput = test_case._backward(cpu_module, cpu_input_tuple, cpu_output, cpu_gradOutput) + gpu_gradInput = test_case._backward(gpu_module, gpu_input_tuple, gpu_output, gpu_gradOutput) + test_case.assertEqual(cpu_gradInput, gpu_gradInput, atol=self.precision, rtol=0, exact_dtype=False) + for cpu_d_p, gpu_d_p in zip(cpu_param[1], gpu_param[1]): + test_case.assertEqual(cpu_d_p, gpu_d_p, atol=self.precision, rtol=0) + + # Run double-backwards on CPU and GPU and compare results + if self.check_gradgrad and not self.FIXME_no_cuda_gradgrad_comparison: + cpu_output = cpu_module(*cpu_input_tuple) + gpu_output = gpu_module(*gpu_input_tuple) + if getattr(cpu_module, "return_indices", False): + cpu_output = cpu_output[0] + gpu_output = gpu_output[0] + + cpu_gradOutput = torch.randn_like(cpu_output, requires_grad=True) + gpu_gradOutput = cpu_gradOutput.type_as(gpu_output).detach() + gpu_gradOutput.requires_grad = True + + cpu_gradInputs = torch.autograd.grad( + cpu_output, + cpu_input_tuple + tuple(cpu_module.parameters()), + cpu_gradOutput, + create_graph=True) + gpu_gradInputs = torch.autograd.grad( + gpu_output, + gpu_input_tuple + tuple(gpu_module.parameters()), + gpu_gradOutput, + create_graph=True) + + for cpu_d_i, gpu_d_i in zip(cpu_gradInputs, gpu_gradInputs): + test_case.assertEqual(cpu_d_i, gpu_d_i, atol=self.precision, rtol=0, exact_dtype=False) + + # We mix output into the second backwards computation so that + # torch.autograd.grad doesn't complain that some inputs + # are unreachable (which can happen if you differentiate + # only on the gradient. + if is_any_input_complex: + outputs_cpu = cpu_output.sum().abs() + sum(x.sum().abs() for x in cpu_gradInputs) + outputs_gpu = gpu_output.sum().abs() + sum(x.sum().abs() for x in gpu_gradInputs) + else: + outputs_cpu = cpu_output.sum() + sum(x.sum() for x in cpu_gradInputs) + outputs_gpu = gpu_output.sum() + sum(x.sum() for x in gpu_gradInputs) + + cpu_gg = torch.autograd.grad( + outputs_cpu, + cpu_input_tuple + (cpu_gradOutput,) + tuple(cpu_module.parameters()), + retain_graph=True) + gpu_gg = torch.autograd.grad( + outputs_gpu, + gpu_input_tuple + (gpu_gradOutput,) + tuple(gpu_module.parameters()), + retain_graph=True) + test_case.assertEqual(cpu_gradInput, gpu_gradInput, atol=self.precision, rtol=0, exact_dtype=False) + for cpu_d_p, gpu_d_p in zip(cpu_gg, gpu_gg): + test_case.assertEqual(cpu_d_p, gpu_d_p, atol=self.precision, rtol=0, exact_dtype=False) + + self.test_noncontig(test_case, gpu_module, gpu_input_tuple) + + +class InputVariableMixin: + def _get_input(self): + input = TestBase._get_input(self, False) # type: ignore[arg-type] + + def map_variables(i): + if isinstance(i, torch.Tensor): + if i.is_floating_point() or i.is_complex(): + i.requires_grad = True + return i + else: + return type(i)(map_variables(elem) for elem in i) + + return map_variables(input) + + +class NewModuleTest(InputVariableMixin, ModuleTest): # type: ignore[misc] + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.cudnn = kwargs.get('cudnn', False) + self.check_inplace = kwargs.get('check_inplace', False) + self.check_gradgrad = kwargs.get('check_gradgrad', True) + self.skip_double = kwargs.get('skip_double', False) + self.skip_half = kwargs.get('skip_half', False) + self.with_tf32 = kwargs.get('with_tf32', False) + self.tf32_precision = kwargs.get('tf32_precision', 0.001) + self.test_cpu = kwargs.get('test_cpu', True) + 
self.has_sparse_gradients = kwargs.get('has_sparse_gradients', False) + self.check_batched_grad = kwargs.get('check_batched_grad', True) + self.gradcheck_fast_mode = kwargs.get('gradcheck_fast_mode', None) + self.supports_forward_ad = kwargs.get('supports_forward_ad', False) + self.supports_fwgrad_bwgrad = kwargs.get('supports_fwgrad_bwgrad', False) + + def _check_gradients(self, test_case, module, input_tuple): + params = tuple(x for x in module.parameters()) + num_inputs = len(input_tuple) + + def fn_to_gradcheck(*inputs_and_params, **kwargs): + assert not kwargs + return test_case._forward(module, inputs_and_params[:num_inputs]) + + # gradcheck doesn't support operators that take in dense inputs but + # return sparse parameters. This only happens in the case of nn.Embedding + # and nn.EmbeddingBag. Instead, we call `self.check_jacobian`, which + # is a slightly different version of gradcheck that can handle this. + if self.has_sparse_gradients: + assert num_inputs == 1 + test_input_jacobian = torch.is_floating_point(input_tuple[0]) + test_case.check_jacobian(module, input_tuple[0], test_input_jacobian) + else: + test_case.assertTrue(gradcheck(fn_to_gradcheck, input_tuple + params, + check_batched_grad=self.check_batched_grad, + fast_mode=self.gradcheck_fast_mode, + check_forward_ad=self.supports_forward_ad)) + + if self.check_gradgrad: + test_case.assertTrue(gradgradcheck(fn_to_gradcheck, input_tuple + params, + check_batched_grad=self.check_batched_grad, + fast_mode=self.gradcheck_fast_mode, + check_fwd_over_rev=self.supports_fwgrad_bwgrad)) + + def _do_test(self, test_case, module, input): + num_threads = torch.get_num_threads() + torch.set_num_threads(1) + input_tuple = input if isinstance(input, tuple) else (input,) + + self._check_gradients(test_case, module, input_tuple) + + # check if module can be printed + module.__repr__() + + if self.check_inplace: + # check if the inplace variant of the module gives the same result + # as the out-of-place + + # check_inplace doesn't support multiple input tensors, since we don't have any modules + # that modify the inputs in-place and that accept more than one input + assert len(input_tuple) == 1 + input = input_tuple[0] + + module_ip = self.constructor(*self.constructor_args, inplace=True) + + input_version = input._version + with freeze_rng_state(): + output = module(input) + test_case.assertEqual(input._version, input_version) + + input_ip = deepcopy(input) + input_ip_clone = input_ip.clone() + with freeze_rng_state(): + output_ip = module_ip(input_ip_clone) + test_case.assertNotEqual(input_ip_clone._version, input_version) + test_case.assertEqual(output, output_ip) + grad = output.data.clone().normal_() + if input.grad is not None: + with torch.no_grad(): + input.grad.zero_() + if input_ip.grad is not None: + with torch.no_grad(): + input_ip.grad.zero_() + output.backward(grad) + output_ip.backward(grad) + test_case.assertEqual(input.grad, input_ip.grad) + + def assert_module_parameters_are(tensor_type, device_id=None): + for p in module.parameters(): + test_case.assertIsInstance(p, tensor_type) + if device_id is not None: + test_case.assertEqual(p.get_device(), device_id) + + if all(isinstance(t, torch.LongTensor) for t in input_tuple) and TEST_CUDA: + # check that cuda() moves module parameters to correct GPU device, + # and that float() casts parameters correctly + input_tuple = tuple(t.cuda() for t in input_tuple) + module.float().cuda() + module(*input_tuple) + assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: 
ignore[attr-defined] + + if torch.cuda.device_count() > 1: + input_tuple = tuple(t.cuda(1) for t in input_tuple) + module.cuda(1) + with torch.cuda.device(1): + module(*input_tuple) + assert_module_parameters_are(torch.cuda.FloatTensor, 1) # type: ignore[attr-defined] + else: + # check that float()/double() casters work correctly + def to_type(tensor, real, complex): + if tensor.is_complex(): + return tensor.to(complex) + elif tensor.is_floating_point(): + return tensor.to(real) + else: + return tensor + + def to_half(x): + # TODO: torch.complex32 when properly supported + return to_type(x, torch.float16, None) + + def to_single(x): + return to_type(x, torch.float32, torch.complex64) + + def to_double(x): + return to_type(x, torch.float64, torch.complex128) + + # to float + input_tuple = tuple(to_single(t) for t in input_tuple) + module.float() + module(*input_tuple) + assert_module_parameters_are(torch.FloatTensor) + + # and back to double + input_tuple = tuple(to_double(t) for t in input_tuple) + module.double() + module(*input_tuple) + assert_module_parameters_are(torch.DoubleTensor) + + if TEST_CUDA and self.should_test_cuda: + # check that cuda() moves module parameters to correct GPU device, + # and that float() casts parameters correctly + + # to GPU0 + input_tuple = tuple(to_single(t).cuda() for t in input_tuple) + module.float().cuda() + module(*input_tuple) + assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined] + + # to CPU + input_tuple = tuple(t.cpu() for t in input_tuple) + module.cpu() + module(*input_tuple) + assert_module_parameters_are(torch.FloatTensor) + + # back to GPU0 + input_tuple = tuple(t.cuda() for t in input_tuple) + module.cuda() + module(*input_tuple) + assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined] + + # test that forwards of module runs correctly without cuDNN + if self.cudnn: + with torch.backends.cudnn.flags(enabled=False): + module(*input_tuple) + assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined] + + if torch.cuda.device_count() >= 2: + # test cross-GPU transfer works + # to GPU1 + input_tuple = tuple(t.cuda(1) for t in input_tuple) + module.cuda(1) + with torch.cuda.device(1): + module(*input_tuple) + assert_module_parameters_are(torch.cuda.FloatTensor, 1) # type: ignore[attr-defined] + + if not self.skip_double: + # test double() + input_tuple = tuple(to_double(t).cuda() for t in input_tuple) + module.double().cuda() + module(*input_tuple) + assert_module_parameters_are(torch.cuda.DoubleTensor, 0) # type: ignore[attr-defined] + + # test half() + if not self.skip_half: + input_tuple = tuple(to_half(t).cuda() for t in input_tuple) + module.half().cuda() + module(*input_tuple) + assert_module_parameters_are(torch.cuda.HalfTensor, 0) # type: ignore[attr-defined] + torch.set_num_threads(num_threads) + + def _get_target(self): + return self._get_arg('target', False) + + @property + def constructor_args(self): + return self._get_arg('constructor_args', False) + + +class CriterionTest(InputVariableMixin, TestBase): # type: ignore[misc] + # TODO: check that criterions don't ignore grad_output + + _required_arg_names = TestBase._required_arg_names.union({'target'}) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.should_test_cuda = kwargs.get('test_cuda', True) + self.check_forward_only = kwargs.get('check_forward_only', False) + self.check_gradgrad = kwargs.get('check_gradgrad', True) + self.check_half = 
kwargs.get('check_half', True) + self.check_bfloat16 = kwargs.get('check_bfloat16', False) + self.check_complex = kwargs.get('check_complex', False) + self.test_cpu = kwargs.get('test_cpu', True) + self.with_tf32 = kwargs.get('with_tf32', True) + self.tf32_precision = kwargs.get('tf32_precision', 0.001) + self.check_batched_grad = kwargs.get('check_batched_grad', True) + self.default_dtype = kwargs.get('default_dtype', None) + if self.default_dtype is None: + self.default_dtype = torch.get_default_dtype() + + def __call__(self, test_case): + with set_default_dtype(self.default_dtype): + module = self.constructor(*self.constructor_args) + input = self._get_input() + + # Check that these methods don't raise errors + module.__repr__() + str(module) + + target = self._get_target() + + if self.reference_fn is not None: + out = test_case._forward_criterion(module, input, target, extra_args=self.extra_args) + ref_args = (deepcopy(input), deepcopy(target)) + self.extra_args + (module,) + expected_out = self.reference_fn(*ref_args) + test_case.assertEqual(out, expected_out) + + if self.check_forward_only: + return + + params = tuple(x for x in module.parameters()) + if not isinstance(input, tuple): + inputs = (input,) + params + (target,) + + def apply_fn(input, target, *params): + return module(input, target) + else: + inputs = input + params + (target,) + + def apply_fn(input1, input2, target, *params): # type: ignore[misc] + return module(input1, input2, target) + + gradcheck(apply_fn, inputs, check_batched_grad=self.check_batched_grad) + + if self.check_gradgrad: + gradgradcheck(apply_fn, inputs, check_batched_grad=self.check_batched_grad) + + def test_cuda(self, test_case, dtype, extra_args=None): + def convert_dtype(obj, dtype, requires_grad=False): + if isinstance(obj, torch.Tensor): + return obj.detach().to(dtype=dtype).requires_grad_(requires_grad) + elif isinstance(obj, tuple): + return tuple(convert_dtype(o, dtype, requires_grad) for o in obj) + else: + return obj + + if not TEST_CUDA or not self.should_test_cuda: + raise unittest.SkipTest('Excluded from CUDA tests') + + with set_default_dtype(self.default_dtype): + cpu_input = self._get_input() + cpu_target = self._get_target() + cpu_module = self.constructor(*self.constructor_args) + gpu_module = self.constructor(*self.constructor_args) + + # Convert input, target and module parameters to dtype + cpu_input = convert_dtype(cpu_input, dtype, True) + if cpu_target.is_floating_point() or cpu_target.is_complex(): + cpu_target = convert_dtype(cpu_target, dtype) + cpu_module.type(dtype) + gpu_module.type(dtype) + + # GPU setup + gpu_input = to_gpu(cpu_input) + gpu_target = to_gpu(cpu_target) + gpu_module.cuda() + + # torch.HalfTensor doesn't support most operations, converting back to default + if dtype in {torch.half, torch.bfloat16}: + cpu_input = self._get_input() + cpu_target = self._get_target() + # Loss modules with weights require consistent input/module weight types + cpu_module = self.constructor(*self.constructor_args) + + cpu_output = test_case._forward_criterion(cpu_module, cpu_input, cpu_target, extra_args=extra_args) + gpu_output = test_case._forward_criterion(gpu_module, gpu_input, gpu_target, extra_args=extra_args) + # dtype used to be able to be None, so set precision in this way instead of a precision map + test_case.assertEqual(cpu_output, gpu_output, + atol=1e-1 if dtype in {torch.half, torch.bfloat16} else 4e-4, rtol=0, exact_dtype=False) + + cpu_gradInput = test_case._backward_criterion( + cpu_module, cpu_input, 
cpu_output, cpu_target, extra_args=extra_args) + gpu_gradInput = test_case._backward_criterion( + gpu_module, gpu_input, gpu_output, gpu_target, extra_args=extra_args) + # dtype used to be able to be None, so set precision in this way instead of a precision map + test_case.assertEqual(cpu_gradInput, gpu_gradInput, + atol=1e-1 if dtype in {torch.half, torch.bfloat16} else 4e-4, rtol=0, exact_dtype=False) + + def _get_target(self): + return self._get_arg('target', False) + + @property + def constructor_args(self): + return self._get_arg('constructor_args', False) + + @property + def extra_args(self): + return self._get_arg('extra_args', False) + + +def _test_bfloat16_ops(test_case, op, device, inp_dims=(), prec=1e-2, scale_factor=None): + # fp32 compute + input1 = torch.randn(inp_dims, dtype=torch.float32, device=device, requires_grad=True) + if scale_factor is not None: + input1 = (torch.rand(inp_dims, dtype=torch.bfloat16, device=device) * scale_factor).float().requires_grad_() + out1 = op(input1) + grad_input1 = torch.randn_like(out1, device=device) + out1.backward(grad_input1) + + # bfloat16 compute + op_bfp16 = op.bfloat16() + input2 = input1.detach().bfloat16().requires_grad_() + grad_input2 = grad_input1.bfloat16() + out2 = op_bfp16(input2) + out2.backward(grad_input2) + + test_case.assertEqual(out1, out2, atol=prec, rtol=prec, exact_dtype=False) + test_case.assertEqual(input1.grad.data, input2.grad.data, atol=prec, rtol=prec, exact_dtype=False) + +def _test_module_empty_input(test_case, module, inp, check_size=True, inference=False): + if not inference: + inp.requires_grad_(True) + out = module(inp) + if not inference: + gO = torch.rand_like(out) + out.backward(gO) + if check_size: + test_case.assertEqual(out.size(), inp.size()) + if not inference: + for p in module.parameters(): + if p.requires_grad: + test_case.assertEqual(p.grad, torch.zeros_like(p.grad)) + test_case.assertEqual(inp.grad, torch.zeros_like(inp)) + + +def _create_basic_net(): + class Layer(nn.Module): + def __init__(self): + super().__init__() + self.layer_dummy_param = nn.Parameter(torch.empty(3, 5)) + self.register_buffer('layer_dummy_buf', torch.zeros(1, 3, 3, 7)) + + class Net(nn.Module): + def __init__(self): + super().__init__() + self.l1 = Layer() + self.dummy_param = nn.Parameter(torch.empty(3, 5)) + self.register_buffer('dummy_buf', torch.zeros(7, 3, 3, 1)) + + l = Layer() + n = Net() + s = nn.Sequential(n, n) + + return l, n, s diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_pruning.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_pruning.py new file mode 100644 index 0000000000000000000000000000000000000000..031e4ad9efbd40d7ea3e54cf1693c04d9bb9b838 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_pruning.py @@ -0,0 +1,386 @@ +# mypy: ignore-errors + +# Owner(s): ["module: unknown"] + +from torch.ao.pruning import BaseSparsifier +import torch +import torch.nn.functional as F +from torch import nn + +class ImplementedSparsifier(BaseSparsifier): + def __init__(self, **kwargs): + super().__init__(defaults=kwargs) + + def update_mask(self, module, **kwargs): + module.parametrizations.weight[0].mask[0] = 0 + linear_state = self.state['linear1.weight'] + linear_state['step_count'] = linear_state.get('step_count', 0) + 1 + + +class MockSparseLinear(nn.Linear): + """ + This class is a MockSparseLinear class to check convert functionality. 
+ It is the same as a normal Linear layer, except with a different type, as + well as an additional from_dense method. + """ + @classmethod + def from_dense(cls, mod): + """ + """ + linear = cls(mod.in_features, + mod.out_features) + return linear + + +def rows_are_subset(subset_tensor, superset_tensor) -> bool: + """ + Checks to see if all rows in subset tensor are present in the superset tensor + """ + i = 0 + for row in subset_tensor: + while i < len(superset_tensor): + if not torch.equal(row, superset_tensor[i]): + i += 1 + else: + break + else: + return False + return True + + +class SimpleLinear(nn.Module): + r"""Model with only Linear layers without biases, some wrapped in a Sequential, + some following the Sequential. Used to test basic pruned Linear-Linear fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Linear(7, 5, bias=False), + nn.Linear(5, 6, bias=False), + nn.Linear(6, 4, bias=False), + ) + self.linear1 = nn.Linear(4, 4, bias=False) + self.linear2 = nn.Linear(4, 10, bias=False) + + def forward(self, x): + x = self.seq(x) + x = self.linear1(x) + x = self.linear2(x) + return x + + +class LinearBias(nn.Module): + r"""Model with only Linear layers, alternating layers with biases, + wrapped in a Sequential. Used to test pruned Linear-Bias-Linear fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Linear(7, 5, bias=True), + nn.Linear(5, 6, bias=False), + nn.Linear(6, 3, bias=True), + nn.Linear(3, 3, bias=True), + nn.Linear(3, 10, bias=False), + ) + + def forward(self, x): + x = self.seq(x) + return x + + +class LinearActivation(nn.Module): + r"""Model with only Linear layers, some with bias, some in a Sequential and some following. + Activation functions modules in between each Linear in the Sequential, and each outside layer. + Used to test pruned Linear(Bias)-Activation-Linear fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Linear(7, 5, bias=True), + nn.ReLU(), + nn.Linear(5, 6, bias=False), + nn.Tanh(), + nn.Linear(6, 4, bias=True), + ) + self.linear1 = nn.Linear(4, 3, bias=True) + self.act1 = nn.ReLU() + self.linear2 = nn.Linear(3, 10, bias=False) + self.act2 = nn.Tanh() + + def forward(self, x): + x = self.seq(x) + x = self.linear1(x) + x = self.act1(x) + x = self.linear2(x) + x = self.act2(x) + return x + + +class LinearActivationFunctional(nn.Module): + r"""Model with only Linear layers, some with bias, some in a Sequential and some following. + Activation functions modules in between each Linear in the Sequential, and functional + activationals are called in between each outside layer. + Used to test pruned Linear(Bias)-Activation-Linear fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Linear(7, 5, bias=True), + nn.ReLU(), + nn.Linear(5, 6, bias=False), + nn.ReLU(), + nn.Linear(6, 4, bias=True), + ) + self.linear1 = nn.Linear(4, 3, bias=True) + self.linear2 = nn.Linear(3, 8, bias=False) + self.linear3 = nn.Linear(8, 10, bias=False) + self.act1 = nn.ReLU() + + def forward(self, x): + x = self.seq(x) + x = self.linear1(x) + x = F.relu(x) + x = self.linear2(x) + x = F.relu(x) + x = self.linear3(x) + x = F.relu(x) + return x + + +class SimpleConv2d(nn.Module): + r"""Model with only Conv2d layers, all without bias, some in a Sequential and some following. 
+ Used to test pruned Conv2d-Conv2d fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 32, 3, 1, bias=False), + nn.Conv2d(32, 64, 3, 1, bias=False), + ) + self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False) + self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False) + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = self.conv2d2(x) + return x + + +class Conv2dBias(nn.Module): + r"""Model with only Conv2d layers, some with bias, some in a Sequential and some outside. + Used to test pruned Conv2d-Bias-Conv2d fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 32, 3, 1, bias=True), + nn.Conv2d(32, 32, 3, 1, bias=True), + nn.Conv2d(32, 64, 3, 1, bias=False), + ) + self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=True) + self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False) + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = self.conv2d2(x) + return x + + +class Conv2dActivation(nn.Module): + r"""Model with only Conv2d layers, some with bias, some in a Sequential and some following. + Activation function modules in between each Sequential layer, functional activations called + in-between each outside layer. + Used to test pruned Conv2d-Bias-Activation-Conv2d fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 32, 3, 1, bias=True), + nn.ReLU(), + nn.Conv2d(32, 64, 3, 1, bias=True), + nn.Tanh(), + nn.Conv2d(64, 64, 3, 1, bias=False), + nn.ReLU(), + ) + self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False) + self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=True) + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = F.relu(x) + x = self.conv2d2(x) + x = F.hardtanh(x) + return x + + +class Conv2dPadBias(nn.Module): + r"""Model with only Conv2d layers, all with bias and some with padding > 0, + some in a Sequential and some following. Activation function modules in between each layer. + Used to test that bias is propagated correctly in the special case of + pruned Conv2d-Bias-(Activation)Conv2d fusion, when the second Conv2d layer has padding > 0.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 32, 3, 1, padding=1, bias=True), + nn.ReLU(), + nn.Conv2d(32, 32, 3, 1, bias=False), + nn.ReLU(), + nn.Conv2d(32, 32, 3, 1, padding=1, bias=True), + nn.ReLU(), + nn.Conv2d(32, 32, 3, 1, padding=1, bias=True), + nn.ReLU(), + nn.Conv2d(32, 64, 3, 1, bias=True), + nn.Tanh(), + ) + self.conv2d1 = nn.Conv2d(64, 48, 3, 1, padding=1, bias=True) + self.act1 = nn.ReLU() + self.conv2d2 = nn.Conv2d(48, 52, 3, 1, padding=1, bias=True) + self.act2 = nn.Tanh() + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = self.act1(x) + x = self.conv2d2(x) + x = self.act2(x) + return x + + +class Conv2dPool(nn.Module): + r"""Model with only Conv2d layers, all with bias, some in a Sequential and some following. + Activation function modules in between each layer, Pool2d modules in between each layer. 
+ Used to test pruned Conv2d-Pool2d-Conv2d fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=True), + nn.MaxPool2d(kernel_size=2, stride=2, padding=1), + nn.ReLU(), + nn.Conv2d(32, 64, kernel_size=3, padding=1, bias=True), + nn.Tanh(), + nn.AvgPool2d(kernel_size=2, stride=2, padding=1), + ) + self.conv2d1 = nn.Conv2d(64, 48, kernel_size=3, padding=1, bias=True) + self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1) + self.af1 = nn.ReLU() + self.conv2d2 = nn.Conv2d(48, 52, kernel_size=3, padding=1, bias=True) + self.conv2d3 = nn.Conv2d(52, 52, kernel_size=3, padding=1, bias=True) + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = self.maxpool(x) + x = self.af1(x) + x = self.conv2d2(x) + x = F.avg_pool2d(x, kernel_size=2, stride=2, padding=1) + x = F.relu(x) + x = self.conv2d3(x) + return x + + +class Conv2dPoolFlattenFunctional(nn.Module): + r"""Model with Conv2d layers, all with bias, some in a Sequential and some following, and then a Pool2d + and a functional Flatten followed by a Linear layer. + Activation functions and Pool2ds in between each layer also. + Used to test pruned Conv2d-Pool2d-Flatten-Linear fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 3, kernel_size=3, padding=1, bias=True), + nn.MaxPool2d(kernel_size=2, stride=2, padding=1), + nn.ReLU(), + nn.Conv2d(3, 5, kernel_size=3, padding=1, bias=True), + nn.Tanh(), + nn.AvgPool2d(kernel_size=2, stride=2, padding=1), + ) + self.conv2d1 = nn.Conv2d(5, 7, kernel_size=3, padding=1, bias=True) + self.af1 = nn.ReLU() + self.conv2d2 = nn.Conv2d(7, 11, kernel_size=3, padding=1, bias=True) + self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(11, 13, bias=True) + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1) + x = self.af1(x) + x = self.conv2d2(x) + x = self.avg_pool(x) + x = torch.flatten(x, 1) # test functional flatten + x = self.fc(x) + return x + + +class Conv2dPoolFlatten(nn.Module): + r"""Model with Conv2d layers, all with bias, some in a Sequential and some following, and then a Pool2d + and a Flatten module followed by a Linear layer. + Activation functions and Pool2ds in between each layer also. 
+ Used to test pruned Conv2d-Pool2d-Flatten-Linear fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 3, kernel_size=3, padding=1, bias=True), + nn.MaxPool2d(kernel_size=2, stride=2, padding=1), + nn.ReLU(), + nn.Conv2d(3, 5, kernel_size=3, padding=1, bias=True), + nn.Tanh(), + nn.AvgPool2d(kernel_size=2, stride=2, padding=1), + ) + self.conv2d1 = nn.Conv2d(5, 7, kernel_size=3, padding=1, bias=True) + self.af1 = nn.ReLU() + self.conv2d2 = nn.Conv2d(7, 11, kernel_size=3, padding=1, bias=True) + self.avg_pool = nn.AdaptiveAvgPool2d((2, 2)) + self.flatten = nn.Flatten() + self.fc = nn.Linear(44, 13, bias=True) + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1) + x = self.af1(x) + x = self.conv2d2(x) + x = self.avg_pool(x) + x = self.flatten(x) + x = self.fc(x) + return x + + +class LSTMLinearModel(nn.Module): + """Container module with an encoder, a recurrent module, and a linear.""" + + def __init__( + self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int + ): + super().__init__() + self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers) + self.linear = nn.Linear(hidden_dim, output_dim) + + def forward(self, input): + output, hidden = self.lstm(input) + decoded = self.linear(output) + return decoded, output + + +class LSTMLayerNormLinearModel(nn.Module): + """Container module with an LSTM, a LayerNorm, and a linear.""" + + def __init__( + self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int + ): + super().__init__() + self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers) + self.norm = nn.LayerNorm(hidden_dim) + self.linear = nn.Linear(hidden_dim, output_dim) + + def forward(self, x): + x, state = self.lstm(x) + x = self.norm(x) + x = self.linear(x) + return x, state diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd7b827dde32f0d26380bdbe5766c1eece3da6e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py @@ -0,0 +1,227 @@ +# mypy: ignore-errors + +r"""Importing this file includes common utility methods for checking quantized +tensors and modules. +""" +import numpy as np +import torch +from contextlib import contextmanager +from torch.testing._internal.common_utils import TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_PPC, IS_MACOS, IS_WINDOWS + +supported_qengines = torch.backends.quantized.supported_engines +supported_qengines.remove('none') +# Note: We currently do not run QNNPACK tests on WINDOWS and MACOS as it is flaky. Issue #29326 +# QNNPACK is not supported on PPC +# QNNPACK throws ASAN heap-buffer-overflow error. 
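+# The check below therefore drops 'qnnpack' from supported_qengines on those
+# platforms, so helpers such as override_qengines() further down only iterate
+# over engines expected to be usable in the current environment.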
+if 'qnnpack' in supported_qengines and any([IS_PPC, TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_MACOS, IS_WINDOWS]): + supported_qengines.remove('qnnpack') + +def _conv_output_shape(input_size, kernel_size, padding, stride, dilation, + output_padding=0): + """Computes the output shape given convolution parameters.""" + return np.floor((input_size + 2 * padding - kernel_size - (kernel_size - 1) + * (dilation - 1)) / stride) + 2 * output_padding + 1 + +# Quantization references +def _quantize(x, scale, zero_point, qmin=None, qmax=None, dtype=np.uint8): + """Quantizes a numpy array.""" + if qmin is None: + qmin = np.iinfo(dtype).min + if qmax is None: + qmax = np.iinfo(dtype).max + qx = np.round(x / scale + zero_point).astype(np.int64) + qx = np.clip(qx, qmin, qmax) + qx = qx.astype(dtype) + return qx + + +def _dequantize(qx, scale, zero_point): + """Dequantizes a numpy array.""" + x = (qx.astype(float) - zero_point) * scale + return x + + +def _requantize(x, multiplier, zero_point, qmin=0, qmax=255, qtype=np.uint8): + """Requantizes a numpy array, i.e., intermediate int32 or int16 values are + converted back to given type""" + qx = (x * multiplier).round() + zero_point + qx = np.clip(qx, qmin, qmax).astype(qtype) + return qx + +def _calculate_dynamic_qparams(X, dtype, reduce_range=False, qscheme=torch.per_tensor_affine): + """Calculate the dynamic quantization parameters (scale, zero_point) + according to the min and max element of the tensor""" + assert qscheme in (torch.per_tensor_affine, torch.per_tensor_symmetric) + if qscheme == torch.per_tensor_symmetric: + assert dtype == torch.qint8 + if isinstance(X, torch.Tensor): + X = X.numpy() + if dtype == torch.qint8: + if reduce_range: + qmin, qmax = -64, 63 + else: + qmin, qmax = -128, 127 + else: # dtype == torch.quint8 + if reduce_range: + qmin, qmax = 0, 127 + else: + qmin, qmax = 0, 255 + min_val = X.min() + max_val = X.max() + is_symmetric = (qscheme == torch.per_tensor_symmetric) + if min_val == max_val: + scale = 1.0 + zero_point = 0 + else: + if is_symmetric: + max_val = max(max_val, -min_val) + min_val = -max_val + scale = (max_val - min_val) / (qmax - qmin) + scale = max(scale, np.finfo(np.float32).eps) + zero_point = 0 + else: + max_val = max(max_val, 0.0) + min_val = min(min_val, 0.0) + scale = (max_val - min_val) / (qmax - qmin) + scale = max(scale, np.finfo(np.float32).eps) + zero_point = qmin - round(min_val / scale) + zero_point = max(qmin, zero_point) + zero_point = min(qmax, zero_point) + return [float(scale), int(zero_point)] + +def _calculate_dynamic_per_channel_qparams(X, dtype): + """Calculate the dynamic quantization parameters (scale, zero_point) + according to the min and max element of the tensor""" + if isinstance(X, torch.Tensor): + X = X.numpy() + qmin, qmax = torch.iinfo(dtype).min, torch.iinfo(dtype).max + n_levels = qmax - qmin + scale = np.zeros(X.shape[0], dtype=np.float64) + zero_point = np.zeros(X.shape[0], dtype=np.int64) + for i in range(zero_point.shape[0]): + min_val = X.min() + max_val = X.max() + if min_val == max_val: + scale[i] = 1.0 + zero_point[i] = 0 + else: + max_val = max(max_val, 0.0) + min_val = min(min_val, 0.0) + scale[i] = (max_val - min_val) / n_levels + scale[i] = max(scale[i], np.finfo(np.float32).eps) + zero_point[i] = qmin - round(min_val / scale[i]) + zero_point[i] = max(qmin, zero_point[i]) + zero_point[i] = min(qmax, zero_point[i]) + + return scale, zero_point + +def _snr(x, x_hat): + """Calculates the signal to noise ratio and returns the signal and noise + power, as 
well as the SNR in dB. + If the input is a list/tuple this function is called recursively on each + element. The result will have the same nested structure as the inputs. + + Args: + x, x_hat: Either a tensor or a nested list/tuple of tensors. + Returns: + signal, noise, SNR(in dB): Either floats or a nested list of floats + """ + if isinstance(x, (list, tuple)): + assert len(x) == len(x_hat) + res = [] + for idx in range(len(x)): + res.append(_snr(x[idx], x_hat[idx])) + return res + if x_hat.is_quantized: + x_hat = x_hat.dequantize() + if x.is_quantized: + x = x.dequantize() + noise = (x - x_hat).norm() + if noise == 0: + return 0.0, float('inf'), float('inf') + signal = x.norm() + snr = signal / noise + snr_db = 20 * snr.log10() + return signal, noise, snr_db + +@contextmanager +def override_quantized_engine(qengine): + previous = torch.backends.quantized.engine + torch.backends.quantized.engine = qengine + try: + yield + finally: + torch.backends.quantized.engine = previous + +@contextmanager +def override_cpu_allocator_for_qnnpack(qengine_is_qnnpack): + try: + if qengine_is_qnnpack: + torch._C._set_default_mobile_cpu_allocator() + yield + finally: + if qengine_is_qnnpack: + torch._C._unset_default_mobile_cpu_allocator() + +# TODO: Update all quantization tests to use this decorator. +# Currently for some of the tests it seems to have inconsistent params +# for fbgemm vs qnnpack. +def override_qengines(qfunction): + def test_fn(*args, **kwargs): + for qengine in supported_qengines: + with override_quantized_engine(qengine): + # qfunction should not return anything. + qfunction(*args, **kwargs) + return test_fn + +def qengine_is_fbgemm(): + return torch.backends.quantized.engine == 'fbgemm' +def qengine_is_qnnpack(): + return torch.backends.quantized.engine == 'qnnpack' +def qengine_is_onednn(): + return torch.backends.quantized.engine == 'onednn' +def qengine_is_x86(): + return torch.backends.quantized.engine == 'x86' + +# Helper function used to simulate per-channel fake-quant against any axis +def _permute_to_axis_zero(X, axis): + new_axis_list = list(range(X.dim())) + new_axis_list[axis] = 0 + new_axis_list[0] = axis + y = X.permute(tuple(new_axis_list)) + return y, new_axis_list + +# Reference method for fake quantize +# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64 +def _fake_quantize_per_channel_affine_reference(X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max): + dtype = X.dtype + X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis) + res = torch.zeros_like(X) + + for i in range(X.size()[0]): + res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) + + per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i] + + out = res.permute(tuple(permute_axis_list)) + return out.to(dtype) + +# Reference method for the gradient of the fake quantize operator +# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64 +def _fake_quantize_per_channel_affine_grad_reference(dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max): + dtype = X.dtype + X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis) + Xq = torch.zeros_like(X) + for i in range(X.size()[0]): + Xq[i] = torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i]) + Xq = Xq.permute(tuple(permute_axis_list)) + mask = (Xq >= quant_min) * (Xq <= 
quant_max) + res = torch.zeros_like(dY) + res[mask] = dY[mask] + return res.to(dtype) + +def to_tensor(X, device): + if not isinstance(X, torch.Tensor): + X = torch.tensor(X) + else: + X = X.clone().detach() + return X.to(device=torch.device(device), dtype=torch.float32) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_subclass.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_subclass.py new file mode 100644 index 0000000000000000000000000000000000000000..f6a8ed065cb819d3e52a951db5afe8c9a1edb107 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_subclass.py @@ -0,0 +1,218 @@ +# mypy: ignore-errors + +import torch +from copy import deepcopy +from torch.utils._pytree import tree_map + +# TODO: Move LoggingTensor here. +from torch.testing._internal.logging_tensor import LoggingTensor + + +# Base class for wrapper-style tensors. +class WrapperTensor(torch.Tensor): + @staticmethod + def __new__(cls, *args, **kwargs): + t, kwargs = cls.get_wrapper_properties(*args, **kwargs) + if "size" not in kwargs: + size = t.size() + else: + size = kwargs["size"] + del kwargs["size"] + if "dtype" not in kwargs: + kwargs["dtype"] = t.dtype + if "layout" not in kwargs: + kwargs["layout"] = t.layout + if "device" not in kwargs: + kwargs["device"] = t.device + if "requires_grad" not in kwargs: + kwargs["requires_grad"] = False + # Ignore memory_format and pin memory for now as I don't know how to + # safely access them on a Tensor (if possible??) + + wrapper = torch.Tensor._make_wrapper_subclass(cls, size, **kwargs) + wrapper._validate_methods() + return wrapper + + @classmethod + def get_wrapper_properties(cls, *args, **kwargs): + # Should return both an example Tensor and a dictionary of kwargs + # to override any of that example Tensor's properly. + # This is very similar to the `t.new_*(args)` API + raise NotImplementedError("You need to implement get_wrapper_properties") + + def _validate_methods(self): + # Skip this if not in debug mode? + # Changing these on the python side is wrong as it would not be properly reflected + # on the c++ side + # This doesn't catch attributes set in the __init__ + forbidden_overrides = ["size", "stride", "dtype", "layout", "device", "requires_grad"] + for el in forbidden_overrides: + if getattr(self.__class__, el) is not getattr(torch.Tensor, el): + raise RuntimeError(f"Subclass {self.__class__.__name__} is overwriting the " + f"property {el} but this is not allowed as such change would " + "not be reflected to c++ callers.") + + +class DiagTensorBelow(WrapperTensor): + @classmethod + def get_wrapper_properties(cls, diag, requires_grad=False): + assert diag.ndim == 1 + return diag, {"size": diag.size() + diag.size(), "requires_grad": requires_grad} + + def __init__(self, diag, requires_grad=False): + self.diag = diag + + handled_ops = {} + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + if not all(issubclass(cls, t) for t in types): + return NotImplemented + + # For everything else, call the handler: + fn = cls.handled_ops.get(func.__name__, None) + if fn: + return fn(*args, **(kwargs or {})) + else: + # Note that here, because we don't need to provide the autograd formulas + # we can have a default "fallback" that creates a plain Tensor based + # on the diag elements and calls the func again. 
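+ # Rough sketch of the round trip: unwrap() turns each DiagTensorBelow into the
+ # dense matrix e.diag.diag(), the op then runs on plain Tensors, and wrap()
+ # re-wraps any result that is 1-D or whose nonzeros all lie on the diagonal;
+ # anything else is returned as a plain Tensor.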
+ + def unwrap(e): + return e.diag.diag() if isinstance(e, DiagTensorBelow) else e + + def wrap(e): + if isinstance(e, torch.Tensor) and e.ndim == 1: + return DiagTensorBelow(e) + if isinstance(e, torch.Tensor) and e.ndim == 2 and e.count_nonzero() == e.diag().count_nonzero(): + return DiagTensorBelow(e.diag()) + return e + + rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {}))) + return rs + + def __repr__(self): + return super().__repr__(tensor_contents=f"diag={self.diag}") + + +class SparseTensor(WrapperTensor): + @classmethod + def get_wrapper_properties(cls, size, values, indices, requires_grad=False): + assert values.device == indices.device + return values, {"size": size, "requires_grad": requires_grad} + + def __init__(self, size, values, indices, requires_grad=False): + self.values = values + self.indices = indices + + def __repr__(self): + return super().__repr__(tensor_contents=f"values={self.values}, indices={self.indices}") + + def sparse_to_dense(self): + res = torch.zeros(self.size(), dtype=self.values.dtype) + res[self.indices.unbind(1)] = self.values + return res + + @staticmethod + def from_dense(t): + indices = t.nonzero() + values = t[indices.unbind(1)] + return SparseTensor(t.size(), values, indices) + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + func_name = f"{func.__module__}.{func.__name__}" + + res = cls._try_call_special_impl(func_name, args, kwargs) + if res is not NotImplemented: + return res + + # Otherwise, use a default implementation that construct dense + # tensors and use that to compute values + def unwrap(e): + return e.sparse_to_dense() if isinstance(e, SparseTensor) else e + + # Wrap back all Tensors into our custom class + def wrap(e): + # Check for zeros and use that to get indices + return SparseTensor.from_dense(e) if isinstance(e, torch.Tensor) else e + + rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {}))) + return rs + + # To show how things happen later + def __rmul__(self, other): + return super().__rmul__(other) + + _SPECIAL_IMPLS = {} + + @classmethod + def _try_call_special_impl(cls, func, args, kwargs): + if func not in cls._SPECIAL_IMPLS: + return NotImplemented + return cls._SPECIAL_IMPLS[func](args, kwargs) + + +# Example non-wrapper subclass that stores extra state. +class NonWrapperTensor(torch.Tensor): + def __new__(cls, data): + t = torch.Tensor._make_subclass(cls, data) + t.extra_state = { + 'last_func_called': None + } + return t + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + result = super().__torch_function__(func, types, args, kwargs) + + if isinstance(result, cls): + # Do something with the extra state. For the example here, just store the name of the + # last function called (skip for deepcopy so the copy has the same extra state). + if func is torch.Tensor.__deepcopy__: + result.extra_state = deepcopy(args[0].extra_state) + else: + result.extra_state = { + 'last_func_called': func.__name__, + } + + return result + + # new_empty() must be defined for deepcopy to work + def new_empty(self, shape): + return type(self)(torch.empty(shape)) + + +# Class used to store info about subclass tensors used in testing. 
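+# Informal usage sketch (the shape is arbitrary):
+#   info = subclass_db[DiagTensorBelow]
+#   t = info.create_fn(3)        # DiagTensorBelow over a random length-3 diagonal
+#   info.closed_under_ops        # False here: results may come back as plain Tensors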
+class SubclassInfo: + + __slots__ = ['name', 'create_fn', 'closed_under_ops'] + + def __init__(self, name, create_fn, closed_under_ops=True): + self.name = name + self.create_fn = create_fn # create_fn(shape) -> tensor instance + self.closed_under_ops = closed_under_ops + + +subclass_db = { + torch.Tensor: SubclassInfo( + 'base_tensor', create_fn=torch.randn + ), + NonWrapperTensor: SubclassInfo( + 'non_wrapper_tensor', + create_fn=lambda shape: NonWrapperTensor(torch.randn(shape)) + ), + LoggingTensor: SubclassInfo( + 'logging_tensor', + create_fn=lambda shape: LoggingTensor(torch.randn(shape)) + ), + SparseTensor: SubclassInfo( + 'sparse_tensor', + create_fn=lambda shape: SparseTensor.from_dense(torch.randn(shape).relu()) + ), + DiagTensorBelow: SubclassInfo( + 'diag_tensor_below', + create_fn=lambda shape: DiagTensorBelow(torch.randn(shape)), + closed_under_ops=False # sparse semantics + ), +} diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..64eeb6cbc18585f449a562c5f5dba8ad460e99eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py @@ -0,0 +1,5031 @@ +# mypy: ignore-errors + +r"""Importing this file must **not** initialize CUDA context. test_distributed +relies on this assumption to properly run. This means that when this is imported +no CUDA calls shall be made, including torch.cuda.device_count(), etc. + +torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported. +""" + +import argparse +import contextlib +import copy +import ctypes +import errno +import functools +import gc +import inspect +import io +import json +import logging +import math +import operator +import os +import platform +import random +import re +import shutil +import signal +import socket +import subprocess +import sys +import tempfile +import threading +import time +import types +import unittest +import warnings +from collections.abc import Mapping, Sequence +from contextlib import closing, contextmanager +from copy import deepcopy +from dataclasses import dataclass +from enum import Enum +from functools import partial, wraps +from itertools import product, chain +from pathlib import Path +from statistics import mean +from typing import ( + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, +) +from unittest.mock import MagicMock + +import expecttest +import numpy as np + +import __main__ # type: ignore[import] +import torch +import torch.backends.cudnn +import torch.backends.mkl +import torch.backends.mps +import torch.backends.xnnpack +import torch.cuda +from torch import Tensor +from torch._C import ScriptDict, ScriptList # type: ignore[attr-defined] +from torch._utils_internal import get_writable_path +from torch.nn import ( + ModuleDict, + ModuleList, + ParameterDict, + ParameterList, + Sequential, +) +from torch.onnx import ( + register_custom_op_symbolic, + unregister_custom_op_symbolic, +) +from torch.testing import make_tensor +from torch.testing._comparison import ( + BooleanPair, + NonePair, + NumberPair, + Pair, + TensorLikePair, +) +from torch.testing._comparison import not_close_error_metas +from torch.testing._internal.common_dtype import get_all_dtypes +from torch.utils._import_utils import _check_module_exists +import torch.utils._pytree as pytree + +from .composite_compliance import 
no_dispatch + + +# Class to keep track of test flags configurable by environment variables. +# Flags set here are intended to be read-only and should not be modified after +# definition. +# TODO: Expand this class to handle abritrary settings in addition to boolean flags? +class TestEnvironment: + # Set of env vars to set for the repro command that is output on test failure. + # Specifically, this includes env vars that are set to non-default values and + # are not implied. Maps from env var name -> value (int) + repro_env_vars: dict = {} + + # Defines a flag usable throughout the test suite, determining its value by querying + # the specified environment variable. + # + # Args: + # name (str): The name of the flag. A global variable with this name will be set + # for convenient access throughout the test suite. + # env_var (str): The name of the primary environment variable from which to + # determine the value of this flag. If this is None or the environment variable + # is unset, the default value will be used unless otherwise implied (see + # implied_by_fn). Default: None + # default (bool): The default value to use for the flag if unset by the environment + # variable and unimplied. Default: False + # include_in_repro (bool): Indicates whether this flag should be included in the + # repro command that is output on test failure (i.e. whether it is possibly + # relevant to reproducing the test failure). Default: True + # enabled_fn (Callable): Callable returning whether the flag should be enabled + # given the environment variable value and the default value. Default: Lambda + # requiring "0" to disable if on by default OR "1" to enable if off by default. + # implied_by_fn (Callable): Thunk returning a bool to imply this flag as enabled + # by something outside of its primary environment variable setting. For example, + # this can be useful if the value of another environment variable implies the flag + # as enabled. Default: Lambda returning False to indicate no implications. + @staticmethod + def def_flag( + name, + env_var=None, + default=False, + include_in_repro=True, + enabled_fn=lambda env_var_val, default: ( + (env_var_val != "0") if default else (env_var_val == "1")), + implied_by_fn=lambda: False, + ): + enabled = default + if env_var is not None: + env_var_val = os.getenv(env_var) + enabled = enabled_fn(env_var_val, default) + implied = implied_by_fn() + enabled = enabled or implied + if include_in_repro and (env_var is not None) and (enabled != default) and not implied: + TestEnvironment.repro_env_vars[env_var] = env_var_val + + # export flag globally for convenience + assert name not in globals(), f"duplicate definition of flag '{name}'" + globals()[name] = enabled + + # Returns a string prefix usable to set environment variables for any test + # settings that should be explicitly set to match this instantiation of the + # test suite. + # Example: "PYTORCH_TEST_WITH_ASAN=1 PYTORCH_TEST_WITH_ROCM=1" + @staticmethod + def repro_env_var_prefix() -> str: + return " ".join([f"{env_var}={value}" + for env_var, value in TestEnvironment.repro_env_vars.items()]) + + +log = logging.getLogger(__name__) +torch.backends.disable_global_flags() + +FILE_SCHEMA = "file://" +if sys.platform == 'win32': + FILE_SCHEMA = "file:///" + +# NB: This flag differs semantically from others in that setting the env var to any +# non-empty value will cause it to be true: +# CI=1, CI="true", CI=0, etc. all set the flag to be true. +# CI= and an unset CI set the flag to be false. 
+# GitHub sets the value to CI="true" to enable it. +TestEnvironment.def_flag("IS_CI", env_var="CI", include_in_repro=False, + enabled_fn=lambda env_var_value, _: bool(env_var_value)) +TestEnvironment.def_flag( + "IS_SANDCASTLE", + env_var="SANDCASTLE", + implied_by_fn=lambda: os.getenv("TW_JOB_USER") == "sandcastle", + include_in_repro=False) + +_is_fbcode_default = ( + hasattr(torch._utils_internal, "IS_FBSOURCE") and + torch._utils_internal.IS_FBSOURCE +) + +TestEnvironment.def_flag("IS_FBCODE", env_var="PYTORCH_TEST_FBCODE", + default=_is_fbcode_default, + include_in_repro=False) +TestEnvironment.def_flag("IS_REMOTE_GPU", env_var="PYTORCH_TEST_REMOTE_GPU", + include_in_repro=False) + +TestEnvironment.def_flag( + "DISABLE_RUNNING_SCRIPT_CHK", + env_var="PYTORCH_DISABLE_RUNNING_SCRIPT_CHK", + include_in_repro=False) +# NB: enabled by default unless in an fbcode context. +TestEnvironment.def_flag("PRINT_REPRO_ON_FAILURE", env_var="PYTORCH_PRINT_REPRO_ON_FAILURE", + default=(not IS_FBCODE), include_in_repro=False) # noqa: F821 + +DEFAULT_DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json' +DEFAULT_SLOW_TESTS_FILE = '.pytorch-slow-tests.json' + +disabled_tests_dict = {} +slow_tests_dict = {} + +def maybe_load_json(filename): + if os.path.isfile(filename): + with open(filename) as fp: + return json.load(fp) + log.warning("Attempted to load json file '%s' but it does not exist.", filename) + return {} + +# set them here in case the tests are running in a subprocess that doesn't call run_tests +if os.getenv("SLOW_TESTS_FILE", ""): + slow_tests_dict = maybe_load_json(os.getenv("SLOW_TESTS_FILE", "")) +if os.getenv("DISABLED_TESTS_FILE", ""): + disabled_tests_dict = maybe_load_json(os.getenv("DISABLED_TESTS_FILE", "")) + +NATIVE_DEVICES = ('cpu', 'cuda', 'meta', torch._C._get_privateuse1_backend_name()) + +check_names = ['orin', 'concord', 'galen', 'xavier', 'nano', 'jetson', 'tegra'] +IS_JETSON = any(name in platform.platform() for name in check_names) + +def gcIfJetson(fn): + # Irregular Jetson host/device memory setup requires cleanup to avoid tests being killed + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if IS_JETSON: + gc.collect() + torch.cuda.empty_cache() + fn(*args, **kwargs) + return wrapper + +# Tries to extract the current test function by crawling the stack. +# If unsuccessful, return None. +def extract_test_fn() -> Optional[Callable]: + try: + stack = inspect.stack() + for frame_info in stack: + frame = frame_info.frame + if "self" not in frame.f_locals: + continue + self_val = frame.f_locals["self"] + if isinstance(self_val, unittest.TestCase): + test_id = self_val.id() + test_name = test_id.split('.')[2] + test_fn = getattr(self_val, test_name).__func__ + return test_fn + except Exception: + pass + return None + +# Contains tracked input data useful for debugging purposes +@dataclass +class TrackedInput: + index: int + val: Any + type_desc: str + +# Attempt to pull out tracked input information from the test function. +# A TrackedInputIter is used to insert this information. +def get_tracked_input() -> Optional[TrackedInput]: + test_fn = extract_test_fn() + if test_fn is None: + return None + if not hasattr(test_fn, "tracked_input"): + return None + return test_fn.tracked_input + +def clear_tracked_input(): + test_fn = extract_test_fn() + if test_fn is None: + return + if not hasattr(test_fn, "tracked_input"): + return None + test_fn.tracked_input = None + +# Wraps an iterator and tracks the most recent value the iterator produces +# for debugging purposes. 
Tracked values are stored on the test function. +class TrackedInputIter: + def __init__(self, child_iter, input_type_desc, callback=lambda x: x): + self.child_iter = enumerate(child_iter) + # Input type describes the things we're tracking (e.g. "sample input", "error input"). + self.input_type_desc = input_type_desc + # Callback is run on each iterated thing to get the thing to track. + self.callback = callback + self.test_fn = extract_test_fn() + + def __iter__(self): + return self + + def __next__(self): + # allow StopIteration to bubble up + input_idx, input_val = next(self.child_iter) + self._set_tracked_input( + TrackedInput( + index=input_idx, val=self.callback(input_val), type_desc=self.input_type_desc + ) + ) + return input_val + + def _set_tracked_input(self, tracked_input: TrackedInput): + if self.test_fn is None: + return + if not hasattr(self.test_fn, "tracked_input"): + return + self.test_fn.tracked_input = tracked_input + +class _TestParametrizer: + """ + Decorator class for parametrizing a test function, yielding a set of new tests spawned + from the original generic test, each specialized for a specific set of test inputs. For + example, parametrizing a test across the set of ops will result in a test function per op. + + The decision of how to parametrize / what to parametrize over is intended to be implemented + by each derived class. + + In the details, the decorator adds a 'parametrize_fn' property to the test function. This function + is intended to be called later by one of: + * Device-specific test instantiation via instantiate_device_type_tests(). Note that for this + case there is no need to explicitly parametrize over device type, as that is handled separately. + * Device-agnostic parametrized test instantiation via instantiate_parametrized_tests(). + + If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new + composite 'parametrize_fn' will be created that generates tests with the product of the parameters + generated by the old and new parametrize_fns. This allows for convenient composability of decorators. + """ + def _parametrize_test(self, test, generic_cls, device_cls): + """ + Parametrizes the given test function across whatever dimension is specified by the derived class. + Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all + ops, all modules, or all ops + their associated dtypes. + + Args: + test (fn): Test function to parametrize over + generic_cls (class): Generic test class object containing tests (e.g. TestFoo) + device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None + if the tests are not part of a device-specific set + + Returns: + Generator object returning 4-tuples of: + test (fn): Parametrized test function; must support a device arg and args for any params + test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to + the base name of the test + param_kwargs (dict): Param kwargs to pass to the test (e.g. {'op': 'add', 'dtype': torch.int64}) + decorator_fn (callable): Callable[[Dict], List] for list of decorators to apply given param_kwargs + """ + raise NotImplementedError + + def __call__(self, fn): + if hasattr(fn, 'parametrize_fn'): + # Do composition with the product of args. 
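+ # Informal example: stacking
+ #   @parametrize("y", [10, 20])
+ #   @parametrize("x", [1, 2])
+ # on one test yields all four (x, y) combinations, with the per-decorator
+ # name suffixes joined by '_' in compose_parametrize_fns below.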
+ old_parametrize_fn = fn.parametrize_fn + new_parametrize_fn = self._parametrize_test + fn.parametrize_fn = compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn) + else: + fn.parametrize_fn = self._parametrize_test + return fn + + +def compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn): + """ + Returns a parametrize_fn that parametrizes over the product of the parameters handled + by the given parametrize_fns. Each given parametrize_fn should each have the signature + f(test, generic_cls, device_cls). + + The test names will be a combination of the names produced by the parametrize_fns in + "_" order. This order is done to match intuition for constructed names + when composing multiple decorators; the names will be built in top to bottom order when stacking + parametrization decorators. + + Args: + old_parametrize_fn (callable) - First parametrize_fn to compose. + new_parametrize_fn (callable) - Second parametrize_fn to compose. + """ + + def composite_fn(test, generic_cls, device_cls, + old_parametrize_fn=old_parametrize_fn, + new_parametrize_fn=new_parametrize_fn): + old_tests = list(old_parametrize_fn(test, generic_cls, device_cls)) + for (old_test, old_test_name, old_param_kwargs, old_dec_fn) in old_tests: + for (new_test, new_test_name, new_param_kwargs, new_dec_fn) in \ + new_parametrize_fn(old_test, generic_cls, device_cls): + redundant_params = set(old_param_kwargs.keys()).intersection(new_param_kwargs.keys()) + if redundant_params: + raise RuntimeError('Parametrization over the same parameter by multiple parametrization ' + 'decorators is not supported. For test "{}", the following parameters ' + 'are handled multiple times: {}'.format( + test.__name__, redundant_params)) + full_param_kwargs = {**old_param_kwargs, **new_param_kwargs} + merged_test_name = '{}{}{}'.format(new_test_name, + '_' if old_test_name != '' and new_test_name != '' else '', + old_test_name) + + def merged_decorator_fn(param_kwargs, old_dec_fn=old_dec_fn, new_dec_fn=new_dec_fn): + return list(old_dec_fn(param_kwargs)) + list(new_dec_fn(param_kwargs)) + + yield (new_test, merged_test_name, full_param_kwargs, merged_decorator_fn) + + return composite_fn + + +def instantiate_parametrized_tests(generic_cls): + """ + Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a + decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by + parametrized tests with specialized names. This should be used instead of + instantiate_device_type_tests() if the test class contains device-agnostic tests. + + You can also use it as a class decorator. E.g. + + ``` + @instantiate_parametrized_tests + class TestFoo(TestCase): + ... + ``` + + Args: + generic_cls (class): Generic test class object containing tests (e.g. TestFoo) + """ + for attr_name in tuple(dir(generic_cls)): + class_attr = getattr(generic_cls, attr_name) + if not hasattr(class_attr, 'parametrize_fn'): + continue + + # Remove the generic test from the test class. + delattr(generic_cls, attr_name) + + # Add parametrized tests to the test class. 
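+ # For example, a generic test_foo decorated with @parametrize("x", range(2))
+ # is replaced on the class by test_foo_x_0 and test_foo_x_1, each bound to
+ # its own param_kwargs.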
+ def instantiate_test_helper(cls, name, test, param_kwargs): + @wraps(test) + def instantiated_test(self, param_kwargs=param_kwargs): + test(self, **param_kwargs) + + assert not hasattr(generic_cls, name), f"Redefinition of test {name}" + setattr(generic_cls, name, instantiated_test) + + for (test, test_suffix, param_kwargs, decorator_fn) in class_attr.parametrize_fn( + class_attr, generic_cls=generic_cls, device_cls=None): + full_name = f'{test.__name__}_{test_suffix}' + + # Apply decorators based on full param kwargs. + for decorator in decorator_fn(param_kwargs): + test = decorator(test) + + instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs) + return generic_cls + + +class subtest: + """ + Explicit subtest case for use with test parametrization. + Allows for explicit naming of individual subtest cases as well as applying + decorators to the parametrized test. + + Args: + arg_values (iterable): Iterable of arg values (e.g. range(10)) or + tuples of arg values (e.g. [(1, 2), (3, 4)]). + name (str): Optional name to use for the test. + decorators (iterable): Iterable of decorators to apply to the generated test. + """ + __slots__ = ['arg_values', 'name', 'decorators'] + + def __init__(self, arg_values, name=None, decorators=None): + self.arg_values = arg_values + self.name = name + self.decorators = decorators if decorators else [] + + +class parametrize(_TestParametrizer): + """ + Decorator for applying generic test parametrizations. + + The interface for this decorator is modeled after `@pytest.mark.parametrize`. + Basic usage between this decorator and pytest's is identical. The first argument + should be a string containing comma-separated names of parameters for the test, and + the second argument should be an iterable returning values or tuples of values for + the case of multiple parameters. + + Beyond this basic usage, the decorator provides some additional functionality that + pytest does not. + + 1. Parametrized tests end up as generated test functions on unittest test classes. + Since this differs from how pytest works, this decorator takes on the additional + responsibility of naming these test functions. The default test names consists of + the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"), + but custom names can be defined using `name_fn` or the `subtest` structure (see below). + + 2. The decorator specially handles parameter values of type `subtest`, which allows for + more fine-grained control over both test naming and test execution. In particular, it can + be used to tag subtests with explicit test names or apply arbitrary decorators (see examples + below). + + Examples:: + + @parametrize("x", range(5)) + def test_foo(self, x): + ... + + @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')]) + def test_bar(self, x, y): + ... + + @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')], + name_fn=lambda x, y: '{}_{}'.format(x, y)) + def test_bar_custom_names(self, x, y): + ... + + @parametrize("x, y", [subtest((1, 2), name='double'), + subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]), + subtest((1, 4), name='quadruple')]) + def test_baz(self, x, y): + ... + + To actually instantiate the parametrized tests, one of instantiate_parametrized_tests() or + instantiate_device_type_tests() should be called. The former is intended for test classes + that contain device-agnostic tests, while the latter should be used for test classes that + contain device-specific tests. 
Both support arbitrary parametrizations using the decorator. + + Args: + arg_str (str): String of arg names separate by commas (e.g. "x,y"). + arg_values (iterable): Iterable of arg values (e.g. range(10)) or + tuples of arg values (e.g. [(1, 2), (3, 4)]). + name_fn (Callable): Optional function that takes in parameters and returns subtest name. + """ + def __init__(self, arg_str, arg_values, name_fn=None): + self.arg_names: List[str] = [s.strip() for s in arg_str.split(',') if s != ''] + self.arg_values = arg_values + self.name_fn = name_fn + + def _formatted_str_repr(self, idx, name, value): + """ Returns a string representation for the given arg that is suitable for use in test function names. """ + if isinstance(value, torch.dtype): + return dtype_name(value) + elif isinstance(value, torch.device): + return str(value) + # Can't use isinstance as it would cause a circular import + elif type(value).__name__ in {'OpInfo', 'ModuleInfo'}: + return value.formatted_name + elif isinstance(value, (int, float, str)): + return f"{name}_{str(value).replace('.', '_')}" + else: + return f"{name}{idx}" + + def _default_subtest_name(self, idx, values): + return '_'.join([self._formatted_str_repr(idx, a, v) for a, v in zip(self.arg_names, values)]) + + def _get_subtest_name(self, idx, values, explicit_name=None): + if explicit_name: + subtest_name = explicit_name + elif self.name_fn: + subtest_name = self.name_fn(*values) + else: + subtest_name = self._default_subtest_name(idx, values) + return subtest_name + + def _parametrize_test(self, test, generic_cls, device_cls): + if len(self.arg_names) == 0: + # No additional parameters needed for the test. + test_name = '' + yield (test, test_name, {}, lambda _: []) + else: + # Each "values" item is expected to be either: + # * A tuple of values with one for each arg. For a single arg, a single item is expected. + # * A subtest instance with arg_values matching the previous. + values = check_exhausted_iterator = object() + for idx, values in enumerate(self.arg_values): + maybe_name = None + + decorators = [] + if isinstance(values, subtest): + sub = values + values = sub.arg_values + maybe_name = sub.name + + @wraps(test) + def test_wrapper(*args, **kwargs): + return test(*args, **kwargs) + + decorators = sub.decorators + gen_test = test_wrapper + else: + gen_test = test + + values = list(values) if len(self.arg_names) > 1 else [values] + if len(values) != len(self.arg_names): + raise RuntimeError(f'Expected # values == # arg names, but got: {len(values)} ' + f'values and {len(self.arg_names)} names for test "{test.__name__}"') + + param_kwargs = dict(zip(self.arg_names, values)) + + test_name = self._get_subtest_name(idx, values, explicit_name=maybe_name) + + def decorator_fn(_, decorators=decorators): + return decorators + + yield (gen_test, test_name, param_kwargs, decorator_fn) + + if values is check_exhausted_iterator: + raise ValueError(f'{test}: An empty arg_values was passed to @parametrize. ' + 'Note that this may result from reuse of a generator.') + + +class decorateIf(_TestParametrizer): + """ + Decorator for applying parameter-specific conditional decoration. + Composes with other test parametrizers (e.g. @modules, @ops, @parametrize, etc.). + + Examples:: + + @decorateIf(unittest.skip, lambda params: params["x"] == 2) + @parametrize("x", range(5)) + def test_foo(self, x): + ... 
+ + @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')]) + @decorateIf( + unittest.expectedFailure, + lambda params: params["x"] == 3 and params["y"] == "baz" + ) + def test_bar(self, x, y): + ... + + @decorateIf( + unittest.expectedFailure, + lambda params: params["op"].name == "add" and params["dtype"] == torch.float16 + ) + @ops(op_db) + def test_op_foo(self, device, dtype, op): + ... + + @decorateIf( + unittest.skip, + lambda params: params["module_info"].module_cls is torch.nn.Linear and \ + params["device"] == "cpu" + ) + @modules(module_db) + def test_module_foo(self, device, dtype, module_info): + ... + + Args: + decorator: Test decorator to apply if the predicate is satisfied. + predicate_fn (Callable): Function taking in a dict of params and returning a boolean + indicating whether the decorator should be applied or not. + """ + def __init__(self, decorator, predicate_fn): + self.decorator = decorator + self.predicate_fn = predicate_fn + + def _parametrize_test(self, test, generic_cls, device_cls): + + # Leave test as-is and return the appropriate decorator_fn. + def decorator_fn(params, decorator=self.decorator, predicate_fn=self.predicate_fn): + if predicate_fn(params): + return [decorator] + else: + return [] + + @wraps(test) + def test_wrapper(*args, **kwargs): + return test(*args, **kwargs) + + test_name = '' + yield (test_wrapper, test_name, {}, decorator_fn) + + +class ProfilingMode(Enum): + LEGACY = 1 + SIMPLE = 2 + PROFILING = 3 + +def cppProfilingFlagsToProfilingMode(): + old_prof_exec_state = torch._C._jit_set_profiling_executor(True) + old_prof_mode_state = torch._C._get_graph_executor_optimize(True) + torch._C._jit_set_profiling_executor(old_prof_exec_state) + torch._C._get_graph_executor_optimize(old_prof_mode_state) + + if old_prof_exec_state: + if old_prof_mode_state: + return ProfilingMode.PROFILING + else: + return ProfilingMode.SIMPLE + else: + return ProfilingMode.LEGACY + +@contextmanager +def enable_profiling_mode_for_profiling_tests(): + if GRAPH_EXECUTOR == ProfilingMode.PROFILING: + old_prof_exec_state = torch._C._jit_set_profiling_executor(True) + old_prof_mode_state = torch._C._get_graph_executor_optimize(True) + try: + yield + finally: + if GRAPH_EXECUTOR == ProfilingMode.PROFILING: + torch._C._jit_set_profiling_executor(old_prof_exec_state) + torch._C._get_graph_executor_optimize(old_prof_mode_state) + +@contextmanager +def enable_profiling_mode(): + old_prof_exec_state = torch._C._jit_set_profiling_executor(True) + old_prof_mode_state = torch._C._get_graph_executor_optimize(True) + try: + yield + finally: + torch._C._jit_set_profiling_executor(old_prof_exec_state) + torch._C._get_graph_executor_optimize(old_prof_mode_state) + +@contextmanager +def num_profiled_runs(num_runs): + old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs) + try: + yield + finally: + torch._C._jit_set_num_profiled_runs(old_num_runs) + +func_call = torch._C.ScriptFunction.__call__ +meth_call = torch._C.ScriptMethod.__call__ + +def prof_callable(callable, *args, **kwargs): + if 'profile_and_replay' in kwargs: + del kwargs['profile_and_replay'] + if GRAPH_EXECUTOR == ProfilingMode.PROFILING: + with enable_profiling_mode_for_profiling_tests(): + callable(*args, **kwargs) + return callable(*args, **kwargs) + + return callable(*args, **kwargs) + +def prof_func_call(*args, **kwargs): + return prof_callable(func_call, *args, **kwargs) + +def prof_meth_call(*args, **kwargs): + return prof_callable(meth_call, *args, **kwargs) + +torch._C.ScriptFunction.__call__ = 
prof_func_call # type: ignore[method-assign] +torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[method-assign] + +def _get_test_report_path(): + # allow users to override the test file location. We need this + # because the distributed tests run the same test file multiple + # times with different configurations. + override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE') + test_source = override if override is not None else 'python-unittest' + return os.path.join('test-reports', test_source) + +is_running_via_run_test = "run_test.py" in getattr(__main__, "__file__", "") +parser = argparse.ArgumentParser(add_help=not is_running_via_run_test, allow_abbrev=False) +parser.add_argument('--subprocess', action='store_true', + help='whether to run each test in a subprocess') +parser.add_argument('--seed', type=int, default=1234) +parser.add_argument('--accept', action='store_true') +parser.add_argument('--jit-executor', '--jit_executor', type=str) +parser.add_argument('--repeat', type=int, default=1) +parser.add_argument('--test-bailouts', '--test_bailouts', action='store_true') +parser.add_argument('--use-pytest', action='store_true') +parser.add_argument('--save-xml', nargs='?', type=str, + const=_get_test_report_path(), + default=_get_test_report_path() if IS_CI else None) # noqa: F821 +parser.add_argument('--discover-tests', action='store_true') +parser.add_argument('--log-suffix', type=str, default="") +parser.add_argument('--run-parallel', type=int, default=1) +parser.add_argument('--import-slow-tests', type=str, nargs='?', const=DEFAULT_SLOW_TESTS_FILE) +parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DEFAULT_DISABLED_TESTS_FILE) +parser.add_argument('--rerun-disabled-tests', action='store_true') +parser.add_argument('--pytest-single-test', type=str, nargs=1) + +# Only run when -h or --help flag is active to display both unittest and parser help messages. +def run_unittest_help(argv): + unittest.main(argv=argv) + +if '-h' in sys.argv or '--help' in sys.argv: + help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,)) + help_thread.start() + help_thread.join() + +args, remaining = parser.parse_known_args() +if args.jit_executor == 'legacy': + GRAPH_EXECUTOR = ProfilingMode.LEGACY +elif args.jit_executor == 'profiling': + GRAPH_EXECUTOR = ProfilingMode.PROFILING +elif args.jit_executor == 'simple': + GRAPH_EXECUTOR = ProfilingMode.SIMPLE +else: + # infer flags based on the default settings + GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode() + +RERUN_DISABLED_TESTS = args.rerun_disabled_tests + +SLOW_TESTS_FILE = args.import_slow_tests +DISABLED_TESTS_FILE = args.import_disabled_tests +LOG_SUFFIX = args.log_suffix +RUN_PARALLEL = args.run_parallel +TEST_BAILOUTS = args.test_bailouts +USE_PYTEST = args.use_pytest +PYTEST_SINGLE_TEST = args.pytest_single_test +TEST_DISCOVER = args.discover_tests +TEST_IN_SUBPROCESS = args.subprocess +TEST_SAVE_XML = args.save_xml +REPEAT_COUNT = args.repeat +SEED = args.seed +if not getattr(expecttest, "ACCEPT", False): + expecttest.ACCEPT = args.accept +UNITTEST_ARGS = [sys.argv[0]] + remaining +torch.manual_seed(SEED) + +# CI Prefix path used only on CI environment +CI_TEST_PREFIX = str(Path(os.getcwd())) +CI_PT_ROOT = str(Path(os.getcwd()).parent) +CI_FUNCTORCH_ROOT = str(os.path.join(Path(os.getcwd()).parent, "functorch")) + +def wait_for_process(p, timeout=None): + try: + return p.wait(timeout=timeout) + except KeyboardInterrupt: + # Give `p` a chance to handle KeyboardInterrupt. 
Without this, + # `pytest` can't print errors it collected so far upon KeyboardInterrupt. + exit_status = p.wait(timeout=5) + if exit_status is not None: + return exit_status + else: + p.kill() + raise + except subprocess.TimeoutExpired: + # send SIGINT to give pytest a chance to make xml + p.send_signal(signal.SIGINT) + exit_status = None + try: + exit_status = p.wait(timeout=5) + # try to handle the case where p.wait(timeout=5) times out as well as + # otherwise the wait() call in the finally block can potentially hang + except subprocess.TimeoutExpired: + pass + if exit_status is not None: + return exit_status + else: + p.kill() + raise + except: # noqa: B001,E722, copied from python core library + p.kill() + raise + finally: + # Always call p.wait() to ensure exit + p.wait() + +def shell(command, cwd=None, env=None, stdout=None, stderr=None, timeout=None): + sys.stdout.flush() + sys.stderr.flush() + # The following cool snippet is copied from Py3 core library subprocess.call + # only the with + # 1. `except KeyboardInterrupt` block added for SIGINT handling. + # 2. In Py2, subprocess.Popen doesn't return a context manager, so we do + # `p.wait()` in a `final` block for the code to be portable. + # + # https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323 + assert not isinstance(command, str), "Command to shell should be a list or tuple of tokens" + p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env, stdout=stdout, stderr=stderr) + return wait_for_process(p, timeout=timeout) + + +def retry_shell( + command, + cwd=None, + env=None, + stdout=None, + stderr=None, + timeout=None, + retries=1, + was_rerun=False, +) -> Tuple[int, bool]: + # Returns exicode + whether it was rerun + assert ( + retries >= 0 + ), f"Expecting non negative number for number of retries, got {retries}" + try: + exit_code = shell( + command, cwd=cwd, env=env, stdout=stdout, stderr=stderr, timeout=timeout + ) + if exit_code == 0 or retries == 0: + return exit_code, was_rerun + print( + f"Got exit code {exit_code}, retrying (retries left={retries})", + file=stdout, + flush=True, + ) + except subprocess.TimeoutExpired: + if retries == 0: + print( + f"Command took >{timeout // 60}min, returning 124", + file=stdout, + flush=True, + ) + return 124, was_rerun + print( + f"Command took >{timeout // 60}min, retrying (retries left={retries})", + file=stdout, + flush=True, + ) + return retry_shell( + command, + cwd=cwd, + env=env, + stdout=stdout, + stderr=stderr, + timeout=timeout, + retries=retries - 1, + was_rerun=True, + ) + + +def discover_test_cases_recursively(suite_or_case): + if isinstance(suite_or_case, unittest.TestCase): + return [suite_or_case] + rc = [] + for element in suite_or_case: + print(element) + rc.extend(discover_test_cases_recursively(element)) + return rc + +def get_test_names(test_cases): + return ['.'.join(case.id().split('.')[-2:]) for case in test_cases] + +def _print_test_names(): + suite = unittest.TestLoader().loadTestsFromModule(__main__) + test_cases = discover_test_cases_recursively(suite) + for name in get_test_names(test_cases): + print(name) + +def chunk_list(lst, nchunks): + return [lst[i::nchunks] for i in range(nchunks)] + +# sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api +def sanitize_test_filename(filename): + # inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed + if filename.startswith(CI_TEST_PREFIX): + 
        filename = filename[len(CI_TEST_PREFIX) + 1:]
+    strip_py = re.sub(r'.py$', '', filename)
+    return re.sub('/', r'.', strip_py)
+
+def lint_test_case_extension(suite):
+    succeed = True
+    for test_case_or_suite in suite:
+        test_case = test_case_or_suite
+        if isinstance(test_case_or_suite, unittest.TestSuite):
+            first_test = test_case_or_suite._tests[0] if len(test_case_or_suite._tests) > 0 else None
+            if first_test is not None and isinstance(first_test, unittest.TestSuite):
+                return succeed and lint_test_case_extension(test_case_or_suite)
+            test_case = first_test
+
+        if test_case is not None:
+            test_class = test_case.id().split('.', 1)[1].split('.')[0]
+            if not isinstance(test_case, TestCase):
+                err = "This test class should extend from torch.testing._internal.common_utils.TestCase but it doesn't."
+                print(f"{test_class} - failed. {err}")
+                succeed = False
+    return succeed
+
+
+def get_report_path(argv=UNITTEST_ARGS, pytest=False):
+    test_filename = sanitize_test_filename(argv[0])
+    test_report_path = TEST_SAVE_XML + LOG_SUFFIX
+    test_report_path = os.path.join(test_report_path, test_filename)
+    if pytest:
+        test_report_path = test_report_path.replace('python-unittest', 'python-pytest')
+        os.makedirs(test_report_path, exist_ok=True)
+        test_report_path = os.path.join(test_report_path, f"{test_filename}-{os.urandom(8).hex()}.xml")
+        return test_report_path
+    os.makedirs(test_report_path, exist_ok=True)
+    return test_report_path
+
+
+def sanitize_pytest_xml(xml_file: str):
+    # pytest xml is different from unittest xml, this function makes pytest xml more similar to unittest xml
+    # consider somehow modifying the XML logger in conftest to do this instead
+    import xml.etree.ElementTree as ET
+    tree = ET.parse(xml_file)
+    for testcase in tree.iter('testcase'):
+        full_classname = testcase.attrib['classname']
+        # The test prefix is optional
+        regex_result = re.search(r"^(test\.)?(?P<file>.*)\.(?P<classname>[^\.]*)$", full_classname)
+        if regex_result is None:
+            continue
+        classname = regex_result.group("classname")
+        file = regex_result.group("file").replace(".", "/")
+        testcase.set("classname", classname)
+        testcase.set("file", f"{file}.py")
+    tree.write(xml_file)
+
+
+def get_pytest_test_cases(argv: List[str]) -> List[str]:
+    class TestCollectorPlugin:
+        def __init__(self):
+            self.tests = []
+
+        def pytest_collection_finish(self, session):
+            for item in session.items:
+                self.tests.append(session.config.cwd_relative_nodeid(item.nodeid))
+
+    test_collector_plugin = TestCollectorPlugin()
+    import pytest
+    pytest.main(
+        [arg for arg in argv if arg != '-vv'] + ['--collect-only', '-qq', '--use-main-module'],
+        plugins=[test_collector_plugin]
+    )
+    return test_collector_plugin.tests
+
+
+def run_tests(argv=UNITTEST_ARGS):
+    # import test files.
+ if SLOW_TESTS_FILE: + if os.path.exists(SLOW_TESTS_FILE): + with open(SLOW_TESTS_FILE) as fp: + global slow_tests_dict + slow_tests_dict = json.load(fp) + # use env vars so pytest-xdist subprocesses can still access them + os.environ['SLOW_TESTS_FILE'] = SLOW_TESTS_FILE + else: + warnings.warn(f'slow test file provided but not found: {SLOW_TESTS_FILE}') + if DISABLED_TESTS_FILE: + if os.path.exists(DISABLED_TESTS_FILE): + with open(DISABLED_TESTS_FILE) as fp: + global disabled_tests_dict + disabled_tests_dict = json.load(fp) + os.environ['DISABLED_TESTS_FILE'] = DISABLED_TESTS_FILE + else: + warnings.warn(f'disabled test file provided but not found: {DISABLED_TESTS_FILE}') + # Determine the test launch mechanism + if TEST_DISCOVER: + _print_test_names() + return + + # Before running the tests, lint to check that every test class extends from TestCase + suite = unittest.TestLoader().loadTestsFromModule(__main__) + if not lint_test_case_extension(suite): + sys.exit(1) + + if TEST_IN_SUBPROCESS: + other_args = [] + if DISABLED_TESTS_FILE: + other_args.append("--import-disabled-tests") + if SLOW_TESTS_FILE: + other_args.append("--import-slow-tests") + if USE_PYTEST: + other_args.append("--use-pytest") + if RERUN_DISABLED_TESTS: + other_args.append("--rerun-disabled-tests") + if TEST_SAVE_XML: + other_args += ['--save-xml', args.save_xml] + + test_cases = ( + get_pytest_test_cases(argv) if USE_PYTEST else + [case.id().split('.', 1)[1] for case in discover_test_cases_recursively(suite)] + ) + + failed_tests = [] + + for test_case_full_name in test_cases: + + cmd = ( + [sys.executable] + [argv[0]] + other_args + argv[1:] + + (["--pytest-single-test"] if USE_PYTEST else []) + + [test_case_full_name] + ) + string_cmd = " ".join(cmd) + + timeout = None if RERUN_DISABLED_TESTS else 15 * 60 + + exitcode, _ = retry_shell(cmd, timeout=timeout, retries=0 if RERUN_DISABLED_TESTS else 1) + + if exitcode != 0: + # This is sort of hacky, but add on relevant env variables for distributed tests. + if 'TestDistBackendWithSpawn' in test_case_full_name: + backend = os.environ.get("BACKEND", "") + world_size = os.environ.get("WORLD_SIZE", "") + env_prefix = f"BACKEND={backend} WORLD_SIZE={world_size}" + string_cmd = env_prefix + " " + string_cmd + # Log the command to reproduce the failure. + print(f"Test exited with non-zero exitcode {exitcode}. 
Command to reproduce: {string_cmd}") + failed_tests.append(test_case_full_name) + + assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format( + len(failed_tests), '\n\t'.join(failed_tests)) + + elif RUN_PARALLEL > 1: + test_cases = discover_test_cases_recursively(suite) + test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL) + processes = [] + for i in range(RUN_PARALLEL): + command = [sys.executable] + argv + [f'--log-suffix=-shard-{i + 1}'] + test_batches[i] + processes.append(subprocess.Popen(command, universal_newlines=True)) + failed = False + for p in processes: + failed |= wait_for_process(p) != 0 + assert not failed, "Some test shards have failed" + elif USE_PYTEST: + pytest_args = argv + ["--use-main-module"] + if TEST_SAVE_XML: + test_report_path = get_report_path(pytest=True) + print(f'Test results will be stored in {test_report_path}') + pytest_args.append(f'--junit-xml-reruns={test_report_path}') + if PYTEST_SINGLE_TEST: + pytest_args = PYTEST_SINGLE_TEST + pytest_args[1:] + + import pytest + os.environ["NO_COLOR"] = "1" + exit_code = pytest.main(args=pytest_args) + if TEST_SAVE_XML: + sanitize_pytest_xml(test_report_path) + + if not RERUN_DISABLED_TESTS: + # exitcode of 5 means no tests were found, which happens since some test configs don't + # run tests from certain files + sys.exit(0 if exit_code == 5 else exit_code) + else: + # Only record the test report and always return a success code when running under rerun + # disabled tests mode + sys.exit(0) + elif TEST_SAVE_XML is not None: + # import here so that non-CI doesn't need xmlrunner installed + import xmlrunner # type: ignore[import] + from xmlrunner.result import _XMLTestResult # type: ignore[import] + + class XMLTestResultVerbose(_XMLTestResult): + """ + Adding verbosity to test outputs: + by default test summary prints 'skip', + but we want to also print the skip reason. 
+ GH issue: https://github.com/pytorch/pytorch/issues/69014 + + This works with unittest_xml_reporting<=3.2.0,>=2.0.0 + (3.2.0 is latest at the moment) + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def addSkip(self, test, reason): + super().addSkip(test, reason) + for c in self.callback.__closure__: + if isinstance(c.cell_contents, str) and c.cell_contents == 'skip': + # this message is printed in test summary; + # it stands for `verbose_str` captured in the closure + c.cell_contents = f"skip: {reason}" + + def printErrors(self) -> None: + super().printErrors() + self.printErrorList("XPASS", self.unexpectedSuccesses) + test_report_path = get_report_path() + verbose = '--verbose' in argv or '-v' in argv + if verbose: + print(f'Test results will be stored in {test_report_path}') + unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner( + output=test_report_path, + verbosity=2 if verbose else 1, + resultclass=XMLTestResultVerbose)) + elif REPEAT_COUNT > 1: + for _ in range(REPEAT_COUNT): + if not unittest.main(exit=False, argv=argv).result.wasSuccessful(): + sys.exit(-1) + else: + unittest.main(argv=argv) + +IS_LINUX = sys.platform == "linux" +IS_WINDOWS = sys.platform == "win32" +IS_MACOS = sys.platform == "darwin" +IS_PPC = platform.machine() == "ppc64le" +IS_X86 = platform.machine() in ('x86_64', 'i386') +IS_ARM64 = platform.machine() in ('arm64', 'aarch64') + +def is_avx512_vnni_supported(): + if sys.platform != 'linux': + return False + with open("/proc/cpuinfo", encoding="ascii") as f: + lines = f.read() + return "vnni" in lines + +IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported() + +if IS_WINDOWS: + @contextmanager + def TemporaryFileName(*args, **kwargs): + # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile + # opens the file, and it cannot be opened multiple times in Windows. 
To support Windows, + # close the file after creation and try to remove it manually + if 'delete' in kwargs: + if kwargs['delete'] is not False: + raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.") + else: + kwargs['delete'] = False + f = tempfile.NamedTemporaryFile(*args, **kwargs) + try: + f.close() + yield f.name + finally: + os.unlink(f.name) +else: + @contextmanager # noqa: T484 + def TemporaryFileName(*args, **kwargs): + with tempfile.NamedTemporaryFile(*args, **kwargs) as f: + yield f.name + +if IS_WINDOWS: + @contextmanager + def TemporaryDirectoryName(suffix=None): + # On Windows the directory created by TemporaryDirectory is likely to be removed prematurely, + # so we first create the directory using mkdtemp and then remove it manually + try: + dir_name = tempfile.mkdtemp(suffix=suffix) + yield dir_name + finally: + shutil.rmtree(dir_name) +else: + @contextmanager # noqa: T484 + def TemporaryDirectoryName(suffix=None): + with tempfile.TemporaryDirectory(suffix=suffix) as d: + yield d + +IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8' + +TEST_NUMPY = _check_module_exists('numpy') +TEST_FAIRSEQ = _check_module_exists('fairseq') +TEST_SCIPY = _check_module_exists('scipy') +TEST_MKL = torch.backends.mkl.is_available() +TEST_MPS = torch.backends.mps.is_available() +TEST_XPU = torch.xpu.is_available() +TEST_CUDA = torch.cuda.is_available() +custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name(), None) +TEST_PRIVATEUSE1 = True if (hasattr(custom_device_mod, "is_available") and custom_device_mod.is_available()) else False +TEST_NUMBA = _check_module_exists('numba') + +TEST_DILL = _check_module_exists('dill') + +TEST_LIBROSA = _check_module_exists('librosa') and not IS_ARM64 + +TEST_OPT_EINSUM = _check_module_exists('opt_einsum') + +TEST_Z3 = _check_module_exists('z3') + +BUILD_WITH_CAFFE2 = torch.onnx._CAFFE2_ATEN_FALLBACK + +def split_if_not_empty(x: str): + return x.split(",") if len(x) != 0 else [] + +NOTEST_CPU = "cpu" in split_if_not_empty(os.getenv('PYTORCH_TESTING_DEVICE_EXCEPT_FOR', '')) + +skipIfNoDill = unittest.skipIf(not TEST_DILL, "no dill") + + +# Python 2.7 doesn't have spawn +TestEnvironment.def_flag("NO_MULTIPROCESSING_SPAWN", env_var="NO_MULTIPROCESSING_SPAWN") +TestEnvironment.def_flag("TEST_WITH_ASAN", env_var="PYTORCH_TEST_WITH_ASAN") +TestEnvironment.def_flag("TEST_WITH_DEV_DBG_ASAN", env_var="PYTORCH_TEST_WITH_DEV_DBG_ASAN") +TestEnvironment.def_flag("TEST_WITH_TSAN", env_var="PYTORCH_TEST_WITH_TSAN") +TestEnvironment.def_flag("TEST_WITH_UBSAN", env_var="PYTORCH_TEST_WITH_UBSAN") +TestEnvironment.def_flag("TEST_WITH_ROCM", env_var="PYTORCH_TEST_WITH_ROCM") + +# TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen +# See #64427 +TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1' +# Enables tests that are slow to run (disabled by default) +TestEnvironment.def_flag("TEST_WITH_SLOW", env_var="PYTORCH_TEST_WITH_SLOW") + +# Disables non-slow tests (these tests enabled by default) +# This is usually used in conjunction with TEST_WITH_SLOW to +# run *only* slow tests. (I could have done an enum, but +# it felt a little awkward. +TestEnvironment.def_flag("TEST_SKIP_FAST", env_var="PYTORCH_TEST_SKIP_FAST") + +# Enables crossref tests, in addition to standard tests which +# are being run. crossref tests work by installing a torch +# function mode that runs extra compute alongside the regular +# computation that happens with the test. 
After both computations +# are done, we cross-reference them (thus the name) to check for +# correction, before throwing out the extra compute and proceeding +# as we had before. By default, we don't run these tests. +TestEnvironment.def_flag("TEST_WITH_CROSSREF", env_var="PYTORCH_TEST_WITH_CROSSREF") + +TestEnvironment.def_flag("TEST_SKIP_CUDAGRAPH", env_var="PYTORCH_TEST_SKIP_CUDAGRAPH") +TEST_CUDA_GRAPH = TEST_CUDA and (not TEST_SKIP_CUDAGRAPH) and ( # noqa: F821 + (torch.version.cuda and int(torch.version.cuda.split(".")[0]) >= 11) or + (torch.version.hip and float(".".join(torch.version.hip.split(".")[0:2])) >= 5.3) +) + +if TEST_CUDA and 'NUM_PARALLEL_PROCS' in os.environ: + num_procs = int(os.getenv("NUM_PARALLEL_PROCS", "2")) + # other libraries take up about 11% of space per process + torch.cuda.set_per_process_memory_fraction(round(1 / num_procs - .11, 2)) + +requires_cuda = unittest.skipUnless(torch.cuda.is_available(), "Requires CUDA") + +def skipIfCrossRef(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_WITH_CROSSREF: # noqa: F821 + raise unittest.SkipTest("test doesn't currently with crossref") + else: + fn(*args, **kwargs) + return wrapper + +class CrossRefMode(torch.overrides.TorchFunctionMode): + def __torch_function__(self, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + r = func(*args, **kwargs) + return r + +# Run PyTorch tests with TorchDynamo +TestEnvironment.def_flag("TEST_WITH_TORCHINDUCTOR", env_var="PYTORCH_TEST_WITH_INDUCTOR") +# AOT_EAGER not tested in ci, useful for debugging +TestEnvironment.def_flag("TEST_WITH_AOT_EAGER", env_var="PYTORCH_TEST_WITH_AOT_EAGER") +TestEnvironment.def_flag("TEST_WITH_TORCHDYNAMO", env_var="PYTORCH_TEST_WITH_DYNAMO", + implied_by_fn=lambda: TEST_WITH_TORCHINDUCTOR or TEST_WITH_AOT_EAGER) # noqa: F821 + +if TEST_WITH_TORCHDYNAMO: # noqa: F821 + import torch._dynamo + # Do not spend time on helper functions that are called with different inputs + torch._dynamo.config.accumulated_cache_size_limit = 8 + # Do not log compilation metrics from unit tests + torch._dynamo.config.log_compilation_metrics = False + if TEST_WITH_TORCHINDUCTOR: # noqa: F821 + import torch._inductor.config + torch._inductor.config.fallback_random = True + + +def xpassIfTorchDynamo(func): + return func if TEST_WITH_TORCHDYNAMO else unittest.expectedFailure(func) # noqa: F821 + + +def xfailIfTorchDynamo(func): + return unittest.expectedFailure(func) if TEST_WITH_TORCHDYNAMO else func # noqa: F821 + + +def skipIfTorchDynamo(msg="test doesn't currently work with dynamo"): + """ + Usage: + @skipIfTorchDynamo(msg) + def test_blah(self): + ... + """ + assert isinstance(msg, str), "Are you using skipIfTorchDynamo correctly?" 
+ + def decorator(fn): + if not isinstance(fn, type): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_WITH_TORCHDYNAMO: # noqa: F821 + raise unittest.SkipTest(msg) + else: + fn(*args, **kwargs) + return wrapper + + assert isinstance(fn, type) + if TEST_WITH_TORCHDYNAMO: # noqa: F821 + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = msg + + return fn + + return decorator + +def skipIfTorchInductor(msg="test doesn't currently work with torchinductor", + condition=TEST_WITH_TORCHINDUCTOR): # noqa: F821 + def decorator(fn): + if not isinstance(fn, type): + @wraps(fn) + def wrapper(*args, **kwargs): + if condition: + raise unittest.SkipTest(msg) + else: + fn(*args, **kwargs) + return wrapper + + assert isinstance(fn, type) + if condition: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = msg + + return fn + + return decorator + + +def unMarkDynamoStrictTest(cls=None): + def decorator(cls): + cls.dynamo_strict = False + return cls + + if cls is None: + return decorator + else: + return decorator(cls) + + +def markDynamoStrictTest(cls_or_func=None, nopython=False): + """ + Marks the test as 'strict'. In strict mode, we reset before and after the + test, and run without suppress errors. + + Args: + - nopython: if we should run torch._dynamo.optimize with nopython={True/False}. + """ + def decorator(cls_or_func): + if inspect.isclass(cls_or_func): + cls_or_func.dynamo_strict = True + cls_or_func.dynamo_strict_nopython = nopython + return cls_or_func + + fn = cls_or_func + + @wraps(fn) + def wrapper(*args, **kwargs): + torch._dynamo.reset() + with unittest.mock.patch("torch._dynamo.config.suppress_errors", False): + fn(*args, **kwargs) + torch._dynamo.reset() + return wrapper + + if cls_or_func is None: + return decorator + else: + return decorator(cls_or_func) + + +def skipRocmIfTorchInductor(msg="test doesn't currently work with torchinductor on the ROCm stack"): + return skipIfTorchInductor(msg=msg, condition=TEST_WITH_ROCM and TEST_WITH_TORCHINDUCTOR) # noqa: F821 + +def skipIfLegacyJitExecutor(msg="test doesn't currently work with legacy JIT executor"): + def decorator(fn): + if not isinstance(fn, type): + @wraps(fn) + def wrapper(*args, **kwargs): + if GRAPH_EXECUTOR == ProfilingMode.LEGACY: + raise unittest.SkipTest(msg) + else: + fn(*args, **kwargs) + return wrapper + + assert isinstance(fn, type) + if GRAPH_EXECUTOR == ProfilingMode.LEGACY: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = msg + + return fn + + + return decorator + + +# Run PyTorch tests with translation validation on. +TEST_WITH_TV = os.getenv('PYTORCH_TEST_WITH_TV') == '1' + +if TEST_WITH_TV: + torch.fx.experimental._config.translation_validation = True + +# Some tests take too long when dynamic_shapes is combined with +# translation_validation. Whenever that happens, we solve that by +# disabling translation_validation. +def disable_translation_validation_if_dynamic_shapes(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if torch._dynamo.config.dynamic_shapes: + # Turning TV off due to high latency on dynamic shapes. + torch.fx.experimental._config.translation_validation = False + return fn(*args, **kwargs) + return wrapper + + +# Determine whether to enable cuda memory leak check. +# CUDA mem leak check is expensive and thus we don't want to execute it on every +# test case / configuration. +# If this is True then CUDA memory leak checks are skipped. If this is false +# then CUDA memory leak checks are performed. 
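For orientation, a minimal sketch of how the Dynamo/Inductor helpers defined above are typically combined on a test class; the class and test names are invented for illustration, and the import path follows this module's own error messages (torch.testing._internal.common_utils).

import unittest
from torch.testing._internal.common_utils import (
    TestCase,
    run_tests,
    markDynamoStrictTest,
    skipIfTorchDynamo,
    skipIfTorchInductor,
    xfailIfTorchDynamo,
)

@markDynamoStrictTest  # marks the class strict: reset dynamo state, no error suppression
class ExampleDynamoTests(TestCase):
    @skipIfTorchDynamo("example: relies on eager-only behavior")
    def test_skipped_under_dynamo(self):
        self.assertEqual(1 + 1, 2)

    @xfailIfTorchDynamo  # expected failure only when PYTORCH_TEST_WITH_DYNAMO=1
    def test_known_dynamo_gap(self):
        ...  # stands in for logic that currently fails when dynamo is enabled

    @skipIfTorchInductor("example: inductor handles this path differently")
    def test_skipped_under_inductor(self):
        self.assertEqual(1 + 1, 2)

if __name__ == "__main__":
    run_tests()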
+# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135 +TestEnvironment.def_flag("TEST_CUDA_MEM_LEAK_CHECK", env_var="PYTORCH_TEST_CUDA_MEM_LEAK_CHECK") + +# True if CI is running TBB-enabled Pytorch +IS_TBB = "tbb" in os.getenv("BUILD_ENVIRONMENT", "") + +# Dict of NumPy dtype -> torch dtype (when the correspondence exists) +numpy_to_torch_dtype_dict = { + np.bool_ : torch.bool, + np.uint8 : torch.uint8, + np.uint16 : torch.uint16, + np.uint32 : torch.uint32, + np.uint64 : torch.uint64, + np.int8 : torch.int8, + np.int16 : torch.int16, + np.int32 : torch.int32, + np.int64 : torch.int64, + np.float16 : torch.float16, + np.float32 : torch.float32, + np.float64 : torch.float64, + np.complex64 : torch.complex64, + np.complex128 : torch.complex128 +} + + +# numpy dtypes like np.float64 are not instances, but rather classes. This leads to rather absurd cases like +# np.float64 != np.dtype("float64") but np.float64 == np.dtype("float64").type. +# Especially when checking against a reference we can't be sure which variant we get, so we simply try both. +def numpy_to_torch_dtype(np_dtype): + try: + return numpy_to_torch_dtype_dict[np_dtype] + except KeyError: + return numpy_to_torch_dtype_dict[np_dtype.type] + + +def has_corresponding_torch_dtype(np_dtype): + try: + numpy_to_torch_dtype(np_dtype) + return True + except KeyError: + return False + + +if IS_WINDOWS: + # Size of `np.intc` is platform defined. + # It is returned by functions like `bitwise_not`. + # On Windows `int` is 32-bit + # https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160 + numpy_to_torch_dtype_dict[np.intc] = torch.int + +# Dict of torch dtype -> NumPy dtype +torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()} +torch_to_numpy_dtype_dict.update({ + torch.bfloat16: np.float32, + torch.complex32: np.complex64 +}) + +def skipIfRocm(func=None, *, msg="test doesn't currently work on the ROCm stack"): + def dec_fn(fn): + reason = f"skipIfRocm: {msg}" + + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_WITH_ROCM: # noqa: F821 + raise unittest.SkipTest(reason) + else: + return fn(*args, **kwargs) + return wrapper + if func: + return dec_fn(func) + return dec_fn + +def runOnRocm(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_WITH_ROCM: # noqa: F821 + fn(*args, **kwargs) + else: + raise unittest.SkipTest("test currently only works on the ROCm stack") + return wrapper + +def skipIfXpu(func=None, *, msg="test doesn't currently work on the XPU stack"): + def dec_fn(fn): + reason = f"skipIfXpu: {msg}" + + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_XPU: + raise unittest.SkipTest(reason) + else: + return fn(*args, **kwargs) + return wrapper + if func: + return dec_fn(func) + return dec_fn + +def skipIfMps(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_MPS: + raise unittest.SkipTest("test doesn't currently work with MPS") + else: + fn(*args, **kwargs) + return wrapper + +# Skips a test on CUDA if ROCm is available and its version is lower than requested. 
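A small sketch of the dtype-mapping helpers above; the assertions follow directly from numpy_to_torch_dtype_dict and its inverse, and, as the comment above notes, both the class form (np.float64) and the instance form (np.dtype("float64")) are accepted.

import numpy as np
import torch
from torch.testing._internal.common_utils import (
    numpy_to_torch_dtype,
    torch_to_numpy_dtype_dict,
)

# Both spellings of the same NumPy dtype resolve to the same torch dtype.
assert numpy_to_torch_dtype(np.float64) is torch.float64
assert numpy_to_torch_dtype(np.dtype("float64")) is torch.float64
# The reverse map falls back to a wider NumPy type when there is no exact match.
assert torch_to_numpy_dtype_dict[torch.bfloat16] is np.float32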
+def skipIfRocmVersionLessThan(version=None): + def dec_fn(fn): + @wraps(fn) + def wrap_fn(self, *args, **kwargs): + if TEST_WITH_ROCM: # noqa: F821 + rocm_version = str(torch.version.hip) + rocm_version = rocm_version.split("-")[0] # ignore git sha + rocm_version_tuple = tuple(int(x) for x in rocm_version.split(".")) + if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version): + reason = f"ROCm {rocm_version_tuple} is available but {version} required" + raise unittest.SkipTest(reason) + return fn(self, *args, **kwargs) + return wrap_fn + return dec_fn + +def skipIfNotMiopenSuggestNHWC(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not TEST_WITH_MIOPEN_SUGGEST_NHWC: + raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation") + else: + fn(*args, **kwargs) + return wrapper + + +# Reverts the linalg backend back to default to make sure potential failures in one +# test do not affect other tests +def setLinalgBackendsToDefaultFinally(fn): + @wraps(fn) + def _fn(*args, **kwargs): + _preferred_backend = torch.backends.cuda.preferred_linalg_library() + try: + fn(*args, **kwargs) + finally: + torch.backends.cuda.preferred_linalg_library(_preferred_backend) + return _fn + + +# Context manager for setting deterministic flag and automatically +# resetting it to its original value +class DeterministicGuard: + def __init__(self, deterministic, *, warn_only=False, fill_uninitialized_memory=True): + self.deterministic = deterministic + self.warn_only = warn_only + self.fill_uninitialized_memory = fill_uninitialized_memory + + def __enter__(self): + self.deterministic_restore = torch.are_deterministic_algorithms_enabled() + self.warn_only_restore = torch.is_deterministic_algorithms_warn_only_enabled() + self.fill_uninitialized_memory_restore = torch.utils.deterministic.fill_uninitialized_memory + torch.use_deterministic_algorithms( + self.deterministic, + warn_only=self.warn_only) + torch.utils.deterministic.fill_uninitialized_memory = self.fill_uninitialized_memory + + def __exit__(self, exception_type, exception_value, traceback): + torch.use_deterministic_algorithms( + self.deterministic_restore, + warn_only=self.warn_only_restore) + torch.utils.deterministic.fill_uninitialized_memory = self.fill_uninitialized_memory_restore + +class AlwaysWarnTypedStorageRemoval: + def __init__(self, always_warn): + assert isinstance(always_warn, bool) + self.always_warn = always_warn + + def __enter__(self): + self.always_warn_restore = torch.storage._get_always_warn_typed_storage_removal() + torch.storage._set_always_warn_typed_storage_removal(self.always_warn) + + def __exit__(self, exception_type, exception_value, traceback): + torch.storage._set_always_warn_typed_storage_removal(self.always_warn_restore) + +# Context manager for setting cuda sync debug mode and reset it +# to original value +# we are not exposing it to the core because sync debug mode is +# global and thus not thread safe +class CudaSyncGuard: + def __init__(self, sync_debug_mode): + self.mode = sync_debug_mode + + def __enter__(self): + self.debug_mode_restore = torch.cuda.get_sync_debug_mode() + torch.cuda.set_sync_debug_mode(self.mode) + + def __exit__(self, exception_type, exception_value, traceback): + torch.cuda.set_sync_debug_mode(self.debug_mode_restore) + +# Context manager for setting torch.__future__.set_swap_module_params_on_conversion +# and automatically resetting it to its original value +class SwapTensorsGuard: + def __init__(self, use_swap_tensors): + 
self.use_swap_tensors = use_swap_tensors + + def __enter__(self): + self.swap_tensors_restore = torch.__future__.get_swap_module_params_on_conversion() + if self.use_swap_tensors is not None: + torch.__future__.set_swap_module_params_on_conversion(self.use_swap_tensors) + + def __exit__(self, exception_type, exception_value, traceback): + torch.__future__.set_swap_module_params_on_conversion(self.swap_tensors_restore) + +# This decorator can be used for API tests that call +# torch.use_deterministic_algorithms(). When the test is finished, it will +# restore the previous deterministic flag setting. +# +# If CUDA >= 10.2, this will set the environment variable +# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that +# setting is not thrown during the test unless the test changes that variable +# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be +# restored once the test is finished. +# +# Note that if a test requires CUDA to actually register the changed +# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because +# CUDA only checks the variable when the runtime initializes. Tests can be +# run inside a subprocess like so: +# +# import subprocess, sys, os +# script = ''' +# # Test code should go here +# ''' +# try: +# subprocess.check_output( +# [sys.executable, '-c', script], +# stderr=subprocess.STDOUT, +# cwd=os.path.dirname(os.path.realpath(__file__)), +# env=os.environ.copy()) +# except subprocess.CalledProcessError as e: +# error_message = e.output.decode('utf-8') +# # Handle exceptions raised by the subprocess here +# +def wrapDeterministicFlagAPITest(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + with DeterministicGuard( + torch.are_deterministic_algorithms_enabled(), + warn_only=torch.is_deterministic_algorithms_warn_only_enabled()): + class CuBLASConfigGuard: + cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG' + + def __enter__(self): + self.is_cuda10_2_or_higher = ( + (torch.version.cuda is not None) + and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2])) + if self.is_cuda10_2_or_higher: + self.cublas_config_restore = os.environ.get(self.cublas_var_name) + os.environ[self.cublas_var_name] = ':4096:8' + + def __exit__(self, exception_type, exception_value, traceback): + if self.is_cuda10_2_or_higher: + cur_cublas_config = os.environ.get(self.cublas_var_name) + if self.cublas_config_restore is None: + if cur_cublas_config is not None: + del os.environ[self.cublas_var_name] + else: + os.environ[self.cublas_var_name] = self.cublas_config_restore + with CuBLASConfigGuard(): + fn(*args, **kwargs) + return wrapper + +# This decorator can be used for API tests that want to safely call +# torch.__future__.set_swap_module_params_on_conversion. `swap` can be set to +# True, False or None where None indicates that the context manager does not +# set the flag. When the test is finished, it will restore the previous swap +# flag setting. 
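A minimal sketch of how the guard context managers defined above are used; the tensor shape and the "warn" sync debug mode value are illustrative choices.

import torch
from torch.testing._internal.common_utils import CudaSyncGuard, DeterministicGuard

prior = torch.are_deterministic_algorithms_enabled()
with DeterministicGuard(True, warn_only=False):
    # Deterministic algorithms are forced only inside the block.
    assert torch.are_deterministic_algorithms_enabled()
# The previous global setting is restored on exit, even if the body raised.
assert torch.are_deterministic_algorithms_enabled() == prior

if torch.cuda.is_available():
    # "warn" makes CUDA report implicit synchronizations while the block runs;
    # the prior sync debug mode is restored afterwards.
    with CudaSyncGuard("warn"):
        torch.ones(4, device="cuda").sum().item()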
+def wrapSwapTensorsTest(swap=None): + def dec_fn(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + with SwapTensorsGuard(swap): + fn(*args, **kwargs) + return wrapper + return dec_fn + +# test parametrizer for swapping +class swap(_TestParametrizer): + def __init__(self, swap_values): + super().__init__() + self.swap_values = swap_values + + def _parametrize_test(self, test, generic_cls, device_cls): + for swap in self.swap_values: + yield wrapSwapTensorsTest(swap)(test), f'swap_{swap}', {}, lambda _: [] + +def skipIfCompiledWithoutNumpy(fn): + # Even if the numpy module is present, if `USE_NUMPY=0` is used during the + # build, numpy tests will fail + numpy_support = TEST_NUMPY + if numpy_support: + try: + # The numpy module is present, verify that PyTorch is compiled with + # numpy support + torch.from_numpy(np.array([2, 2])) + except RuntimeError: + numpy_support = False + + @wraps(fn) + def wrapper(*args, **kwargs): + if not numpy_support: + raise unittest.SkipTest("PyTorch was compiled without numpy support") + else: + fn(*args, **kwargs) + return wrapper + +def _test_function(fn, device): + def run_test_function(self): + return fn(self, device) + return run_test_function + +def skipIfNoXNNPACK(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not torch.backends.xnnpack.enabled: + raise unittest.SkipTest('XNNPACK must be enabled for these tests. Please build with USE_XNNPACK=1.') + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoLapack(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not torch._C.has_lapack: + raise unittest.SkipTest('PyTorch compiled without Lapack') + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNotRegistered(op_name, message): + """Wraps the decorator to hide the import of the `core`. + + Args: + op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`. + message: message to fail with. 
+ + Usage: + @skipIfNotRegistered('MyOp', 'MyOp is not linked!') + This will check if 'MyOp' is in the caffe2.python.core + """ + if not BUILD_WITH_CAFFE2: + return unittest.skip("Pytorch is compiled without Caffe2") + try: + from caffe2.python import core + skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS, + message) + except ImportError: + skipper = unittest.skip("Cannot import `caffe2.python.core`") + return skipper + +def _decide_skip_caffe2(expect_caffe2, reason): + def skip_dec(func): + @wraps(func) + def wrapper(self): + if torch.onnx._CAFFE2_ATEN_FALLBACK != expect_caffe2: + raise unittest.SkipTest(reason) + return func(self) + return wrapper + return skip_dec + +skipIfCaffe2 = _decide_skip_caffe2(False, "Not compatible with Caffe2") +skipIfNoCaffe2 = _decide_skip_caffe2(True, "Caffe2 is not available") + +def skipIfNoSciPy(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not TEST_SCIPY: + raise unittest.SkipTest("test require SciPy, but SciPy not found") + else: + fn(*args, **kwargs) + return wrapper + + +def skipIfTBB(message="This test makes TBB sad"): + def dec_fn(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if IS_TBB: + raise unittest.SkipTest(message) + else: + fn(*args, **kwargs) + return wrapper + return dec_fn + + +def skip_if_pytest(fn): + @wraps(fn) + def wrapped(*args, **kwargs): + if "PYTEST_CURRENT_TEST" in os.environ: + raise unittest.SkipTest("does not work under pytest") + return fn(*args, **kwargs) + + return wrapped + + +def slowTest(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not TEST_WITH_SLOW: # noqa: F821 + raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test") + else: + fn(*args, **kwargs) + wrapper.__dict__['slow_test'] = True + return wrapper + + +def slowTestIf(condition): + return slowTest if condition else lambda fn: fn + + +def skipCUDAMemoryLeakCheckIf(condition): + def dec(fn): + if getattr(fn, '_do_cuda_memory_leak_check', True): # if current True + fn._do_cuda_memory_leak_check = not condition + return fn + return dec + +def skipCUDANonDefaultStreamIf(condition): + def dec(fn): + if getattr(fn, '_do_cuda_non_default_stream', True): # if current True + fn._do_cuda_non_default_stream = not condition + return fn + return dec + +def suppress_warnings(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + fn(*args, **kwargs) + return wrapper + + +def to_gpu(obj, type_map=None): + if type_map is None: + type_map = {} + if isinstance(obj, torch.Tensor): + assert obj.is_leaf + t = type_map.get(obj.dtype, obj.dtype) + with torch.no_grad(): + res = obj.clone().to(dtype=t, device="cuda") + res.requires_grad = obj.requires_grad + return res + elif torch.is_storage(obj): + return obj.new().resize_(obj.size()).copy_(obj) + elif isinstance(obj, list): + return [to_gpu(o, type_map) for o in obj] + elif isinstance(obj, tuple): + return tuple(to_gpu(o, type_map) for o in obj) + else: + return deepcopy(obj) + + +def get_function_arglist(func): + return inspect.getfullargspec(func).args + + +def set_rng_seed(seed): + torch.manual_seed(seed) + random.seed(seed) + if TEST_NUMPY: + np.random.seed(seed) + + +disable_functorch = torch._C._DisableFuncTorch + + +@contextlib.contextmanager +def freeze_rng_state(): + # no_dispatch needed for test_composite_compliance + # Some OpInfos use freeze_rng_state for rng determinism, but + # test_composite_compliance overrides dispatch for all torch functions + # which we need to disable to get 
and set rng state + with no_dispatch(), disable_functorch(): + rng_state = torch.get_rng_state() + if torch.cuda.is_available(): + cuda_rng_state = torch.cuda.get_rng_state() + try: + yield + finally: + # Modes are not happy with torch.cuda.set_rng_state + # because it clones the state (which could produce a Tensor Subclass) + # and then grabs the new tensor's data pointer in generator.set_state. + # + # In the long run torch.cuda.set_rng_state should probably be + # an operator. + # + # NB: Mode disable is to avoid running cross-ref tests on thes seeding + with no_dispatch(), disable_functorch(): + if torch.cuda.is_available(): + torch.cuda.set_rng_state(cuda_rng_state) + torch.set_rng_state(rng_state) + +@contextlib.contextmanager +def set_default_dtype(dtype): + saved_dtype = torch.get_default_dtype() + torch.set_default_dtype(dtype) + try: + yield + finally: + torch.set_default_dtype(saved_dtype) + +@contextlib.contextmanager +def set_default_tensor_type(tensor_type): + saved_tensor_type = torch.tensor([]).type() + torch.set_default_tensor_type(tensor_type) + try: + yield + finally: + torch.set_default_tensor_type(saved_tensor_type) + +def iter_indices(tensor): + if tensor.dim() == 0: + return range(0) + if tensor.dim() == 1: + return range(tensor.size(0)) + return product(*(range(s) for s in tensor.size())) + + +def is_iterable(obj): + try: + iter(obj) + return True + except TypeError: + return False + + +def is_iterable_of_tensors(iterable, include_empty=False): + """ Returns True if iterable is an iterable of tensors and False o.w. + + If the iterable is empty, the return value is :attr:`include_empty` + """ + # Tensor itself is iterable so we check this first + if isinstance(iterable, torch.Tensor): + return False + + try: + if len(iterable) == 0: + return include_empty + + for t in iter(iterable): + if not isinstance(t, torch.Tensor): + return False + + except TypeError as te: + return False + + return True + + +class CudaNonDefaultStream: + def __enter__(self): + # Before starting CUDA test save currently active streams on all + # CUDA devices and set new non default streams to all CUDA devices + # to ensure CUDA tests do not use default stream by mistake. + beforeDevice = torch.cuda.current_device() + self.beforeStreams = [] + for d in range(torch.cuda.device_count()): + self.beforeStreams.append(torch.cuda.current_stream(d)) + deviceStream = torch.cuda.Stream(device=d) + self.beforeStreams[-1].synchronize() + torch._C._cuda_setStream(stream_id=deviceStream.stream_id, + device_index=deviceStream.device_index, + device_type=deviceStream.device_type) + torch._C._cuda_setDevice(beforeDevice) + + def __exit__(self, exec_type, exec_value, traceback): + # After completing CUDA test load previously active streams on all + # CUDA devices. 
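A short sketch of the RNG and default-dtype context managers defined above; the shapes and dtypes are arbitrary.

import torch
from torch.testing._internal.common_utils import freeze_rng_state, set_default_dtype

with freeze_rng_state():
    a = torch.rand(3)  # consumes RNG state only inside the block
b = torch.rand(3)      # the global RNG state was restored, so this repeats the draw
assert torch.equal(a, b)

prior = torch.get_default_dtype()
with set_default_dtype(torch.float64):
    assert torch.tensor(1.0).dtype is torch.float64
assert torch.get_default_dtype() is prior  # restored on exit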
+ beforeDevice = torch.cuda.current_device() + for d in range(torch.cuda.device_count()): + torch._C._cuda_setStream(stream_id=self.beforeStreams[d].stream_id, + device_index=self.beforeStreams[d].device_index, + device_type=self.beforeStreams[d].device_type) + torch._C._cuda_setDevice(beforeDevice) + +class CudaMemoryLeakCheck: + def __init__(self, testcase, name=None): + self.name = testcase.id() if name is None else name + self.testcase = testcase + + # initialize context & RNG to prevent false positive detections + # when the test is the first to initialize those + from torch.testing._internal.common_cuda import initialize_cuda_context_rng + initialize_cuda_context_rng() + + # Stores CUDA memory data provided by PyTorch's caching allocator and + # the CUDA driver. + # + # NOTE: The undocumented torch.cuda.mem_get_info() returns + # (#free bytes, #total bytes available) on the GPU + def __enter__(self): + self.caching_allocator_befores = [] + self.driver_befores = [] + + # Performs a gc if required (required if any CUDA memory is held) + num_devices = torch.cuda.device_count() + for i in range(num_devices): + caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) + # NOTE: gc is based exclusively on caching allocator memory + # because the driver will always have some bytes in use (context size?) + if caching_allocator_mem_allocated > 0: + gc.collect() + torch._C._cuda_clearCublasWorkspaces() + torch.cuda.empty_cache() + break + + # Acquires caching allocator and driver statistics before the test is run + for i in range(num_devices): + self.caching_allocator_befores.append(torch.cuda.memory_allocated(i)) + bytes_free, bytes_total = torch.cuda.mem_get_info(i) + driver_mem_allocated = bytes_total - bytes_free + self.driver_befores.append(driver_mem_allocated) + + def __exit__(self, exec_type, exec_value, traceback): + # Don't check for leaks if an exception was thrown + if exec_type is not None: + return + + # Compares caching allocator before/after statistics + # An increase in allocated memory is a discrepancy indicating a possible + # memory leak + discrepancy_detected = False + num_devices = torch.cuda.device_count() + for i in range(num_devices): + # avoid counting cublasWorkspace allocations + torch._C._cuda_clearCublasWorkspaces() + caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) + + if caching_allocator_mem_allocated > self.caching_allocator_befores[i]: + discrepancy_detected = True + break + + # Short-circuits if no discrepancy detected + if not discrepancy_detected: + return + + # Validates the discrepancy persists after garbage collection and + # is confirmed by the driver API + + # NOTE: driver API iscrepancies alone are ignored because with the jiterator + # some tests may permanently increase the CUDA context size and + # that will appear as a driver memory leak but is the expected behavior. 
+ + # GCs and clears the cache + gc.collect() + torch.cuda.empty_cache() + + for i in range(num_devices): + + discrepancy_detected = True + + # Query memory multiple items to ensure leak was not transient + for n in range(3): + caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) + bytes_free, bytes_total = torch.cuda.mem_get_info(i) + driver_mem_allocated = bytes_total - bytes_free + + caching_allocator_discrepancy = False + driver_discrepancy = False + + if caching_allocator_mem_allocated > self.caching_allocator_befores[i]: + caching_allocator_discrepancy = True + + if driver_mem_allocated > self.driver_befores[i]: + driver_discrepancy = True + + if not (caching_allocator_discrepancy or driver_discrepancy): + # Leak was false positive, exit loop + discrepancy_detected = False + break + + if not discrepancy_detected: + continue + + if caching_allocator_discrepancy and not driver_discrepancy: + # Just raises a warning if the leak is not validated by the + # driver API + # NOTE: this may be a problem with how the caching allocator collects its + # statistics or a leak too small to trigger the allocation of an + # additional block of memory by the CUDA driver + msg = ("CUDA caching allocator reports a memory leak not " + "verified by the driver API in {}! " + "Caching allocator allocated memory was {} and is now reported as {} " + "on device {}. " + "CUDA driver allocated memory was {} and is now {}.").format( + self.name, + self.caching_allocator_befores[i], + caching_allocator_mem_allocated, + i, + self.driver_befores[i], + driver_mem_allocated) + warnings.warn(msg) + elif caching_allocator_discrepancy and driver_discrepancy: + # A caching allocator discrepancy validated by the driver API is a + # failure (except on ROCm, see below) + msg = ("CUDA driver API confirmed a leak in {}! " + "Caching allocator allocated memory was {} and is now reported as {} " + "on device {}. " + "CUDA driver allocated memory was {} and is now {}.").format( + self.name, + self.caching_allocator_befores[i], + caching_allocator_mem_allocated, + i, + self.driver_befores[i], + driver_mem_allocated) + + raise RuntimeError(msg) + +@contextmanager +def skip_exception_type(exc_type): + try: + yield + except exc_type as e: + raise unittest.SkipTest(f"not implemented: {e}") from e + +@contextmanager +def print_repro_on_failure(repro_str): + try: + yield + except unittest.SkipTest: + raise + except Exception as e: + # NB: Hacking the exception args is the cleanest way I've found to append + # failure reproduction info without poisoning the stack trace. 
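A minimal sketch of skip_exception_type, defined just above: any NotImplementedError raised inside the block is reported as a skip rather than a failure. The test class and method names here are invented.

from torch.testing._internal.common_utils import TestCase, run_tests, skip_exception_type

class ExampleSkipTests(TestCase):
    def test_unimplemented_backend_path(self):
        with skip_exception_type(NotImplementedError):
            raise NotImplementedError("example: layout not supported here")

if __name__ == "__main__":
    run_tests()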
+ if len(e.args) >= 1: + e.args = (f"{e.args[0]}\n{repro_str}", *e.args[1:]) + raise + +# "min_satisfying_examples" setting has been deprecated in hypothesis +# 3.56.0 and removed in hypothesis 4.x +try: + import hypothesis + + def settings(*args, **kwargs): + if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0): + kwargs.pop('min_satisfying_examples') + return hypothesis.settings(*args, **kwargs) + + + hypothesis.settings.register_profile( + "pytorch_ci", + settings( + derandomize=True, + suppress_health_check=[hypothesis.HealthCheck.too_slow], + database=None, + max_examples=50, + verbosity=hypothesis.Verbosity.normal)) + hypothesis.settings.register_profile( + "dev", + settings( + suppress_health_check=[hypothesis.HealthCheck.too_slow], + database=None, + max_examples=10, + verbosity=hypothesis.Verbosity.normal)) + hypothesis.settings.register_profile( + "debug", + settings( + suppress_health_check=[hypothesis.HealthCheck.too_slow], + database=None, + max_examples=1000, + verbosity=hypothesis.Verbosity.verbose)) + + hypothesis.settings.load_profile( + "pytorch_ci" if IS_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev') # noqa: F821 + ) +except ImportError: + print('Fail to import hypothesis in common_utils, tests are not derandomized') + +# Used in check_if_enable to see if a test method should be disabled by an issue, +# sanitizes a test method name from appended suffixes by @dtypes parametrization. +# e.g., an issue with title "DISABLED test_bitwise_ops (__main__.TestBinaryUfuncs)" should +# disabled ALL parametrized test_bitwise_ops tests, such test_bitwise_ops_cuda_int32 +def remove_device_and_dtype_suffixes(test_name: str) -> str: + # import statement is localized to avoid circular dependency issues with common_device_type.py + from torch.testing._internal.common_device_type import get_device_type_test_bases + device_suffixes = [x.device_type for x in get_device_type_test_bases()] + dtype_suffixes = [str(dt)[len("torch."):] for dt in get_all_dtypes()] + + test_name_chunks = test_name.split("_") + if len(test_name_chunks) > 0 and test_name_chunks[-1] in dtype_suffixes: + if len(test_name_chunks) > 1 and test_name_chunks[-2] in device_suffixes: + return "_".join(test_name_chunks[0:-2]) + return "_".join(test_name_chunks[0:-1]) + return test_name + + +def check_if_enable(test: unittest.TestCase): + classname = str(test.__class__).split("'")[1].split(".")[-1] + sanitized_testname = remove_device_and_dtype_suffixes(test._testMethodName) + + def matches_test(target: str): + target_test_parts = target.split() + if len(target_test_parts) < 2: + # poorly formed target test name + return False + target_testname = target_test_parts[0] + target_classname = target_test_parts[1][1:-1].split(".")[-1] + # if test method name or its sanitized version exactly matches the disabled + # test method name AND allow non-parametrized suite names to disable + # parametrized ones (TestSuite disables TestSuiteCPU) + return classname.startswith(target_classname) and (target_testname in (test._testMethodName, sanitized_testname)) + + if any(matches_test(x) for x in slow_tests_dict.keys()): + getattr(test, test._testMethodName).__dict__['slow_test'] = True + if not TEST_WITH_SLOW: # noqa: F821 + raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test") + + if not IS_SANDCASTLE: # noqa: F821 + should_skip = False + skip_msg = "" + + for disabled_test, (issue_url, platforms) in disabled_tests_dict.items(): + if matches_test(disabled_test): 
+ platform_to_conditional: Dict = { + "mac": IS_MACOS, + "macos": IS_MACOS, + "win": IS_WINDOWS, + "windows": IS_WINDOWS, + "linux": IS_LINUX, + "rocm": TEST_WITH_ROCM, # noqa: F821 + "xpu": TEST_XPU, # noqa: F821 + "asan": TEST_WITH_ASAN, # noqa: F821 + "dynamo": TEST_WITH_TORCHDYNAMO, # noqa: F821 + "inductor": TEST_WITH_TORCHINDUCTOR, # noqa: F821 + "slow": TEST_WITH_SLOW, # noqa: F821 + } + + invalid_platforms = list(filter(lambda p: p not in platform_to_conditional, platforms)) + if len(invalid_platforms) > 0: + invalid_plats_str = ", ".join(invalid_platforms) + valid_plats = ", ".join(platform_to_conditional.keys()) + + print(f"Test {disabled_test} is disabled for some unrecognized ", + f"platforms: [{invalid_plats_str}]. Please edit issue {issue_url} to fix the platforms ", + "assigned to this flaky test, changing \"Platforms: ...\" to a comma separated ", + f"subset of the following (or leave it blank to match all platforms): {valid_plats}") + + # Sanitize the platforms list so that we continue to disable the test for any valid platforms given + platforms = list(filter(lambda p: p in platform_to_conditional, platforms)) + + if platforms == [] or any(platform_to_conditional[platform] for platform in platforms): + should_skip = True + skip_msg = f"Test is disabled because an issue exists disabling it: {issue_url}" \ + f" for {'all' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " \ + "If you're seeing this on your local machine and would like to enable this test, " \ + "please make sure CI is not set and you are not using the flag --import-disabled-tests." + break + + if should_skip and not RERUN_DISABLED_TESTS: + # Skip the disabled test when not running under --rerun-disabled-tests verification mode + raise unittest.SkipTest(skip_msg) + + if not should_skip and RERUN_DISABLED_TESTS: + skip_msg = "Test is enabled but --rerun-disabled-tests verification mode is set, so only" \ + " disabled tests are run" + raise unittest.SkipTest(skip_msg) + + if TEST_SKIP_FAST: # noqa: F821 + if hasattr(test, test._testMethodName) and not getattr(test, test._testMethodName).__dict__.get('slow_test', False): + raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST") + + +# `TestCase.assertEqual` is very permissive and coerced the inputs into a format that could be compared. This is very +# convenient when writing tests, but not so much while reviewing them. By default, the comparison `Pair` framework of +# `torch.testing._comparison.are_equal`, used for example by the public testing function +# `torch.testing.assert_close`, is more strict. In order to use the same framework and thus reduce the divergence +# between internal and external comparison logic as much as possible, we define some "relaxed" pairs here. They only +# change the supported inputs, but the comparison logic is the same. +# TODO: Revisit the relaxed pairs and check how much work it is to fix the tests that would fail without the relaxation. + +class RelaxedBooleanPair(BooleanPair): + """Pair for boolean-like inputs. + + In contrast to the builtin :class:`BooleanPair`, this class also supports one input being a number or a single + element tensor-like. + """ + _supported_number_types = NumberPair(0, 0)._supported_types + + def _process_inputs(self, actual, expected, *, id): + # We require only one of the inputs of the inputs to be a boolean and the other can also be a boolean, a + # number, or a single element tensor or array, whereas in default BooleanPair both inputs have to be booleans. 
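As a small illustration of the disabled/slow-test matching above, remove_device_and_dtype_suffixes strips the device and dtype suffixes appended by parametrization, so an issue titled with the base test name disables every variant; the test names below are made up.

from torch.testing._internal.common_utils import remove_device_and_dtype_suffixes

assert remove_device_and_dtype_suffixes("test_foo_cpu_float32") == "test_foo"
assert remove_device_and_dtype_suffixes("test_foo_float32") == "test_foo"
assert remove_device_and_dtype_suffixes("test_foo") == "test_foo"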
+ tensor_or_array_types: Tuple[Type, ...] = (torch.Tensor, np.ndarray) + other_supported_types = (*self._supported_types, *self._supported_number_types, *tensor_or_array_types) + if not ( + (isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types)) + or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types)) + ): + self._inputs_not_supported() + + return [self._to_bool(input, id=id) for input in (actual, expected)] + + def _to_bool(self, bool_like, *, id): + if isinstance(bool_like, np.number): + return bool(bool_like.item()) + elif type(bool_like) in self._supported_number_types: + return bool(bool_like) + elif isinstance(bool_like, (torch.Tensor, np.ndarray)): + numel = bool_like.numel() if isinstance(bool_like, torch.Tensor) else bool_like.size + if numel > 1: + self._fail( + ValueError, + f"Only single element tensor-likes can be compared against a boolean. " + f"Got {numel} elements instead.", + id=id + ) + + return bool(bool_like.item()) + else: + return super()._to_bool(bool_like, id=id) + + +class RelaxedNumberPair(NumberPair): + """Pair for number-like inputs. + + In contrast to the builtin :class:`NumberPair`, this class also supports one input being a single element + tensor-like or a :class:`enum.Enum`. (D)Type checks are disabled, meaning comparing 1 to 1.0 succeeds even when + ``check_dtype=True`` is passed. + + In addition, this class uses looser default tolerances for :class:`float` and :class:`complex` inputs. Also + supports overriding the absolute and relative tolerance through the ``@precisionOverride`` and + ``@toleranceOverride`` decorators. + """ + _TYPE_TO_DTYPE = { + int: torch.int64, + float: torch.float32, + complex: torch.complex64, + } + + def __init__( + self, actual, expected, *, rtol_override=0.0, atol_override=0.0, check_dtype=None, **other_parameters + ) -> None: + super().__init__(actual, expected, check_dtype=False, **other_parameters) + self.rtol = max(self.rtol, rtol_override) + self.atol = max(self.atol, atol_override) + + def _process_inputs(self, actual, expected, *, id): + # We require only one of the inputs of the inputs to be a number and the other can also be a number or a single + # element tensor or array, whereas in default NumberPair both inputs have to be numbers. + tensor_or_array_types: Tuple[Type, ...] = (torch.Tensor, np.ndarray) + other_supported_types = (*self._supported_types, *tensor_or_array_types) + if not ( + (isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types)) + or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types)) + ): + self._inputs_not_supported() + + return [self._to_number(input, id=id) for input in (actual, expected)] + + def _to_number(self, number_like, *, id): + if isinstance(number_like, (torch.Tensor, np.ndarray)): + numel = number_like.numel() if isinstance(number_like, torch.Tensor) else number_like.size + if numel > 1: + self._fail( + ValueError, + f"Only single element tensor-likes can be compared against a number. " + f"Got {numel} elements instead.", + id=id + ) + number = number_like.item() + if isinstance(number, bool): + number = int(number) + + return number + elif isinstance(number_like, Enum): + return int(number_like) # type: ignore[call-overload] + else: + return super()._to_number(number_like, id=id) + + +class TensorOrArrayPair(TensorLikePair): + """Pair for tensor-like inputs. 
+ + On the one hand this class is stricter than the builtin :class:`TensorLikePair` since it only allows instances of + :class:`torch.Tensor` and :class:`numpy.ndarray` rather than allowing any tensor-like than can be converted into a + tensor. On the other hand this class is looser since it converts all inputs into tensors with no regard of their + relationship, e.g. comparing a :class:`torch.Tensor` to :class:`numpy.ndarray` is fine. + + In addition, this class supports overriding the absolute and relative tolerance through the ``@precisionOverride`` + and ``@toleranceOverride`` decorators. + """ + def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters): + super().__init__(actual, expected, **other_parameters) + self.rtol = max(self.rtol, rtol_override) + self.atol = max(self.atol, atol_override) + + def _process_inputs(self, actual, expected, *, id, allow_subclasses): + self._check_inputs_isinstance(actual, expected, cls=(torch.Tensor, np.ndarray)) + + actual, expected = (self._to_tensor(input) for input in (actual, expected)) + for tensor in (actual, expected): + self._check_supported(tensor, id=id) + return actual, expected + + +class TypedStoragePair(TensorLikePair): + """Pair for :class:`torch.storage.TypedStorage` inputs.""" + def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters): + self._check_inputs_isinstance(actual, expected, cls=torch.storage.TypedStorage) + super().__init__(actual, expected, **other_parameters) + self.rtol = max(self.rtol, rtol_override) + self.atol = max(self.atol, atol_override) + + def _to_tensor(self, typed_storage): + return torch.tensor( + typed_storage._untyped_storage, + dtype={ + torch.quint8: torch.uint8, + torch.quint4x2: torch.uint8, + torch.quint2x4: torch.uint8, + torch.qint32: torch.int32, + torch.qint8: torch.int8 + }.get(typed_storage.dtype, typed_storage.dtype), + device=typed_storage.device, + ) + + +class UnittestPair(Pair): + """Fallback ABC pair that handles non-numeric inputs. + + To avoid recreating the mismatch messages of :meth:`unittest.TestCase.assertEqual`, this pair simply wraps it in + order to use it with the :class:`Pair` "framework" from :func:`are_equal`. + + Define the :attr:`UnittestPair.CLS` in a subclass to indicate which class(es) of the inputs the pair should support. + """ + CLS: Union[Type, Tuple[Type, ...]] + TYPE_NAME: Optional[str] = None + + def __init__(self, actual, expected, **other_parameters): + self._check_inputs_isinstance(actual, expected, cls=self.CLS) + super().__init__(actual, expected, **other_parameters) + + def compare(self): + test_case = unittest.TestCase() + + try: + return test_case.assertEqual(self.actual, self.expected) + except test_case.failureException as error: + msg = str(error) + + type_name = self.TYPE_NAME or (self.CLS if isinstance(self.CLS, type) else self.CLS[0]).__name__ + self._fail(AssertionError, f"{type_name.title()} comparison failed: {msg}") + + +class StringPair(UnittestPair): + CLS = (str, bytes) + TYPE_NAME = "string" + + +class SetPair(UnittestPair): + CLS = set + + +class TypePair(UnittestPair): + CLS = type + + +class ObjectPair(UnittestPair): + CLS = object + + +# This implements a variant of assertRaises/assertRaisesRegex where we first test +# if the exception is NotImplementedError, and if so just skip the test instead +# of failing it. 
+# +# This is implemented by inheriting from the (private) implementation of +# assertRaises from unittest.case, and slightly tweaking it for this new +# behavior. The year is 2021: this private class hierarchy hasn't changed since +# 2010, seems low risk to inherit from. +class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext): + def __exit__(self, exc_type, exc_value, tb): + if exc_type is not None and issubclass(exc_type, NotImplementedError): + self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined] + return super().__exit__(exc_type, exc_value, tb) + + +@contextmanager +def set_warn_always_context(new_val: bool): + old_val = torch.is_warn_always_enabled() + torch.set_warn_always(new_val) + try: + yield + finally: + torch.set_warn_always(old_val) + + +class NoTest: + # causes pytest to not recognize this class as a test + __test__ = False + + +class TestCase(expecttest.TestCase): + # NOTE: "precision" lets classes and generated tests set minimum + # atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for + # example. + # NOTE: "rel_tol" lets classes and generated tests set minimum + # rtol values when comparing tensors. Used by @toleranceOverride, for example. + _precision: float = 0 + _rel_tol: float = 0 + + # Toggles whether to assert that `torch.get_default_dtype()` returns + # `torch.float` when `setUp` and `tearDown` are called. + _default_dtype_check_enabled: bool = False + + # Always use difflib to print diffs on multi line equality. + # Undocumented feature in unittest + _diffThreshold = sys.maxsize + maxDiff = None + + # checker to early terminate test suite if unrecoverable failure occurs. + def _should_stop_test_suite(self): + if torch.cuda.is_initialized(): + # CUDA device side error will cause subsequence test cases to fail. + # stop entire test suite if catches RuntimeError during torch.cuda.synchronize(). + try: + torch.cuda.synchronize() + except RuntimeError as rte: + print("TEST SUITE EARLY TERMINATION due to torch.cuda.synchronize() failure", file=sys.stderr) + print(str(rte), file=sys.stderr) + return True + return False + else: + return False + + @property + def precision(self) -> float: + return self._precision + + @precision.setter + def precision(self, prec: float) -> None: + self._precision = prec + + @property + def rel_tol(self) -> float: + return self._rel_tol + + @rel_tol.setter + def rel_tol(self, prec: float) -> None: + self._rel_tol = prec + + _do_cuda_memory_leak_check = False + _do_cuda_non_default_stream = False + + # When True, if a test case raises a NotImplementedError, instead of failing + # the test, skip it instead. + _ignore_not_implemented_error = False + + def __init__(self, method_name='runTest'): + super().__init__(method_name) + + test_method = getattr(self, method_name, None) + if test_method is not None: + # Wraps the tested method if we should do CUDA memory check. + if TEST_CUDA_MEM_LEAK_CHECK: # noqa: F821 + self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True) + # FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044 + if self._do_cuda_memory_leak_check and not IS_WINDOWS: + self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors) + + # Wraps the tested method if we should enforce non default CUDA stream. 
+ self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True) + if self._do_cuda_non_default_stream and not IS_WINDOWS: + self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream) + + if self._ignore_not_implemented_error: + self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError)) + + if PRINT_REPRO_ON_FAILURE: # noqa: F821 + env_var_prefix = TestEnvironment.repro_env_var_prefix() + try: + def _get_rel_test_path(abs_test_path): + # Attempt to get relative path based on the "test" dir. + # In CI, the working dir is not guaranteed to be the base repo dir so + # we can't just compute relative path from that. + parts = Path(abs_test_path).parts + for i, part in enumerate(parts): + if part == "test": + base_dir = os.path.join(*parts[:i]) if i > 0 else '' + return os.path.relpath(abs_test_path, start=base_dir) + + # Can't determine containing dir; just return the test filename. + # The path isn't strictly correct but it's arguably better than nothing. + return os.path.split(abs_test_path)[1] + + test_filename = _get_rel_test_path(inspect.getfile(type(self))) + repro_str = f""" +To execute this test, run the following from the base repo dir: + {env_var_prefix} python {test_filename} -k {method_name} + +This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0""" + self.wrap_with_policy( + method_name, + lambda repro_str=repro_str: print_repro_on_failure(repro_str=repro_str)) + except Exception as e: + # Don't fail entirely if we can't get the test filename + log.info("could not print repro string", extra=str(e)) + + def assertLeaksNoCudaTensors(self, name=None): + name = self.id() if name is None else name + return CudaMemoryLeakCheck(self, name) + + def enforceNonDefaultStream(self): + return CudaNonDefaultStream() + + def assertExpectedInline(self, actual, expect, skip=0): + return super().assertExpectedInline(actual if isinstance(actual, str) else str(actual), expect, skip + 1) + + # Munges exceptions that internally contain stack traces, using munge_exc + def assertExpectedInlineMunged( + self, exc_type, callable, expect, *, suppress_suffix=True + ): + try: + callable() + except exc_type as e: + self.assertExpectedInline( + munge_exc(e, suppress_suffix=suppress_suffix, skip=1), expect, skip=1 + ) + return + self.fail(msg="Did not raise when expected to") + + def assertLogs(self, logger=None, level=None): + if logger is None: + logger = logging.getLogger("torch") + return super().assertLogs(logger, level) + + def assertNoLogs(self, logger=None, level=None): + if logger is None: + logger = logging.getLogger("torch") + return super().assertNoLogs(logger, level) + + def wrap_with_cuda_policy(self, method_name, policy): + test_method = getattr(self, method_name) + # the import below may initialize CUDA context, so we do it only if + # self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream + # is True. + # TODO: sure looks like we unconditionally initialize the context here + # -- ezyang + from torch.testing._internal.common_cuda import TEST_CUDA + fullname = self.id().lower() # class_name.method_name + if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname): + setattr(self, method_name, self.wrap_method_with_policy(test_method, policy)) + + def wrap_with_policy(self, method_name, policy): + test_method = getattr(self, method_name) + setattr(self, method_name, self.wrap_method_with_policy(test_method, policy)) + + # A policy is a zero-argument function that returns a context manager. 
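+    # For example, ``self.assertLeaksNoCudaTensors`` and
+    # ``lambda: skip_exception_type(NotImplementedError)`` (both used in
+    # ``__init__`` above) are policies: each call builds a fresh context manager.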
+ # We don't take the context manager directly as it may be necessary to + # construct it once per test method + def wrap_method_with_policy(self, method, policy): + # Assumes that `method` is the tested function in `self`. + # NOTE: Python Exceptions (e.g., unittest.Skip) keeps objects in scope + # alive, so this cannot be done in setUp and tearDown because + # tearDown is run unconditionally no matter whether the test + # passes or not. For the same reason, we can't wrap the `method` + # call in try-finally and always do the check. + @wraps(method) + def wrapper(self, *args, **kwargs): + with policy(): + method(*args, **kwargs) + return types.MethodType(wrapper, self) + + def wrap_with_cuda_memory_check(self, method): + return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors) + + def _run_custom(self, result=None): + using_unittest = isinstance(result, unittest.TestResult) + + super_run = super().run + test_cls = super_run.__self__ + + # Are we compiling? + compiled = TEST_WITH_TORCHDYNAMO or TEST_WITH_AOT_EAGER or TEST_WITH_TORCHINDUCTOR # noqa: F821 + # Is the class strict and compiling? + strict_default = False + if compiled: + try: + path = inspect.getfile(type(test_cls)) + full_path = os.path.abspath(path) + match = re.match(r".*/test/(.*).py", full_path) + if match is not None: + filename = match.group(1) + if TEST_WITH_TORCHINDUCTOR: # noqa: F821 + from .dynamo_test_failures import FIXME_inductor_non_strict + strict_default = filename not in FIXME_inductor_non_strict + else: + strict_default = True + # inspect.getfile can fail with these + except (OSError, TypeError): + pass + if "STRICT_DEFAULT" in os.environ: + if os.environ["STRICT_DEFAULT"] == "1": + strict_default = True + + strict_mode = False + if compiled: + test_method = getattr(self, self._testMethodName) + if hasattr(test_method, "dynamo_strict"): + strict_mode = test_method.dynamo_strict + elif hasattr(test_cls, "dynamo_strict"): + strict_mode = test_cls.dynamo_strict + else: + strict_mode = strict_default + nopython = getattr(test_cls, "dynamo_strict_nopython", False) and compiled + + if strict_mode: + torch._dynamo.reset() + + # TODO: Remove this; this is grandfathered in because we suppressed errors + # on test suite previously + # When strict mode is False, suppress_errors is True + if compiled: + suppress_errors = not strict_mode + else: + suppress_errors = torch._dynamo.config.suppress_errors + with unittest.mock.patch("torch._dynamo.config.suppress_errors", suppress_errors): + if TEST_WITH_TORCHINDUCTOR: # noqa: F821 + super_run = torch._dynamo.optimize("inductor")(super_run) + elif TEST_WITH_AOT_EAGER: # noqa: F821 + super_run = torch._dynamo.optimize("aot_eager_decomp_partition")(super_run) + elif TEST_WITH_TORCHDYNAMO: # noqa: F821 + # TorchDynamo optimize annotation + super_run = torch._dynamo.optimize("eager", nopython=nopython)(super_run) + key = f"{self.__class__.__name__}.{self._testMethodName}" + from .dynamo_test_failures import dynamo_expected_failures, dynamo_skips + + def expect_failure(f, test_name): + @wraps(f) + def wrapper(*args, **kwargs): + try: + f(*args, **kwargs) + except BaseException as e: + self.skipTest(e) + raise RuntimeError(f"Unexpected success, please remove `test/dynamo_expected_failures/{test_name}`") + return wrapper + + if key in dynamo_expected_failures: + method = getattr(self, self._testMethodName) + setattr(self, self._testMethodName, expect_failure(method, key)) + + def ignore_failure(f, test_name): + @wraps(f) + def wrapper(*args, **kwargs): + try: + 
f(*args, **kwargs) + except BaseException as e: + self.skipTest(e) + method = getattr(self, self._testMethodName) + if getattr(method, "__unittest_expecting_failure__", False): + self.skipTest("unexpected success") + else: + self.skipTest(f"This test passed, maybe we can remove `test/dynamo_skips/{test_name}`") + return wrapper + + if key in dynamo_skips: + method = getattr(self, self._testMethodName) + setattr(self, self._testMethodName, ignore_failure(method, key)) + + super_run(result=result) + + if strict_mode: + torch._dynamo.reset() + + # Early terminate test if necessary. If using pytest, use the -x flag instead + if using_unittest and self._should_stop_test_suite(): + if result.wasSuccessful(): + case = TestCase() + if TEST_SAVE_XML is not None: + # This is a big hacky, XMLRunner modifies expected type from TestCase to TestInfo + # Create dummy TestInfo to record results correctly + from xmlrunner.result import _TestInfo # type: ignore[import] + case = _TestInfo(result, case) + case.output = _TestInfo.ERROR + case.elapsed_time = 0.0 + case.test_description = "TestSuiteEarlyFailure" + # This shouldn't really happen, but if does add fake failure + # For more details see https://github.com/pytorch/pytorch/issues/71973 + result.failures.append((case, "TestSuite execution was aborted early")) + assert result.wasSuccessful() is False + result.stop() + + + def run(self, result=None): + with contextlib.ExitStack() as stack: + if TEST_WITH_CROSSREF: # noqa: F821 + stack.enter_context(CrossRefMode()) + self._run_custom( + result=result, + ) + + def setUp(self): + check_if_enable(self) + set_rng_seed(SEED) + + # Save global check sparse tensor invariants state that can be + # restored from tearDown: + self._check_invariants = torch.sparse.check_sparse_tensor_invariants.is_enabled() + + # Enable invariant checks for all sparse tensors constructions + # including the unsafe ones. If this is not desired for some + # test case, use check_invariants=False optional argument to + # sparse tensor constructors or + # @torch.sparse.check_sparse_tensor_invariants(False) + # decorator to disable the invariant checks. + torch.sparse.check_sparse_tensor_invariants.enable() + + if self._default_dtype_check_enabled: + assert torch.get_default_dtype() == torch.float + + def tearDown(self): + # There exists test cases that override TestCase.setUp + # definition, so we cannot assume that _check_invariants + # attribute is defined in general. + if hasattr(self, '_check_invariants'): + # Restore the global check sparse tensor invariants state + if self._check_invariants: + torch.sparse.check_sparse_tensor_invariants.enable() + else: + torch.sparse.check_sparse_tensor_invariants.disable() + + if self._default_dtype_check_enabled: + assert torch.get_default_dtype() == torch.float + + @staticmethod + def _make_crow_indices(n_rows, n_cols, nnz, + *, device, dtype, random=True): + """Return crow_indices of a CSR tensor with size (n_rows, n_cols) and + the number of specified elements nnz. + + If random is True, the column counts of rows are in random + order. Otherwise, the column counts of rows are defined by the + used sampling method. + + Sampling method + --------------- + + The used sampling method was introduced in + https://pearu.github.io/csr_sampling.html, and here we give + only an overall description of the method. 
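+
+        As a small illustration of the output format (not necessarily what
+        this sampler returns for these inputs): for n_rows=3, n_cols=3,
+        nnz=4, a valid crow_indices is [0, 3, 3, 4], meaning the three rows
+        hold 3, 0, and 1 specified elements, respectively.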
+ + Notice that crow_indices can be defined as cumsum(counts) + where counts is a sequence of non-negative integers satisfying + the following conditions: + + len(counts) == n_rows + 1 + counts.max() <= n_cols + + while counts[i + 1] is interpreted as the number of specified + elements in the i-th row. + + The used sampling method aims at increasing the diversity of + CSR samples, that is, a CSR sample should contain (i) rows + that are all filled, (ii) rows with no elements at all, and + (iii) rows that are partially filled. At the same time and for + the given total number of specified elements (nnz), there + should be minimal preference to rows with a given number of + elements. To achieve this, the sampling method is built-up on + using a sawteeth model for counts. In the simplest case, we + would have + + counts = arange(n_rows + 1) % (n_cols + 1) + + that has equal number of all possible column counts per row. + This formula can be used only for specific input values of + n_rows, n_cols, and nnz. To generalize this model to any + combinations of inputs, the counts model above is extended + with an incomplete sawtooth, and the right and lower + rectangular parts that will guarantee that + + counts.sum() == nnz + + for any combination of n_rows, n_cols, and nnz. Basically, + we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid + that is able to hold a sequence of sawteeth and so-called + final correction, while the external part of the window is + filled with counts to meet the nnz constraint exactly. + """ + assert 0 <= nnz <= n_rows * n_cols, (nnz, n_rows, n_cols) + + def sawteeth(n, m): + # return the total number of counts in the sequence of + # sawteeth where n and m define a window in (n_rows+1, + # n_cols+1) rectangle where the sequence of sawteeth + # perfectly fit. + M = (n_cols - m) * (n_cols - m + 1) // 2 + K = (n_rows - n) % (n_cols - m + 1) + return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2 + + # Different from the original method description, here counts + # has leading 0 required by crow_indices: + counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu')) + + n = m = 0 + N = sawteeth(n, m) + if N and nnz >= max(N, n_cols): + # determine the width of the sawteeth window. We use bisection to solve + # N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols) + # for n + n_left = n + n_right = n_rows - 1 + N_right = sawteeth(n_right, m) + while n_right - n_left > 1: + n_middle = (n_left + n_right) // 2 + N_middle = sawteeth(n_middle, m) + if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols): + n_right, N_right = n_middle, N_middle + else: + n_left = n_middle + n, N = n_right, N_right + # fill the right rectangle with counts: + assert n + counts[-n:].fill_(n_cols) + + if N and nnz - n * n_cols >= max(N, n_rows - n): + # determine the height of the sawteeth window. We use bisection to solve + # N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n) + # for m. 
+ m_left = m + m_right = n_cols - 1 + N_right = sawteeth(n, m_right) + while m_right - m_left > 1: + m_middle = (m_left + m_right) // 2 + N_middle = sawteeth(n, m_middle) + if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n): + m_right, N_right = m_middle, N_middle + else: + m_left = m_middle + m, N = m_right, N_right + # fill the bottom rectangle with counts: + assert m + counts[1:n_rows - n + 1].fill_(m) + + if N: + # fill the sawteeth window with counts + q, r = divmod(nnz - n * n_cols - m * (n_rows - n), + (n_cols - m) * (n_cols - m + 1) // 2) + p = 1 + q * (n_cols - m + 1) + k = math.isqrt(2 * r) + if k * (k + 1) > 2 * r: + k -= 1 + corr = r - k * (k + 1) // 2 + assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle + # sequence of full sawteeth: + counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1) + # incomplete sawtooth: + counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device) + else: + # given input does not support sawteeth + p = 1 + corr = nnz - n * n_cols - m * (n_rows - n) + + # correction that will guarantee counts.sum() == nnz: + counts[p] += corr + + if random: + # randomize crow_indices by shuffling the sawteeth + # sequence: + perm = torch.randperm(n_rows, device=counts.device) + counts[1:] = counts[1:][perm] + + # compute crow_indices: + crow_indices = counts + crow_indices.cumsum_(dim=0) + return crow_indices.to(device=device) + + def genSparseCompressedTensor(self, size, nnz, *, layout, device, dtype, index_dtype, blocksize=(), dense_dims=0): + from operator import mul + from functools import reduce + sparse_dim = 2 + assert all(size[d] > 0 for d in range(len(size))) or nnz == 0, 'invalid arguments' + assert len(size) >= sparse_dim + if blocksize: + assert len(blocksize) == 2, (size, blocksize) + assert size[-2 - dense_dims] % blocksize[0] == 0, (size, blocksize) + assert size[-1 - dense_dims] % blocksize[1] == 0, (size, blocksize) + blocksize0, blocksize1 = blocksize + else: + blocksize0 = blocksize1 = 1 + + size = tuple(size) + dense_size = size[(len(size) - dense_dims):] + + def random_sparse_compressed(n_compressed_dims, n_plain_dims, nnz): + compressed_indices = self._make_crow_indices(n_compressed_dims, n_plain_dims, nnz, device=device, dtype=index_dtype) + plain_indices = torch.zeros(nnz, dtype=index_dtype, device=device) + for i in range(n_compressed_dims): + count = compressed_indices[i + 1] - compressed_indices[i] + plain_indices[compressed_indices[i]:compressed_indices[i + 1]], _ = torch.sort( + torch.randperm(n_plain_dims, dtype=index_dtype, device=device)[:count]) + low = -1 if dtype != torch.uint8 else 0 + high = 1 if dtype != torch.uint8 else 2 + values = make_tensor((nnz,) + blocksize + dense_size, device=device, dtype=dtype, low=low, high=high) + return values, compressed_indices, plain_indices + + batch_shape = size[:-2 - dense_dims] + n_batch = reduce(mul, batch_shape, 1) + + if layout in {torch.sparse_csr, torch.sparse_bsr}: + n_compressed_dims, n_plain_dims = size[-2 - dense_dims] // blocksize0, size[-1 - dense_dims] // blocksize1 + else: + n_compressed_dims, n_plain_dims = size[-1 - dense_dims] // blocksize1, size[-2 - dense_dims] // blocksize0 + blocknnz = nnz // (blocksize0 * blocksize1) + sparse_tensors = [random_sparse_compressed(n_compressed_dims, n_plain_dims, blocknnz) for _ in range(n_batch)] + sparse_tensors_it = map(list, zip(*sparse_tensors)) + + values = 
torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, blocknnz, *blocksize, *dense_size) + compressed_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1) + plain_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1) + return torch.sparse_compressed_tensor(compressed_indices, plain_indices, + values, size=size, dtype=dtype, layout=layout, device=device) + + def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype, dense_dims=0): + return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csr, device=device, + dtype=dtype, index_dtype=index_dtype, blocksize=(), dense_dims=dense_dims) + + def genSparseCSCTensor(self, size, nnz, *, device, dtype, index_dtype, dense_dims=0): + return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csc, device=device, + dtype=dtype, index_dtype=index_dtype, blocksize=(), dense_dims=0) + + def genSparseBSRTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype, dense_dims=0): + assert len(blocksize) == 2 + return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsr, device=device, + dtype=dtype, index_dtype=index_dtype, blocksize=blocksize, dense_dims=dense_dims) + + def genSparseBSCTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype, dense_dims=0): + assert len(blocksize) == 2 + return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsc, device=device, + dtype=dtype, index_dtype=index_dtype, blocksize=blocksize, dense_dims=dense_dims) + + def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype): + # Assert not given impossible combination, where the sparse dims have + # empty numel, but nnz > 0 makes the indices containing values. + assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments' + + v_size = [nnz] + list(size[sparse_dim:]) + v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1) + i = torch.rand(sparse_dim, nnz, device=device) + i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i)) + i = i.to(torch.long) + if is_uncoalesced: + i1 = i[:, :(nnz // 2), ...] + i2 = i[:, :((nnz + 1) // 2), ...] + i = torch.cat([i1, i2], 1) + x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device) + + if not is_uncoalesced: + x = x.coalesce() + else: + # FIXME: `x` is a sparse view of `v`. Currently rebase_history for + # sparse views is not implemented, so this workaround is + # needed for inplace operations done on `x`, e.g., copy_(). + # Remove after implementing something equivalent to CopySlice + # for sparse views. + # NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards + x = x.detach().clone()._coalesced_(False) + return x, x._indices().clone(), x._values().clone() + + def generate_simple_inputs(self, layout, + device=None, + dtype=None, + index_dtype=None, + enable_batch=True, + enable_hybrid=True, + enable_zero_sized=True, + enable_non_contiguous_indices=True, + enable_non_contiguous_values=True, + enable_batch_variable_nse=False, + output_tensor=True, + patterns=None): + """Generator of simple inputs for tensor constructors of the given layout. + + The generated tensor inputs have the following properties: + + - tensor shapes are minimal but not trivial + - tensor values are sorted sequences for COO and CSR formats, e.g. 
[1, 2, 3, 4] + - the generated tensors represent the same mathematical tensor for all layouts + - the generated tensors include regular, zero-sized, and optionally, batched or/and hybrid tensors. + - the generated tensors include contiguous or non-contiguous tensors both in indices and values + + If output_tensor is True, yield tensors with the given + layout. Otherwise, yield inputs to the corresponding tensor + constructors: + + - sparse compressed input is defined as + (compressed_indices, plain_indices, values), dict(size=expected_size_from_shape_inference, device=device, dtype=dtype) + + - sparse COO input is defined as + (indices, values), dict(size=expected_size_from_shape_inference, device=device, dtype=dtype) + + - strided input is defined as + (values,), dict(device=device, dtype=dtype) + """ + if index_dtype is None: + index_dtype = torch.int64 + + is_compressed_sparse_layout = layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} + + if output_tensor: + for args, kwargs in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype, + enable_batch=enable_batch, enable_hybrid=enable_hybrid, + enable_zero_sized=enable_zero_sized, + enable_non_contiguous_indices=enable_non_contiguous_indices, + enable_non_contiguous_values=enable_non_contiguous_values, + enable_batch_variable_nse=enable_batch_variable_nse, + output_tensor=False): + if layout is torch.strided: + assert len(args) == 1 + size = kwargs.pop('size', None) # to ensure that a zero-sized tensor has the desired shape + assert size is not None + yield args[0].reshape(size) + elif layout is torch.sparse_coo: + yield torch.sparse_coo_tensor(*args, **kwargs) + elif is_compressed_sparse_layout: + kwargs.update(layout=layout) + yield torch.sparse_compressed_tensor(*args, **kwargs) + else: + assert 0 # unreachable + return + + def get_blockpattern(pattern, blocksize): + basesize = pattern.shape + assert basesize[0] % blocksize[0] == 0, (basesize, blocksize) + assert basesize[1] % blocksize[1] == 0, (basesize, blocksize) + blockpattern = pattern.reshape(-1, + blocksize[0], + basesize[1] // blocksize[1], + blocksize[1]).transpose(-3, -2).any(-1).any(-1) + block_ids = torch.arange(1, blockpattern.numel() + 1).reshape(blockpattern.shape) + return (blockpattern != 0) * block_ids + + def get_sparse_data(pattern): + basesize = pattern.shape + assert len(basesize) == 2, basesize # pattern is expected to be a matrix + + # We cannot use `torch.sparse_xyz_tensor(pattern)` to + # compute the sparse layout indices and values because + # generate_simple_inputs is used to generate the inputs to + # test `torch.sparse_xyz_tensor` factory functions, so + # we'll compute the indices and values independently of + # the factory functions. 
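+            #
+            # As a hand-checked illustration (assuming pattern = [[1, 0], [2, 3]],
+            # so values = arange(1, 1 + nnz) assigned in row-major order), the
+            # returned data is:
+            #   COO:     indices [[0, 1, 1], [0, 0, 1]], values [1, 2, 3]
+            #   CSR:     crow_indices [0, 1, 3], col_indices [0, 0, 1], values [1, 2, 3]
+            #   CSC:     ccol_indices [0, 2, 3], row_indices [0, 1, 1], values [1, 2, 3]
+            #   strided: [[1, 0], [2, 3]]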
+ + indices = torch.where(pattern != 0) + coo_indices = torch.stack(indices) + crow_indices = torch.zeros(basesize[0] + 1, dtype=torch.int64) + crow_indices[1:] = torch.cumsum(coo_indices[0].bincount(minlength=basesize[0]), 0) + col_indices = coo_indices[1] + strided_values = torch.zeros(basesize, dtype=torch.int64) + + # the property of `values == range(1, 1+nnz)` is used in + # get_sparse_data_with_block to relate BSR and BSC values, + # so, don't change the following line: + values = torch.arange(1, 1 + len(indices[0]), dtype=torch.int64) + strided_values[indices] = values + + indices_T = torch.where(pattern.transpose(0, 1) != 0) + coo_indices_T = torch.stack(indices_T) + ccol_indices = torch.zeros(basesize[1] + 1, dtype=torch.int64) + ccol_indices[1:] = torch.cumsum(coo_indices_T[0].bincount(minlength=basesize[1]), 0) + row_indices = coo_indices_T[1] + csc_values = strided_values.transpose(0, 1)[indices_T] + + return {torch.sparse_coo: (coo_indices, values), + torch.sparse_csr: (crow_indices, col_indices, values), + torch.sparse_csc: (ccol_indices, row_indices, csc_values), + torch.strided: (strided_values,)} + + def get_sparse_data_with_block(pattern, blocksize): + nonblock_data = get_sparse_data(pattern) + blockpattern = get_blockpattern(pattern, blocksize) + block_data = get_sparse_data(blockpattern) + + strided_values = nonblock_data[torch.strided][0] + block_indices = block_data[torch.sparse_coo][0] + bsr_values = torch.stack([strided_values[bi * blocksize[0]:(bi + 1) * blocksize[0], + bj * blocksize[1]:(bj + 1) * blocksize[1]] + for bi, bj in block_indices.transpose(0, 1)]) + + # here we use the property `values == range(1, 1+nnz)` and + # `values` relation to `csc_values` (see get_sparse_data) + # to get BSC blocks via reordering the BSR blocks: + bsc_values = bsr_values[block_data[torch.sparse_csc][2] - 1] + + return {torch.sparse_bsr: (*block_data[torch.sparse_csr][:2], bsr_values), + torch.sparse_bsc: (*block_data[torch.sparse_csc][:2], bsc_values), + **nonblock_data} + + def get_batch_sparse_data(pattern, blocksize): + size = pattern.shape + if len(size) <= 2: # non-batch + return get_sparse_data_with_block(pattern, blocksize) + + # batch data is created recursively: + batch_data = {} + for i, item in enumerate(pattern): + for layout, d in get_batch_sparse_data(item, blocksize).items(): + target = batch_data.get(layout) + if layout is torch.sparse_coo: + # a "batch COO" means a COO with the leading + # sparse dimensions interpreted as batch + # dimensions + ext_coo_indices1 = torch.cat((torch.full((1, len(d[1])), i, dtype=torch.int64), d[0])) + if target is None: + target = batch_data[layout] = (ext_coo_indices1, d[1]) + else: + target[0].set_(torch.cat((target[0], ext_coo_indices1), 1)) + target[1].set_(torch.cat((target[1], d[1]))) + else: + if target is None: + target = batch_data[layout] = tuple(d[j].unsqueeze(0) for j in range(len(d))) + else: + for j in range(len(d)): + target[j].set_(torch.cat((target[j], d[j].unsqueeze(0)))) + return batch_data + + def generate_values(base, densesize): + """Generates a tensor of shape densesize with values equal to + + base + i_1 * 10^0 + ... + i_d * 10^{d - 1} + + at indices i_1, ..., i_d (with 0 <= i_j < densesize[j] for any 1 <= j <= + len(densesize)) + + This mapping produces unique values as long as + densesize[i] < 10 for all i in range(len(densesize)). 
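+
+            For example (hand-computed from the formula above), base=1 with
+            densesize=(2, 3) yields
+
+                [[ 1, 11, 21],
+                 [ 2, 12, 22]]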
+ """ + + if not densesize: + return base + if not isinstance(base, int) and base.ndim > 0: + return torch.stack([generate_values(b, densesize) for b in base]) + if base == 0: + return torch.zeros(densesize, dtype=torch.int64) + r = torch.arange(densesize[0], dtype=torch.int64) + for i, d in enumerate(densesize[1:]): + y = torch.arange(d, dtype=torch.int64) * (10 ** (i + 1)) + r = r[..., None] + y[None, ...] + r.add_(base) + return r + + if patterns is None: + # A pattern is a 3-tuple with the following items: + # + # - a list of integers with the depth of two or more. The + # integers define the sparsity patterns of the generated + # inputs: zero values correspond to unspecified + # elements/blocks, and non-zero values to the specified + # elements. + # + # For debugging convenience, the elements with the same + # value typically belong to the same block. However, it + # is not a hard requirement: as long as the shape of a + # pattern divides with block sizes, the pattern will be + # a valid one. + # + # If the depth of the list is larger than two, inputs + # with batch dimensions will be generated. + # + # - a list of 2-tuples of block sizes, used to generate + # BSR/BSC tensors with various block size parameters + # + # - a list of tuples of dense dimensions, used to generate + # hybrid tensors with various dense dimensions + # + patterns = [ + # a simple 3 x 2 tensor: non-hybrid, hybrid with 1 and 2 dense dimensions + ([[1, 2, 0], + [1, 0, 3]], [(2, 1), (1, 3)], [(), (2,), (4, 5)]), + # 2 x 3 batch of 3 x 2 tensors: non-hybrid and hybrid with 2 dense dimensions + ([[[[1, 2, 0], + [1, 0, 3]], + [[1, 2, 3], + [1, 0, 0]], + [[1, 0, 0], + [1, 2, 3]]], + [[[0, 2, 0], + [1, 2, 3]], + [[1, 0, 3], + [1, 2, 0]], + [[1, 2, 3], + [0, 2, 0]]]], [(2, 1), (2, 3)], [(), (2,)]), + # tensor with non-trivial blocksize + ([[0, 1, 0, 2, 0, 2], + [0, 1, 0, 0, 2, 0], + [3, 3, 3, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 5, 0, 6, 6, 6], + [5, 0, 5, 6, 6, 6], + [0, 0, 0, 0, 8, 8], + [7, 7, 7, 0, 8, 8]], [(2, 3)], [(), (4, 5)]), + # batch tensor with variable NSE + # Requires https://github.com/pytorch/pytorch/pull/84843 or similar. 
+ ([[[1, 2], + [3, 4]], + [[1, 0], + [0, 0]]], [(1, 1)], ([()] if enable_batch_variable_nse else []))] + + def non_contiguous_copy(t, dim=-1, offset=0): + # return a copy of t that is non-contiguous along the + # given dimension and with the given storage offset + self.assertTrue(t.is_contiguous()) + if dim < 0: + dim = dim + t.ndim + assert dim >= 0 and dim < t.ndim + step = max(2, offset + 1) + tmp = torch.zeros((*t.shape[:dim], t.shape[dim] * step, *t.shape[dim + 1:]), dtype=t.dtype, device=t.device) + dim_slices = (*((slice(None),) * dim), slice(offset, None, step)) + r = tmp[dim_slices].copy_(t) + self.assertFalse(r.is_contiguous()) + self.assertEqual(t, r) + return r + + # the main loop of the method: + for pattern, blocksizes, densesizes in patterns: + if not enable_hybrid: + densesizes = [s for s in densesizes if not s] + if not (densesizes and blocksizes): + continue + pattern = torch.tensor(pattern, dtype=torch.int64) + if not enable_batch and pattern.ndim > 2: + continue + for blocksize in blocksizes: + data = get_batch_sparse_data(pattern, blocksize)[layout] + for densesize in densesizes: + indices = [a.to(device=device, dtype=index_dtype) for a in data[:-1]] + values = generate_values(data[-1], densesize).to(device=device, dtype=dtype) + yield (*indices, values), dict(device=device, dtype=dtype, + size=pattern.shape + densesize) + + if enable_non_contiguous_indices and pattern.ndim > 2: + # sparse compressed indices can be sliced only along batch dimensions + for (dim, offset) in {(0, 1), (-2, 0)}: + indices_copy = [non_contiguous_copy(a, dim=dim, offset=offset) for a in indices] + yield (*indices_copy, values), dict(device=device, dtype=dtype, + size=pattern.shape + densesize) + + if enable_non_contiguous_values: + values_copy = non_contiguous_copy(values, dim=-1, offset=1) + yield (*indices_copy, values_copy), dict(device=device, dtype=dtype, + size=pattern.shape + densesize) + + if enable_non_contiguous_values: + values_copy = non_contiguous_copy(values, dim=-1, offset=1) + yield (*indices, values_copy), dict(device=device, dtype=dtype, + size=pattern.shape + densesize) + + # zero-sized tensor inputs, non-batch, non-hybrid/hybrid + if enable_zero_sized: + for basesize, blocksizes, densesizes in [ + ((2, 0), [(1, 2)], [(), (2,), (2, 3)] if enable_hybrid else [()]), + ((0, 2), [(1, 2), (2, 1), (3, 2)], [()]), + ((0, 0), [(1, 2)], [()]), + ]: + for blocksize in blocksizes: + for densesize in densesizes: + if layout == torch.strided: + indices = () + values = torch.empty((basesize + densesize), device=device, dtype=dtype) + elif layout == torch.sparse_coo: + indices = (torch.empty(len(basesize), 0, device=device, dtype=index_dtype),) + values = torch.empty((0, *densesize), device=device, dtype=dtype) + elif layout == torch.sparse_csr: + crow_indices = torch.tensor([0] * (basesize[0] + 1), device=device, dtype=index_dtype) + col_indices = torch.empty(0, device=device, dtype=index_dtype) + indices = (crow_indices, col_indices) + values = torch.empty((0, *densesize), device=device, dtype=dtype) + elif layout == torch.sparse_csc: + ccol_indices = torch.tensor([0] * (basesize[1] + 1), device=device, dtype=index_dtype) + row_indices = torch.empty(0, device=device, dtype=index_dtype) + indices = (ccol_indices, row_indices) + values = torch.empty((0, *densesize), device=device, dtype=dtype) + elif layout == torch.sparse_bsr: + crow_indices = torch.tensor([0] * (basesize[0] // blocksize[0] + 1), device=device, dtype=index_dtype) + col_indices = torch.empty(0, device=device, 
dtype=index_dtype) + indices = (crow_indices, col_indices) + values = torch.empty((0, *blocksize, *densesize), device=device, dtype=dtype) + elif layout == torch.sparse_bsc: + ccol_indices = torch.tensor([0] * (basesize[1] // blocksize[1] + 1), device=device, dtype=index_dtype) + row_indices = torch.empty(0, device=device, dtype=index_dtype) + indices = (ccol_indices, row_indices) + values = torch.empty((0, *blocksize, *densesize), device=device, dtype=dtype) + else: + assert 0 # unreachable + yield (*indices, values), dict(device=device, dtype=dtype, size=basesize + densesize) + + def safeToDense(self, t): + # coalesce is only implemented for COO + if t.layout == torch.sparse_coo: + t = t.coalesce() + return t.to_dense() + + # Compares a torch function with a reference function for a given sample input (object of SampleInput) + # Note: only values are compared, type comparison is not done here + def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs): + numpy_sample = sample_input.numpy() + n_inp, n_args, n_kwargs = numpy_sample.input, numpy_sample.args, numpy_sample.kwargs + t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs + + actual = torch_fn(t_inp, *t_args, **t_kwargs) + expected = ref_fn(n_inp, *n_args, **n_kwargs) + + self.assertEqual(actual, expected, exact_device=False, **kwargs) + + # Compares the given Torch and NumPy functions on the given tensor-like object. + # NOTE: both torch_fn and np_fn should be functions that take a single + # tensor (array). If the torch and/or NumPy function require additional + # arguments then wrap the function in a lambda or pass a partial function. + # TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol) + def compare_with_numpy(self, torch_fn, np_fn, tensor_like, + device=None, dtype=None, **kwargs): + assert TEST_NUMPY + + if isinstance(tensor_like, torch.Tensor): + assert device is None + assert dtype is None + t_cpu = tensor_like.detach().cpu() + if t_cpu.dtype is torch.bfloat16: + t_cpu = t_cpu.float() + a = t_cpu.numpy() + t = tensor_like + else: + d = copy.copy(torch_to_numpy_dtype_dict) + d[torch.bfloat16] = np.float32 + a = np.array(tensor_like, dtype=d[dtype]) + t = torch.tensor(tensor_like, device=device, dtype=dtype) + + np_result = np_fn(a) + torch_result = torch_fn(t).cpu() + + # Converts arrays to tensors + if isinstance(np_result, np.ndarray): + try: + np_result = torch.from_numpy(np_result) + except Exception: + # NOTE: copying an array before conversion is necessary when, + # for example, the array has negative strides. + np_result = torch.from_numpy(np_result.copy()) + if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float: + torch_result = torch_result.to(torch.float) + + self.assertEqual(np_result, torch_result, **kwargs) + + def assertEqualIgnoreType(self, *args, **kwargs) -> None: + # If you are seeing this function used, that means test is written wrongly + # and deserves detailed investigation + return self.assertEqual(*args, exact_dtype=False, **kwargs) + + def assertEqualBroadcasting(self, x, y, *args, **kwargs) -> None: + r"""Tests if tensor x equals to y, if y to be broadcast to x.shape. + """ + if not isinstance(y, Iterable): + # int, float, etc. 
or different shape tensors + y = torch.ones_like(x) * y + if not isinstance(y, torch.Tensor): + # iterable, but not a tensor + y = torch.ones_like(x) * torch.tensor(y) + return self.assertEqual(x, y, *args, **kwargs) + + def assertEqual( + self, + x, + y, + msg: Optional[Union[str, Callable[[str], str]]] = None, + *, + atol: Optional[float] = None, + rtol: Optional[float] = None, + equal_nan=True, + exact_dtype=True, + # TODO: default this to True + exact_device=False, + exact_layout=False, + exact_stride=False, + exact_is_coalesced=False + ): + # Hide this function from `pytest`'s traceback + __tracebackhide__ = True + + # numpy's dtypes are a superset of what PyTorch supports. In case we encounter an unsupported dtype, we fall + # back to an elementwise comparison. Note that this has to happen here and not for example in + # `TensorOrArrayPair`, since at that stage we can no longer split the array into its elements and perform + # multiple comparisons. + if any( + isinstance(input, np.ndarray) and not has_corresponding_torch_dtype(input.dtype) for input in (x, y) + ): + def to_list(input): + return input.tolist() if isinstance(input, (torch.Tensor, np.ndarray)) else list(input) + + x = to_list(x) + y = to_list(y) + # When comparing a sequence of numbers to a tensor, we need to convert the sequence to a tensor here. + # Otherwise, the pair origination of `are_equal` will fail, because the sequence is recognized as container + # that should be checked elementwise while the tensor is not. + elif isinstance(x, torch.Tensor) and isinstance(y, Sequence): + y = torch.as_tensor(y, dtype=x.dtype, device=x.device) + elif isinstance(x, Sequence) and isinstance(y, torch.Tensor): + x = torch.as_tensor(x, dtype=y.dtype, device=y.device) + + # If x or y are tensors and nested then we unbind them to a list of tensors this should allow us to compare + # a nested tensor to a nested tensor and a nested tensor to a list of expected tensors + if isinstance(x, torch.Tensor) and x.is_nested: + x = x.unbind() + if isinstance(y, torch.Tensor) and y.is_nested: + y = y.unbind() + + error_metas = not_close_error_metas( + x, + y, + pair_types=( + NonePair, + RelaxedBooleanPair, + RelaxedNumberPair, + TensorOrArrayPair, + TypedStoragePair, + StringPair, + SetPair, + TypePair, + ObjectPair, + ), + sequence_types=( + Sequence, + Sequential, + ModuleList, + ParameterList, + ScriptList, + torch.utils.data.dataset.Subset, + ), + mapping_types=(Mapping, ModuleDict, ParameterDict, ScriptDict), + rtol=rtol, + rtol_override=self.rel_tol, + atol=atol, + atol_override=self.precision, + equal_nan=equal_nan, + check_device=exact_device, + check_dtype=exact_dtype, + check_layout=exact_layout, + check_stride=exact_stride, + check_is_coalesced=exact_is_coalesced, + ) + + if error_metas: + # See [ErrorMeta Cycles] + error_metas = [error_metas] + # TODO: compose all metas into one AssertionError + raise error_metas.pop()[0].to_error( + # This emulates unittest.TestCase's behavior if a custom message passed and + # TestCase.longMessage (https://docs.python.org/3/library/unittest.html#unittest.TestCase.longMessage) + # is True (default) + (lambda generated_msg: f"{generated_msg}\n{msg}") if isinstance(msg, str) and self.longMessage else msg + ) + + def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override] + atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None: + with self.assertRaises(AssertionError, msg=msg): + self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs) + + def 
assertEqualTypeString(self, x, y) -> None: + # This API is used simulate deprecated x.type() == y.type() + self.assertEqual(x.device, y.device) + self.assertEqual(x.dtype, y.dtype) + self.assertEqual(x.is_sparse, y.is_sparse) + + def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None: + for elem in iterable: + if id(obj) == id(elem): + return + raise AssertionError("object not found in iterable") + + # Reimplemented to provide special behavior when + # _ignore_not_implemented_error is True + def assertRaises(self, expected_exception, *args, **kwargs): + if self._ignore_not_implemented_error: + context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \ + AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg] + try: + return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr] + finally: + # see https://bugs.python.org/issue23890 + context = None + else: + return super().assertRaises(expected_exception, *args, **kwargs) + + # Reimplemented to provide special behavior when + # _ignore_not_implemented_error is True + def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs): + # Verifies that an exception with the type expected_exception and message + # matching the regular expression defined by expected_regex is thrown. + # If the test is instantiated for a non-native device type (like XLA) + # then the message is not validated. + + # Checks whether the test is instantiated for a device type by testing + # if the test class has defined the device_type attribute and, + # if so, tests whether the instantiated device type is native or not + if hasattr(self, 'device_type') and self.device_type not in NATIVE_DEVICES and self.device_type != "mps": # type: ignore[attr-defined] + # empty string matches any string + expected_regex = '' + + if self._ignore_not_implemented_error: + context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg] + expected_exception, self, expected_regex) + return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined] + else: + return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs) + + # Verifies that no unraisable exceptions are raised by callable. Unlike regular + # exceptions, these do not actually propagate to the caller and are + # suppressed. We must test for them specially. + def assertNoUnraisable(self, callable, *args, **kwargs): + raised = None + + def record_unraisable(unraisable): + nonlocal raised + raised = unraisable + + # Disable GC when running the callable to prevent spurious flakiness + # from unlucky GCs inside the callable + prev = gc.isenabled() + gc.disable() + try: + with unittest.mock.patch("sys.unraisablehook", record_unraisable): + callable(*args, **kwargs) + finally: + if prev: + gc.enable() + + self.assertIsNone(raised) + + # TODO: Support context manager interface + # NB: The kwargs forwarding to callable robs the 'subname' parameter. + # If you need it, manually apply your callable in a lambda instead. 
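+    # e.g. (with a hypothetical ``fn`` that itself takes a ``subname`` kwarg):
+    #   self.assertExpectedRaises(RuntimeError, lambda: fn(subname="inner"), subname="case1")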
+ def assertExpectedRaises(self, exc_type, callable, *args, **kwargs): + subname = None + if 'subname' in kwargs: + subname = kwargs['subname'] + del kwargs['subname'] + try: + callable(*args, **kwargs) + except exc_type as e: + self.assertExpected(str(e), subname) + return + # Don't put this in the try block; the AssertionError will catch it + self.fail(msg="Did not raise when expected to") + + def assertNotWarn(self, callable, msg=''): + r""" + Test if :attr:`callable` does not raise a warning. + """ + with warnings.catch_warnings(record=True) as ws: + warnings.simplefilter("always") # allow any warning to be raised + with set_warn_always_context(True): + callable() + self.assertTrue(len(ws) == 0, msg) + + @contextmanager + def assertWarnsOnceRegex(self, category, regex=''): + """Context manager for code that *must always* warn + + This filters expected warnings from the test and fails if + the expected warning is not caught. It uses set_warn_always() to force + TORCH_WARN_ONCE to behave like TORCH_WARN + """ + pattern = re.compile(regex) + with warnings.catch_warnings(record=True) as ws: + warnings.simplefilter("always") # allow any warning to be raised + with set_warn_always_context(True): + yield + if len(ws) == 0: + self.fail('no warning caught') + self.assertTrue(any(type(w.message) is category for w in ws)) + self.assertTrue( + any(re.match(pattern, str(w.message)) for w in ws), + f'{pattern}, {[w.message for w in ws if type(w.message) is category]}') + + def assertExpected(self, s, subname=None): + r""" + Test that a string matches the recorded contents of a file + derived from the name of this test and subname. This file + is placed in the 'expect' directory in the same directory + as the test script. You can automatically update the recorded test + output using --accept. + + If you call this multiple times in a single function, you must + give a unique subname each time. + """ + if not isinstance(s, str): + raise TypeError("assertExpected is strings only") + + def remove_prefix(text, prefix): + if text.startswith(prefix): + return text[len(prefix):] + return text + # NB: we take __file__ from the module that defined the test + # class, so we place the expect directory where the test script + # lives, NOT where test/common_utils.py lives. 
This doesn't matter in + # PyTorch where all test scripts are in the same directory as + # test/common_utils.py, but it matters in onnx-pytorch + module_id = self.__class__.__module__ + munged_id = remove_prefix(self.id(), module_id + ".") + test_file = os.path.realpath(sys.modules[module_id].__file__) + expected_file = os.path.join(os.path.dirname(test_file), + "expect", + munged_id) + + subname_output = "" + if subname: + expected_file += "-" + subname + subname_output = f" ({subname})" + expected_file += ".expect" + expected = None + + def accept_output(update_type): + print(f"Accepting {update_type} for {munged_id}{subname_output}:\n\n{s}") + with open(expected_file, 'w') as f: + # Adjust for producer_version, leave s unmodified + s_tag = re.sub(r'(producer_version): "[0-9.]*"', + r'\1: "CURRENT_VERSION"', s) + f.write(s_tag) + + try: + with open(expected_file) as f: + expected = f.read() + except OSError as e: + if e.errno != errno.ENOENT: + raise + elif expecttest.ACCEPT: + return accept_output("output") + else: + raise RuntimeError( + f"I got this output for {munged_id}{subname_output}:\n\n{s}\n\n" + "No expect file exists; to accept the current output, run:\n" + f"python {__main__.__file__} {munged_id} --accept") from None + + # a hack for JIT tests + if IS_WINDOWS: + expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected) + s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s) + + # Adjust for producer_version + expected = expected.replace( + 'producer_version: "CURRENT_VERSION"', + f'producer_version: "{torch.onnx.producer_version}"' + ) + if expecttest.ACCEPT: + if expected != s: + return accept_output("updated output") + else: + if hasattr(self, "assertMultiLineEqual"): + # Python 2.7 only + # NB: Python considers lhs "old" and rhs "new". + self.assertMultiLineEqual(expected, s) + else: + self.assertEqual(s, expected) + + def assertExpectedStripMangled(self, s, subname=None): + s = re.sub(r'__torch__[^ ]+', '', s) + self.assertExpected(s, subname) + + def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None): + """Assert that ``first`` is greater than or almost equal to ``second``. + + The equality of ``first`` and ``second`` is determined in a similar way to + the ``assertAlmostEqual`` function of the standard library. + """ + if delta is not None and places is not None: + raise TypeError("specify delta or places not both") + + if first >= second: + return + + diff = second - first + if delta is not None: + if diff <= delta: + return + + standardMsg = f"{first} not greater than or equal to {second} within {delta} delta" + else: + if places is None: + places = 7 + + if round(diff, places) == 0: + return + + standardMsg = f"{first} not greater than or equal to {second} within {places} places" + + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def assertAtenOp(self, onnx_model, operator, overload_name=""): + all_aten_nodes = [p for p in onnx_model.graph.node + if p.op_type == "ATen" and p.domain == "org.pytorch.aten"] + self.assertTrue(all_aten_nodes) + + for op in all_aten_nodes: + attrs = {attr.name: attr.s.decode() for attr in op.attribute} + if attrs.get("operator") == operator: + break + + self.assertEqual(attrs["operator"], operator) + self.assertEqual(attrs.get("overload_name", ""), overload_name) + + def check_nondeterministic_alert(self, fn, caller_name, should_alert=True): + '''Checks that an operation produces a nondeterministic alert when + expected while `torch.use_deterministic_algorithms(True)` is set. 
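+
+        A typical call has the form (illustrative names only)
+        ``self.check_nondeterministic_alert(lambda: op(inp), 'op_name CUDA')``,
+        where the second argument must match the beginning of the alert
+        message raised by the operation.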
+ + Args: + fn (callable): Function to check for a nondeterministic alert + + caller_name (str): Name of the operation that produces the + nondeterministic alert. This name is expected to appear at the + beginning of the error/warning message. + + should_alert (bool, optional): If True, then the check will only pass + if calling `fn` produces a nondeterministic error/warning with the + expected message. If False, then the check will only pass if + calling `fn` does not produce an error. Default: `True`. + ''' + + alert_message = '^' + caller_name + ' does not have a deterministic implementation, but you set' + + # Check that errors are thrown correctly + with DeterministicGuard(True): + if should_alert: + with self.assertRaisesRegex( + RuntimeError, + alert_message, + msg='expected a non-deterministic error, but it was not raised'): + fn() + + else: + # If a nondeterministic error is not expected, make sure + # that it is not raised + try: + fn() + except RuntimeError as e: + if 'does not have a deterministic implementation' in str(e): + self.fail( + 'did not expect non-deterministic error message, ' + + 'but got one anyway: "' + str(e) + '"') + # Reraise exceptions unrelated to nondeterminism + raise + + # Check that warnings are thrown correctly + with DeterministicGuard(True, warn_only=True): + if should_alert: + with self.assertWarnsRegex( + UserWarning, + alert_message): + fn() + else: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + fn() + for warning in w: + if isinstance(warning, UserWarning): + self.assertTrue(re.search(alert_message, str(warning)) is None) + + # run code in subprocess and capture exceptions. + @staticmethod + def run_process_no_exception(code, env=None): + import subprocess + + popen = subprocess.Popen( + [sys.executable, '-c', code], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env) + (stdout, stderr) = popen.communicate() + return (stdout, stderr) + + # returns captured stderr + @staticmethod + def runWithPytorchAPIUsageStderr(code): + env = os.environ.copy() + env["PYTORCH_API_USAGE_STDERR"] = "1" + # remove CI flag since this is a wrapped test process. + # CI flag should be set in the parent process only. + if "CI" in env.keys(): + del env["CI"] + (stdout, stderr) = TestCase.run_process_no_exception(code, env=env) + return stderr.decode('ascii') + + +class TestCaseBase(TestCase): + # Calls to super() in dynamically created classes are a bit odd. + # See https://github.com/pytorch/pytorch/pull/118586 for more info + # Subclassing this class and then calling super(TestCaseBase) will run + # TestCase's setUp, tearDown etc functions + pass + + +def download_file(url, binary=True): + from urllib.parse import urlsplit + from urllib import request, error + + filename = os.path.basename(urlsplit(url)[2]) + data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data')) + path = os.path.join(data_dir, filename) + + if os.path.exists(path): + return path + try: + data = request.urlopen(url, timeout=15).read() + with open(path, 'wb' if binary else 'w') as f: + f.write(data) + return path + except error.URLError as e: + msg = f"could not download test file '{url}'" + warnings.warn(msg, RuntimeWarning) + raise unittest.SkipTest(msg) from e + +def find_free_port(): + """ + Finds an available port and returns that port number. 
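+
+    Example (illustrative test name; see the NOTE below for why the retry
+    decorator is recommended when the port is handed to c10d):
+
+        @retry_on_connect_failures
+        def test_store(self):
+            port = find_free_port()
+            # ... pass ``port`` to the Store / init_process_group call under test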
+ + NOTE: If this function is being used to allocate a port to Store (or + indirectly via init_process_group or init_rpc), it should be used + in conjuction with the `retry_on_connect_failures` decorator as there is a potential + race condition where the allocated port may become unavailable before it can be used + """ + with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(('localhost', 0)) + _, port = sock.getsockname() + return port + +# Errors that we can get in c10d initialization for which we should retry tests for. +ADDRESS_IN_USE = "Address already in use" +CONNECT_TIMEOUT = "connect() timed out." + +def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE)): + """Reruns a test if the test returns a RuntimeError and the exception + contains one of the strings in connect_errors.""" + # This if block is executed when using this function as a decorator with arguments. + if func is None: + return partial(retry_on_connect_failures, connect_errors=connect_errors) + + @wraps(func) + def wrapper(*args, **kwargs): + n_retries = 10 + tries_remaining = n_retries + while True: + try: + return func(*args, **kwargs) + except RuntimeError as error: + if any(connect_error in str(error) for connect_error in connect_errors): + tries_remaining -= 1 + if tries_remaining == 0: + raise RuntimeError(f"Failing after {n_retries} retries with error: {str(error)}") from error + time.sleep(random.random()) + continue + raise + return wrapper + + +# Decorator to retry upon certain Exceptions. +def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False): + def deco_retry(f): + @wraps(f) + def f_retry(*args, **kwargs): + mtries, mdelay = tries, delay + while mtries > 1: + try: + return f(*args, **kwargs) + except ExceptionToCheck as e: + msg = "%s, Retrying in %d seconds..." % (str(e), mdelay) + print(msg) + time.sleep(mdelay) + mtries -= 1 + try: + return f(*args, **kwargs) + except ExceptionToCheck as e: + raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e if skip_after_retries else e + return f_retry # true decorator + return deco_retry + + +# FIXME: modernize these to be consistent with make_tensor +# and review including them in torch.testing +# Methods for matrix generation + +def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'): + assert rank <= l + A = torch.randn(l, l, dtype=dtype, device=device) + u, s, vh = torch.linalg.svd(A, full_matrices=False) + for i in range(l): + if i >= rank: + s[i] = 0 + elif s[i] == 0: + s[i] = 1 + return (u * s.to(dtype).unsqueeze(-2)) @ vh + +def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001): + """ + Returns a random rectangular matrix (batch of matrices) + with singular values sampled from a Gaussian with + mean `mean` and standard deviation `sigma`. + The smaller the `sigma`, the better conditioned + the output matrix is. 
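+
+    For example, the following (mirroring the other matrix helpers in this
+    file) creates a batch of two well-conditioned 4 x 3 matrices:
+    >>> # xdoctest: +SKIP("undefined variables")
+    >>> a = random_well_conditioned_matrix(2, 4, 3, dtype=dtype, device=device)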
+ """ + primitive_dtype = { + torch.float: torch.float, + torch.double: torch.double, + torch.cfloat: torch.float, + torch.cdouble: torch.double + } + x = torch.rand(shape, dtype=dtype, device=device) + m = x.size(-2) + n = x.size(-1) + u, _, vh = torch.linalg.svd(x, full_matrices=False) + s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \ + .sort(-1, descending=True).values.to(dtype) + return (u * s.unsqueeze(-2)) @ vh + +# Returns a noncontiguous (tensor with the same shape and values as t +# The noncontiguous tensor is constructed such that elements in the innermost +# dimension are separated by zeros or (whenever possible) nans +# TODO: consider more complicated noncontiguity schemes +def noncontiguous_like(t): + # Short-circuits if t is already noncontiguous + if not t.is_contiguous(): + return t + + # Choose a "weird" value that won't be accessed + if t.dtype.is_floating_point or t.dtype.is_complex: + value = math.nan + elif t.dtype == torch.bool: + value = True + else: + value = 12 + + result = t.new_empty(t.shape + (2,)) + result[..., 0] = value + result[..., 1] = t.detach() + result = result[..., 1] + result.requires_grad_(t.requires_grad) + return result + +# TODO: remove this (prefer make_symmetric_matrices below) +def random_symmetric_matrix(l, *batches, **kwargs): + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device) + A = (A + A.mT).div_(2) + return A + +# Creates a symmetric matrix or batch of symmetric matrices +# Shape must be a square matrix or batch of square matrices +def make_symmetric_matrices(*shape, device, dtype): + assert shape[-1] == shape[-2] + t = make_tensor(shape, device=device, dtype=dtype) + t = (t + t.mT).div_(2) + return t + +def random_hermitian_matrix(l, *batches, **kwargs): + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device) + A = (A + A.mH).div_(2) + return A + + +def random_symmetric_psd_matrix(l, *batches, **kwargs): + """ + Returns a batch of random symmetric positive-semi-definite matrices. + The shape of the result is batch_dims + (matrix_size, matrix_size) + The following example creates a tensor of size 2 x 4 x 3 x 3 + >>> # xdoctest: +SKIP("undefined variables") + >>> matrices = random_symmetric_psd_matrix(3, 2, 4, dtype=dtype, device=device) + """ + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device) + return A @ A.mT + + +def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'): + """ + Returns a batch of random Hermitian positive-semi-definite matrices. 
+ The shape of the result is batch_dims + (matrix_size, matrix_size) + The following example creates a tensor of size 2 x 4 x 3 x 3 + >>> # xdoctest: +SKIP("undefined variables") + >>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device) + """ + A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device) + return A @ A.mH + + +# TODO: remove this (prefer make_symmetric_pd_matrices below) +def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs): + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), + dtype=dtype, device=device) + return torch.matmul(A, A.mT) \ + + torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5 + + +# Creates a symmetric positive-definite matrix or batch of +# such matrices +def make_symmetric_pd_matrices(*shape, device, dtype): + assert shape[-1] == shape[-2] + t = make_tensor(shape, device=device, dtype=dtype) + i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5 + return t @ t.mT + i + +def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device): + """ + Returns a batch of random Hermitian positive-definite matrices. + The shape of the result is batch_dims + (matrix_size, matrix_size) + The following example creates a tensor of size 2 x 4 x 3 x 3 + >>> # xdoctest: +SKIP("undefined variables") + >>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device) + """ + A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), + dtype=dtype, device=device) + return A @ A.mH + torch.eye(matrix_size, dtype=dtype, device=device) + +# Creates a full rank matrix with distinct singular values or +# a batch of such matrices +def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype, requires_grad=False): + with torch.no_grad(): + t = make_tensor(shape, device=device, dtype=dtype) + u, _, vh = torch.linalg.svd(t, full_matrices=False) + real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype + k = min(shape[-1], shape[-2]) + # We choose the singular values to be "around one" + # This is to make the matrix well conditioned + # s = [2, 3, ..., k+1] + s = torch.arange(2, k + 2, dtype=real_dtype, device=device) + # s = [2, -3, 4, ..., (-1)^k k+1] + s[1::2] *= -1. + # 1 + 1/s so that the singular values are in the range [2/3, 3/2] + # This gives a condition number of 9/4, which should be good enough + s.reciprocal_().add_(1.) + # Note that the singular values need not be ordered in an SVD so + # we don't need need to sort S + x = (u * s.to(u.dtype)) @ vh + x.requires_grad_(requires_grad) + return x + +def random_matrix(rows, columns, *batch_dims, **kwargs): + """Return rectangular matrix or batches of rectangular matrices. 
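# Sketch (assumption, not upstream code): the conditioning argument above (singular
# values kept in [2/3, 3/2]) caps the 2-norm condition number at 9/4, which can be
# checked numerically.
import torch

m = make_fullrank_matrices_with_distinct_singular_values(4, 4, device='cpu', dtype=torch.double)
assert torch.linalg.matrix_rank(m) == 4
assert torch.linalg.cond(m) <= 9 / 4 + 1e-6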
+ + Parameters: + dtype - the data type + device - the device kind + singular - when True, the output will be singular + """ + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + silent = kwargs.get("silent", False) + singular = kwargs.get("singular", False) + if silent and not torch._C.has_lapack: + return torch.ones(rows, columns, dtype=dtype, device=device) + + A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device) + if A.numel() == 0: + return A + u, _, vh = torch.linalg.svd(A, full_matrices=False) + k = min(rows, columns) + s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device) + if singular: + # make matrix singular + s[k - 1] = 0 + if k > 2: + # increase the order of singularity so that the pivoting + # in LU factorization will be non-trivial + s[0] = 0 + return (u * s.unsqueeze(-2)) @ vh + + +def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs): + """Return rectangular matrix or batches of rectangular matrices with + given rank. + """ + B = random_matrix(rows, rank, *batch_dims, **kwargs) + C = random_matrix(rank, columns, *batch_dims, **kwargs) + return B.matmul(C) + + +def random_sparse_matrix(rows, columns, density=0.01, **kwargs): + """Return rectangular random sparse matrix within given density. + + The density of the result approaches to given density as the size + of the matrix is increased and a relatively small value of density + is specified but higher than min(rows, columns)/(rows * columns) + for non-singular matrices. + """ + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + singular = kwargs.get("singular", False) + + k = min(rows, columns) + nonzero_elements = max(min(rows, columns), int(rows * columns * density)) + + row_indices = [i % rows for i in range(nonzero_elements)] + column_indices = [i % columns for i in range(nonzero_elements)] + random.shuffle(column_indices) + indices = [row_indices, column_indices] + values = torch.randn(nonzero_elements, dtype=dtype, device=device) + # ensure that the diagonal dominates + values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp() + indices_tensor = torch.tensor(indices) + A = torch.sparse_coo_tensor(indices_tensor, values, (rows, columns), device=device) + return A.coalesce() + + +def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs): + """Return random sparse positive-definite matrix with given density. 
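# Sketch (assumption, not upstream code): what the generators above promise.
import torch

lr = random_lowrank_matrix(2, 5, 6, dtype=torch.double)
assert lr.shape == (5, 6)
assert torch.linalg.matrix_rank(lr) == 2

sp = random_sparse_matrix(10, 10, density=0.3, dtype=torch.double)
assert sp.is_sparse and sp.shape == (10, 10) and sp.is_coalesced()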
+ + The eigenvalues of the matrix are defined as:: + arange(1, matrix_size+1)/matrix_size + + Algorithm: + A = diag(arange(1, matrix_size+1)/matrix_size) + while : + + R = + A = R^T A R + """ + import math + torch = kwargs.get('torch', globals()['torch']) + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + data = {(i, i): float(i + 1) / matrix_size + for i in range(matrix_size)} + + + def multiply(data, N, i, j, cs, sn, left=True): + for k in range(N): + if left: + ik, jk = (k, i), (k, j) + else: + ik, jk = (i, k), (j, k) + aik, ajk = data.get(ik, 0), data.get(jk, 0) + aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk + if aik: + data[ik] = aik + else: + data.pop(ik, None) + if ajk: + data[jk] = ajk + else: + data.pop(jk, None) + + target_nnz = density * matrix_size * matrix_size + while len(data) < target_nnz: + i = random.randint(0, matrix_size - 1) + j = random.randint(0, matrix_size - 1) + if i != j: + theta = random.uniform(0, 2 * math.pi) + cs = math.cos(theta) + sn = math.sin(theta) + multiply(data, matrix_size, i, j, cs, sn, left=True) + multiply(data, matrix_size, i, j, cs, sn, left=False) + icoords, jcoords, values = [], [], [] + for (i, j), v in sorted(data.items()): + icoords.append(i) + jcoords.append(j) + values.append(v) + indices_tensor = torch.tensor([icoords, jcoords]) + return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device) + +# FIXME: remove this by updating test suites using it +def do_test_dtypes(self, dtypes, layout, device): + for dtype in dtypes: + if dtype != torch.float16: + out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device) + self.assertIs(dtype, out.dtype) + self.assertIs(layout, out.layout) + self.assertEqual(device, out.device) + +# FIXME: remove this by updating test suites using it +def do_test_empty_full(self, dtypes, layout, device): + shape = torch.Size([2, 3]) + + def check_value(tensor, dtype, layout, device, value, requires_grad): + self.assertEqual(shape, tensor.shape) + self.assertIs(dtype, tensor.dtype) + self.assertIs(layout, tensor.layout) + self.assertEqual(tensor.requires_grad, requires_grad) + if tensor.is_cuda and device is not None: + self.assertEqual(device, tensor.device) + if value is not None: + fill = tensor.new(shape).fill_(value) + self.assertEqual(tensor, fill) + + def get_int64_dtype(dtype): + module = '.'.join(str(dtype).split('.')[1:-1]) + if not module: + return torch.int64 + return operator.attrgetter(module)(torch).int64 + + default_dtype = torch.get_default_dtype() + check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False) + check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False) + for dtype in dtypes: + for rg in {dtype.is_floating_point, False}: + int64_dtype = get_int64_dtype(dtype) + v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg) + check_value(v, dtype, layout, device, None, rg) + out = v.new() + check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg), + dtype, layout, device, None, rg) + check_value(v.new_empty(shape), dtype, layout, device, None, False) + check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False), + int64_dtype, layout, device, None, False) + check_value(torch.empty_like(v), dtype, layout, device, None, False) + check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False), + int64_dtype, layout, device, None, 
False) + + if dtype is not torch.float16 and layout != torch.sparse_coo: + fv = 3 + v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg) + check_value(v, dtype, layout, device, fv, rg) + check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False) + out = v.new() + check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg), + dtype, layout, device, fv + 2, rg) + check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False), + int64_dtype, layout, device, fv + 3, False) + check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False) + check_value(torch.full_like(v, fv + 5, + dtype=int64_dtype, layout=layout, device=device, requires_grad=False), + int64_dtype, layout, device, fv + 5, False) + +# FIXME: improve load_tests() documentation here +running_script_path = None +def set_running_script_path(): + global running_script_path + try: + running_file = os.path.abspath(os.path.realpath(sys.argv[0])) + if running_file.endswith('.py'): # skip if the running file is not a script + running_script_path = running_file + except Exception: + pass + +def check_test_defined_in_running_script(test_case): + if running_script_path is None: + return + test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__))) + assert test_case_class_file == running_script_path, f"Class of loaded TestCase \"{test_case.id()}\" " \ + f"is not defined in the running script \"{running_script_path}\", but in \"{test_case_class_file}\". Did you " \ + "accidentally import a unittest.TestCase from another file?" + +def load_tests(loader, tests, pattern): + set_running_script_path() + test_suite = unittest.TestSuite() + for test_group in tests: + if not DISABLE_RUNNING_SCRIPT_CHK: # noqa: F821 + for test in test_group: + check_test_defined_in_running_script(test) + if test_group._tests: + test_suite.addTest(test_group) + return test_suite + +# FIXME: document this and move it to test_serialization +class BytesIOContext(io.BytesIO): + def __enter__(self): + return self + + def __exit__(self, *args): + pass + +# Tentative value for nondet_tol for gradcheck when backward implementation +# relies on nondeterministic operations, i.e., those listed here: +# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html +# +# For more information see https://github.com/pytorch/pytorch/issues/56202 +GRADCHECK_NONDET_TOL = 1e-12 + +TestEnvironment.def_flag("TEST_WITH_SLOW_GRADCHECK", env_var="PYTORCH_TEST_WITH_SLOW_GRADCHECK") + +skipIfSlowGradcheckEnv = unittest.skipIf( + TEST_WITH_SLOW_GRADCHECK, # noqa: F821 + "Tests that don't use gradcheck don't need to run on slow_gradcheck CI" +) + +def gradcheck(fn, inputs, **kwargs): + # Wrapper around gradcheck that enables certain keys by default. + # Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and + # forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks + # to be disabled to default for the public-facing api to avoid breaking user code. + # + # All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck. 
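    # A typical call from inside a TestCase method looks like this (sketch, shown only
    # as a comment):
    #
    #     def f(x):
    #         return (x * x).sum()
    #     x = torch.randn(3, dtype=torch.double, requires_grad=True)
    #     self.assertTrue(gradcheck(f, (x,)))  # fast_mode and batched-grad checks on by default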
+ default_values = { + "check_batched_grad": True, + "fast_mode": True, + } + + if TEST_WITH_SLOW_GRADCHECK: # noqa: F821 + default_values["fast_mode"] = False + + for key, value in default_values.items(): + # default value override values explicitly set to None + k = kwargs.get(key, None) + kwargs[key] = k if k is not None else value + + return torch.autograd.gradcheck(fn, inputs, **kwargs) + +def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs): + # Wrapper around gradgradcheck that enables certain keys by default + # See gradcheck above for an explanation of why we need something like this. + # + # All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck + default_values = { + "check_batched_grad": True, + "fast_mode": True, + } + + if TEST_WITH_SLOW_GRADCHECK: # noqa: F821 + default_values["fast_mode"] = False + + for key, value in default_values.items(): + # default value override values explicitly set to None + k = kwargs.get(key, None) + kwargs[key] = k if k is not None else value + + return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs) + + +def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs): + # call assert function rather than returning a bool since it's nicer + # if we get whether this failed on the gradcheck or the gradgradcheck. + test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs)) + test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs)) + + +@contextmanager +def set_cwd(path: str) -> Iterator[None]: + old_cwd = os.getcwd() + try: + os.chdir(path) + yield + finally: + os.chdir(old_cwd) + + +# FIXME: delete this +# Using @toleranceOverride specific to your test is the recommended way +# of doing this. These are just some values that worked for test_nn. +dtype2prec_DONTUSE = {torch.float: 1e-5, + torch.double: 1e-5, + torch.half: 1e-2, + torch.bfloat16: 1e-1} + +# FIXME: move to test_sparse or sparse utils +# This is a wrapper that wraps a test to run this test twice, one with +# coalesced=True, another with coalesced=False for coalesced/uncoalesced sparse tensors. 
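# A sparse test is then typically written as (sketch; the @dtypes decorator from
# common_device_type is an assumed companion, not required by this helper):
#
#     @coalescedonoff
#     @dtypes(torch.double)
#     def test_sparse_sum(self, device, dtype, coalesced):
#         ...
#
# and runs once with coalesced=True and once with coalesced=False.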
+def coalescedonoff(f): + @wraps(f) + def wrapped(self, *args, **kwargs): + f(self, *args, **kwargs, coalesced=True) + f(self, *args, **kwargs, coalesced=False) + return wrapped + + +def is_coalesced_indices(s): + indices = s._indices() + hash_coeffs = (1,) + s.shape[s.sparse_dim() - 1:0:-1] + hash_indices = torch.tensor(hash_coeffs, device=s.device).cumprod(-1).flip(-1) + if s.sparse_dim() > 1: + hash_indices.unsqueeze_(-1) + hash_indices = (indices * hash_indices).sum(0) + else: + hash_indices = indices * hash_indices + + # check if indices are sorted + res = torch.allclose(hash_indices, hash_indices.sort()[0]) + + # check if there are no repeated indices + res = res and torch.allclose(hash_indices, hash_indices.unique()) + + return res + + +@contextlib.contextmanager +def disable_gc(): + if gc.isenabled(): + try: + gc.disable() + yield + finally: + gc.enable() + else: + yield + + +def find_library_location(lib_name: str) -> Path: + # return the shared library file in the installed folder if exist, + # else the file in the build folder + torch_root = Path(torch.__file__).resolve().parent + path = torch_root / 'lib' / lib_name + if os.path.exists(path): + return path + torch_root = Path(__file__).resolve().parent.parent.parent + return torch_root / 'build' / 'lib' / lib_name + +def skip_but_pass_in_sandcastle(reason): + """ + Similar to unittest.skip, however in the sandcastle environment it just + "passes" the test instead to avoid creating tasks complaining about tests + skipping continuously. + """ + def decorator(func): + if not IS_SANDCASTLE: # noqa: F821 + func.__unittest_skip__ = True + func.__unittest_skip_why__ = reason + return func + + @wraps(func) + def wrapper(*args, **kwargs): + print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr) + return + return wrapper + + return decorator + +def mock_wrapper(method): + """ + Returns a function that calls the real implementation of a method + in addition to passing args to a mock object. + """ + mock = MagicMock() + + @wraps(method) + def wrapper(self, *args, **kwargs): + mock(*args, **kwargs) + return method(self, *args, **kwargs) + wrapper.mock = mock # type: ignore[attr-defined] + return wrapper + +def get_tensors_from(args, kwargs): + """ Returns a set of all Tensor objects in the given args and kwargs. 
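# Sketch (assumption, not upstream code; _Scaler is a hypothetical class):
# mock_wrapper keeps the real behaviour while recording calls on `.mock`, and
# get_tensors_from picks the Tensor arguments out of an (args, kwargs) pair.
import torch

class _Scaler:
    @mock_wrapper
    def scale(self, t, factor=2.0):
        return t * factor

t = torch.ones(2)
assert torch.equal(_Scaler().scale(t, factor=3.0), torch.full((2,), 3.0))
assert _Scaler.scale.mock.call_count == 1
assert get_tensors_from((t, "unused"), {"alpha": 0.5}) == {t}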
""" + return set([arg for arg in args if isinstance(arg, Tensor)] + + [v for v in kwargs.values() if isinstance(v, Tensor)]) + + +# Returns scalar tensor representation of a list of integer byte values +def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.device): + dtype_to_ctype: Dict[torch.dtype, Any] = { + torch.int8: ctypes.c_int8, + torch.uint8: ctypes.c_uint8, + torch.uint16: ctypes.c_uint16, + torch.uint32: ctypes.c_uint32, + torch.uint64: ctypes.c_uint64, + torch.int16: ctypes.c_int16, + torch.int32: ctypes.c_int32, + torch.int64: ctypes.c_int64, + torch.bool: ctypes.c_bool, + torch.float32: ctypes.c_float, + torch.complex64: ctypes.c_float, + torch.float64: ctypes.c_double, + torch.complex128: ctypes.c_double, + } + ctype = dtype_to_ctype[dtype] + num_bytes = ctypes.sizeof(ctype) + + def check_bytes(byte_list): + for byte in byte_list: + assert 0 <= byte <= 255 + + if dtype.is_complex: + assert len(byte_list) == (num_bytes * 2) + check_bytes(byte_list) + real = ctype.from_buffer((ctypes.c_byte * num_bytes)( + *byte_list[:num_bytes])).value + imag = ctype.from_buffer((ctypes.c_byte * num_bytes)( + *byte_list[num_bytes:])).value + res = real + 1j * imag + else: + assert len(byte_list) == num_bytes + check_bytes(byte_list) + res = ctype.from_buffer((ctypes.c_byte * num_bytes)( + *byte_list)).value + + return torch.tensor(res, device=device, dtype=dtype) + + +def copy_func(f): + """Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)""" + g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, + argdefs=f.__defaults__, + closure=f.__closure__) + g = functools.update_wrapper(g, f) + g.__kwdefaults__ = f.__kwdefaults__ + return g + + +def xfail_inherited_tests(tests): + """ + Given a list of test names which are defined by a superclass of the + class this decorates, mark them as expected failure. This is useful + if you are doing poor man's parameterized tests by subclassing a generic + test class. + """ + def deco(cls): + for t in tests: + # NB: expectedFailure operates by mutating the method in question, + # which is why you have to copy the function first + setattr(cls, t, unittest.expectedFailure(copy_func(getattr(cls, t)))) + return cls + return deco + + +def skip_but_pass_in_sandcastle_if(condition, reason): + """ + Similar to unittest.skipIf, however in the sandcastle environment it just + "passes" the test instead to avoid creating tasks complaining about tests + skipping continuously. + """ + def decorator(func): + if condition: + if IS_SANDCASTLE: # noqa: F821 + @wraps(func) + def wrapper(*args, **kwargs): + print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr) + return wrapper + else: + func.__unittest_skip__ = True + func.__unittest_skip_why__ = reason + + return func + + return decorator + +def dtype_name(dtype): + """ Returns the pretty name of the dtype (e.g. torch.int64 -> int64). """ + return str(dtype).split('.')[1] + + +dtype_abbrs = { + torch.bfloat16: 'bf16', + torch.float64: 'f64', + torch.float32: 'f32', + torch.float16: 'f16', + torch.complex32: 'c32', + torch.complex64: 'c64', + torch.complex128: 'c128', + torch.int8: 'i8', + torch.int16: 'i16', + torch.int32: 'i32', + torch.int64: 'i64', + torch.bool: 'b8', + torch.uint8: 'u8', +} + + +def set_single_threaded_if_parallel_tbb(fn): + """Set test to be single threaded for parallel tbb. 
+ + See https://github.com/pytorch/pytorch/issues/64571#issuecomment-914691883 + """ + if not IS_TBB: + return fn + + @wraps(fn) + def wrap_fn(*args, **kwargs): + num_threads = torch.get_num_threads() + torch.set_num_threads(1) + try: + return fn(*args, **kwargs) + finally: + torch.set_num_threads(num_threads) + return wrap_fn + + +@functools.lru_cache +def get_cycles_per_ms() -> float: + """Measure and return approximate number of cycles per millisecond for torch.cuda._sleep + """ + + def measure() -> float: + start = torch.cuda.Event(enable_timing=True) + end = torch.cuda.Event(enable_timing=True) + start.record() + torch.cuda._sleep(1000000) + end.record() + end.synchronize() + cycles_per_ms = 1000000 / start.elapsed_time(end) + return cycles_per_ms + + # Get 10 values and remove the 2 max and 2 min and return the avg. + # This is to avoid system disturbance that skew the results, e.g. + # the very first cuda call likely does a bunch of init, which takes + # much longer than subsequent calls. + # + # Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs + # and seems to return stable values. Therefore, we enable caching + # using lru_cache decorator above. + num = 10 + vals = [] + for _ in range(num): + vals.append(measure()) + vals = sorted(vals) + return mean(vals[2 : num - 2]) + + +# OpInfo utils + +T = TypeVar('T') +def first_sample(self: unittest.TestCase, samples: Iterable[T]) -> T: + """ + Returns the first sample from an iterable of samples, like those returned by OpInfo. + The test will be skipped if no samples are available. + """ + try: + return next(iter(samples)) + except StopIteration as e: + raise unittest.SkipTest('Skipped! Need at least 1 sample input') from e + +# this helper method is to recursively +# clone the tensor-type input of operators tested by OpInfo +def clone_input_helper(input): + if isinstance(input, torch.Tensor): + return torch.clone(input) + + if isinstance(input, Sequence): + return tuple(map(clone_input_helper, input)) + + return input + +@contextmanager +def custom_op(opname, symbolic_fn, opset_version): + """Context manager/decorator to test ONNX export with custom operator""" + try: + register_custom_op_symbolic(opname, symbolic_fn, opset_version) + yield + finally: + unregister_custom_op_symbolic(opname, opset_version) + + +def outs_and_grads(fn, graph_inps, inps): + outs = fn(*graph_inps) + for out in pytree.tree_leaves(outs): + if isinstance(out, torch.Tensor) and out.requires_grad: + out.sum().backward(retain_graph=True) + grads = [inp.grad for inp in pytree.tree_leaves(inps) if isinstance(inp, torch.Tensor)] + for inp in pytree.tree_leaves(inps): + if isinstance(inp, torch.Tensor): + inp.grad = None + return outs, grads + +def compare_equal_outs_and_grads(test, m1, m2, inps): + r1, g1 = outs_and_grads(m1, inps, inps) + r2, g2 = outs_and_grads(m2, inps, inps) + test.assertEqual(r1, r2) + test.assertEqual(g1, g2) + +class TestGradients(TestCase): + exact_dtype = True + + # Copies inputs to inplace operations to avoid inplace modifications + # to leaves requiring gradient + def _get_safe_inplace(self, inplace_variant): + @wraps(inplace_variant) + def _fn(t, *args, **kwargs): + return inplace_variant(t.clone(), *args, **kwargs) + + return _fn + + def _check_helper(self, device, dtype, op, variant, check, *, check_forward_ad=False, check_backward_ad=True, + check_batched_grad=None, check_batched_forward_grad=False): + assert check in ('gradcheck', 'bwgrad_bwgrad', 'fwgrad_bwgrad') + # NB: check_backward_ad does not affect gradgradcheck 
(always True) + if variant is None: + self.skipTest("Skipped! Variant not implemented.") + if not op.supports_dtype(dtype, torch.device(device).type): + self.skipTest(f"Skipped! {op.name} does not support dtype {str(dtype)}") + + def is_inplace(variant): + if hasattr(variant, "__wrapped__"): + return variant.__wrapped__ is op.get_inplace() + return variant is op.get_inplace() + + include_conjugated_inputs = op.test_conjugated_samples and dtype.is_complex + + samples = op.sample_inputs(device, dtype, requires_grad=True, include_conjugated_inputs=include_conjugated_inputs, + small_inputs_only=TEST_WITH_SLOW_GRADCHECK) # noqa: F821 + + for sample in samples: + if sample.broadcasts_input and is_inplace(variant): + continue + + # Gradcheck expects tensors as its input, but autograd actually supports tensorlists + # and tensors passed as kwargs. The following creates a function that accepts just + # the tensors that require grad as varargs, and then recomposes them back into the + # original input. + + # Creates gradcheck inputs by identifying tensors requiring grad + all_args = None + if is_iterable_of_tensors(sample.input): + all_args = chain(sample.input, sample.args, sample.kwargs.values()) + else: + all_args = tuple(chain((sample.input,), sample.args, sample.kwargs.values())) + gradcheck_args = tuple(x for x in all_args if (isinstance(x, torch.Tensor) and x.requires_grad)) + + # Verifies sample input tensors should have no grad + # This may happen if the same tensor is used in two different SampleInputs + for t in gradcheck_args: + self.assertIsNone(t.grad, + "A sampled input has a gradient before running autograd. " + "This usually means that (at least) one input tensor is reused " + "across different SampleInputs. " + "Please create a new tensor for each SampleInput.") + + def _input_recomposition_helper(inputs, inp, input_idx): + if is_iterable_of_tensors(inp): + tensor_list = [] + for x in inp: + if isinstance(x, torch.Tensor) and x.requires_grad: + tensor_list.append(inputs[input_idx]) + input_idx = input_idx + 1 + else: + tensor_list.append(x) + return tensor_list, input_idx + elif isinstance(inp, torch.Tensor) and inp.requires_grad: + return inputs[input_idx], input_idx + 1 + else: + return inp, input_idx + + def fn(*inputs): + # Puts inputs back into sample properly + positional_args = [] + input_idx = 0 + inp, input_idx = _input_recomposition_helper(inputs, sample.input, input_idx) + positional_args.append(inp) + + for x in sample.args: + inp, input_idx = _input_recomposition_helper(inputs, x, input_idx) + positional_args.append(inp) + + # Recreates kwargs + kwargs = {} + for k, v in sample.kwargs.items(): + inp, input_idx = _input_recomposition_helper(inputs, v, input_idx) + kwargs[k] = inp + + output = op.gradcheck_wrapper(variant, *positional_args, **kwargs) + if sample.output_process_fn_grad is not None: + return sample.output_process_fn_grad(output) + return output + + if check == 'gradcheck': + if check_batched_grad is None: + check_batched_grad = op.check_batched_grad + self.assertTrue(gradcheck(fn, gradcheck_args, + check_batched_grad=check_batched_grad, + check_grad_dtypes=True, + nondet_tol=op.gradcheck_nondet_tol, + fast_mode=op.gradcheck_fast_mode, + check_forward_ad=check_forward_ad, + check_backward_ad=check_backward_ad, + check_undefined_grad=True, + check_batched_forward_grad=check_batched_forward_grad)) + elif check in ('bwgrad_bwgrad', 'fwgrad_bwgrad'): # gradgrad check + self.assertFalse(check_forward_ad, msg="Cannot run forward AD check for gradgradcheck") + for 
gen_non_contig_grad_outputs in (False, True): + kwargs = { + "gen_non_contig_grad_outputs": gen_non_contig_grad_outputs, + "check_batched_grad": op.check_batched_gradgrad, + "check_grad_dtypes": True, + "nondet_tol": op.gradcheck_nondet_tol, + "fast_mode": op.gradcheck_fast_mode + } + if check == "fwgrad_bwgrad": + kwargs["check_fwd_over_rev"] = True + kwargs["check_rev_over_rev"] = False + kwargs["check_batched_grad"] = False + kwargs["check_undefined_grad"] = False + + self.assertTrue(gradgradcheck(fn, gradcheck_args, **kwargs)) + else: + self.assertTrue(False, msg="Unknown check requested!") + + def _grad_test_helper(self, device, dtype, op, variant, *, check_forward_ad=False, check_backward_ad=True, + check_batched_grad=None, check_batched_forward_grad=False): + return self._check_helper(device, dtype, op, variant, 'gradcheck', check_forward_ad=check_forward_ad, + check_backward_ad=check_backward_ad, check_batched_grad=check_batched_grad, + check_batched_forward_grad=check_batched_forward_grad) + + def _skip_helper(self, op, device, dtype): + if dtype not in op.supported_backward_dtypes(torch.device(device).type): + self.skipTest("Skipped! Op doesn't support autograd for this dtype.") + if not op.supports_autograd and not op.supports_forward_ad: + self.skipTest("Skipped! autograd not supported.") + +def make_lazy_class(cls): + + def lazy_init(self, cb): + self._cb = cb + self._value = None + + cls.__init__ = lazy_init + + for basename in [ + "add", "sub", "mul", "truediv", "floordiv", "mod", "divmod", "pow", + "lshift", "rshift", "and", "or", "xor", "neg", "pos", "abs", "invert", + "eq", "ne", "lt", "le", "gt", "ge", "bool", "int", "index", + ]: + name = f"__{basename}__" + + def inner_wrapper(name): + use_operator = basename not in ("bool", "int") + + def wrapped(self, *args, **kwargs): + if self._cb is not None: + self._value = self._cb() + self._cb = None + if not use_operator: + return getattr(self._value, name)(*args, **kwargs) + else: + return getattr(operator, name)(self._value, *args, **kwargs) + return wrapped + + setattr(cls, name, inner_wrapper(name)) + + return cls + +@make_lazy_class +class LazyVal: + pass + + +def munge_exc(e, *, suppress_suffix=True, suppress_prefix=True, file=None, skip=0): + if file is None: + file = inspect.stack()[1 + skip].filename # skip one frame + + s = str(e) + + # Remove everything that looks like stack frames in NOT this file + def repl_frame(m): + if m.group(1) != file: + return "" + # Don't accept top-level, even for this script, these will wobble + # depending on how the testing script was invoked + if m.group(2) == "": + return "" + + return m.group(0) + + s = re.sub(r' File "([^"]+)", line \d+, in (.+)\n .+\n( +[~^]+ *\n)?', repl_frame, s) + s = re.sub(r"line \d+", "line N", s) + s = re.sub(r".py:\d+", ".py:N", s) + s = re.sub(file, os.path.basename(file), s) + s = re.sub(os.path.join(os.path.dirname(torch.__file__), ""), "", s) + s = re.sub(r"\\", "/", s) # for Windows + if suppress_suffix: + s = re.sub(r"\n*Set TORCH_LOGS.+", "", s, flags=re.DOTALL) + s = re.sub(r"\n*You can suppress this exception.+", "", s, flags=re.DOTALL) + if suppress_prefix: + s = re.sub(r"Cannot export model.+\n\n", "", s) + s = re.sub(r" +$", "", s, flags=re.M) + return s diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/dynamo_test_failures.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/dynamo_test_failures.py new file mode 100644 index 0000000000000000000000000000000000000000..eb626b552ce6abf67d363654b536e5218f197283 --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/dynamo_test_failures.py @@ -0,0 +1,113 @@ +import logging +import os +import sys + +# NOTE: [dynamo_test_failures.py] +# +# We generate xFailIfTorchDynamo* for all tests in `dynamo_expected_failures` +# We generate skipIfTorchDynamo* for all tests in `dynamo_skips` +# +# For an easier-than-manual way of generating and updating these lists, +# see scripts/compile_tests/update_failures.py +# +# If you're adding a new test, and it's failing PYTORCH_TEST_WITH_DYNAMO=1, +# either add the appropriate decorators to your test or add skips for them +# via test/dynamo_skips and test/dynamo_expected_failures. +# +# *These are not exactly unittest.expectedFailure and unittest.skip. We'll +# always execute the test and then suppress the signal, if necessary. +# If your tests crashes, or is slow, please use @skipIfTorchDynamo instead. +# +# The expected failure and skip files are located in test/dynamo_skips and +# test/dynamo_expected_failures. They're individual files rather than a list so +# git will merge changes easier. + + +def find_test_dir(): + # Find the path to the dynamo expected failure and skip files. + from os.path import abspath, basename, dirname, exists, join, normpath + + if sys.platform == "win32": + return None + + # Check relative to this file (local build): + test_dir = normpath(join(dirname(abspath(__file__)), "../../../test")) + if exists(join(test_dir, "dynamo_expected_failures")): + return test_dir + + # Check relative to __main__ (installed builds relative to test file): + main = sys.modules["__main__"] + file = getattr(main, "__file__", None) + if file is None: + # Generated files do not have a module.__file__ + return None + test_dir = dirname(abspath(file)) + while dirname(test_dir) != test_dir: + if basename(test_dir) == "test" and exists( + join(test_dir, "dynamo_expected_failures") + ): + return test_dir + test_dir = dirname(test_dir) + + # Not found + return None + + +test_dir = find_test_dir() +if not test_dir: + logger = logging.getLogger(__name__) + logger.warning( + "test/dynamo_expected_failures directory not found - known dynamo errors won't be skipped." + ) + +# Tests that run without strict mode in PYTORCH_TEST_WITH_INDUCTOR=1. +# Please don't add anything to this list. +FIXME_inductor_non_strict = { + "test_modules", + "test_ops", + "test_ops_gradients", + "test_torch", +} + +# We generate unittest.expectedFailure for all of the following tests +# when run under PYTORCH_TEST_WITH_DYNAMO=1. 
+# see NOTE [dynamo_test_failures.py] for more details +# +# This lists exists so we can more easily add large numbers of failing tests, +if test_dir is None: + dynamo_expected_failures = set() + dynamo_skips = set() +else: + failures_directory = os.path.join(test_dir, "dynamo_expected_failures") + skips_directory = os.path.join(test_dir, "dynamo_skips") + + dynamo_expected_failures = set(os.listdir(failures_directory)) + dynamo_skips = set(os.listdir(skips_directory)) + +# TODO: due to case sensitivity problems, for now list these files by hand +extra_dynamo_skips = { + "TestProxyTensorOpInfoCPU.test_make_fx_exhaustive_T_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_exhaustive_t_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_fake_exhaustive_T_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_fake_exhaustive_t_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_T_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_t_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_inplace_T_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_inplace_t_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_out_T_cpu_float32", + "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_out_t_cpu_float32", +} +dynamo_skips = dynamo_skips.union(extra_dynamo_skips) + + +# verify some invariants +for test in dynamo_expected_failures.union(dynamo_skips): + if len(test.split(".")) != 2: + raise AssertionError(f'Invalid test name: "{test}"') + +intersection = dynamo_expected_failures.intersection(dynamo_skips) +if len(intersection) > 0: + raise AssertionError( + "there should be no overlap between dynamo_expected_failures " + "and dynamo_skips, got " + str(intersection) + ) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/hypothesis_utils.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/hypothesis_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..98aa82e1c93d2fa81cf88ce058abb00a619ae7e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/hypothesis_utils.py @@ -0,0 +1,371 @@ +# mypy: ignore-errors + +from collections import defaultdict +from collections.abc import Iterable +import numpy as np +import torch + +import hypothesis +from functools import reduce +from hypothesis import assume +from hypothesis import settings +from hypothesis import strategies as st +from hypothesis.extra import numpy as stnp +from hypothesis.strategies import SearchStrategy + +from torch.testing._internal.common_quantized import _calculate_dynamic_qparams, _calculate_dynamic_per_channel_qparams + +# Setup for the hypothesis tests. +# The tuples are (torch_quantized_dtype, zero_point_enforce), where the last +# element is enforced zero_point. If None, any zero_point point within the +# range of the data type is OK. + +# Tuple with all quantized data types. +_ALL_QINT_TYPES = ( + torch.quint8, + torch.qint8, + torch.qint32, +) + +# Enforced zero point for every quantized data type. +# If None, any zero_point point within the range of the data type is OK. 
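# (In the mapping below only torch.qint32 pins the zero_point, to 0; quint8 and qint8
# leave it free to be drawn anywhere in the dtype's range.)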
+_ENFORCED_ZERO_POINT = defaultdict(lambda: None, { + torch.quint8: None, + torch.qint8: None, + torch.qint32: 0 +}) + +def _get_valid_min_max(qparams): + scale, zero_point, quantized_type = qparams + adjustment = 1 + torch.finfo(torch.float).eps + _long_type_info = torch.iinfo(torch.long) + long_min, long_max = _long_type_info.min / adjustment, _long_type_info.max / adjustment + # make sure intermediate results are within the range of long + min_value = max((long_min - zero_point) * scale, (long_min / scale + zero_point)) + max_value = min((long_max - zero_point) * scale, (long_max / scale + zero_point)) + return np.float32(min_value), np.float32(max_value) + +# This wrapper wraps around `st.floats` and checks the version of `hypothesis`, if +# it is too old, removes the `width` parameter (which was introduced) +# in 3.67.0 +def _floats_wrapper(*args, **kwargs): + if 'width' in kwargs and hypothesis.version.__version_info__ < (3, 67, 0): + # As long as nan, inf, min, max are not specified, reimplement the width + # parameter for older versions of hypothesis. + no_nan_and_inf = ( + (('allow_nan' in kwargs and not kwargs['allow_nan']) or + 'allow_nan' not in kwargs) and + (('allow_infinity' in kwargs and not kwargs['allow_infinity']) or + 'allow_infinity' not in kwargs)) + min_and_max_not_specified = ( + len(args) == 0 and + 'min_value' not in kwargs and + 'max_value' not in kwargs + ) + if no_nan_and_inf and min_and_max_not_specified: + if kwargs['width'] == 16: + kwargs['min_value'] = torch.finfo(torch.float16).min + kwargs['max_value'] = torch.finfo(torch.float16).max + elif kwargs['width'] == 32: + kwargs['min_value'] = torch.finfo(torch.float32).min + kwargs['max_value'] = torch.finfo(torch.float32).max + elif kwargs['width'] == 64: + kwargs['min_value'] = torch.finfo(torch.float64).min + kwargs['max_value'] = torch.finfo(torch.float64).max + kwargs.pop('width') + return st.floats(*args, **kwargs) + +def floats(*args, **kwargs): + if 'width' not in kwargs: + kwargs['width'] = 32 + return _floats_wrapper(*args, **kwargs) + +"""Hypothesis filter to avoid overflows with quantized tensors. + +Args: + tensor: Tensor of floats to filter + qparams: Quantization parameters as returned by the `qparams`. + +Returns: + True + +Raises: + hypothesis.UnsatisfiedAssumption + +Note: This filter is slow. Use it only when filtering of the test cases is + absolutely necessary! +""" +def assume_not_overflowing(tensor, qparams): + min_value, max_value = _get_valid_min_max(qparams) + assume(tensor.min() >= min_value) + assume(tensor.max() <= max_value) + return True + +"""Strategy for generating the quantization parameters. + +Args: + dtypes: quantized data types to sample from. + scale_min / scale_max: Min and max scales. If None, set to 1e-3 / 1e3. + zero_point_min / zero_point_max: Min and max for the zero point. If None, + set to the minimum and maximum of the quantized data type. + Note: The min and max are only valid if the zero_point is not enforced + by the data type itself. + +Generates: + scale: Sampled scale. + zero_point: Sampled zero point. + quantized_type: Sampled quantized type. 
+""" +@st.composite +def qparams(draw, dtypes=None, scale_min=None, scale_max=None, + zero_point_min=None, zero_point_max=None): + if dtypes is None: + dtypes = _ALL_QINT_TYPES + if not isinstance(dtypes, (list, tuple)): + dtypes = (dtypes,) + quantized_type = draw(st.sampled_from(dtypes)) + + _type_info = torch.iinfo(quantized_type) + qmin, qmax = _type_info.min, _type_info.max + + # TODO: Maybe embed the enforced zero_point in the `torch.iinfo`. + _zp_enforced = _ENFORCED_ZERO_POINT[quantized_type] + if _zp_enforced is not None: + zero_point = _zp_enforced + else: + _zp_min = qmin if zero_point_min is None else zero_point_min + _zp_max = qmax if zero_point_max is None else zero_point_max + zero_point = draw(st.integers(min_value=_zp_min, max_value=_zp_max)) + + if scale_min is None: + scale_min = torch.finfo(torch.float).eps + if scale_max is None: + scale_max = torch.finfo(torch.float).max + scale = draw(floats(min_value=scale_min, max_value=scale_max, width=32)) + + return scale, zero_point, quantized_type + +"""Strategy to create different shapes. +Args: + min_dims / max_dims: minimum and maximum rank. + min_side / max_side: minimum and maximum dimensions per rank. + +Generates: + Possible shapes for a tensor, constrained to the rank and dimensionality. + +Example: + # Generates 3D and 4D tensors. + @given(Q = qtensor(shapes=array_shapes(min_dims=3, max_dims=4)) + some_test(self, Q):... +""" +@st.composite +def array_shapes(draw, min_dims=1, max_dims=None, min_side=1, max_side=None, max_numel=None): + """Return a strategy for array shapes (tuples of int >= 1).""" + assert min_dims < 32 + if max_dims is None: + max_dims = min(min_dims + 2, 32) + assert max_dims < 32 + if max_side is None: + max_side = min_side + 5 + candidate = st.lists(st.integers(min_side, max_side), min_size=min_dims, max_size=max_dims) + if max_numel is not None: + candidate = candidate.filter(lambda x: reduce(int.__mul__, x, 1) <= max_numel) + return draw(candidate.map(tuple)) + + +"""Strategy for generating test cases for tensors. +The resulting tensor is in float32 format. + +Args: + shapes: Shapes under test for the tensor. Could be either a hypothesis + strategy, or an iterable of different shapes to sample from. + elements: Elements to generate from for the returned data type. + If None, the strategy resolves to float within range [-1e6, 1e6]. + qparams: Instance of the qparams strategy. This is used to filter the tensor + such that the overflow would not happen. + +Generates: + X: Tensor of type float32. Note that NaN and +/-inf is not included. + qparams: (If `qparams` arg is set) Quantization parameters for X. + The returned parameters are `(scale, zero_point, quantization_type)`. + (If `qparams` arg is None), returns None. +""" +@st.composite +def tensor(draw, shapes=None, elements=None, qparams=None, dtype=np.float32): + if isinstance(shapes, SearchStrategy): + _shape = draw(shapes) + else: + _shape = draw(st.sampled_from(shapes)) + if qparams is None: + if elements is None: + elements = floats(-1e6, 1e6, allow_nan=False, width=32) + X = draw(stnp.arrays(dtype=dtype, elements=elements, shape=_shape)) + assume(not (np.isnan(X).any() or np.isinf(X).any())) + return X, None + qparams = draw(qparams) + if elements is None: + min_value, max_value = _get_valid_min_max(qparams) + elements = floats(min_value, max_value, allow_infinity=False, + allow_nan=False, width=32) + X = draw(stnp.arrays(dtype=dtype, elements=elements, shape=_shape)) + # Recompute the scale and zero_points according to the X statistics. 
+ scale, zp = _calculate_dynamic_qparams(X, qparams[2]) + enforced_zp = _ENFORCED_ZERO_POINT.get(qparams[2], None) + if enforced_zp is not None: + zp = enforced_zp + return X, (scale, zp, qparams[2]) + +@st.composite +def per_channel_tensor(draw, shapes=None, elements=None, qparams=None): + if isinstance(shapes, SearchStrategy): + _shape = draw(shapes) + else: + _shape = draw(st.sampled_from(shapes)) + if qparams is None: + if elements is None: + elements = floats(-1e6, 1e6, allow_nan=False, width=32) + X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape)) + assume(not (np.isnan(X).any() or np.isinf(X).any())) + return X, None + qparams = draw(qparams) + if elements is None: + min_value, max_value = _get_valid_min_max(qparams) + elements = floats(min_value, max_value, allow_infinity=False, + allow_nan=False, width=32) + X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape)) + # Recompute the scale and zero_points according to the X statistics. + scale, zp = _calculate_dynamic_per_channel_qparams(X, qparams[2]) + enforced_zp = _ENFORCED_ZERO_POINT.get(qparams[2], None) + if enforced_zp is not None: + zp = enforced_zp + # Permute to model quantization along an axis + axis = int(np.random.randint(0, X.ndim, 1)) + permute_axes = np.arange(X.ndim) + permute_axes[0] = axis + permute_axes[axis] = 0 + X = np.transpose(X, permute_axes) + + return X, (scale, zp, axis, qparams[2]) + +"""Strategy for generating test cases for tensors used in Conv. +The resulting tensors is in float32 format. + +Args: + spatial_dim: Spatial Dim for feature maps. If given as an iterable, randomly + picks one from the pool to make it the spatial dimension + batch_size_range: Range to generate `batch_size`. + Must be tuple of `(min, max)`. + input_channels_per_group_range: + Range to generate `input_channels_per_group`. + Must be tuple of `(min, max)`. + output_channels_per_group_range: + Range to generate `output_channels_per_group`. + Must be tuple of `(min, max)`. + feature_map_range: Range to generate feature map size for each spatial_dim. + Must be tuple of `(min, max)`. + kernel_range: Range to generate kernel size for each spatial_dim. Must be + tuple of `(min, max)`. + max_groups: Maximum number of groups to generate. + elements: Elements to generate from for the returned data type. + If None, the strategy resolves to float within range [-1e6, 1e6]. + qparams: Strategy for quantization parameters. for X, w, and b. + Could be either a single strategy (used for all) or a list of + three strategies for X, w, b. 
+Generates: + (X, W, b, g): Tensors of type `float32` of the following drawen shapes: + X: (`batch_size, input_channels, H, W`) + W: (`output_channels, input_channels_per_group) + kernel_shape + b: `(output_channels,)` + groups: Number of groups the input is divided into +Note: X, W, b are tuples of (Tensor, qparams), where qparams could be either + None or (scale, zero_point, quantized_type) + + +Example: + @given(tensor_conv( + spatial_dim=2, + batch_size_range=(1, 3), + input_channels_per_group_range=(1, 7), + output_channels_per_group_range=(1, 7), + feature_map_range=(6, 12), + kernel_range=(3, 5), + max_groups=4, + elements=st.floats(-1.0, 1.0), + qparams=qparams() + )) +""" +@st.composite +def tensor_conv( + draw, spatial_dim=2, batch_size_range=(1, 4), + input_channels_per_group_range=(3, 7), + output_channels_per_group_range=(3, 7), feature_map_range=(6, 12), + kernel_range=(3, 7), max_groups=1, can_be_transposed=False, + elements=None, qparams=None +): + + # Resolve the minibatch, in_channels, out_channels, iH/iW, iK/iW + batch_size = draw(st.integers(*batch_size_range)) + input_channels_per_group = draw( + st.integers(*input_channels_per_group_range)) + output_channels_per_group = draw( + st.integers(*output_channels_per_group_range)) + groups = draw(st.integers(1, max_groups)) + input_channels = input_channels_per_group * groups + output_channels = output_channels_per_group * groups + + if isinstance(spatial_dim, Iterable): + spatial_dim = draw(st.sampled_from(spatial_dim)) + + feature_map_shape = [] + for i in range(spatial_dim): + feature_map_shape.append(draw(st.integers(*feature_map_range))) + + kernels = [] + for i in range(spatial_dim): + kernels.append(draw(st.integers(*kernel_range))) + + tr = False + weight_shape = (output_channels, input_channels_per_group) + tuple(kernels) + bias_shape = output_channels + if can_be_transposed: + tr = draw(st.booleans()) + if tr: + weight_shape = (input_channels, output_channels_per_group) + tuple(kernels) + bias_shape = output_channels + + # Resolve the tensors + if qparams is not None: + if isinstance(qparams, (list, tuple)): + assert len(qparams) == 3, "Need 3 qparams for X, w, b" + else: + qparams = [qparams] * 3 + + X = draw(tensor(shapes=( + (batch_size, input_channels) + tuple(feature_map_shape),), + elements=elements, qparams=qparams[0])) + W = draw(tensor(shapes=(weight_shape,), elements=elements, + qparams=qparams[1])) + b = draw(tensor(shapes=(bias_shape,), elements=elements, + qparams=qparams[2])) + + return X, W, b, groups, tr + +# We set the deadline in the currently loaded profile. +# Creating (and loading) a separate profile overrides any settings the user +# already specified. +hypothesis_version = hypothesis.version.__version_info__ +current_settings = settings._profiles[settings._current_profile].__dict__ +current_settings['deadline'] = None +if hypothesis_version >= (3, 16, 0) and hypothesis_version < (5, 0, 0): + current_settings['timeout'] = hypothesis.unlimited +def assert_deadline_disabled(): + if hypothesis_version < (3, 27, 0): + import warnings + warning_message = ( + "Your version of hypothesis is outdated. " + "To avoid `DeadlineExceeded` errors, please update. 
" + f"Current hypothesis version: {hypothesis.__version__}" + ) + warnings.warn(warning_message) + else: + assert settings().deadline is None diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/logging_tensor.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/logging_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..dedb83343e5df178b675d620915c28961b08b740 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/logging_tensor.py @@ -0,0 +1,182 @@ +# mypy: ignore-errors + +import torch +from torch.utils._pytree import tree_map +from typing import Iterator, List, Optional +import logging +import contextlib +import itertools +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils.weak import WeakTensorKeyDictionary +import functools +from torch._C._profiler import gather_traceback, symbolize_tracebacks + + +_dtype_abbrs = { + torch.bfloat16: "bf16", + torch.float64: "f64", + torch.float32: "f32", + torch.float16: "f16", + torch.complex32: "c32", + torch.complex64: "c64", + torch.complex128: "c128", + torch.int8: "i8", + torch.int16: "i16", + torch.int32: "i32", + torch.int64: "i64", + torch.bool: "b8", + torch.uint8: "u8", +} + +# How the chain of calls works for LoggingTensor: +# 1. Call torch.sin +# 2. Attempt __torch_function__. In LoggingTensor torch function is disabled so we bypass it entirely +# 3. Enter dispatcher, wind your way through Autograd +# 4. Hit Python dispatch key, call __torch_dispatch__ + +# This Tensor can work with autograd in two ways: +# - The wrapped Tensor does not require gradients. In that case, the LoggingTensor +# can require gradients if the user asks for it as a constructor kwarg. +# - The wrapped Tensor can require gradients. In that case autograd will be tracked +# for the wrapped Tensor and the LoggingTensor itself cannot require gradients. +# WARNING: We allow these two possibilities for testing purposes. You should NEVER use both in a single +# test or you might get surprising behavior. + +# TODO: TensorBase should work +class LoggingTensor(torch.Tensor): + elem: torch.Tensor + + __slots__ = ['elem'] + + context = contextlib.nullcontext + + @staticmethod + def __new__(cls, elem, *args, **kwargs): + # The wrapping tensor (LoggingTensor) shouldn't hold any + # memory for the class in question, but it should still + # advertise the same device as before + r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] + cls, elem.size(), + strides=elem.stride(), storage_offset=elem.storage_offset(), + # TODO: clone storage aliasing + dtype=elem.dtype, layout=elem.layout, + device=elem.device, requires_grad=kwargs.get("requires_grad", False) + ) + # ...the real tensor is held as an element on the tensor. 
+ r.elem = elem.detach() if r.requires_grad else elem + return r + + def __repr__(self): + return super().__repr__(tensor_contents=f"{self.elem}") + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + def unwrap(e): + return e.elem if isinstance(e, cls) else e + + def wrap(e): + return cls(e) if isinstance(e, torch.Tensor) else e + + with cls.context(): + rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs))) + logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs) + return rs + +class LoggingTensorMode(TorchDispatchMode): + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + rs = func(*args, **kwargs) + logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs) + return rs + +class LoggingTensorReentrant(LoggingTensor): + context = torch.overrides.enable_reentrant_dispatch + +# https://stackoverflow.com/questions/36408496/python-logging-handler-to-append-to-list +class LoggingTensorHandler(logging.Handler): + def __init__( + self, log_list: List[str], use_shortid_for_all_tensors: bool, + with_type: bool, tracebacks_list: Optional[List]) -> None: + logging.Handler.__init__(self) + self.log_list = log_list + self.use_shortid_for_all_tensors = use_shortid_for_all_tensors + self.tracebacks_list = tracebacks_list + self.memo = WeakTensorKeyDictionary() + self.next_id = 0 + self.with_type = with_type + + def _shortid(self, t: torch.Tensor) -> int: + if t not in self.memo: + self.memo[t] = self.next_id + self.next_id += 1 + return self.memo[t] + + def _fmt(self, a: object, with_type: bool = False) -> str: + cond_cls = torch.Tensor if self.use_shortid_for_all_tensors else LoggingTensor + if isinstance(a, cond_cls): + maybe_type = "" + if with_type and self.with_type: + maybe_type = f": {_dtype_abbrs[a.dtype]}[{', '.join(map(str, a.shape))}]" + x = f"${self._shortid(a)}{maybe_type}" + return x + else: + return repr(a) + + def emit(self, record): + fmt_args = ", ".join( + itertools.chain( + (str(tree_map(self._fmt, a)) for a in record.args[0]), + (f"{k}={str(tree_map(self._fmt, v))}" for k, v in record.args[1].items()), + ) + ) + fmt_rets = tree_map(functools.partial(self._fmt, with_type=True), record.args[2]) + self.log_list.append(f'{fmt_rets} = {record.msg}({fmt_args})') + if self.tracebacks_list is not None: + self.tracebacks_list.append(record.traceback) + +def log_input(name: str, var: object): + logging.getLogger("LoggingTensor").info("input", (name,), {}, var) + +class GatherTraceback(logging.Filter): + def __init__(self, python=True, script=True, cpp=False): + self.python = python + self.script = script + self.cpp = cpp + + def filter(self, record): + record.traceback = gather_traceback(python=self.python, script=self.script, cpp=self.cpp) + return True + +@contextlib.contextmanager +def capture_logs(is_mode=False, python_tb=False, script_tb=False, cpp_tb=False) -> Iterator[List[str]]: + collect_traceback = python_tb or script_tb or cpp_tb + logger = logging.getLogger("LoggingTensor") + log_list: List[str] = [] + tracebacks_list: List[str] = [] + handler = LoggingTensorHandler( + log_list, + with_type=True, + use_shortid_for_all_tensors=is_mode, + tracebacks_list=tracebacks_list if collect_traceback else None + ) + logger.addHandler(handler) + logger.setLevel(logging.INFO) + logger.propagate = False + if collect_traceback: + logger.addFilter(GatherTraceback(python=python_tb, script=script_tb, cpp=cpp_tb)) + 
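    # Yield one list (the formatted log lines) or two (logs plus raw tracebacks); the
    # tracebacks are symbolized only once the context manager exits.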
try: + if collect_traceback: + yield log_list, tracebacks_list + else: + yield log_list + finally: + symbolized_tracebacks = symbolize_tracebacks(tracebacks_list) + tracebacks_list.clear() + tracebacks_list.extend(symbolized_tracebacks) + logger.removeHandler(handler) + +@contextlib.contextmanager +def capture_logs_with_logging_tensor_mode(python_tb=False, script_tb=False, cpp_tb=False): + with LoggingTensorMode(), capture_logs(True, python_tb, script_tb, cpp_tb) as logs: + yield logs diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e9125ba0ebe7e0623a12ad1a1cd7eeb7d2749a3a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__init__.py @@ -0,0 +1,7 @@ +# mypy: ignore-errors + +from .make_fx import make_fx_check +from .aot_autograd import aot_autograd_check, _test_aot_autograd_forwards_backwards_helper +from .fake_tensor import fake_check +from .autograd_registration import autograd_registration_check +from .generate_tests import generate_opcheck_tests, opcheck, OpCheckError, dontGenerateOpCheckTests, is_inside_opcheck_mode diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e83f8077e6101e2b2bde7ec953313bf430aabe65 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/aot_autograd.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/aot_autograd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32adb9a729e72a1455b9a15a3dd063f1c51c1903 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/aot_autograd.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/autograd_registration.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/autograd_registration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19a459f2013db3027f6116a3bc31ceb72bd710e2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/autograd_registration.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/fake_tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/fake_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffff70e353d958e15310956a4970f7165523e2da Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/fake_tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/generate_tests.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/generate_tests.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e614e66eaf16129a5b5d30ba9d604d7a3a46e884 Binary 
files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/generate_tests.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/make_fx.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/make_fx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f0af30f5159656fa2ef5b6f98c89a7028601fb8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/make_fx.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/aot_autograd.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/aot_autograd.py new file mode 100644 index 0000000000000000000000000000000000000000..13ce9e88378988dd61ee3a86e695f0a4887575b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/aot_autograd.py @@ -0,0 +1,146 @@ +# mypy: ignore-errors + +import torch +import torch.utils._pytree as pytree +from torch.testing._internal.common_methods_invocations import wrapper_set_seed +from functorch.compile import compiled_function, min_cut_rematerialization_partition, nop +from .make_fx import randomize +import re + + +class assert_raises_regex: + def __init__(self, exception_cls, regex): + self.exception_cls = exception_cls + self.regex = regex + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, traceback): + if exc_type == self.exception_cls: + msg = str(exc_val) + if not re.search(self.regex, msg): + raise AssertionError( + f"Expected exception to match regex. regex: {self.regex}, exception: {msg}") + return True # Squashes the exception + if exc_type is not None: + raise AssertionError( + f"Expected {self.exception_cls} to be raised, instead got exception {exc_type}") + raise AssertionError("Expected exception to be raised but none was") + + +def aot_autograd_check( + func, + args, + kwargs, + dynamic, + assert_raises_regex_fn=assert_raises_regex, + assert_equals_fn=torch.testing._comparison.assert_close, + check_gradients=True, + try_check_data_specialization=False): + """Compares func(*args, **kwargs) in eager-mode to under AOTAutograd. + + Compares outputs and (if check_gradients=True) gradients produced by + AOTAutograd against eager-mode PyTorch. + + We assume that func(*args, **kwargs) succeeds in eager-mode PyTorch. 
+ + """ + flat_args, args_spec = pytree.tree_flatten((args, kwargs)) + args_is_tensor = [isinstance(arg, torch.Tensor) for arg in flat_args] + args = [arg for arg in flat_args if isinstance(arg, torch.Tensor)] + + # We construct a new function that only accepts Tensors as inputs + def func_no_tensors(args): + reconstructed_flat_args = [] + args = iter(args) + for v in flat_args: + if isinstance(v, torch.Tensor): + reconstructed_flat_args.append(next(args)) + else: + reconstructed_flat_args.append(v) + + c_args, c_kwargs = pytree.tree_unflatten(reconstructed_flat_args, args_spec) + return func(*c_args, **c_kwargs) + + compiled_f = compiled_function( + func_no_tensors, nop, nop, dynamic=dynamic, partition_fn=min_cut_rematerialization_partition) + + out = wrapper_set_seed(func_no_tensors, args) + if check_gradients == "auto": + any_tensor_requires_grad = pytree.tree_any_only(torch.Tensor, lambda x: x.requires_grad, args) + any_output_requires_grad = pytree.tree_any_only(torch.Tensor, lambda x: x.requires_grad, out) + check_gradients = any_tensor_requires_grad and any_output_requires_grad + if not check_gradients: + compiled_out = wrapper_set_seed(compiled_f, args) + assert_equals_fn(compiled_out, out, msg=outputs_msg) + return + _test_aot_autograd_forwards_backwards_helper( + func_no_tensors, compiled_f, args, assert_raises_regex_fn, assert_equals_fn, + try_check_data_specialization) + +outputs_msg = ( + "Outputs of the operator are different in eager-mode PyTorch vs " + "AOTAutograd. This means the operator will have incorrect output " + "underneath torch.compile. This could be because the operator's " + "implementation not traceable or that there is a bug in AOTAutograd." +) + + +def _test_aot_autograd_forwards_backwards_helper( + f, compiled_f, args, assert_raises_regex_fn, assert_equals_fn, + try_check_data_specialization): + # Verify grads are equal between compiled and non-compiled versions of f. + + def call_forwards_backwards(f, args): + flat_args = pytree.arg_tree_leaves(*args) + diff_args = [arg for arg in flat_args if isinstance(arg, torch.Tensor) and + arg.requires_grad] + out = wrapper_set_seed(f, args) + flat_out = pytree.tree_leaves(out) + + sm = 0 + for i in flat_out: + if isinstance(i, torch.Tensor): + # We need to call .abs() because it is possible that the output of the + # operator is a complex Tensor and autograd will yell at autograd.grad + # on a complex Tensor unless we manually provide the grad_output flag. + sm += i.sum().abs() + assert isinstance(sm, torch.Tensor) + return out, torch.autograd.grad(sm, diff_args, allow_unused=True) + + def check(args, ignore_failure=False): + try: + orig_out, orig_grad = call_forwards_backwards(f, args) + except Exception: + if ignore_failure: + return + raise + + # See https://github.com/pytorch/pytorch/pull/98960#issuecomment-1505962215 + if all(x is None for x in orig_grad): + with assert_raises_regex_fn(RuntimeError, 'does not require grad and does not have a grad_fn'): + call_forwards_backwards(compiled_f, args) + return + + msg = ( + "Gradients of the operator are different in eager-mode PyTorch vs " + "AOTAutograd. This means the operator will have incorrect gradients " + "underneath torch.compile. This could be because the operator's " + "backward is incorrectly registered or not traceable or that there " + "is a bug in AOTAutograd." 
+ ) + + compiled_out, compiled_grad = call_forwards_backwards(compiled_f, args) + assert_equals_fn(compiled_out, orig_out, msg=outputs_msg) + assert_equals_fn(compiled_grad, orig_grad, msg=msg) + + check(args, ignore_failure=False) + + # Randomize the data and run the traced graph with it, to catch bugs + # where we may have baked in Tensor data into the trace. + # This is not guaranteed to succeed, because `f` might have preconditions + # on the values of the inputs, so we just ignore if this test fails. + if try_check_data_specialization: + args = randomize(args) + check(args, ignore_failure=True) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/autograd_registration.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/autograd_registration.py new file mode 100644 index 0000000000000000000000000000000000000000..25df4f1d03fcf50fc5869d8af93cd128f98e9c72 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/autograd_registration.py @@ -0,0 +1,132 @@ +# mypy: ignore-errors + +import contextlib + +import torch +import torch.utils._pytree as pytree + + +@contextlib.contextmanager +def set_autograd_fallback_mode(mode): + prev = torch._C._get_autograd_fallback_mode() + try: + torch._C._set_autograd_fallback_mode(mode) + yield + finally: + torch._C._set_autograd_fallback_mode(prev) + + +def autograd_registration_check(op, args, kwargs): + """Check if autograd was registered correctly (for the operator). + + Operators should have "autograd support" registered directly to an + autograd dispatch key. + An incorrect registration may lead to unexpected silent incorrectness. + Note that this check won't catch all problems but will catch + the most common ones. + + Example usage: + >>> x = torch.randn(3, requires_grad=True) + >>> autograd_registration_check(torch.ops.aten.sin.default, (x,), {}) + + Here are some best practices if you do find your autograd is + registered incorrectly: + - If the operator is composite (i.e. consists of other PyTorch ops) + and you wish the operator to decompose and get autograd support + that way, then please register the implementation to + DispatchKey::CompositeImplicitAutograd + - If you're adding an autograd formula for the operator, the correct + thing to do is to register an autograd.Function to + DispatchKey::Autograd (preferred) or one of the + DispatchKey::Autograd keys. It is NOT OK to register + an autograd.Function to a backend (e.g. CPU/CUDA) key. + - If your operator is non-differentiable, then you should register + an implementation to the Autograd key that uses + AutoDispatchBelowAutograd and re-invokes the operator. + + """ + assert isinstance(op, torch._ops.OpOverload) + # Implementation details + # ----------------------------------------------- + # If an operator doesn't have an autograd kernel at an autograd key, + # and the operator does not return inputs as-is, then all of + # the outputs should have requires_grad=False before we apply + # special behaviors of our default autograd fallback. + # (The default autograd fallback may set requires_grad=True on output + # tensors in certain modes so that when they are backpropped through, + # they raise an error). 
+ # + # Our strategy for detecting if an operator doesn't have an autograd + # kernel at the autograd key is: + # - set the autograd fallback mode to "nothing" (so it does not change + # the required-gradness of outputs) + # - run the operator + # - Check if any outputs of the operator (that are not inputs) require + # grad. This would only happen if the user calls regular PyTorch + # operations in their backend key (this op should instead be + # CompositeImplicitAutograd or not an op) or if the user invokes + # an autograd.Function in the backend key. + # + # Note that it's already likely a bug if the operator directly returns + # an input as output (because custom ops don't have a good way of + # constructing true in-place or out variants), but we defer that + # responsibility to a different test (schema_check). + + flat_args = pytree.arg_tree_leaves(*args, **kwargs) + all_tensors = [arg for arg in flat_args if isinstance(arg, torch.Tensor)] + if not any(t.requires_grad for t in all_tensors): + raise RuntimeError( + "autograd_registration_check: no inputs have requires_grad=True so " + "we are unable to actually perform this test. Please pass inputs " + "that do require grad." + ) + + # Determine which AutogradBACKEND key to check + all_device_types = {arg.device.type for arg in all_tensors} + if not all_device_types.issubset(["cpu", "cuda"]): + # Don't want to support other keys yet + raise NotImplementedError( + f"autograd_registration_check: NYI devices other than CPU/CUDA, got {all_device_types}" + ) + if "cuda" in all_device_types: + key = "AutogradCUDA" + elif "cpu" in all_device_types: + key = "AutogradCPU" + + if torch._C._dispatch_has_kernel_for_dispatch_key(op.name(), key): + return + if torch._C._dispatch_has_kernel_for_dispatch_key(op.name(), "Autograd"): + return + if torch._C._dispatch_has_kernel_for_dispatch_key( + op.name(), "CompositeImplicitAutograd" + ): + return + + # At this point, we know the operator doesn't have a kernel registered to an + # autograd key. Let's proceed with our test. + with set_autograd_fallback_mode("nothing"): + all_outs = op(*args, **kwargs) + + inp_ids = {id(arg) for arg in flat_args} + + def not_an_input_and_requires_grad(tensor): + if not tensor.requires_grad: + return False + if id(tensor) in inp_ids: + return False + return True + + if not pytree.tree_any_only(torch.Tensor, not_an_input_and_requires_grad, all_outs): + return + + raise AssertionError( + f"{op.name()}: at least one output of this operator has requires_grad=True " + f"but the operator does not have an autograd kernel defined at an autograd " + f"key (e.g. DispatchKey::Autograd). This could mean that you have " + f"incorrectly registered an autograd kernel to a non-Autograd DispatchKey, " + f"which may lead to silently incorrect results. If your operator consists " + f"of regular PyTorch operations, consider not using an operator at all " + f"or registering your operator as CompositeImplicitAutograd. If you have " + f"an autograd.Function registered to a backend (CPU/CUDA) key, the correct " + f"location for it is the Autograd key." 
+ ) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/fake_tensor.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/fake_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..5e60f50189b5dc3ab43fdd97120d5fa23559a84e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/fake_tensor.py @@ -0,0 +1,12 @@ +# mypy: ignore-errors + +import torch._subclasses + + +def is_builtin(op): + return op.namespace in ('aten', 'prims', 'prim') + + +def fake_check(op, args, kwargs): + with torch._subclasses.CrossRefFakeMode(ignore_op_fn=is_builtin): + op(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/generate_tests.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/generate_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..2fbbd8f6c3ae19d4c85025c0221c46116e6ff41d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/generate_tests.py @@ -0,0 +1,852 @@ +# mypy: ignore-errors + +import datetime +import difflib +import functools +import inspect +import json +import os +import re +import tempfile +import threading +import unittest +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch + +import torch._dynamo + +import torch.utils._pytree as pytree +from torch._dynamo.utils import clone_input +from torch._subclasses.schema_check_mode import SchemaCheckMode +from torch._utils_internal import get_file_path_2 +from torch.overrides import TorchFunctionMode +from torch.testing._internal.optests import ( + aot_autograd_check, + autograd_registration_check, + fake_check, +) + + +def dontGenerateOpCheckTests(reason: str): + def inner(fun): + fun._torch_dont_generate_opcheck_tests = True + return fun + + return inner + + +def is_abstract(tensor: torch.Tensor) -> bool: + if tensor.is_meta: + return True + if torch._subclasses.fake_tensor.is_fake(tensor): + return True + return False + + +def safe_schema_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + copy_inputs: bool = True, +) -> Any: + if copy_inputs: + args, kwargs = deepcopy_tensors((args, kwargs)) + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return None + with SchemaCheckMode(): + result = op(*args, **kwargs) + return result + + +def safe_autograd_registration_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + copy_inputs: bool = True, +) -> None: + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return + if copy_inputs: + args, kwargs = deepcopy_tensors((args, kwargs)) + # Don't perform autograd_registration_check if none of the inputs require grad. 
+ if not pytree.tree_any_only( + torch.Tensor, lambda x: x.requires_grad, (args, kwargs) + ): + return + return autograd_registration_check(op, args, kwargs) + + +def safe_fake_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + copy_inputs: bool = True, +) -> None: + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return None + if copy_inputs: + args, kwargs = deepcopy_tensors((args, kwargs)) + return fake_check(op, args, kwargs) + + +def safe_aot_autograd_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + dynamic: bool, + *, + copy_inputs: bool = True, +) -> Any: + # NB: copy_inputs does nothing for aot_autograd_check: it always needs to copy + # inputs. + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return None + + def func(*args, **kwargs): + args, kwargs = pytree.tree_map_only(torch.Tensor, torch.clone, (args, kwargs)) + return op(*args, **kwargs) + + # aot_autograd_check runs func(*args, **kwargs) multiple times + # and assumes `func` does not modify its inputs. + return aot_autograd_check(func, args, kwargs, dynamic, check_gradients="auto") + + +def deepcopy_tensors(inputs: Any) -> Any: + return pytree.tree_map_only(torch.Tensor, clone_input, inputs) + + +# Test util requirements +# - The test util must have signature (op: OpOverload, args, kwargs) +# - The test util must NOT mutate args, kwargs. +# - The test utils in this list must not be prefixes of each other. For example, +# having both "test_schema" and "test_schema_is_functional" is NOT OK. +# - The order of items in this dict matters (for opcheck), we'll run them +# in order. +ALL_TEST_UTILS = { + "test_schema": safe_schema_check, + "test_autograd_registration": safe_autograd_registration_check, + "test_faketensor": safe_fake_check, + "test_aot_dispatch_static": functools.partial( + safe_aot_autograd_check, + dynamic=False, + ), + "test_aot_dispatch_dynamic": functools.partial( + safe_aot_autograd_check, + dynamic=True, + ), +} + +GDOC = "https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit" + +DEFAULT_TEST_UTILS = [ + "test_schema", + "test_autograd_registration", + "test_faketensor", + "test_aot_dispatch_static", + "test_aot_dispatch_dynamic", +] + + +def generate_opcheck_tests( + testcase: Any, + namespaces: List[str], + failures_dict_path: Optional[str] = None, + additional_decorators: Dict[str, Callable] = None, + test_utils: List[str] = DEFAULT_TEST_UTILS, +) -> None: + """Given an existing TestCase, use the existing tests to generate + additional validation tests for custom operators. + + For {all existing tests in the TestCase} x {all test utils}, + we will generate one new test. The new test runs a TorchFunctionMode + that intercepts ``op(*args, **kwargs)`` calls and invokes + ``test_util(op, *args, **kwargs)``, where ``op`` is an operator. + + The test_util that we support are in ALL_TEST_UTILS. They are: + - test_schema: This runs SchemaCheckMode. + - test_autograd_registration: This runs autograd_registration_check. + - test_faketensor: This runs CrossRefFakeMode. + - test_aot_dispatch_static: This runs aot_autograd_check, which: + checks that the outputs (and gradients, if they are computable) + are the same under eager-mode PyTorch and using AOTAutograd. + - test_aot_dispatch_dynamic: Same as aot_dispatch_static, but + runs AOTAutograd using dynamic shapes instead of static shapes. + + The generated test will have name ``{test_util}__{original_name}``. 
+ For example, if there is a method named ``test_cumsum``, then + we will generate a ``test_schema__test_cumsum``, + ``test_faketensor__test_cumsum``, etc. + + For more details, see https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit + + Args: + testcase: The testcase we will modify and generate additional tests for. + namespaces: We will only intercept calls to custom operators with these + namespaces. + failures_dict_path: See ``validate_failures_dict_structure`` for more details + test_utils: a list of test_utils to generate. Example: ["test_schema", "test_faketensor"] + """ + if additional_decorators is None: + additional_decorators = {} + test_methods = [ + m + for m in dir(testcase) + if m.startswith("test_") and callable(getattr(testcase, m)) + ] + if failures_dict_path is None: + # The default failures_dict_path is failures_dict.json in + # the same directory as the test file. + prev_frame = inspect.currentframe().f_back + filename = inspect.getframeinfo(prev_frame)[0] + failures_dict_path = get_file_path_2( + os.path.dirname(filename), "failures_dict.json" + ) + failures_dict = FailuresDict.load( + failures_dict_path, create_file=should_update_failures_dict() + ) + validate_failures_dict_structure(failures_dict, test_utils, testcase) + validate_failures_dict_formatting(failures_dict_path) + + def construct_method(attr, prefix, tester): + method = getattr(testcase, attr) + if getattr(method, "_torch_dont_generate_opcheck_tests", False): + return + new_method_name = prefix + "__" + attr + + @functools.wraps(method) + def new_method(*args, **kwargs): + with OpCheckMode( + namespaces, + prefix, + tester, + failures_dict, + f"{testcase.__name__}.{new_method_name}", + failures_dict_path, + ): + result = method(*args, **kwargs) + return result + + if pytestmark := new_method.__dict__.get("pytestmark"): + import pytest + + # check if we need to simplify the parametrize marks + # NB: you need to add this mark to your pytest.ini + opcheck_only_one = False + for mark in pytestmark: + if isinstance(mark, pytest.Mark) and mark.name == "opcheck_only_one": + opcheck_only_one = True + + if opcheck_only_one: + new_pytestmark = [] + for mark in pytestmark: + if isinstance(mark, pytest.Mark) and mark.name == "parametrize": + argnames, argvalues = mark.args + assert not mark.kwargs, "NYI" + # Special case for device, we want to run on all + # devices + if argnames != "device": + new_pytestmark.append( + pytest.mark.parametrize( + argnames, (next(iter(argvalues)),) + ) + ) + continue + new_pytestmark.append(mark) + new_method.__dict__["pytestmark"] = new_pytestmark + + if new_method_name in additional_decorators: + for dec in additional_decorators[new_method_name]: + new_method = dec(new_method) + + if hasattr(testcase, new_method_name): + raise RuntimeError( + f"Tried to autogenerate {new_method_name} but {testcase} already " + f"has method named {new_method_name}. Please rename the original " + f"method on the TestCase." 
+ ) + setattr(testcase, new_method_name, new_method) + + test_utils = {name: ALL_TEST_UTILS[name] for name in test_utils} + for attr in test_methods: + for prefix, tester in test_utils.items(): + construct_method(attr, prefix, tester) + + generate_tag_tests(testcase, failures_dict, additional_decorators) + + +def generate_tag_tests(testcase, failures_dict, additional_decorators): + def generate_test(qualname, definitely_not_pt2_compliant, xfailed_tests): + def inner(self): + try: + op = torch._library.utils.lookup_op(qualname) + except AttributeError as e: + # Operator not importable in this test file + raise unittest.SkipTest(f"Can't import operator {qualname}") from e + op_marked_as_compliant = torch.Tag.pt2_compliant_tag in op.tags + if not op_marked_as_compliant: + return + if not definitely_not_pt2_compliant: + return + raise AssertionError( + f"op '{qualname}' was tagged with torch.Tag.pt2_compliant_tag " + f"but it failed some of the generated opcheck tests " + f"({xfailed_tests}). This may lead to silent correctness issues, " + f"please fix this." + ) + + return inner + + for qualname, test_dict in failures_dict.data.items(): + xfailed_tests = [ + test + for test, status_dict in test_dict.items() + # We're about to delete the following test after Ed's PR + # to specialize on C++ .size() calls + if "test_aot_dispatch_static" not in test + and status_dict["status"] == "xfail" + ] + definitely_not_pt2_compliant = len(xfailed_tests) > 0 + generated = generate_test(qualname, definitely_not_pt2_compliant, xfailed_tests) + + # Could result in collisions, but unlikely. We'll raise if we see one below. + mangled_qualname = qualname.replace("::", "_").replace(".", "_") + test_name = "test_pt2_compliant_tag_" + mangled_qualname + + # You can skip this test via the additional_decorators argument + # in generate_opcheck_tests + if test_name in additional_decorators: + for decorator in additional_decorators[test_name]: + generated = decorator(generated) + + if hasattr(testcase, test_name): + raise RuntimeError( + f"Tried to generate a test named {test_name}, but it exists " + f"already. This could be because of a name collision (where " + f"we generated two tests with the same name), or where we " + f"generated a test with the same name as an existing test." + ) + setattr(testcase, test_name, generated) + + +TEST_OPTIONS = ("xfail", "skip", "xsuccess") + + +def validate_failures_dict_formatting(failures_dict_path: str) -> None: + with open(failures_dict_path) as fp: + actual = fp.read() + failures_dict = FailuresDict.load(failures_dict_path) + expected = failures_dict._save(to_str=True) + if actual == expected: + return + if should_update_failures_dict(): + failures_dict = FailuresDict.load(failures_dict_path) + failures_dict.save() + return + expected = expected.splitlines(1) + actual = actual.splitlines(1) + diff = difflib.unified_diff(actual, expected) + diff = "".join(diff) + raise RuntimeError( + f"\n{diff}\n\nExpected the failures dict to be formatted " + f"a certain way. Please see the above diff; you can correct " + f"this either manually or by re-running the test with " + f"PYTORCH_OPCHECK_ACCEPT=1" + ) + + +def validate_failures_dict_structure( + failure_dict: "FailuresDict", test_utils: List[str], testcase: Any +) -> None: + """Validates the failures dict. + + The failure dict looks something like the following. + It maps operator name (qualname) to a list of autogenerated tests. 
+ Each autogenerated test may have a check for the operator (if the operator is + called by the test); the dictionary specifies if we should skip the check, + or if we expect some check to fail. + + { + "fbgemm::split_lengths": { + "test_schema__test_split_lengths": { + "comment": "you can put whatever you want into the comment section", + "status": "xfail", + } + "test_schema__test_split_lengths_empty": { + "comment": "", + "status": "skip", + }, + }, + "fbgemm::gather_lengths": { + "test_schema__test_gather_lengths": { + "comment": "", + "status": "skip", + }, + }, + } + + """ + failure_dict = failure_dict.data + qualnames = list(failure_dict.keys()) + for test_to_option in failure_dict.values(): + test_names = list(test_to_option.keys()) + for test_name, test_dict in test_to_option.items(): + if set(test_dict.keys()) != set({"comment", "status"}): + raise RuntimeError( + "in failures_dict, expected sub-dict to have keys 'comment' and 'status'" + ) + test_option = test_dict["status"] + if test_option not in TEST_OPTIONS: + raise RuntimeError( + f"In failures_dict, got status={test_option} but it needs to be in {TEST_OPTIONS}" + ) + test_class, actual_test_name = test_name.split(".") + if not any(actual_test_name.startswith(test) for test in test_utils): + raise RuntimeError( + f"In failures_dict, test name '{test_name}' should begin with one of {test_utils}" + ) + for test in test_utils: + if not actual_test_name.startswith(test): + continue + base_test_name = actual_test_name[len(test) + 2 :] + # remove potential pytest parametrization suffix + base_test_name = re.sub(r"\[.*\]", "", base_test_name) + if testcase.__name__ != test_class: + continue + if hasattr(testcase, base_test_name): + continue + raise RuntimeError( + f"In failures dict, got test name '{test_name}'. We parsed this as " + f"running test '{test}' on '{base_test_name}', but " + f"{base_test_name} does not exist on the TestCase '{testcase.__name__}]. " + f"Maybe you need to change the test name?" + ) + + +def should_update_failures_dict() -> bool: + key = "PYTORCH_OPCHECK_ACCEPT" + return key in os.environ and os.environ[key] == "1" + + +_is_inside_opcheck_mode = threading.local() +_is_inside_opcheck_mode.value = False + + +def is_inside_opcheck_mode(): + return _is_inside_opcheck_mode.value + + +class OpCheckMode(TorchFunctionMode): + """ + For a given test, OpCheckMode intercepts calls to operators and runs + test_util(op, args, kwargs) for each intercepted (op, args, kwargs). + """ + + def __init__( + self, + namespaces: List[str], + test_util_name: str, + test_util: Callable, + failures_dict: "FailuresDict", + test_name: str, + failures_dict_path: str, + ): + # We will intercept calls to ops with these namespaces + self.namespaces = namespaces + # The test utility function. Its signature should be (op, args, kwargs) -> None. + # Examples of test utilities are: schema_check, make_fx_check + self.test_util = test_util + self.test_util_name = test_util_name + # The name of the test that is running this OpCheckMode. + self.test_name = test_name + # Maps qualname -> test_name -> skip/xfail + # Tells us if we should skip a test or assert that there is a failure. + self.failures_dict = failures_dict + # Location of the failures dict. Makes it so that the error message is better. + self.failures_dict_path = failures_dict_path + + # OpCheckMode surpresses errors, collects them here, and then raises them on exit. 
+ # Maps qualname -> List[(Exception, func, maybe args, maybe kwargs)] + self.seen_ops_to_errors = {} + + def maybe_raise_errors_on_exit(self) -> None: + # Check expected failures first + for qualname in self.seen_ops_to_errors.keys(): + option = self.failures_dict.get_status(qualname, self.test_name) + if len(self.seen_ops_to_errors[qualname]) == 0: + if should_update_failures_dict(): + self.failures_dict.set_status( + qualname, self.test_name, "xsuccess", comment="" + ) + else: + if option == "xfail": + raise OpCheckError( + f"generate_opcheck_tests: Unexpected success for operator " + f"{qualname} on test {self.test_name}. This may mean that " + f"you have fixed this test failure. Please rerun the test with " + f"PYTORCH_OPCHECK_ACCEPT=1 to automatically update the test runner " + f"or manually remove the " + f"expected failure in the failure dict at " + f"{self.failures_dict_path}" + f"For more details, see " + f"{GDOC}" + ) + continue + failed_ops = [] + for qualname in self.seen_ops_to_errors.keys(): + option = self.failures_dict.get_status(qualname, self.test_name) + if option != "xsuccess": + continue + if len(self.seen_ops_to_errors[qualname]) == 0: + continue + failed_ops.append(qualname) + if not failed_ops: + return + + if should_update_failures_dict(): + for op in failed_ops: + self.failures_dict.set_status(op, self.test_name, "xfail") + return + + # Raise from the first error but also report about all of them to make + # recording xfails easier. + ex, op, args, kwargs = self.seen_ops_to_errors[failed_ops[0]][0] + repro_command = generate_repro( + self.test_util_name, op, args, kwargs, save_data=should_print_better_repro() + ) + raise OpCheckError( + f"Test generated by `generate_opcheck_tests`, {self.test_name}, " + f"failed on operators {failed_ops}. This usually means that the " + f"operators are not implemented correctly and may lead to silently " + f"incorrect behavior. Set PYTORCH_OPCHECK_PRINT_BETTER_REPRO=1 for a standalone repro, " + f"or please see " + f"{GDOC} " + f"for more recommendations. " + f"To reproduce this problem locally, try to run the following:\n{repro_command}" + ) from ex + + def __enter__(self, *args, **kwargs): + self.prev_is_opcheck_mode = _is_inside_opcheck_mode.value + self.prev_dynamo_disable = os.environ.get("TORCHDYNAMO_DISABLE", "") + _is_inside_opcheck_mode.value = True + os.environ["TORCHDYNAMO_DISABLE"] = "1" + return super().__enter__(*args, **kwargs) + + def __exit__(self, *args, **kwargs): + _is_inside_opcheck_mode.value = self.prev_is_opcheck_mode + os.environ["TORCHDYNAMO_DISABLE"] = self.prev_dynamo_disable + try: + self.maybe_raise_errors_on_exit() + if should_update_failures_dict(): + self.failures_dict.save() + finally: + result = super().__exit__(*args, **kwargs) + return result + + def run_test_util(self, op, args, kwargs): + try: + self.test_util(op, args, kwargs, copy_inputs=False) + except torch._subclasses.fake_tensor.UnsupportedFakeTensorException: + # We might get here if the input is already a FakeTensor + # or if we're in a torch.compile block. Just ignore these + # since we can't handle them and reporting them as failures + # is too noisy. 
+ pass + + def __torch_function__(self, func, types, args=(), kwargs=None): + kwargs = kwargs if kwargs else {} + + # Only intercept calls to operators + if not isinstance(func, (torch._ops.OpOverloadPacket, torch._ops.OpOverload)): + return func(*args, **kwargs) + if ( + torch.jit.is_tracing() + or torch.jit.is_scripting() + or torch._dynamo.is_compiling() + ): + return func(*args, **kwargs) + # Pre-existing code may not use the .default overload. If we see an + # OpOverloadPacket and we cannot resolve the overload, then we just throw + # and ask the user to clarify. Otherwise, we attempt to resolve the overload. + if isinstance(func, torch._ops.OpOverloadPacket): + func = resolve_unique_overload_or_throw(func) + qualname = func.name() + ns = qualname.split("::")[0] + if ns not in self.namespaces: + return func(*args, **kwargs) + + args_c, kwargs_c = deepcopy_tensors((args, kwargs)) + result = func(*args, **kwargs) + + option = self.failures_dict.get_status(qualname, self.test_name) + if option == "xsuccess" or option == "xfail": + # Surpress all errors during execution. Raise them during __exit__. + try: + if qualname not in self.seen_ops_to_errors: + self.seen_ops_to_errors[qualname] = [] + self.run_test_util(func, args_c, kwargs_c) + except Exception as ex: + if should_print_better_repro(): + self.seen_ops_to_errors[qualname].append((ex, func, args, kwargs)) + else: + self.seen_ops_to_errors[qualname].append((ex, func, None, None)) + elif option == "skip": + pass + return result + + +def should_print_better_repro() -> None: + """If set, the tests generated by `generate_opcheck_tests` will print a + repro command on failure. + + In order to print the repro command, we need to save some tensors to disk. + These will be saved under the following directory: + {tempfile.gettempdir()}/pytorch_opcheck_safe_to_delete/. + + Although this is a temp folder, it will usually not automatically get cleaned + up, so you'll need to manually delete it. + """ + key = "PYTORCH_OPCHECK_PRINT_BETTER_REPRO" + if key not in os.environ: + return False + value = os.environ[key] + return value == "1" or value == 1 + + +def opcheck( + op: torch._ops.OperatorBase, + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + *, + test_utils: Union[str, List[str]] = "ALL", + raise_exception: bool = True, +) -> Dict[str, str]: + """Given an operator and some sample arguments, tests if the operator is + registered correctly. + + We test the following (which are important for correctness in eager-mode + PyTorch and with torch.compile): + - test_schema: if the operator's schema is correct. + - test_autograd_registration: if autograd was registered correctly, + i.e. to the correct DispatchKey. + - test_faketensor: If the operator has a FakeTensor implementation + (and if it is correct). + - test_aot_dispatch_static: If the operator works with + AOTAutograd/AOTDispatch, which is one of the parts in the PT2 stack. + Checks that the outputs (and gradients, if they are computable) + of the operator are the same under eager-mode PyTorch and torch.compile. + - test_aot_dispatch_dynamic: Same as aot_dispatch_static, but + tests dynamic shapes instead of static shapes. + + For best results, please call ``opcheck`` multiple times with a + representative set of inputs. For example, if your operator supports + autograd, please use ``opcheck`` with inputs that require_grad. + + Args: + op: The operator. 
Should look like torch.ops.aten.foo + args: The args to the operator + kwargs: The kwargs to the operator + test_utils: Tests that we should run. Default: all of them. + Example: ["test_schema", "test_faketensor"] + raise_exception: If we should raise an exception on the first + error. If False, we will return a dict with information + on if each test passed or not. + + """ + + if kwargs is None: + kwargs = {} + if isinstance(op, torch._ops.OpOverloadPacket): + op = resolve_unique_overload_or_throw(op) + if not isinstance(op, torch._ops.OpOverload): + raise ValueError( + f"opcheck(op, ...): op must be instance of torch._ops.OpOverload, " + f"e.g. torch.ops.aten.sin.default, got {type(op)}" + ) + if test_utils == "ALL": + test_utils = tuple(ALL_TEST_UTILS.keys()) + if isinstance(test_utils, str): + test_utils = (test_utils,) + if not isinstance(test_utils, (tuple, list)) or not set(test_utils).issubset( + ALL_TEST_UTILS.keys() + ): + raise ValueError( + f"opcheck(op, ..., test_utils={test_utils}), expected test_utils " + f"to be subset of {tuple(ALL_TEST_UTILS.keys())} but it was not" + ) + + results_dict = {} + for test_util in test_utils: + tester = ALL_TEST_UTILS[test_util] + try: + tester(op, args, kwargs) + results_dict[test_util] = "SUCCESS" + except Exception as ex: + if raise_exception: + raise OpCheckError( + f"opcheck(op, ...): {test_util} failed with {ex} " + f"(scroll up for stack trace)" + ) from ex + results_dict[test_util] = ex + return results_dict + + +class OpCheckError(Exception): + pass + + +def generate_repro( + test: str, + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + save_data: bool, + dry_run: bool = False, +) -> str: + if save_data: + now = datetime.datetime.now() + path = os.path.join(tempfile.gettempdir(), "pytorch_opcheck_safe_to_delete") + unix_timestamp = datetime.datetime.timestamp(now) * 100000 + filepath = os.path.join(path, f"repro_{unix_timestamp}.pt") + if not dry_run: + os.makedirs(path, exist_ok=True) + torch.save((args, kwargs), filepath) + args_kwargs = f'args, kwargs = torch.load("{filepath}")' + else: + args_kwargs = ( + "# If you rerun your test with PYTORCH_OPCHECK_PRINT_BETTER_REPRO=1\n" + "# we will fill them in same (args, kwargs) as in your test\n" + "args = () # args to the operator\n" + "kwargs = {} # kwargs to the operator" + ) + + ns, name = op._schema.name.split("::") + overload = op._overloadname + + repro_command = ( + f"# =========================================================\n" + f"# BEGIN REPRO SCRIPT\n" + f"# =========================================================\n" + f"import torch\n" + f"from torch.testing._internal.optests import opcheck\n" + f"\n" + f"# Make sure you have loaded the library that contains the op\n" + f"# via an import or torch.ops.load_library(...)\n" + f"op = torch.ops.{ns}.{name}.{overload}\n" + f"\n" + f"{args_kwargs}\n" + f'opcheck(op, args, kwargs, test_utils="{test}")\n' + f"# =========================================================\n" + f"# END REPRO SCRIPT\n" + f"# =========================================================\n" + ) + return repro_command + + +def resolve_unique_overload_or_throw( + op: torch._ops.OpOverloadPacket, +) -> torch._ops.OpOverload: + all_schemas = torch._C._jit_get_schemas_for_operator(op._qualified_op_name) + if len(all_schemas) != 1: + raise RuntimeError( + f"opcheck can only test operators without overloads. 
" + f"Got the following overloads for {op._qualified_op_name}: " + f"{[schema.overload_name for schema in all_schemas]}" + ) + + overload_name = all_schemas[0].overload_name + if overload_name == "": + return op.default + return getattr(op, overload_name) + + +DUMP_OPTIONS = {"indent": 2, "sort_keys": True} + + +FailuresDictData = Dict[str, Dict[str, Dict[str, str]]] + + +VERSION = 1 +DESCRIPTION = ( + f"This is a dict containing failures for tests autogenerated by " + f"generate_opcheck_tests. " + f"For more details, please see {GDOC}" +) + + +class FailuresDict: + def __init__(self, path: str, data: FailuresDictData): + self.path = path + self.data = data + + @staticmethod + def load(path, *, create_file=False) -> "FailuresDict": + if create_file and not os.path.exists(path): + result = FailuresDict(path, {}) + FailuresDict.save() + return result + with open(path) as fp: + contents = fp.read() + if contents.strip() == "": + dct = { + "_description": DESCRIPTION, + "data": {}, + "_version": VERSION, + } + else: + dct = json.loads(contents) + assert "data" in dct + assert "_version" in dct and dct["_version"] == VERSION + return FailuresDict(path, dct["data"]) + + def _save(self, to_str=False) -> Optional[str]: + to_dump = { + "_description": DESCRIPTION, + "data": self.data, + "_version": VERSION, + } + # json.dumps doesn't end with a newline. Let's add one because files + # should end in newlines. + serialized = json.dumps(to_dump, **DUMP_OPTIONS) + "\n" + if to_str: + return serialized + with open(self.path, "w") as fp: + fp.write(serialized) + return None + + def save(self) -> None: + return self._save() + + def get_status(self, qualname: str, test_name: str) -> str: + if qualname not in self.data: + return "xsuccess" + dct = self.data[qualname] + if test_name not in dct: + return "xsuccess" + return dct[test_name]["status"] + + def set_status( + self, + qualname: str, + test_name: str, + status: str, + *, + comment: Optional[str] = None, + ): + if qualname not in self.data: + self.data[qualname] = {} + dct = self.data[qualname] + if test_name not in dct: + dct[test_name] = {"status": None, "comment": ""} + + if status == "xsuccess": + # The default status is "xsuccess". + del dct[test_name] + else: + dct[test_name]["status"] = status + if comment is not None: + dct[test_name]["comment"] = comment diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/make_fx.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/make_fx.py new file mode 100644 index 0000000000000000000000000000000000000000..95f746a31af385b1fe13000d6d3b53324dc6fde4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/optests/make_fx.py @@ -0,0 +1,89 @@ +# mypy: ignore-errors + +import torch +from torch.fx.experimental.proxy_tensor import make_fx +from torch.testing._internal.common_methods_invocations import wrapper_set_seed +import torch.utils._pytree as pytree + + +def make_fx_check( + func, + args, + kwargs, + tracing_mode, + assert_close=torch.testing.assert_close, + randomize_data=False, +): + f, *new_args = handle_sizes_for_dynamic_shapes(func, args, kwargs) + + def run(f, *args, **kwargs): + return wrapper_set_seed(f, *args, **kwargs) + + traced_f = make_fx(f, tracing_mode=tracing_mode)(*new_args) + + msg = ( + "op(*args, **kwargs) and make_fx(op)(*args, **kwargs) produced different " + "values. 
This could mean that your abstract impls (meta/FakeTensor impls) " + "are incorrect, that your operator is not completely traceable (e.g., " + "it relies on some global state), or that there is a bug in make_fx. " + "Note that if you passed a python function (and not an operator) to " + "make_fx_check, it is still possible that the python function will still " + "work with torch.compile because it handles capturing pieces of " + "your python code to compile." + ) + + # Randomize the data and run the traced graph with it, to catch bugs + # where we may have baked in Tensor data into the trace. + # This is not guaranteed to succeed, because `f` might have preconditions + # on the values of the inputs, so we just ignore if we used + # random data and it fails. + if randomize_data: + new_args = randomize(new_args) + try: + expected = run(f, *new_args) + except Exception: + if randomize_data: + return + raise + result = run(traced_f, *new_args) + assert_close(result, expected, msg=msg) + + +# Arguably we should make make_fx promote torch.Size() objects to symbolic shapes. +# Absent that, here is our strategy: +# +# If any argument is a torch.Size(), maybe get dynamic shapes for it by: +# - Create a temporary Tensor whose size is the torch.Size() we want. Note that +# we use an expanded Tensor as we cannot pass "meta" Tensors to make_fx. +# - Pass it to make_fx such that it is is converted to a proxy Tensor +# - Unpack the size in the wrapper to get a torch.Size with dynamic shapes (in +# symbolic mode, a no-op otherwise) +def handle_sizes_for_dynamic_shapes(func, args, kwargs): + def f(args, kwargs, extra_args, extra_kwargs): + if extra_args: + for i, t in extra_args: + args[i] = t.size() + if extra_kwargs: + for k, t in extra_kwargs.items(): + kwargs[k] = t.size() + + return func(*args, **kwargs) + + extra_args = [] + extra_kwargs = {} + for i, arg in enumerate(args): + if isinstance(arg, torch.Size): + extra_args.append((i, torch.empty(arg, device="cpu"))) + for key, value in kwargs.items(): + if isinstance(value, torch.Size): + extra_kwargs[key] = torch.empty(value, device="cpu") + + return f, args, kwargs, extra_args, extra_kwargs + + +def randomize(args): + def transform(x): + if not x.dtype.is_floating_point: + return x + return x.detach().clone().uniform_(0, 1).requires_grad_(x.requires_grad) + return pytree.tree_map_only(torch.Tensor, transform, args) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08d1d0cf1fed77e080563c54d41768a57950b405 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/future_div.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/future_div.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1f32e96e2acd66e0ed5d4703b0fa5f55c9c917c Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/future_div.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/no_future_div.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/no_future_div.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a836fa3dc0dca73f40bb8549e966e7a5cd9c8c81 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/no_future_div.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/future_div.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/future_div.py new file mode 100644 index 0000000000000000000000000000000000000000..0a3494f945fad36d84cb8056dcf722d6911f0af2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/future_div.py @@ -0,0 +1,10 @@ +# mypy: ignore-errors + + + +def div_int_future(): + return 1 / 2 + + +def div_float_future(): + return 3.14 / 0.125 diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/no_future_div.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/no_future_div.py new file mode 100644 index 0000000000000000000000000000000000000000..164e6d168414a11039f3b63885760ad08b81ae99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/test_module/no_future_div.py @@ -0,0 +1,11 @@ +# mypy: ignore-errors + +import torch # noqa: F401 + + +def div_int_nofuture(): + return 1 / 2 + + +def div_float_nofuture(): + return 3.14 / 0.125 diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/triton_utils.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/triton_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e6790a732f9f2f56a42d48820c6d4d69579ea7e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/triton_utils.py @@ -0,0 +1,392 @@ +# mypy: ignore-errors + +import unittest + +from torch.testing._internal.inductor_utils import HAS_CUDA + + +def has_lark(): + try: + import lark # noqa: F401 + + return True + except ModuleNotFoundError: + return False + + +HAS_LARK = has_lark() + +requires_lark = unittest.skipUnless(HAS_LARK, "requires lark") +requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") + +if HAS_CUDA: + import triton + from triton import language as tl + + # Define here so that multiple tests can take advantage of it + @triton.jit + def add_kernel( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + y = tl.load(in_ptr1 + offsets, mask=mask) + output = x + y + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.jit + def add_kernel_with_optional_param( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + ARGS_PASSED: "tl.constexpr", + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + if ARGS_PASSED == "two": + y = tl.load(in_ptr1 + offsets, mask=mask) + output = x + y + else: + output = x + tl.store(out_ptr + offsets, output, mask=mask) + + 
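# Illustrative launch sketch for the element-wise kernels above (for reference only;
# the tensor sizes, device, and BLOCK_SIZE chosen here are assumptions, not taken from
# this file). A Triton kernel is invoked through the standard grid-callable syntax:
#
#     x = torch.randn(4096, device="cuda")
#     y = torch.randn(4096, device="cuda")
#     out = torch.empty_like(x)
#     n_elements = out.numel()
#     grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
#     add_kernel[grid](x, y, out, n_elements, BLOCK_SIZE=1024)
#
# The autotuned variants below omit BLOCK_SIZE at the call site, since triton.autotune
# supplies it from the selected config.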
@triton.autotune( + configs=[ + triton.Config({"BLOCK_SIZE": 128}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_SIZE": 128}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_SIZE": 64}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_SIZE": 64}, num_stages=4, num_warps=4), + ], + key=[], + ) + @triton.jit + def add_kernel_autotuned( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + y = tl.load(in_ptr1 + offsets, mask=mask) + output = x + y + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.autotune( + configs=[ + triton.Config( + {"BLOCK_SIZE_X": 128, "BLOCK_SIZE_Y": 128}, num_stages=3, num_warps=8 + ), + triton.Config( + {"BLOCK_SIZE_X": 128, "BLOCK_SIZE_Y": 128}, num_stages=4, num_warps=4 + ), + triton.Config( + {"BLOCK_SIZE_X": 64, "BLOCK_SIZE_Y": 64}, num_stages=3, num_warps=8 + ), + triton.Config( + {"BLOCK_SIZE_X": 64, "BLOCK_SIZE_Y": 64}, num_stages=4, num_warps=4 + ), + ], + key=[], + ) + @triton.jit + def add_kernel_2d_autotuned( + in_ptr0, + in_ptr1, + out_ptr, + x_elements, + y_elements, + BLOCK_SIZE_X: "tl.constexpr", + BLOCK_SIZE_Y: "tl.constexpr", + ): + xoffset = tl.program_id(0) * BLOCK_SIZE_X + xindex = xoffset + tl.arange(0, BLOCK_SIZE_X)[:, None] + xmask = xindex < x_elements + yoffset = tl.program_id(1) * BLOCK_SIZE_Y + yindex = yoffset + tl.arange(0, BLOCK_SIZE_Y)[None, :] + ymask = yindex < y_elements + x1 = xindex + y0 = yindex + tmp0 = tl.load(in_ptr0 + (x1 + (x_elements * y0)), xmask & ymask) + tmp1 = tl.load(in_ptr0 + (y0 + (y_elements * x1)), xmask & ymask) + tmp2 = tmp0 + tmp1 + tl.store(out_ptr + (x1 + (x_elements * y0)), tmp2, xmask & ymask) + + @triton.jit + def mul2_kernel( + in_ptr0, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + output = 2 * x + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.jit + def mul2_inplace_kernel( + ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(ptr + offsets, mask=mask) + output = 2 * x + tl.store(ptr + offsets, output, mask=mask) + + @triton.jit + def zero_negs(x): + return tl.where(x >= 0, x, 0) + + @triton.jit + def indirection_kernel( + in_ptr0, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ACTIVATION: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + if ACTIVATION == "mul2_inplace_kernel": + mul2_inplace_kernel(in_ptr0, n_elements, BLOCK_SIZE=BLOCK_SIZE) + elif ACTIVATION == "add_kernel": + add_kernel(in_ptr0, in_ptr0, out_ptr, n_elements, BLOCK_SIZE=BLOCK_SIZE) + x = tl.load(in_ptr0 + offsets, mask=mask) + tl.store(out_ptr + offsets, x, mask=mask) + + @triton.jit + def double_strided_kernel( + in_ptr, + out_ptr, + in_y_stride, + out_y_stride, + X_BLOCK_SIZE: "tl.constexpr", + Y_BLOCK_SIZE: "tl.constexpr", + ): + xid = tl.program_id(axis=0) + yid = tl.program_id(axis=1) + x_start = xid * X_BLOCK_SIZE + y_start = yid * Y_BLOCK_SIZE + x_offsets = x_start + tl.arange(0, X_BLOCK_SIZE) + 
y_offsets = y_start + tl.arange(0, Y_BLOCK_SIZE) + src_offsets = y_offsets[:, None] * in_y_stride + x_offsets[None, :] + dst_offsets = y_offsets[:, None] * out_y_stride + x_offsets[None, :] + src = tl.load(in_ptr + src_offsets) + tl.store(out_ptr + dst_offsets, src * 2.0) + + @triton.jit + def inline_asm_kernel(X, Y, Z, n: "tl.constexpr", BLOCK: "tl.constexpr"): + x = tl.load(X + tl.arange(0, BLOCK)) + y = tl.load(Y + tl.arange(0, BLOCK)) + s = tl.full([BLOCK], n, tl.int32) + z = tl.inline_asm_elementwise( + "shf.l.wrap.b32 $0, $1, $2, $3;", + "=r,r, r, r", + [x, y, s], + dtype=tl.int32, + is_pure=True, + pack=1, + ) + tl.store(Z + tl.arange(0, BLOCK), z) + + @triton.jit + def add_kernel_with_block_ptr( + x_ptr, + y_ptr, + output_ptr, + n_elements, + BLOCK_SIZE: tl.constexpr, + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + x = tl.load( + tl.make_block_ptr( + base=x_ptr, + shape=[n_elements], + strides=[1], + offsets=[block_start], + block_shape=[BLOCK_SIZE], + order=[0], + ), + boundary_check=[0], + ) + y = tl.load( + tl.make_block_ptr( + base=y_ptr, + shape=[n_elements], + strides=[1], + offsets=[block_start], + block_shape=[BLOCK_SIZE], + order=[0], + ), + boundary_check=[0], + ) + output = x + y + tl.store( + tl.make_block_ptr( + base=output_ptr, + shape=[n_elements], + strides=[1], + offsets=[block_start], + block_shape=[BLOCK_SIZE], + order=[0], + ), + output, + boundary_check=[0], + ) + + @triton.jit + def kernel_with_block_ptr_2d( + x_ptr, + output_ptr, + n_elements, + BLOCK_SIZE: tl.constexpr, + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + x = tl.load( + tl.make_block_ptr( + base=x_ptr, + shape=[n_elements, 1], + strides=[1, 1], + offsets=[block_start, 0], + block_shape=[BLOCK_SIZE, 1], + order=[1, 0], + ), + boundary_check=[0], + ) + output = x + tl.store( + tl.make_block_ptr( + base=output_ptr, + shape=[n_elements, 1], + strides=[1, 1], + offsets=[block_start, 0], + block_shape=[BLOCK_SIZE, 1], + order=[1, 0], + ), + output, + boundary_check=[0], + ) + + from triton.language import load, store + + @triton.jit + def add_kernel_with_import( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = load(in_ptr0 + offsets, mask=mask) + y = load(in_ptr1 + offsets, mask=mask) + output = x + y + store(out_ptr + offsets, output, mask=mask) + + @triton.jit + def cond_op_kernel( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + y = tl.load(in_ptr1 + offsets, mask=mask) + if tl.program_id(0) == 0: + output = x + y + else: + output = x * y + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.jit + def atomic_add_kernel( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + y = tl.load(in_ptr1 + offsets, mask=mask) + output = x + y + tl.atomic_add(out_ptr + offsets, output, mask=mask) + + @triton.jit + def add_4_times_kernel( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + 
+        block_start = pid * BLOCK_SIZE
+        offsets = block_start + tl.arange(0, BLOCK_SIZE)
+        mask = offsets < n_elements
+        x = tl.load(in_ptr0 + offsets, mask=mask)
+        y = tl.load(in_ptr1 + offsets, mask=mask)
+        for i in range(2):
+            output = x + y
+            tl.store(out_ptr + offsets, output, mask=mask)
+        i = 2
+        while i > 0:
+            i -= 1
+            output = x + y
+            tl.store(out_ptr + offsets, output, mask=mask)
+
+    @triton.jit
+    def add_kernel_out_of_order_fn2(
+        in_ptr0,
+        in_ptr1,
+        n_elements,
+        out_ptr,
+        BLOCK_SIZE: "tl.constexpr",
+    ):
+        pid = tl.program_id(axis=0)
+        block_start = pid * BLOCK_SIZE
+        offsets = block_start + tl.arange(0, BLOCK_SIZE)
+        mask = offsets < n_elements
+        x = tl.load(in_ptr0 + offsets, mask=mask)
+        y = tl.load(in_ptr1 + offsets, mask=mask)
+        output = x + y
+        tl.store(out_ptr + offsets, output, mask=mask)
diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/two_tensor.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/two_tensor.py
new file mode 100644
index 0000000000000000000000000000000000000000..32fbe011ade07788923ed65c38cff1a8af7b8e49
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/two_tensor.py
@@ -0,0 +1,82 @@
+# mypy: ignore-errors
+
+import torch
+import torch.utils._pytree as pytree
+from torch.utils._python_dispatch import return_and_correct_aliasing
+
+
+# A simple tensor subclass that holds two tensors internally, and runs every op on both tensors.
+class TwoTensor(torch.Tensor):
+    @staticmethod
+    def __new__(cls, a, b):
+        assert (
+            a.device == b.device
+            and a.layout == b.layout
+            and a.requires_grad == b.requires_grad
+            and a.dtype == b.dtype
+        )
+        # I guess it would be more accurate to represent the shape as torch.cat(a, b).shape
+        shape = a.shape
+        kwargs = {}
+        kwargs["strides"] = a.stride()
+        kwargs["storage_offset"] = a.storage_offset()
+        kwargs["device"] = a.device
+        kwargs["layout"] = a.layout
+        kwargs["requires_grad"] = a.requires_grad
+        kwargs["dtype"] = a.dtype
+        out = torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
+
+        assert a.shape == b.shape
+        assert a.stride() == b.stride()
+        assert a.storage_offset() == b.storage_offset()
+        return out
+
+    def __init__(self, a, b):
+        self.a = a
+        self.b = b
+
+    def __repr__(self):
+        a_repr = repr(self.a)
+        b_repr = repr(self.b)
+        return f"TwoTensor({a_repr}, {b_repr})"
+
+    def __tensor_flatten__(self):
+        return ["a", "b"], None
+
+    @staticmethod
+    def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
+        assert meta is None
+        a, b = inner_tensors["a"], inner_tensors["b"]
+        return TwoTensor(a, b)
+
+    @classmethod
+    def __torch_dispatch__(cls, func, types, args, kwargs):
+        if kwargs is None:
+            kwargs = {}
+        args_a = pytree.tree_map_only(TwoTensor, lambda x: x.a, args)
+        args_b = pytree.tree_map_only(TwoTensor, lambda x: x.b, args)
+
+        kwargs_a = pytree.tree_map_only(TwoTensor, lambda x: x.a, kwargs)
+        kwargs_b = pytree.tree_map_only(TwoTensor, lambda x: x.b, kwargs)
+
+        out_a = func(*args_a, **kwargs_a)
+        out_b = func(*args_b, **kwargs_b)
+        assert type(out_a) == type(out_b)
+        out_a_flat, spec = pytree.tree_flatten(out_a)
+        out_b_flat = pytree.tree_leaves(out_b)
+        # for aten ops that return non-tensors, just assume that
+        # our two inner tensors return the same value
+        out_flat = [
+            TwoTensor(o_a, o_b) if isinstance(o_a, torch.Tensor) else o_a
+            for o_a, o_b in zip(out_a_flat, out_b_flat)
+        ]
+        out = pytree.tree_unflatten(out_flat, spec)
+        return return_and_correct_aliasing(func, args, kwargs, out)
+
+
+class TwoTensorMode(torch.utils._python_dispatch.TorchDispatchMode):
+    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
+        out = func(*args, **kwargs)
+        if torch._subclasses.fake_tensor._is_tensor_constructor(func):
+            out = TwoTensor(out, out.clone())
+        return out
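For context on the file added above: its own comment says the subclass "runs every op on both tensors", so a minimal usage sketch may help a reviewer. This is an illustrative sketch only, assuming the module is importable as torch.testing._internal.two_tensor (the path shown in this diff); the variable names (a, b, t, doubled, ones) are not part of the diff.

    # Sketch: exercising the TwoTensor subclass and TwoTensorMode added in this diff.
    # Assumes the module path from the diff; names here are illustrative only.
    import torch
    from torch.testing._internal.two_tensor import TwoTensor, TwoTensorMode

    a, b = torch.randn(4), torch.randn(4)
    t = TwoTensor(a, b)

    # Each aten op dispatches through __torch_dispatch__ and runs on both inner tensors.
    doubled = t * 2
    assert isinstance(doubled, TwoTensor)
    assert torch.allclose(doubled.a, a * 2) and torch.allclose(doubled.b, b * 2)

    # Under TwoTensorMode, tensor constructors produce TwoTensor instances directly.
    with TwoTensorMode():
        ones = torch.ones(3)
    assert isinstance(ones, TwoTensor)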