Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step120/zero/10.attention.dense.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
- ckpts/universal/global_step120/zero/9.attention.dense.weight/exp_avg.pt +3 -0
- venv/lib/python3.10/site-packages/torch/lib/libcaffe2_nvrtc.so +0 -0
- venv/lib/python3.10/site-packages/torch/lib/libtorch_global_deps.so +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__init__.py +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autocast_test_lists.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autograd_function_db.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_cuda.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_device_type.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dist_composable.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_distributed.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_fsdp.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_jit.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_methods_invocations.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_mkldnn.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_nn.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_optimizers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_pruning.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantized.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_subclass.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/composite_compliance.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/control_flow_opinfo_db.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dist_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dynamo_test_failures.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hypothesis_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/inductor_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_metaprogramming_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_tensor.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/static_module.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/triton_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/two_tensor.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py +369 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py +632 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py +165 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py +1322 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py +1441 -0
- venv/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py +323 -0
ckpts/universal/global_step120/zero/10.attention.dense.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82b2cc573b61810c1df8dc2b03222bd65d297aac9ca5803f3d38f647eff438ca
+size 16778411
ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad7b30513b015be4692666eeb92c161196815ac041e1ebd79fe5dc0f1012f17f
+size 33555612
ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44e31cb86eaee1ac6d969f86e5db612d9556fb4804e3d992f0d0a9e698be0117
+size 33555627
ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ee5ad1b8c398cdd65429a14a609a6d9264b1c8099e07871ac553e74d86bc695
+size 33555533
ckpts/universal/global_step120/zero/9.attention.dense.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c42cd830428106a8dec190cb226a0e64959a8fb710a57f636f718bed1bcc16d9
+size 16778396
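The five checkpoint shards above are Git LFS pointer files: only the version line, the sha256 oid and the byte size live in the repository, while the tensors themselves are stored in LFS. A minimal sketch for inspecting one shard once the LFS objects have been fetched (for example with `git lfs pull`); whether each .pt file holds a bare tensor or a dict of tensors and metadata is an assumption to verify, so the code only reports whatever torch.load returns:

import torch

# Hypothetical local path into the cloned repository (assumes `git lfs pull` has run).
shard = "ckpts/universal/global_step120/zero/10.attention.dense.weight/exp_avg_sq.pt"
obj = torch.load(shard, map_location="cpu")

if torch.is_tensor(obj):
    # Single tensor: report its dtype and shape.
    print(obj.dtype, tuple(obj.shape))
elif isinstance(obj, dict):
    # Dict-style payload: list keys and shapes so the layout can be inspected.
    print({k: getattr(v, "shape", type(v)) for k, v in obj.items()})
else:
    print(type(obj))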
venv/lib/python3.10/site-packages/torch/lib/libcaffe2_nvrtc.so
ADDED
Binary file (22.9 kB).
venv/lib/python3.10/site-packages/torch/lib/libtorch_global_deps.so
ADDED
Binary file (21.2 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__init__.py
ADDED
File without changes
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (191 Bytes).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autocast_test_lists.cpython-310.pyc
ADDED
Binary file (10.6 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autograd_function_db.cpython-310.pyc
ADDED
Binary file (16.7 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc
ADDED
Binary file (3.86 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_cuda.cpython-310.pyc
ADDED
Binary file (9.7 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_device_type.cpython-310.pyc
ADDED
Binary file (37.7 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dist_composable.cpython-310.pyc
ADDED
Binary file (4.05 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_distributed.cpython-310.pyc
ADDED
Binary file (39.1 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc
ADDED
Binary file (4.64 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_fsdp.cpython-310.pyc
ADDED
Binary file (41.8 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_jit.cpython-310.pyc
ADDED
Binary file (11.6 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_methods_invocations.cpython-310.pyc
ADDED
Binary file (548 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_mkldnn.cpython-310.pyc
ADDED
Binary file (2.57 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc
ADDED
Binary file (92.8 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_nn.cpython-310.pyc
ADDED
Binary file (122 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_optimizers.cpython-310.pyc
ADDED
Binary file (27.5 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_pruning.cpython-310.pyc
ADDED
Binary file (13.5 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-310.pyc
ADDED
Binary file (100 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantized.cpython-310.pyc
ADDED
Binary file (7.11 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_subclass.cpython-310.pyc
ADDED
Binary file (7.57 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_utils.cpython-310.pyc
ADDED
Binary file (155 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/composite_compliance.cpython-310.pyc
ADDED
Binary file (17.2 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/control_flow_opinfo_db.cpython-310.pyc
ADDED
Binary file (2.58 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc
ADDED
Binary file (15.5 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dist_utils.cpython-310.pyc
ADDED
Binary file (6.48 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dynamo_test_failures.cpython-310.pyc
ADDED
Binary file (2.37 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hypothesis_utils.cpython-310.pyc
ADDED
Binary file (6.87 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/inductor_utils.cpython-310.pyc
ADDED
Binary file (2.84 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_metaprogramming_utils.cpython-310.pyc
ADDED
Binary file (24.9 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_utils.cpython-310.pyc
ADDED
Binary file (28.9 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_tensor.cpython-310.pyc
ADDED
Binary file (6.86 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_utils.cpython-310.pyc
ADDED
Binary file (6.72 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc
ADDED
Binary file (1.63 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/static_module.cpython-310.pyc
ADDED
Binary file (1.28 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/triton_utils.cpython-310.pyc
ADDED
Binary file (7.08 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/two_tensor.cpython-310.pyc
ADDED
Binary file (3.45 kB).
venv/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py
ADDED
@@ -0,0 +1,369 @@
+# mypy: ignore-errors
+
+import torch
+from torch.testing._internal.common_utils import TEST_WITH_ROCM
+
+
+class AutocastTestLists:
+    def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype):
+        input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
+
+        hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
+               torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
+              torch.randn((n, n), device=dev, dtype=torch.float32),)
+
+        weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32),  # weight_ih
+                   torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32),  # weight_hh
+                   torch.randn((num_chunks * n), device=dev, dtype=torch.float32),  # bias_ih
+                   torch.randn((num_chunks * n), device=dev, dtype=torch.float32))  # bias_hh
+
+        # returns args as a tuple
+        return input + hx + weights
+
+    # Supplies ops and arguments for test_autocast_* in test/test_cuda.py
+    def __init__(self, dev):
+        super().__init__()
+        n = 8
+        # Utility arguments, created as one-element tuples
+        pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
+        pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
+        pointwise2_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
+        mat0_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
+        mat1_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
+        mat2_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
+
+        dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
+        conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),
+                           torch.randn(dimset, dtype=torch.float32, device=dev))
+                          for dimset in dimsets]
+        bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),)
+        element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),)
+        pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
+        pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
+        mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
+        mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
+        mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
+        mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
+
+        # The lists below organize ops that autocast needs to test.
+        # self.list_name corresponds to test_autocast_list_name in test/test_cuda.py.
+        # Each op is associated with a tuple of valid arguments.
+        # In addition, cudnn conv ops are not supported on ROCm and hence will
+        # be skipped by passing TEST_WITH_ROCM flag to those ops in self.torch_fp16 list.
+
+        # Some ops implement built-in type promotion. These don't need autocasting,
+        # but autocasting relies on their promotion, so we include tests to double-check.
+        self.torch_expect_builtin_promote = [
+            ("eq", pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("ge", pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("gt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("le", pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("lt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("ne", pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("add", pointwise0_fp32 + pointwise1_fp16, torch.float32),
+            ("div", pointwise0_fp32 + pointwise1_fp16, torch.float32),
+            ("mul", pointwise0_fp32 + pointwise1_fp16, torch.float32),
+            ("cat", (pointwise0_fp16 + pointwise1_fp32,), torch.float32),
+            ("equal", pointwise0_fp32 + pointwise1_fp16, torch.float32),
+            ("stack", (pointwise0_fp16 + pointwise1_fp32,), torch.float32),
+        ]
+        self.methods_expect_builtin_promote = [
+            ("__eq__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("__ge__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("__gt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("__le__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("__lt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("__ne__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("__add__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
+            ("__div__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
+            ("__mul__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
+        ]
+
+        # The remaining lists organize ops that autocast treats explicitly.
+        self.torch_fp16 = [
+            # deprecated _convolution
+            ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
+                                                              (0, 0), 1, False, True, True)),
+            # the current _convolution
+            ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
+                                                              (0, 0), 1, False, True, True, True)),
+            ("conv1d", conv_args_fp32[0]),
+            ("conv2d", conv_args_fp32[1]),
+            ("conv3d", conv_args_fp32[2]),
+            ("conv_tbc", conv_args_fp32[0] + bias_fp32),
+            ("conv_transpose1d", conv_args_fp32[0]),
+            ("conv_transpose2d", conv_args_fp32[1]),
+            ("conv_transpose3d", conv_args_fp32[2]),
+            ("convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1)),
+            ("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM),
+            ("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1),
+                                                                 (1, 1), 1, False, True, True), TEST_WITH_ROCM),
+            ("prelu", pointwise0_fp32 + element0_fp32),
+            ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
+            ("addmv", pointwise0_fp32 + mat2_fp32 + pointwise1_fp32),
+            ("addr", mat0_fp32 + pointwise0_fp32 + pointwise1_fp32),
+            ("matmul", mat0_fp32 + mat1_fp32),
+            ("einsum", "bkhd,bqhd->bqkh", mat0_fp32 + mat1_fp32),
+            ("mm", mat0_fp32 + mat1_fp32),
+            ("mv", mat0_fp32 + pointwise0_fp32),
+            ("chain_matmul", mat0_fp32 + mat1_fp32 + mat2_fp32),
+            ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32),
+                                    torch.randn((n, n, n), device=dev, dtype=torch.float32))),
+            ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
+                         torch.randn((n, n, n), device=dev, dtype=torch.float32),
+                         torch.randn((n, n, n), device=dev, dtype=torch.float32))),
+            ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
+                     torch.randn((n, n, n), device=dev, dtype=torch.float32))),
+            # _thnn_fused_lstm_cell and _thnn_fused_gru_cell are not Python-exposed as far as I can tell.
+            # ("_thnn_fused_lstm_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32),
+            # ("_thnn_fused_gru_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32),
+            ("lstm_cell", self._rnn_cell_args(n, num_chunks=4, is_lstm=True, dev=dev, dtype=torch.float32)),
+            ("gru_cell", self._rnn_cell_args(n, num_chunks=3, is_lstm=False, dev=dev, dtype=torch.float32)),
+            ("rnn_tanh_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)),
+            ("rnn_relu_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)),
+        ]
+        self.torch_fp32 = [
+            ("acos", (pointwise0_fp16[0].clamp(-.9, 0.9),)),
+            ("asin", (pointwise0_fp16[0].clamp(-.9, 0.9),)),
+            ("cosh", pointwise0_fp16),
+            ("erfinv", (pointwise0_fp16[0].clamp(-.9, .9),)),
+            ("exp", pointwise0_fp16),
+            ("expm1", pointwise0_fp16),
+            ("log", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
+            ("log10", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
+            ("log2", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
+            ("log1p", (pointwise0_fp16[0].clamp(-0.9, 100.0),)),
+            ("reciprocal", pointwise0_fp16),
+            ("rsqrt", (pointwise0_fp16[0].clamp(0.0, 100.0),)),
+            ("sinh", pointwise0_fp16),
+            ("tan", (pointwise0_fp16[0].clamp(-3.1 / 2, 3.1 / 2),)),
+            ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + pointwise1_fp16),
+            ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + (1.7,)),
+            # ("pow", (1.7,) + pointwise0_fp16), # This variant has a backend, but is not documented in the API.
+            ("softmax", pointwise0_fp16 + (0,)),
+            ("log_softmax", pointwise0_fp16 + (0,)),
+            ("layer_norm", pointwise0_fp16 + ((pointwise0_fp16[0].numel(),),)),
+            ("group_norm", mat0_fp16 + (1,)),
+            ("norm", pointwise0_fp16),
+            ("norm", pointwise0_fp16, {"dim": 0}),
+            # these need magma
+            # ("norm", mat0_fp16, {"p": "nuc"}),
+            # ("norm", mat0_fp16, {"p": "nuc", "dim": 0}),
+            ("norm", pointwise0_fp16, {"p": 1}),
+            ("norm", pointwise0_fp16, {"p": 1, "dim": 0}),
+            ("cosine_similarity", mat0_fp16 + mat1_fp16),
+            ("poisson_nll_loss", mat0_fp16 + mat1_fp16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))),
+            ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.float16),
+                                       torch.tensor([[1, 3, 4]], device=dev, dtype=torch.float16),
+                                       torch.tensor([1], device=dev, dtype=torch.int))),
+            ("hinge_embedding_loss", mat0_fp16 + (torch.ones(n, device=dev, dtype=torch.int),)),
+            ("kl_div", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)),
+            ("margin_ranking_loss", mat0_fp16 + mat1_fp16 + (torch.ones((n,), device=dev, dtype=torch.float16),)),
+            ("triplet_margin_loss", mat0_fp16 + mat1_fp16 + mat2_fp16),
+            ("binary_cross_entropy_with_logits", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)),
+            ("cumprod", pointwise0_fp16 + (0,)),
+            ("cumsum", pointwise0_fp16 + (0,)),
+            ("dist", pointwise0_fp16 + pointwise1_fp16),
+            ("pdist", mat0_fp16),
+            ("cdist", mat0_fp16 + mat1_fp16),
+            ("prod", pointwise0_fp16),
+            ("prod", pointwise0_fp16 + (0,)),
+            ("renorm", mat0_fp16 + (2, 0, 1.0)),
+            ("sum", pointwise0_fp16),
+            ("sum", mat0_fp16 + (1,)),
+            ("logsumexp", mat0_fp16 + (1,)),
+        ]
+        self.torch_need_autocast_promote = [
+            ("addcdiv", pointwise0_fp32 + pointwise1_fp16 + (pointwise2_fp16[0].clamp(0.1, 100),)),
+            ("addcmul", pointwise0_fp32 + pointwise1_fp16 + pointwise2_fp16),
+            ("atan2", pointwise0_fp32 + (pointwise1_fp16[0].clamp(0.1, 100),)),
+            ("bilinear", (torch.randn((1, 2), dtype=torch.float16, device=dev),
+                          torch.randn((1, 2), dtype=torch.float32, device=dev),
+                          torch.randn((1, 2, 2), dtype=torch.float16, device=dev),
+                          torch.randn((1,), dtype=torch.float32, device=dev))),
+            ("cross", (torch.randn(3, dtype=torch.float32, device=dev),
+                       torch.randn(3, dtype=torch.float16, device=dev))),
+            ("dot", pointwise0_fp16 + pointwise1_fp32),
+            ("grid_sampler", (torch.randn((2, 3, 33, 22), dtype=torch.float16, device=dev),
+                              torch.randn((2, 22, 11, 2), dtype=torch.float32, device=dev),
+                              0, 0, False)),
+            ("index_put", pointwise0_fp32 + ((torch.tensor([1], device=dev, dtype=torch.long),),
+                                             torch.randn(1, device=dev, dtype=torch.float16))),
+            ("index_put", pointwise0_fp16 + ((torch.tensor([1], device=dev, dtype=torch.long),),
+                                             torch.randn(1, device=dev, dtype=torch.float32))),
+            ("tensordot", (torch.randn((2, 2, 2), dtype=torch.float32, device=dev),
+                           torch.randn((2, 2, 2), dtype=torch.float16, device=dev))),
+            ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float32, device=dev),
+                             0,
+                             torch.randint(0, 2, (2, 2, 2), device=dev),
+                             torch.randn((2, 2, 2), dtype=torch.float16, device=dev))),
+            ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float16, device=dev),
+                             0,
+                             torch.randint(0, 2, (2, 2, 2), device=dev),
+                             torch.randn((2, 2, 2), dtype=torch.float32, device=dev))),
+        ]
+        self.nn_fp16 = [
+            ("linear", mat0_fp32 + mat1_fp32 + mat2_fp32),
+        ]
+        self.nn_fp32 = [
+            ("softplus", pointwise0_fp16),
+            ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.float),
+                          torch.zeros((n,), device=dev, dtype=torch.long))),
+            ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.half),
+                            torch.zeros((n, n, n), device=dev, dtype=torch.long))),
+            ("l1_loss", mat0_fp16 + mat1_fp16),
+            ("smooth_l1_loss", mat0_fp16 + mat1_fp16),
+            ("mse_loss", mat0_fp16 + mat1_fp16),
+            ("multilabel_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
+            ("soft_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
+            ("multi_margin_loss", mat0_fp16 + (torch.ones((n,), device=dev, dtype=torch.long),)),
+        ]
+        self.linalg_fp16 = [
+            ("linalg_vecdot", mat0_fp32 + mat0_fp32),
+            ("linalg_multi_dot", (mat0_fp32 + mat1_fp32 + mat2_fp32,)),
+        ]
+        self.methods_fp16 = [
+            ("__matmul__", mat0_fp32 + mat1_fp32)
+        ]
+        self.methods_fp32 = [
+            ("__pow__", (torch.rand(n, device=dev, dtype=torch.float16), 1.5)),
+        ]
+        self.banned = [
+            ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.float32),
+                                      torch.rand((n, n), device=dev, dtype=torch.float32)), torch._C._nn),
+        ]
+
+class AutocastCPUTestLists:
+    # Supplies ops and arguments for test_autocast_* in test/test_cpu.py
+    def __init__(self, dev):
+        super().__init__()
+        n = 8
+        # Utility arguments, created as one-element tuples
+        pointwise0_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
+        pointwise1_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
+        pointwise2_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
+        mat0_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
+        mat1_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
+        mat2_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
+
+        pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
+        pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
+
+        dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n))
+
+        dummy_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),)
+                      for dimset in dummy_dimsets]
+
+        dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
+        conv_args_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),
+                           torch.randn(dimset, dtype=torch.bfloat16, device=dev))
+                          for dimset in dimsets]
+        conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),
+                           torch.randn(dimset, dtype=torch.float32, device=dev))
+                          for dimset in dimsets]
+
+        bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),)
+        element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),)
+        pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
+        pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
+        mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
+        mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
+        mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
+        mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
+
+        dummy_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),)
+                      for dimset in dummy_dimsets]
+        # The lists below organize ops that autocast needs to test.
+        # self.list_name corresponds to test_autocast_list_name in test/test_cpu.py.
+        # Each op is associated with a tuple of valid arguments.
+
+        # Some ops implement built-in type promotion. These don't need autocasting,
+        # but autocasting relies on their promotion, so we include tests to double-check.
+        self.torch_expect_builtin_promote = [
+            ("eq", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("ge", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("gt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("le", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("lt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("ne", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("add", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
+            ("div", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
+            ("mul", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
+        ]
+
+        self.methods_expect_builtin_promote = [
+            ("__eq__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("__ge__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("__gt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("__le__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("__lt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("__ne__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
+            ("__add__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
+            ("__div__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
+            ("__mul__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
+        ]
+        # The remaining lists organize ops that autocast treats explicitly.
+        self.torch_16 = [
+            ("conv1d", conv_args_fp32[0]),
+            ("conv2d", conv_args_fp32[1]),
+            ("conv3d", conv_args_fp32[2]),
+            ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
+                     torch.randn((n, n, n), device=dev, dtype=torch.float32))),
+            ("mm", mat0_fp32 + mat1_fp32),
+            ("matmul", mat0_fp32 + mat1_fp32),
+            ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
+                         torch.randn((n, n, n), device=dev, dtype=torch.float32),
+                         torch.randn((n, n, n), device=dev, dtype=torch.float32))),
+            ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
+            ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32),
+                                    torch.randn((n, n, n), device=dev, dtype=torch.float32))),
+            ("conv_tbc", (torch.randn((10, 7, 3), device=dev, dtype=torch.float32),
+                          torch.randn((5, 3, 5), device=dev, dtype=torch.float32),
+                          torch.randn(5, device=dev, dtype=torch.float32),
+                          0)),
+            ("conv_transpose1d", conv_args_fp32[0]),
+            ("conv_transpose2d", conv_args_fp32[1]),
+            ("conv_transpose3d", conv_args_fp32[2]),
+            ("prelu", pointwise0_fp32 + element0_fp32),
+            ("_native_multi_head_attention", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
+                                              torch.randn((n, n, n), device=dev, dtype=torch.float32),
+                                              torch.randn((n, n, n), device=dev, dtype=torch.float32),
+                                              n, 4, torch.randn((3 * n, n), device=dev, dtype=torch.float32),
+                                              torch.randn((3 * n), device=dev, dtype=torch.float32),
+                                              torch.randn((n, n), device=dev, dtype=torch.float32),
+                                              torch.randn((n), device=dev, dtype=torch.float32))),
+        ]
+        self.torch_fp32 = [
+            ("poisson_nll_loss", mat0_bf16 + mat1_bf16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))),
+            ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.bfloat16),
+                                       torch.tensor([[1, 3, 4]], device=dev, dtype=torch.bfloat16),
+                                       torch.tensor([1], device=dev, dtype=torch.int))),
+            ("hinge_embedding_loss", mat0_bf16 + (torch.ones(n, device=dev, dtype=torch.int),)),
+            ("margin_ranking_loss", mat0_bf16 + mat1_bf16 + (torch.ones((n,), device=dev, dtype=torch.bfloat16),)),
+            ("triplet_margin_loss", mat0_bf16 + mat1_bf16 + mat2_bf16),
+            ("binary_cross_entropy_with_logits", mat0_bf16 + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)),
+        ]
+        self.nn_16 = [
+            ("linear", mat0_fp32 + mat1_fp32, {}),
+        ]
+        self.nn_fp32 = [
+            ("avg_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}),
+            ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),) +
+                                     (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)),
+            ("reflection_pad1d", dummy_bf16[2], {"padding": (3, 3)}),
+            ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),
+                          torch.zeros((n,), device=dev, dtype=torch.long))),
+            ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.bfloat16),
+                            torch.zeros((n, n, n), device=dev, dtype=torch.long))),
+            ("l1_loss", mat0_bf16 + mat1_bf16),
+            ("smooth_l1_loss", mat0_bf16 + mat1_bf16),
+            ("mse_loss", mat0_bf16 + mat1_bf16),
+            ("multilabel_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
+            ("soft_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
+            ("multi_margin_loss", mat0_bf16 + (torch.ones((n,), device=dev, dtype=torch.long),)),
+            ("huber_loss", mat0_bf16 + mat1_bf16),
+        ]
+        self.torch_need_autocast_promote = [
+            ("cat", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)),
+            ("stack", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)),
+        ]
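The class above only supplies (op_name, args[, extra]) tuples; per its own comment, the assertions live in test_autocast_* in test/test_cuda.py, which is not part of this commit. A minimal sketch (not the real test harness, and assuming a CUDA device is available) of how the three-element entries in torch_expect_builtin_promote could be exercised:

import torch
from torch.testing._internal.autocast_test_lists import AutocastTestLists

lists = AutocastTestLists(torch.device("cuda:0"))
with torch.autocast("cuda", dtype=torch.float16):
    for op_name, args, expected_dtype in lists.torch_expect_builtin_promote:
        out = getattr(torch, op_name)(*args)
        # torch.equal returns a Python bool, so only check dtypes of tensor outputs.
        if torch.is_tensor(out):
            assert out.dtype == expected_dtype, (op_name, out.dtype, expected_dtype)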
venv/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py
ADDED
@@ -0,0 +1,632 @@
+# mypy: ignore-errors
+
+import torch
+from functools import partial
+from torch.testing import make_tensor
+from torch.testing._internal.opinfo.core import (
+    OpInfo,
+    SampleInput,
+)
+from torch.testing._internal.common_dtype import all_types_and
+import numpy as np
+
+# Note: [autograd.Function db]
+#
+# This is a collection of autograd.Function test cases written as OpInfos
+# so they can easily be consumed by OpInfo-based tests to check if a subsystem
+# supports autograd.Function.
+#
+# Axes:
+# - saves {output, input, intermediate, non-tensor}
+# - {inputs, output} x {single tensor, tensors, arbitrary objects}
+# - Uses {mark_dirty, mark_non_differentiable, once_differentiable}
+
+
+def to_numpy(tensor):
+    return tensor.cpu().numpy()
+
+
+class NumpyCube(torch.autograd.Function):
+    @staticmethod
+    def forward(input):
+        input_np = to_numpy(input)
+        dinput = torch.tensor(3 * input_np ** 2, device=input.device)
+        return torch.tensor(input_np ** 3, device=input.device), dinput
+
+    @staticmethod
+    def setup_context(ctx, inputs, output):
+        ctx.save_for_backward(inputs[0], output[1])
+        ctx.save_for_forward(inputs[0], output[1])
+
+    @staticmethod
+    def backward(ctx, grad_output, grad_saved):
+        input, dinput = ctx.saved_tensors
+        return NumpyMul.apply(grad_output, dinput) + 6 * NumpyMul.apply(grad_saved, input)
+
+    @staticmethod
+    def vmap(info, in_dims, input):
+        result = NumpyCube.apply(input)
+        return result, (in_dims[0], in_dims[0])
+
+    @staticmethod
+    def jvp(ctx, input_tangent):
+        input, dinput = ctx.saved_tensors
+        return NumpyMul.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input)
+
+
+class CubeGenVmap(torch.autograd.Function):
+    generate_vmap_rule = True
+
+    @staticmethod
+    def forward(x):
+        return x ** 3, 3 * x ** 2
+
+    @staticmethod
+    def setup_context(ctx, inputs, outputs):
+        ctx.save_for_backward(inputs[0], outputs[1])
+        ctx.save_for_forward(inputs[0], outputs[1])
+
+    @staticmethod
+    def backward(ctx, grad_output, grad_saved):
+        input, dinput = ctx.saved_tensors
+        result = grad_output * dinput + 6 * dinput
+        return result
+
+    @staticmethod
+    def jvp(ctx, input_tangent):
+        input, dinput = ctx.saved_tensors
+        return MulGenVmap.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input)
+
+
+def sample_inputs_numpy_cube(opinfo, device, dtype, requires_grad, **kwargs):
+    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+    yield SampleInput(make_arg(1, low=0.8, high=2), args=())
+
+
+class NumpyCubeNotComposable(torch.autograd.Function):
+    @staticmethod
+    def forward(input):
+        input_np = to_numpy(input)
+        return torch.tensor(input_np ** 3, device=input.device), input_np
+
+    @staticmethod
+    def setup_context(ctx, inputs, output):
+        _, input_np = output
+        ctx.input_np = input_np
+        ctx.device = inputs[0].device
+
+    @staticmethod
+    @torch.autograd.function.once_differentiable
+    def backward(ctx, grad_output, grad_saved):
+        result_np = 3 * (ctx.input_np ** 2)
+        return torch.tensor(result_np, device=ctx.device)
+
+
+class NumpyMul(torch.autograd.Function):
+    @staticmethod
+    def forward(x, y):
+        return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)
+
+    @staticmethod
+    def setup_context(ctx, inputs, output):
+        ctx.save_for_backward(*inputs)
+        ctx.save_for_forward(*inputs)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        x, y = ctx.saved_tensors
+        gx = None
+        if ctx.needs_input_grad[0]:
+            gx = NumpyMul.apply(grad_output, y)
+        gy = None
+        if ctx.needs_input_grad[1]:
+            gy = NumpyMul.apply(grad_output, x)
+        return gx, gy
+
+    @staticmethod
+    def vmap(info, in_dims, x, y):
+        x_bdim, y_bdim = in_dims
+        x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
+        y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1)
+        result = NumpyMul.apply(x, y)
+        result = result.movedim(-1, 0)
+        return result, 0
+
+    @staticmethod
+    def jvp(ctx, x_tangent, y_tangent):
+        x, y = ctx.saved_tensors
+        return x_tangent * y + y_tangent * x
+
+def sample_inputs_numpy_mul(opinfo, device, dtype, requires_grad, **kwargs):
+    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+    # Broadcasting
+    yield SampleInput(make_arg(4, low=0.9, high=2), args=(make_arg(3, 4, low=0.9, high=2),))
+
+
+class MulGenVmap(torch.autograd.Function):
+    generate_vmap_rule = True
+
+    @staticmethod
+    def forward(x, y):
+        return x * y
+
+    @staticmethod
+    def setup_context(ctx, inputs, outputs):
+        ctx.save_for_backward(*inputs)
+        ctx.save_for_forward(*inputs)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        x, y = ctx.saved_tensors
+        gx = None
+        if ctx.needs_input_grad[0]:
+            gx = MulGenVmap.apply(grad_output, y)
+        gy = None
+        if ctx.needs_input_grad[1]:
+            gy = MulGenVmap.apply(grad_output, x)
+        return gx, gy
+
+    @staticmethod
+    def jvp(ctx, x_tangent, y_tangent):
+        x, y = ctx.saved_tensors
+        return x_tangent * y + y_tangent * x
+
+
+class NumpyExp_(torch.autograd.Function):
+    @staticmethod
+    def forward(x):
+        x_np = to_numpy(x)
+        np.exp(x_np, x_np)
+        return x
+
+    @staticmethod
+    def setup_context(ctx, inputs, output):
+        x, = inputs
+        ctx.mark_dirty(x)
+        ctx.save_for_backward(output)
+        ctx.save_for_forward(output)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        output, = ctx.saved_tensors
+        return NumpyMul.apply(grad_output, output)
+
+    @staticmethod
+    def vmap(info, in_dims, x):
+        NumpyExp_.apply(x)
+        return x, in_dims[0]
+
+    @staticmethod
+    def jvp(ctx, x_tangent):
+        # Doesn't call numpy operations because I didn't want to write NumpyMul_
+        output, = ctx.saved_tensors
+        x_tangent.mul_(output)
+        return x_tangent
+
+class NumpySort(torch.autograd.Function):
+    @staticmethod
+    def forward(x, dim):
+        device = x.device
+        x = to_numpy(x)
+        ind = np.argsort(x, axis=dim)
+        ind_inv = np.argsort(ind, axis=dim)
+        result = np.take_along_axis(x, ind, axis=dim)
+        return (
+            torch.tensor(x, device=device),
+            torch.tensor(ind, device=device),
+            torch.tensor(ind_inv, device=device),
+        )
+
+    @staticmethod
+    def setup_context(ctx, inputs, output):
+        x, dim = inputs
+        _, ind, ind_inv = output
+        ctx.mark_non_differentiable(ind, ind_inv)
+        ctx.save_for_backward(ind, ind_inv)
+        ctx.save_for_forward(ind, ind_inv)
+        ctx.dim = dim
+
+    @staticmethod
+    def backward(ctx, grad_output, _0, _1):
+        ind, ind_inv = ctx.saved_tensors
+        return NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim), None
+
+    @staticmethod
+    def vmap(info, in_dims, x, dim):
+        x_bdim, _ = in_dims
+        x = x.movedim(x_bdim, 0)
+        # wrap dim
+        dim = dim if dim >= 0 else dim + x.dim() - 1
+        return NumpySort.apply(x, dim + 1), (0, 0, 0)
+
+    @staticmethod
+    def jvp(ctx, x_tangent, _):
+        ind, ind_inv = ctx.saved_tensors
+        return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim), None, None
+
+class SortGenVmap(torch.autograd.Function):
+    generate_vmap_rule = True
+
+    @staticmethod
+    def forward(x, dim):
+        device = x.device
+        ind = torch.argsort(x, dim=dim)
+        ind_inv = torch.argsort(ind, axis=dim)
+        result = torch.take_along_dim(x, ind, dim=dim)
+        return result, ind, ind_inv
+
+    @staticmethod
+    def setup_context(ctx, inputs, outputs):
+        x, dim = inputs
+        _, ind, ind_inv = outputs
+        ctx.mark_non_differentiable(ind, ind_inv)
+        ctx.save_for_backward(ind, ind_inv)
+        ctx.save_for_forward(ind, ind_inv)
+        ctx.dim = dim
+
+    @staticmethod
+    def backward(ctx, grad_output, _0, _1):
+        ind, ind_inv = ctx.saved_tensors
+        return TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim), None
+
+    @staticmethod
+    def jvp(ctx, x_tangent, _):
+        ind, ind_inv = ctx.saved_tensors
+        return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim), None, None
+
+
+def sample_inputs_numpy_sort(opinfo, device, dtype, requires_grad, **kwargs):
+    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+    yield SampleInput(make_arg(3, 5), args=(1,))
+
+
+def sample_inputs_numpy_take(opinfo, device, dtype, requires_grad, **kwargs):
+    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+    tensor = make_arg(3, 5)
+    dim = 1
+    _, ind, ind_inv = NumpySort.apply(tensor, 1)
+    yield SampleInput(tensor, args=(ind, ind_inv, dim))
+
+
+class NumpyTake(torch.autograd.Function):
+    @staticmethod
+    def forward(x, ind, ind_inv, dim):
+        device = x.device
+        x = to_numpy(x)
+        ind = to_numpy(ind)
+        return torch.tensor(np.take_along_axis(x, ind, dim), device=device)
+
+    @staticmethod
+    def setup_context(ctx, inputs, output):
+        x, ind, ind_inv, dim = inputs
+        ctx.save_for_backward(ind, ind_inv)
+        ctx.save_for_forward(ind, ind_inv)
+        ctx.dim = dim
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        ind, ind_inv = ctx.saved_tensors
+        result = NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim)
+        return result, None, None, None
+
+    @staticmethod
+    def vmap(info, in_dims, x, ind, ind_inv, dim):
+        x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims
+
+        # wrap dim
+        logical_dim = x.dim() if x_bdim is None else x_bdim - 1
+        dim = dim if dim >= 0 else dim + logical_dim
+
+        def expand_bdim(x, x_bdim):
+            if x_bdim is None:
+                return x.expand(info.batch_size, *x.shape)
+            return x.movedim(x_bdim, 0)
+
+        x = expand_bdim(x, x_bdim)
+        ind = expand_bdim(ind, ind_bdim)
+        ind_inv = expand_bdim(ind_inv, ind_inv_bdim)
+
+        return NumpyTake.apply(x, ind, ind_inv, dim + 1), 0
+
+    @staticmethod
+    def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
+        assert ind_tangent is None
+        assert ind_inv_tangent is None
+        ind, ind_inv = ctx.saved_tensors
+        return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim)
+
+class TakeGenVmap(torch.autograd.Function):
+    generate_vmap_rule = True
+
+    @staticmethod
+    def forward(x, ind, ind_inv, dim):
+        return torch.take_along_dim(x, ind, dim)
+
+    @staticmethod
+    def setup_context(ctx, inputs, outputs):
+        x, ind, ind_inv, dim = inputs
+        ctx.save_for_backward(ind, ind_inv)
+        ctx.save_for_forward(ind, ind_inv)
+        ctx.dim = dim
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        ind, ind_inv = ctx.saved_tensors
+        result = TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim)
+        return result, None, None, None
+
+    @staticmethod
+    def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
+        ind, ind_inv = ctx.saved_tensors
+        return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim)
+
+class Select(torch.autograd.Function):
+    @staticmethod
+    def forward(x, idx):
+        return x[idx]
+
+    @staticmethod
+    def setup_context(ctx, inputs, output):
+        x, idx = inputs
+        ctx.x_shape = x.shape
+        ctx.idx = idx
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        result = grad_output.new_zeros(ctx.x_shape)
+        result[ctx.idx] = grad_output
+        return result, None
+
+    @staticmethod
+    def vmap(info, in_dims, x, idx):
+        x_bdim, _ = in_dims
+        x = x.movedim(x_bdim, 1)
+        return Select.apply(x, idx), 0
+
+    @staticmethod
+    def jvp(ctx, x_tangent, _):
+        return Select.apply(x_tangent, ctx.idx)
+
+class SelectGenVmap(torch.autograd.Function):
+    generate_vmap_rule = True
+
+    @staticmethod
+    def forward(x, idx):
+        return x[idx]
+
+    @staticmethod
+    def setup_context(ctx, inputs, outputs):
+        x, idx = inputs
+        ctx.x_shape = x.shape
+        ctx.idx = idx
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        result = grad_output.new_zeros(ctx.x_shape)
+        result[ctx.idx] = grad_output
+        return result, None
+
+    @staticmethod
+    def jvp(ctx, x_tangent, _):
+        return SelectGenVmap.apply(x_tangent, ctx.idx)
+
+
+def sample_inputs_select(opinfo, device, dtype, requires_grad, **kwargs):
+    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+    yield SampleInput(make_arg(3, 5), args=(2,))
+
+class ScaleGradGenVmap(torch.autograd.Function):
+    generate_vmap_rule = True
+    scale = 3.14
+
+    @staticmethod
+    def forward(x):
+        return x.clone()
+
+    @staticmethod
+    def setup_context(ctx, inputs, outputs):
+        pass
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        return grad_output * ScaleGradGenVmap.scale
+
+    @staticmethod
+    def jvp(ctx, x_tangent):
+        return x_tangent * ScaleGradGenVmap.scale
+
+class ZeroGradientsGenVmap(torch.autograd.Function):
+    generate_vmap_rule = True
+
+    @staticmethod
+    def forward(x, y):
+        return x.clone(), y.clone()
+
+    @staticmethod
+    def setup_context(ctx, inputs, outputs):
+        pass
+
+    @staticmethod
+    def backward(ctx, gx, gy):
+        # Intentionally returning torch.zeros instead of zeros_like or new_zeros.
+        # Also intentionally not None.
+        return (
+            # Intentionally too-large gradient
+            torch.zeros(3, 4, *gx.shape, dtype=gx.dtype, device=gx.device),
+            torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
+        )
+
+    @staticmethod
+    def jvp(ctx, gx, gy):
+        # Intentionally returning torch.zeros instead of zeros_like or new_zeros.
+        # Also intentionally not None.
+        return (
+            torch.zeros(gx.shape, dtype=gx.dtype, device=gx.device),
+            torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
+        )
+
+
+def sample_inputs_forward_default_args(opinfo, device, dtype, requires_grad, **kwargs):
+    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+    yield SampleInput(make_arg(3, 5))
+
+
+class ForwardHasDefaultArgs(torch.autograd.Function):
+    @staticmethod
+    def forward(x, idx=(2,)):
+        return x[idx]
+
+    @staticmethod
+    def setup_context(ctx, inputs, output):
+        x, idx = inputs
+        ctx.x_shape = x.shape
+        ctx.idx = idx
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        result = grad_output.new_zeros(ctx.x_shape)
+        result[ctx.idx] = grad_output
+        return result, None
+
+    @staticmethod
+    def vmap(info, in_dims, x, idx):
+        x_bdim, _ = in_dims
+        x = x.movedim(x_bdim, 1)
+        return ForwardHasDefaultArgs.apply(x, idx), 0
+
+    @staticmethod
+    def jvp(ctx, x_tangent, _):
+        return ForwardHasDefaultArgs.apply(x_tangent, ctx.idx)
+
+
+autograd_function_db = [
+    OpInfo(
+        'NumpyCubeAutogradFunction',
+        op=NumpyCube.apply,
+        supports_forward_ad=True,
+        supports_fwgrad_bwgrad=True,
+        sample_inputs_func=sample_inputs_numpy_cube,
+        dtypes=all_types_and(torch.bool, torch.half),
+        supports_out=False,
+    ),
+    OpInfo(
+        'NumpyExpMarkDirtyAutogradFunction',
+        op=lambda x: NumpyExp_.apply(x.clone()),
+        inplace_variant=NumpyExp_.apply,
+        supports_forward_ad=True,
+        supports_fwgrad_bwgrad=True,
+        sample_inputs_func=sample_inputs_numpy_cube,
+        dtypes=all_types_and(torch.bool, torch.half),
+        supports_out=False,
+    ),
+    OpInfo(
+        'NumpyMulAutogradFunction',
+        op=NumpyMul.apply,
+        supports_forward_ad=True,
+        supports_fwgrad_bwgrad=True,
+        sample_inputs_func=sample_inputs_numpy_mul,
+        dtypes=all_types_and(torch.bool, torch.half),
+        supports_out=False,
+    ),
+    OpInfo(
+        'NumpyCubeNotComposableAutogradFunction',
+        op=lambda x: NumpyCubeNotComposable.apply(x)[0],
+        supports_forward_ad=False,
+        supports_fwgrad_bwgrad=False,
+        sample_inputs_func=sample_inputs_numpy_cube,
+        dtypes=all_types_and(torch.bool, torch.half),
+        supports_out=False,
+    ),
+    OpInfo(
+        'NumpySortAutogradFunction',
+        op=NumpySort.apply,
+        supports_forward_ad=False,
+        supports_fwgrad_bwgrad=False,
+        sample_inputs_func=sample_inputs_numpy_sort,
+        dtypes=all_types_and(torch.bool, torch.half),
+        supports_out=False,
+        gradcheck_wrapper=lambda y, ind: y,
+    ),
+    OpInfo(
+        'NumpyTakeAutogradFunction',
+        op=NumpyTake.apply,
+        supports_forward_ad=False,
+        supports_fwgrad_bwgrad=False,
+        sample_inputs_func=sample_inputs_numpy_take,
+        dtypes=all_types_and(torch.bool, torch.half),
+        supports_out=False,
+    ),
+    OpInfo(
+        'SelectAutogradFunction',
+        op=Select.apply,
+        supports_forward_ad=True,
+        supports_fwgrad_bwgrad=True,
+        sample_inputs_func=sample_inputs_select,
+        dtypes=all_types_and(torch.bool, torch.half),
+        supports_out=False,
+    ),
+    OpInfo(
+        'CubeGenVmapAutogradFunction',
+        op=CubeGenVmap.apply,
+        supports_forward_ad=True,
+        supports_fwgrad_bwgrad=True,
+        sample_inputs_func=sample_inputs_numpy_cube,
+        dtypes=all_types_and(torch.bool, torch.half),
+        supports_out=False,
+    ),
+    OpInfo(
+        'MulGenVmapAutogradFunction',
+        op=MulGenVmap.apply,
+        supports_forward_ad=True,
+        supports_fwgrad_bwgrad=True,
+        sample_inputs_func=sample_inputs_numpy_mul,
+        dtypes=all_types_and(torch.bool, torch.half),
+        supports_out=False,
+    ),
+    OpInfo(
+        'SortGenVmapAutogradFunction',
+        op=SortGenVmap.apply,
+        supports_forward_ad=True,
+        supports_fwgrad_bwgrad=True,
+        sample_inputs_func=sample_inputs_numpy_sort,
+        dtypes=all_types_and(torch.bool, torch.half),
+        supports_out=False,
+        gradcheck_wrapper=lambda y, ind: y,
+    ),
+    OpInfo(
+        'SelectGenVmapAutogradFunction',
+        op=SelectGenVmap.apply,
+        supports_forward_ad=True,
+        supports_fwgrad_bwgrad=True,
+        sample_inputs_func=sample_inputs_select,
+        dtypes=all_types_and(torch.bool, torch.half),
|
603 |
+
supports_out=False,
|
604 |
+
),
|
605 |
+
OpInfo(
|
606 |
+
'ScaleGradGenVmapAutogradFunction',
|
607 |
+
op=ScaleGradGenVmap.apply,
|
608 |
+
supports_forward_ad=True,
|
609 |
+
supports_fwgrad_bwgrad=True,
|
610 |
+
sample_inputs_func=sample_inputs_numpy_cube,
|
611 |
+
dtypes=all_types_and(torch.bool, torch.half),
|
612 |
+
supports_out=False,
|
613 |
+
),
|
614 |
+
OpInfo(
|
615 |
+
'ZeroGradientsGenVmapAutogradFunction',
|
616 |
+
op=ZeroGradientsGenVmap.apply,
|
617 |
+
supports_forward_ad=True,
|
618 |
+
supports_fwgrad_bwgrad=True,
|
619 |
+
sample_inputs_func=sample_inputs_numpy_mul,
|
620 |
+
dtypes=all_types_and(torch.bool, torch.half),
|
621 |
+
supports_out=False,
|
622 |
+
),
|
623 |
+
OpInfo(
|
624 |
+
'ForwardHasDefaultArgsAutogradFunction',
|
625 |
+
op=ForwardHasDefaultArgs.apply,
|
626 |
+
supports_forward_ad=True,
|
627 |
+
supports_fwgrad_bwgrad=True,
|
628 |
+
sample_inputs_func=sample_inputs_forward_default_args,
|
629 |
+
dtypes=all_types_and(torch.bool, torch.half),
|
630 |
+
supports_out=False,
|
631 |
+
),
|
632 |
+
]
|
venv/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py
ADDED
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# mypy: ignore-errors
|
2 |
+
|
3 |
+
import os
|
4 |
+
import re
|
5 |
+
import sys
|
6 |
+
from typing import List
|
7 |
+
|
8 |
+
__all__ = [
|
9 |
+
"check_code_for_cuda_kernel_launches",
|
10 |
+
"check_cuda_kernel_launches",
|
11 |
+
]
|
12 |
+
|
13 |
+
# FILES TO EXCLUDE (match is done with suffix using `endswith`)
|
14 |
+
# You wouldn't drive without a seatbelt, though, so why would you
|
15 |
+
# launch a kernel without some safety? Use this as a quick workaround
|
16 |
+
# for a problem with the checker, fix the checker, then de-exclude
|
17 |
+
# the files in question.
|
18 |
+
exclude_files: List[str] = []
|
19 |
+
|
20 |
+
# Without using a C++ AST we can't 100% detect kernel launches, so we
|
21 |
+
# model them as having the pattern "<<<parameters>>>(arguments);"
|
22 |
+
# We then require that `C10_CUDA_KERNEL_LAUNCH_CHECK` be
|
23 |
+
# the next statement.
|
24 |
+
#
|
25 |
+
# We model the next statement as ending at the next `}` or `;`.
|
26 |
+
# If we see `}` then a clause ended (bad) if we see a semi-colon then
|
27 |
+
# we expect the launch check just before it.
|
28 |
+
#
|
29 |
+
# Since the kernel launch can include lambda statements, it's important
|
30 |
+
# to find the correct end-paren of the kernel launch. Doing this with
|
31 |
+
# pure regex requires recursive regex, which aren't part of the Python
|
32 |
+
# standard library. To avoid an additional dependency, we build a prefix
|
33 |
+
# regex that finds the start of a kernel launch, use a paren-matching
|
34 |
+
# algorithm to find the end of the launch, and then another regex to
|
35 |
+
# determine if a launch check is present.
|
36 |
+
|
37 |
+
# Finds potential starts of kernel launches
|
38 |
+
kernel_launch_start = re.compile(
|
39 |
+
r"^.*<<<[^>]+>>>\s*\(", flags=re.MULTILINE
|
40 |
+
)
|
41 |
+
|
42 |
+
# This pattern should start at the character after the final paren of the
|
43 |
+
# kernel launch. It returns a match if the launch check is not the next statement
|
44 |
+
has_check = re.compile(
|
45 |
+
r"\s*;(?![^;}]*C10_CUDA_KERNEL_LAUNCH_CHECK\(\);)", flags=re.MULTILINE
|
46 |
+
)
|
47 |
+
|
48 |
+
def find_matching_paren(s: str, startpos: int) -> int:
|
49 |
+
"""Given a string "prefix (unknown number of characters) suffix"
|
50 |
+
and the position of the first `(` returns the index of the character
|
51 |
+
1 past the `)`, accounting for paren nesting
|
52 |
+
"""
|
53 |
+
opening = 0
|
54 |
+
for i, c in enumerate(s[startpos:]):
|
55 |
+
if c == '(':
|
56 |
+
opening += 1
|
57 |
+
elif c == ')':
|
58 |
+
opening -= 1
|
59 |
+
if opening == 0:
|
60 |
+
return startpos + i + 1
|
61 |
+
|
62 |
+
raise IndexError("Closing parens not found!")
|
63 |
+
|
64 |
+
|
65 |
+
def should_exclude_file(filename) -> bool:
|
66 |
+
for exclude_suffix in exclude_files:
|
67 |
+
if filename.endswith(exclude_suffix):
|
68 |
+
return True
|
69 |
+
return False
|
70 |
+
|
71 |
+
|
72 |
+
def check_code_for_cuda_kernel_launches(code, filename=None):
|
73 |
+
"""Checks code for CUDA kernel launches without cuda error checks.
|
74 |
+
|
75 |
+
Args:
|
76 |
+
filename - Filename of file containing the code. Used only for display
|
77 |
+
purposes, so you can put anything here.
|
78 |
+
code - The code to check
|
79 |
+
|
80 |
+
Returns:
|
81 |
+
The number of unsafe kernel launches in the code
|
82 |
+
"""
|
83 |
+
if filename is None:
|
84 |
+
filename = "##Python Function Call##"
|
85 |
+
|
86 |
+
# We break the code apart and put it back together to add
|
87 |
+
# helpful line numberings for identifying problem areas
|
88 |
+
code = enumerate(code.split("\n")) # Split by line breaks
|
89 |
+
code = [f"{lineno}: {linecode}" for lineno, linecode in code] # Number the lines
|
90 |
+
code = '\n'.join(code) # Put it back together
|
91 |
+
|
92 |
+
num_launches_without_checks = 0
|
93 |
+
for m in kernel_launch_start.finditer(code):
|
94 |
+
end_paren = find_matching_paren(code, m.end() - 1)
|
95 |
+
if has_check.match(code, end_paren):
|
96 |
+
num_launches_without_checks += 1
|
97 |
+
context = code[m.start():end_paren + 1]
|
98 |
+
print(f"Missing C10_CUDA_KERNEL_LAUNCH_CHECK in '{filename}'. Context:\n{context}", file=sys.stderr)
|
99 |
+
|
100 |
+
return num_launches_without_checks
|
101 |
+
|
102 |
+
|
103 |
+
def check_file(filename):
|
104 |
+
"""Checks a file for CUDA kernel launches without cuda error checks
|
105 |
+
|
106 |
+
Args:
|
107 |
+
filename - File to check
|
108 |
+
|
109 |
+
Returns:
|
110 |
+
The number of unsafe kernel launches in the file
|
111 |
+
"""
|
112 |
+
if not (filename.endswith((".cu", ".cuh"))):
|
113 |
+
return 0
|
114 |
+
if should_exclude_file(filename):
|
115 |
+
return 0
|
116 |
+
with open(filename) as fo:
|
117 |
+
contents = fo.read()
|
118 |
+
unsafeCount = check_code_for_cuda_kernel_launches(contents, filename)
|
119 |
+
return unsafeCount
|
120 |
+
|
121 |
+
|
122 |
+
def check_cuda_kernel_launches():
|
123 |
+
"""Checks all pytorch code for CUDA kernel launches without cuda error checks
|
124 |
+
|
125 |
+
Returns:
|
126 |
+
The number of unsafe kernel launches in the codebase
|
127 |
+
"""
|
128 |
+
torch_dir = os.path.dirname(os.path.realpath(__file__))
|
129 |
+
torch_dir = os.path.dirname(torch_dir) # Go up to parent torch
|
130 |
+
torch_dir = os.path.dirname(torch_dir) # Go up to parent caffe2
|
131 |
+
|
132 |
+
kernels_without_checks = 0
|
133 |
+
files_without_checks = []
|
134 |
+
for root, dirnames, filenames in os.walk(torch_dir):
|
135 |
+
# `$BASE/build` and `$BASE/torch/include` are generated
|
136 |
+
# so we don't want to flag their contents
|
137 |
+
if root == os.path.join(torch_dir, "build") or root == os.path.join(torch_dir, "torch/include"):
|
138 |
+
# Curtail search by modifying dirnames and filenames in place
|
139 |
+
# Yes, this is the way to do this, see `help(os.walk)`
|
140 |
+
dirnames[:] = []
|
141 |
+
continue
|
142 |
+
|
143 |
+
for x in filenames:
|
144 |
+
filename = os.path.join(root, x)
|
145 |
+
file_result = check_file(filename)
|
146 |
+
if file_result > 0:
|
147 |
+
kernels_without_checks += file_result
|
148 |
+
files_without_checks.append(filename)
|
149 |
+
|
150 |
+
if kernels_without_checks > 0:
|
151 |
+
count_str = f"Found {kernels_without_checks} instances in " \
|
152 |
+
f"{len(files_without_checks)} files where kernel " \
|
153 |
+
"launches didn't have checks."
|
154 |
+
print(count_str, file=sys.stderr)
|
155 |
+
print("Files without checks:", file=sys.stderr)
|
156 |
+
for x in files_without_checks:
|
157 |
+
print(f"\t{x}", file=sys.stderr)
|
158 |
+
print(count_str, file=sys.stderr)
|
159 |
+
|
160 |
+
return kernels_without_checks
|
161 |
+
|
162 |
+
|
163 |
+
if __name__ == "__main__":
|
164 |
+
unsafe_launches = check_cuda_kernel_launches()
|
165 |
+
sys.exit(0 if unsafe_launches == 0 else 1)
|
venv/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py
ADDED
@@ -0,0 +1,1322 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# mypy: ignore-errors
|
2 |
+
|
3 |
+
import faulthandler
|
4 |
+
import logging
|
5 |
+
import multiprocessing
|
6 |
+
import os
|
7 |
+
import queue
|
8 |
+
import subprocess
|
9 |
+
import sys
|
10 |
+
import tempfile
|
11 |
+
import threading
|
12 |
+
import time
|
13 |
+
import traceback
|
14 |
+
import types
|
15 |
+
import unittest
|
16 |
+
from contextlib import contextmanager
|
17 |
+
from dataclasses import dataclass
|
18 |
+
from datetime import timedelta
|
19 |
+
from enum import Enum
|
20 |
+
from functools import partial, reduce, wraps
|
21 |
+
from io import StringIO
|
22 |
+
from typing import Dict, NamedTuple, Optional, Union
|
23 |
+
from unittest.mock import patch
|
24 |
+
|
25 |
+
import torch
|
26 |
+
import torch._dynamo.test_case
|
27 |
+
import torch.cuda.nccl
|
28 |
+
import torch.distributed as c10d
|
29 |
+
import torch.nn as nn
|
30 |
+
from torch.testing._internal.common_utils import (
|
31 |
+
FILE_SCHEMA,
|
32 |
+
find_free_port,
|
33 |
+
IS_SANDCASTLE,
|
34 |
+
retry_on_connect_failures,
|
35 |
+
skip_but_pass_in_sandcastle,
|
36 |
+
skip_but_pass_in_sandcastle_if,
|
37 |
+
TEST_WITH_ROCM,
|
38 |
+
TEST_WITH_TSAN,
|
39 |
+
TestCase,
|
40 |
+
)
|
41 |
+
from torch.testing._internal.common_utils import (
|
42 |
+
parametrize,
|
43 |
+
subtest,
|
44 |
+
)
|
45 |
+
from torch.testing._internal.distributed.multi_threaded_pg import (
|
46 |
+
_install_threaded_pg,
|
47 |
+
_uninstall_threaded_pg,
|
48 |
+
ProcessLocalGroup,
|
49 |
+
)
|
50 |
+
import operator
|
51 |
+
|
52 |
+
logging.basicConfig(level=logging.INFO)
|
53 |
+
logger = logging.getLogger(__name__)
|
54 |
+
|
55 |
+
|
56 |
+
class TestSkip(NamedTuple):
|
57 |
+
exit_code: int
|
58 |
+
message: str
|
59 |
+
|
60 |
+
|
61 |
+
TEST_SKIPS = {
|
62 |
+
"backend_unavailable": TestSkip(
|
63 |
+
72, "Skipped because distributed backend is not available."
|
64 |
+
),
|
65 |
+
"small_worldsize": TestSkip(73, "Skipped due to small world size."),
|
66 |
+
"odd_worldsize": TestSkip(87, "Skipped due to odd world size."),
|
67 |
+
"no_cuda": TestSkip(74, "CUDA is not available."),
|
68 |
+
"multi-gpu-1": TestSkip(75, "Need at least 1 CUDA device"),
|
69 |
+
"multi-gpu-2": TestSkip(77, "Need at least 2 CUDA devices"),
|
70 |
+
"multi-gpu-3": TestSkip(80, "Need at least 3 CUDA devices"),
|
71 |
+
"multi-gpu-4": TestSkip(81, "Need at least 4 CUDA devices"),
|
72 |
+
"multi-gpu-5": TestSkip(82, "Need at least 5 CUDA devices"),
|
73 |
+
"multi-gpu-6": TestSkip(83, "Need at least 6 CUDA devices"),
|
74 |
+
"multi-gpu-7": TestSkip(84, "Need at least 7 CUDA devices"),
|
75 |
+
"multi-gpu-8": TestSkip(85, "Need at least 8 CUDA devices"),
|
76 |
+
"nccl": TestSkip(76, "c10d not compiled with NCCL support"),
|
77 |
+
"skipIfRocm": TestSkip(78, "Test skipped for ROCm"),
|
78 |
+
"no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"),
|
79 |
+
"generic": TestSkip(
|
80 |
+
86, "Test skipped at subprocess level, look at subprocess log for skip reason"
|
81 |
+
),
|
82 |
+
"importerror": TestSkip(88, "Test skipped due to missing import"),
|
83 |
+
}
|
84 |
+
|
85 |
+
|
86 |
+
@dataclass
|
87 |
+
class DistTestCases:
|
88 |
+
# Backends that do not support a specific collective
|
89 |
+
skip_collective = {}
|
90 |
+
skip_collective["allgather_coalesced"] = {"nccl", "mpi", "ucc"}
|
91 |
+
skip_collective["reduce"] = set()
|
92 |
+
skip_collective["sendrecv anysource"] = {"nccl", "ucc"}
|
93 |
+
skip_collective["cpu barrier"] = {"nccl", "ucc"}
|
94 |
+
|
95 |
+
# Sets showing that something is implemented
|
96 |
+
backend_feature = {}
|
97 |
+
backend_feature["gpu"] = {"nccl", "gloo", "ucc"}
|
98 |
+
backend_feature["cuda"] = {"nccl", "gloo", "ucc"}
|
99 |
+
backend_feature["ddp"] = {"nccl", "gloo", "ucc"}
|
100 |
+
backend_feature["subgroup"] = {"nccl", "gloo", "ucc"}
|
101 |
+
backend_feature["plugin"] = set()
|
102 |
+
|
103 |
+
|
104 |
+
def skip_if_no_gpu(func):
|
105 |
+
"""Skips if the world size exceeds the number of GPUs, ensuring that if the
|
106 |
+
test is run, each rank has its own GPU via ``torch.cuda.device(rank)``."""
|
107 |
+
|
108 |
+
@wraps(func)
|
109 |
+
def wrapper(*args, **kwargs):
|
110 |
+
if not torch.cuda.is_available():
|
111 |
+
sys.exit(TEST_SKIPS["no_cuda"].exit_code)
|
112 |
+
world_size = int(os.environ["WORLD_SIZE"])
|
113 |
+
if torch.cuda.device_count() < world_size:
|
114 |
+
sys.exit(TEST_SKIPS[f"multi-gpu-{world_size}"].exit_code)
|
115 |
+
|
116 |
+
return func(*args, **kwargs)
|
117 |
+
|
118 |
+
return wrapper
|
119 |
+
|
120 |
+
|
121 |
+
def skip_if_small_worldsize(func):
|
122 |
+
@wraps(func)
|
123 |
+
def wrapper(*args, **kwargs):
|
124 |
+
if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) <= 2:
|
125 |
+
sys.exit(TEST_SKIPS["small_worldsize"].exit_code)
|
126 |
+
|
127 |
+
return func(*args, **kwargs)
|
128 |
+
|
129 |
+
return wrapper
|
130 |
+
|
131 |
+
|
132 |
+
def skip_if_odd_worldsize(func):
|
133 |
+
@wraps(func)
|
134 |
+
def wrapper(*args, **kwargs):
|
135 |
+
if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) % 2 == 1:
|
136 |
+
sys.exit(TEST_SKIPS["odd_worldsize"].exit_code)
|
137 |
+
|
138 |
+
return func(*args, **kwargs)
|
139 |
+
|
140 |
+
return wrapper
|
141 |
+
|
142 |
+
|
143 |
+
def require_n_gpus_for_nccl_backend(n, backend):
|
144 |
+
def decorator(func):
|
145 |
+
@wraps(func)
|
146 |
+
def wrapper(*args, **kwargs):
|
147 |
+
if backend == "nccl" and torch.cuda.device_count() < n:
|
148 |
+
sys.exit(TEST_SKIPS[f"multi-gpu-{n}"].exit_code)
|
149 |
+
else:
|
150 |
+
return func(*args, **kwargs)
|
151 |
+
|
152 |
+
return wrapper
|
153 |
+
|
154 |
+
return decorator
|
155 |
+
|
156 |
+
|
157 |
+
def import_transformers_or_skip():
|
158 |
+
def decorator(func):
|
159 |
+
@wraps(func)
|
160 |
+
def wrapper(*args, **kwargs):
|
161 |
+
try:
|
162 |
+
from transformers import ( # noqa: F401
|
163 |
+
AutoModelForMaskedLM,
|
164 |
+
BertConfig,
|
165 |
+
)
|
166 |
+
|
167 |
+
return func(*args, **kwargs)
|
168 |
+
except ImportError:
|
169 |
+
sys.exit(TEST_SKIPS["importerror"].exit_code)
|
170 |
+
|
171 |
+
return wrapper
|
172 |
+
|
173 |
+
return decorator
|
174 |
+
|
175 |
+
|
176 |
+
def skip_if_lt_x_gpu(x):
|
177 |
+
def decorator(func):
|
178 |
+
@wraps(func)
|
179 |
+
def wrapper(*args, **kwargs):
|
180 |
+
if torch.cuda.is_available() and torch.cuda.device_count() >= x:
|
181 |
+
return func(*args, **kwargs)
|
182 |
+
sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)
|
183 |
+
|
184 |
+
return wrapper
|
185 |
+
|
186 |
+
return decorator
|
187 |
+
|
188 |
+
|
189 |
+
# This decorator helps avoiding initializing cuda while testing other backends
|
190 |
+
def nccl_skip_if_lt_x_gpu(backend, x):
|
191 |
+
def decorator(func):
|
192 |
+
@wraps(func)
|
193 |
+
def wrapper(*args, **kwargs):
|
194 |
+
if backend != "nccl":
|
195 |
+
return func(*args, **kwargs)
|
196 |
+
if torch.cuda.is_available() and torch.cuda.device_count() >= x:
|
197 |
+
return func(*args, **kwargs)
|
198 |
+
sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)
|
199 |
+
|
200 |
+
return wrapper
|
201 |
+
|
202 |
+
return decorator
|
203 |
+
|
204 |
+
|
205 |
+
def verify_ddp_error_logged(model_DDP, err_substr):
|
206 |
+
# Verify error was logged in ddp_logging_data.
|
207 |
+
ddp_logging_data = model_DDP._get_ddp_logging_data()
|
208 |
+
assert "iteration" in ddp_logging_data
|
209 |
+
assert "has_error" in ddp_logging_data
|
210 |
+
assert "error" in ddp_logging_data
|
211 |
+
logging_err = ddp_logging_data["error"]
|
212 |
+
# Remove C++ stacktrace if needed.
|
213 |
+
actual = (
|
214 |
+
err_substr
|
215 |
+
if err_substr.find("\nException raised from ") == -1
|
216 |
+
else err_substr.split("\nException raised from ")[0]
|
217 |
+
)
|
218 |
+
assert (
|
219 |
+
actual in logging_err
|
220 |
+
), f"Did not find expected {actual} in ddp logging data error: {logging_err}"
|
221 |
+
|
222 |
+
|
223 |
+
def with_nccl_blocking_wait(func):
|
224 |
+
"""
|
225 |
+
Convenience decorator to set/unset TORCH_NCCL_BLOCKING_WAIT flag. Note that use of
|
226 |
+
this decorator will override the setting of TORCH_NCCL_ASYNC_ERROR_HANDLING for
|
227 |
+
the particular test. After the test, both TORCH_NCCL_BLOCKING_WAIT and
|
228 |
+
TORCH_NCCL_ASYNC_ERROR_HANDLING will be restored to their original values.
|
229 |
+
"""
|
230 |
+
|
231 |
+
@wraps(func)
|
232 |
+
def wrapper(*args, **kwargs):
|
233 |
+
# Save and unset TORCH_NCCL_ASYNC_ERROR_HANDLING
|
234 |
+
try:
|
235 |
+
cached_nccl_async_error_handling: Union[str, None] = os.environ[
|
236 |
+
"TORCH_NCCL_ASYNC_ERROR_HANDLING"
|
237 |
+
]
|
238 |
+
del os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"]
|
239 |
+
except KeyError:
|
240 |
+
# TORCH_NCCL_ASYNC_ERROR_HANDLING was unset
|
241 |
+
cached_nccl_async_error_handling = None
|
242 |
+
|
243 |
+
# Save val of TORCH_NCCL_BLOCKING_WAIT and set it.
|
244 |
+
try:
|
245 |
+
cached_nccl_blocking_wait: Union[str, None] = os.environ[
|
246 |
+
"TORCH_NCCL_BLOCKING_WAIT"
|
247 |
+
]
|
248 |
+
except KeyError:
|
249 |
+
cached_nccl_blocking_wait = None
|
250 |
+
finally:
|
251 |
+
os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"
|
252 |
+
|
253 |
+
try:
|
254 |
+
ret = func(*args, **kwargs)
|
255 |
+
return ret
|
256 |
+
finally:
|
257 |
+
# restore old values.
|
258 |
+
if cached_nccl_async_error_handling is not None:
|
259 |
+
os.environ[
|
260 |
+
"TORCH_NCCL_ASYNC_ERROR_HANDLING"
|
261 |
+
] = cached_nccl_async_error_handling
|
262 |
+
|
263 |
+
if cached_nccl_blocking_wait is not None:
|
264 |
+
os.environ["TORCH_NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait
|
265 |
+
|
266 |
+
return wrapper
|
267 |
+
|
268 |
+
|
269 |
+
def with_dist_debug_levels(levels):
|
270 |
+
"""
|
271 |
+
Runs a test for each distributed debug level specified in levels.
|
272 |
+
"""
|
273 |
+
|
274 |
+
def decorator(func):
|
275 |
+
@wraps(func)
|
276 |
+
def wrapper(*args, **kwargs):
|
277 |
+
old_level = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None)
|
278 |
+
for level in levels:
|
279 |
+
os.environ["TORCH_DISTRIBUTED_DEBUG"] = level
|
280 |
+
c10d.set_debug_level_from_env()
|
281 |
+
ret = func(*args, **kwargs)
|
282 |
+
c10d.barrier()
|
283 |
+
if old_level is not None:
|
284 |
+
os.environ["TORCH_DISTRIBUTED_DEBUG"] = old_level
|
285 |
+
# Only returns test return for last test, but since these are
|
286 |
+
# unittests the return value is not really used and earlier tests
|
287 |
+
# would've raised had they failed.
|
288 |
+
return ret
|
289 |
+
|
290 |
+
return wrapper
|
291 |
+
|
292 |
+
return decorator
|
293 |
+
|
294 |
+
|
295 |
+
def requires_gloo():
|
296 |
+
return skip_but_pass_in_sandcastle_if(
|
297 |
+
not c10d.is_gloo_available(),
|
298 |
+
"c10d was not compiled with the Gloo backend",
|
299 |
+
)
|
300 |
+
|
301 |
+
|
302 |
+
def requires_nccl_version(version, msg):
|
303 |
+
if not c10d.is_nccl_available():
|
304 |
+
return skip_but_pass_in_sandcastle(
|
305 |
+
"c10d was not compiled with the NCCL backend",
|
306 |
+
)
|
307 |
+
else:
|
308 |
+
return skip_but_pass_in_sandcastle_if(
|
309 |
+
torch.cuda.nccl.version() < version,
|
310 |
+
"Requires NCCL version greater than or equal to: {}, found: {}, reason: {}".format(
|
311 |
+
version, torch.cuda.nccl.version(), msg
|
312 |
+
),
|
313 |
+
)
|
314 |
+
|
315 |
+
|
316 |
+
def requires_nccl():
|
317 |
+
return skip_but_pass_in_sandcastle_if(
|
318 |
+
not c10d.is_nccl_available(),
|
319 |
+
"c10d was not compiled with the NCCL backend",
|
320 |
+
)
|
321 |
+
|
322 |
+
def requires_ucc():
|
323 |
+
return skip_but_pass_in_sandcastle_if(
|
324 |
+
not c10d.is_ucc_available(),
|
325 |
+
"c10d was not compiled with the UCC backend",
|
326 |
+
)
|
327 |
+
|
328 |
+
def requires_mpi():
|
329 |
+
return skip_but_pass_in_sandcastle_if(
|
330 |
+
not c10d.is_mpi_available(),
|
331 |
+
"c10d was not compiled with the MPI backend",
|
332 |
+
)
|
333 |
+
|
334 |
+
|
335 |
+
def skip_if_rocm(func):
|
336 |
+
"""Skips a test for ROCm"""
|
337 |
+
func.skip_if_rocm = True
|
338 |
+
|
339 |
+
@wraps(func)
|
340 |
+
def wrapper(*args, **kwargs):
|
341 |
+
if not TEST_WITH_ROCM:
|
342 |
+
return func(*args, **kwargs)
|
343 |
+
sys.exit(TEST_SKIPS["skipIfRocm"].exit_code)
|
344 |
+
|
345 |
+
return wrapper
|
346 |
+
|
347 |
+
|
348 |
+
def skip_if_win32():
|
349 |
+
return skip_but_pass_in_sandcastle_if(
|
350 |
+
sys.platform == "win32",
|
351 |
+
"This unit test case is not supported on Windows platform",
|
352 |
+
)
|
353 |
+
|
354 |
+
|
355 |
+
@retry_on_connect_failures
|
356 |
+
def create_tcp_store(
|
357 |
+
addr="localhost",
|
358 |
+
world_size=1,
|
359 |
+
is_master=True,
|
360 |
+
timeout=timedelta(minutes=5),
|
361 |
+
wait_for_workers=True,
|
362 |
+
jit_class=False,
|
363 |
+
use_libuv=False
|
364 |
+
):
|
365 |
+
"""
|
366 |
+
Creates a TCP store. Retries if the chosen port is already in use.
|
367 |
+
"""
|
368 |
+
port = find_free_port()
|
369 |
+
if jit_class:
|
370 |
+
timeout_millisecond = int(timeout / timedelta(milliseconds=1))
|
371 |
+
return torch.classes.dist_c10d.TCPStore(
|
372 |
+
addr, port, world_size, is_master, timeout_millisecond
|
373 |
+
)
|
374 |
+
else:
|
375 |
+
return c10d.TCPStore(
|
376 |
+
addr, port, world_size, is_master, wait_for_workers=wait_for_workers, use_libuv=use_libuv
|
377 |
+
)
|
378 |
+
|
379 |
+
|
380 |
+
if TEST_WITH_TSAN:
|
381 |
+
# TSAN runs much slower.
|
382 |
+
TIMEOUT_DEFAULT = 500
|
383 |
+
else:
|
384 |
+
TIMEOUT_DEFAULT = int(os.getenv('DISTRIBUTED_TESTS_DEFAULT_TIMEOUT', '300'))
|
385 |
+
TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400}
|
386 |
+
|
387 |
+
|
388 |
+
# https://github.com/pytorch/pytorch/issues/75665
|
389 |
+
if TEST_WITH_ROCM:
|
390 |
+
TIMEOUT_OVERRIDE["test_join_kwargs"] = 200
|
391 |
+
|
392 |
+
|
393 |
+
def create_device(interface=None):
|
394 |
+
if sys.platform == "win32" or interface is None:
|
395 |
+
return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1")
|
396 |
+
else:
|
397 |
+
return c10d.ProcessGroupGloo.create_device(interface=interface)
|
398 |
+
|
399 |
+
|
400 |
+
def get_timeout(test_id) -> int:
|
401 |
+
return TIMEOUT_OVERRIDE.get(test_id.split(".")[-1], TIMEOUT_DEFAULT)
|
402 |
+
|
403 |
+
|
404 |
+
@contextmanager
|
405 |
+
def captured_output():
|
406 |
+
new_out, new_err = StringIO(), StringIO()
|
407 |
+
old_out, old_err = sys.stdout, sys.stderr
|
408 |
+
try:
|
409 |
+
sys.stdout, sys.stderr = new_out, new_err
|
410 |
+
yield sys.stdout, sys.stderr
|
411 |
+
finally:
|
412 |
+
sys.stdout, sys.stderr = old_out, old_err
|
413 |
+
|
414 |
+
|
415 |
+
def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1):
|
416 |
+
"""
|
417 |
+
Generate a number of basic test cases for sparse reduction.
|
418 |
+
These cover tensors with a varying number of sparse dimensions and a varying
|
419 |
+
number of dense dimensions. The only reduction operation we support is sum.
|
420 |
+
"""
|
421 |
+
|
422 |
+
def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0):
|
423 |
+
# First sparse dimension is [0..rank].
|
424 |
+
# Subsequent dimensions are always 0, so we know there is
|
425 |
+
# a non-empty intersection between any two sparse tensors.
|
426 |
+
indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1))
|
427 |
+
shape = [world_size] + [2 for _ in range(dense_dims)]
|
428 |
+
for _ in range(sparse_dims - 1):
|
429 |
+
indices = torch.cat((indices, torch.zeros(1, rank + 1)))
|
430 |
+
shape.append(world_size)
|
431 |
+
values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])
|
432 |
+
return torch.sparse_coo_tensor(indices, values, shape)
|
433 |
+
|
434 |
+
def compute_sum(fn, world_size: int):
|
435 |
+
return reduce(
|
436 |
+
operator.add, [fn(rank, world_size) for rank in range(world_size)]
|
437 |
+
)
|
438 |
+
|
439 |
+
return [
|
440 |
+
(
|
441 |
+
[
|
442 |
+
fn(num_inputs * rank + i, num_inputs * world_size)
|
443 |
+
for i in range(num_inputs)
|
444 |
+
],
|
445 |
+
[compute_sum(fn, num_inputs * world_size) for i in range(num_inputs)],
|
446 |
+
)
|
447 |
+
for fn in [
|
448 |
+
partial(generate, sparse_dims=1),
|
449 |
+
partial(generate, sparse_dims=2),
|
450 |
+
partial(generate, sparse_dims=3),
|
451 |
+
partial(generate, dense_dims=1),
|
452 |
+
partial(generate, dense_dims=2),
|
453 |
+
partial(generate, dense_dims=3),
|
454 |
+
]
|
455 |
+
]
|
456 |
+
|
457 |
+
|
458 |
+
# HELPER FOR MULTIGPU TESTS
|
459 |
+
def init_multigpu_helper(world_size: int, backend: str):
|
460 |
+
"""Multigpu tests are designed to simulate the multi nodes with multi
|
461 |
+
GPUs on each node. Nccl backend requires equal #GPUs in each process.
|
462 |
+
On a single node, all visible GPUs are evenly
|
463 |
+
divided to subsets, each process only uses a subset.
|
464 |
+
"""
|
465 |
+
nGPUs = torch.cuda.device_count()
|
466 |
+
visible_devices = range(nGPUs)
|
467 |
+
|
468 |
+
# If rank is less than or equal to number of available GPU's
|
469 |
+
# then each rank can be mapped to corresponding GPU.
|
470 |
+
nGPUs_per_process = 1
|
471 |
+
if world_size > nGPUs:
|
472 |
+
nGPUs_per_process = nGPUs // world_size
|
473 |
+
rank_to_GPU = {
|
474 |
+
i: list(visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process])
|
475 |
+
for i in range(world_size)
|
476 |
+
}
|
477 |
+
return rank_to_GPU
|
478 |
+
|
479 |
+
|
480 |
+
tmp_dir: Optional[tempfile.TemporaryDirectory] = None
|
481 |
+
|
482 |
+
|
483 |
+
def initialize_temp_directories(init_method: Optional[str] = None) -> None:
|
484 |
+
global tmp_dir
|
485 |
+
tmp_dir = tempfile.TemporaryDirectory()
|
486 |
+
os.environ["TEMP_DIR"] = tmp_dir.name
|
487 |
+
os.mkdir(os.path.join(tmp_dir.name, "barrier"))
|
488 |
+
os.mkdir(os.path.join(tmp_dir.name, "test_dir"))
|
489 |
+
init_dir_path = os.path.join(tmp_dir.name, "init_dir")
|
490 |
+
os.mkdir(init_dir_path)
|
491 |
+
# Set init method if specified.
|
492 |
+
if init_method is not None:
|
493 |
+
os.environ["INIT_METHOD"] = init_method
|
494 |
+
else:
|
495 |
+
os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join(
|
496 |
+
init_dir_path, "shared_init_file"
|
497 |
+
)
|
498 |
+
|
499 |
+
|
500 |
+
def cleanup_temp_dir() -> None:
|
501 |
+
if tmp_dir is not None:
|
502 |
+
tmp_dir.cleanup()
|
503 |
+
|
504 |
+
|
505 |
+
# Most tests operate with this worldsize
|
506 |
+
DEFAULT_WORLD_SIZE = 4
|
507 |
+
|
508 |
+
# [How does MultiProcessTestCase work?]
|
509 |
+
# Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by
|
510 |
+
# default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an
|
511 |
+
# example which inherits from this class. Its `Setup()` methods calls into
|
512 |
+
# `MultiProcessTestCase._spawn_processes()` which spawns `world_size()`
|
513 |
+
# subprocesses. During the spawn, the main process passes the test name to
|
514 |
+
# subprocesses, and the name is acquired from self.id(). The subprocesses
|
515 |
+
# then use the provided test function name to retrieve the function attribute
|
516 |
+
# from the test instance and run it. The main process simply waits for all
|
517 |
+
# subprocesses to join.
|
518 |
+
|
519 |
+
|
520 |
+
class MultiProcessTestCase(TestCase):
|
521 |
+
MAIN_PROCESS_RANK = -1
|
522 |
+
# This exit code is used to indicate that the test code had an error and
|
523 |
+
# exited abnormally. There are certain tests that might use sys.exit() to
|
524 |
+
# simulate failures and in those cases, we can't have an exit code of 0,
|
525 |
+
# but we still want to ensure we didn't run into any other errors.
|
526 |
+
TEST_ERROR_EXIT_CODE = 10
|
527 |
+
|
528 |
+
# do not early terminate for distributed tests.
|
529 |
+
def _should_stop_test_suite(self) -> bool:
|
530 |
+
return False
|
531 |
+
|
532 |
+
@property
|
533 |
+
def world_size(self) -> int:
|
534 |
+
return DEFAULT_WORLD_SIZE
|
535 |
+
|
536 |
+
def join_or_run(self, fn):
|
537 |
+
@wraps(fn)
|
538 |
+
def wrapper(self):
|
539 |
+
if self.rank == self.MAIN_PROCESS_RANK:
|
540 |
+
self._join_processes(fn)
|
541 |
+
else:
|
542 |
+
fn()
|
543 |
+
|
544 |
+
return types.MethodType(wrapper, self)
|
545 |
+
|
546 |
+
# The main process spawns N subprocesses that run the test.
|
547 |
+
# Constructor patches current instance test method to
|
548 |
+
# assume the role of the main process and join its subprocesses,
|
549 |
+
# or run the underlying test function.
|
550 |
+
def __init__(self, method_name: str = "runTest") -> None:
|
551 |
+
super().__init__(method_name)
|
552 |
+
fn = getattr(self, method_name)
|
553 |
+
setattr(self, method_name, self.join_or_run(fn))
|
554 |
+
|
555 |
+
def setUp(self) -> None:
|
556 |
+
super().setUp()
|
557 |
+
self.skip_return_code_checks = [] # type: ignore[var-annotated]
|
558 |
+
self.processes = [] # type: ignore[var-annotated]
|
559 |
+
self.rank = self.MAIN_PROCESS_RANK
|
560 |
+
self.file_name = tempfile.NamedTemporaryFile(delete=False).name
|
561 |
+
# pid to pipe consisting of error message from process.
|
562 |
+
self.pid_to_pipe = {} # type: ignore[var-annotated]
|
563 |
+
|
564 |
+
def tearDown(self) -> None:
|
565 |
+
super().tearDown()
|
566 |
+
for p in self.processes:
|
567 |
+
p.terminate()
|
568 |
+
# Each Process instance holds a few open file descriptors. The unittest
|
569 |
+
# runner creates a new TestCase instance for each test method and keeps
|
570 |
+
# it alive until the end of the entire suite. We must thus reset the
|
571 |
+
# processes to prevent an effective file descriptor leak.
|
572 |
+
self.processes = []
|
573 |
+
|
574 |
+
def _current_test_name(self) -> str:
|
575 |
+
# self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
|
576 |
+
return self.id().split(".")[-1]
|
577 |
+
|
578 |
+
def _start_processes(self, proc) -> None:
|
579 |
+
self.processes = []
|
580 |
+
for rank in range(int(self.world_size)):
|
581 |
+
parent_conn, child_conn = torch.multiprocessing.Pipe()
|
582 |
+
process = proc(
|
583 |
+
target=self.__class__._run,
|
584 |
+
name="process " + str(rank),
|
585 |
+
args=(rank, self._current_test_name(), self.file_name, child_conn),
|
586 |
+
)
|
587 |
+
process.start()
|
588 |
+
logger.info("Started process %s with pid %s", rank, process.pid)
|
589 |
+
self.pid_to_pipe[process.pid] = parent_conn
|
590 |
+
self.processes.append(process)
|
591 |
+
|
592 |
+
def _spawn_processes(self) -> None:
|
593 |
+
proc = torch.multiprocessing.get_context("spawn").Process
|
594 |
+
self._start_processes(proc)
|
595 |
+
|
596 |
+
class Event(Enum):
|
597 |
+
GET_TRACEBACK = 1
|
598 |
+
|
599 |
+
@staticmethod
|
600 |
+
def _event_listener(parent_pipe, signal_pipe, rank: int):
|
601 |
+
logger.info("Starting event listener thread for rank %s", rank)
|
602 |
+
while True:
|
603 |
+
ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe])
|
604 |
+
|
605 |
+
if parent_pipe in ready_pipes:
|
606 |
+
|
607 |
+
if parent_pipe.closed:
|
608 |
+
logger.info(
|
609 |
+
"Pipe closed for process %s, stopping event listener thread", rank
|
610 |
+
)
|
611 |
+
return
|
612 |
+
|
613 |
+
event = parent_pipe.recv()
|
614 |
+
logger.info("Received event %s on process %s", event, rank)
|
615 |
+
|
616 |
+
if event == MultiProcessTestCase.Event.GET_TRACEBACK:
|
617 |
+
# Return traceback to the parent process.
|
618 |
+
with tempfile.NamedTemporaryFile(mode="r+") as tmp_file:
|
619 |
+
faulthandler.dump_traceback(tmp_file)
|
620 |
+
# Flush buffers and seek to read from the beginning
|
621 |
+
tmp_file.flush()
|
622 |
+
tmp_file.seek(0)
|
623 |
+
parent_pipe.send(tmp_file.read())
|
624 |
+
|
625 |
+
logger.info("Process %s sent traceback", rank)
|
626 |
+
|
627 |
+
if signal_pipe in ready_pipes:
|
628 |
+
return
|
629 |
+
|
630 |
+
@classmethod
|
631 |
+
def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None:
|
632 |
+
self = cls(test_name)
|
633 |
+
self.rank = rank
|
634 |
+
self.file_name = file_name
|
635 |
+
self.run_test(test_name, parent_pipe)
|
636 |
+
|
637 |
+
def run_test(self, test_name: str, parent_pipe) -> None:
|
638 |
+
# Start event listener thread.
|
639 |
+
signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False)
|
640 |
+
event_listener_thread = threading.Thread(
|
641 |
+
target=MultiProcessTestCase._event_listener,
|
642 |
+
args=(parent_pipe, signal_recv_pipe, self.rank),
|
643 |
+
daemon=True,
|
644 |
+
)
|
645 |
+
event_listener_thread.start()
|
646 |
+
if sys.platform != "win32" and sys.platform != "darwin":
|
647 |
+
# Register signal handler to dump stack traces on FATALs.
|
648 |
+
# Windows and MacOS do not support the signal handlers.
|
649 |
+
torch._C._set_print_stack_traces_on_fatal_signal(True)
|
650 |
+
# Show full C++ stacktraces when a Python error originating from C++ is raised.
|
651 |
+
os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"
|
652 |
+
|
653 |
+
# self.id() == e.g. '__main__.TestDistributed.test_get_rank'
|
654 |
+
# We're retrieving a corresponding test and executing it.
|
655 |
+
try:
|
656 |
+
getattr(self, test_name)()
|
657 |
+
except unittest.SkipTest as se:
|
658 |
+
logger.info(
|
659 |
+
"Process %s skipping test %s for following reason: %s", self.rank, test_name, str(se)
|
660 |
+
)
|
661 |
+
sys.exit(TEST_SKIPS["generic"].exit_code)
|
662 |
+
except Exception as e:
|
663 |
+
logger.error(
|
664 |
+
"Caught exception: \n%s exiting "
|
665 |
+
"process %s with exit code: %s",
|
666 |
+
traceback.format_exc(), self.rank, MultiProcessTestCase.TEST_ERROR_EXIT_CODE
|
667 |
+
)
|
668 |
+
# Send error to parent process.
|
669 |
+
parent_pipe.send(traceback.format_exc())
|
670 |
+
sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE)
|
671 |
+
finally:
|
672 |
+
if signal_send_pipe is not None:
|
673 |
+
signal_send_pipe.send(None)
|
674 |
+
|
675 |
+
assert event_listener_thread is not None
|
676 |
+
event_listener_thread.join()
|
677 |
+
# Close pipe after done with test.
|
678 |
+
parent_pipe.close()
|
679 |
+
|
680 |
+
def _get_timedout_process_traceback(self) -> None:
|
681 |
+
pipes = []
|
682 |
+
for i, process in enumerate(self.processes):
|
683 |
+
if process.exitcode is None:
|
684 |
+
pipe = self.pid_to_pipe[process.pid]
|
685 |
+
try:
|
686 |
+
pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK)
|
687 |
+
pipes.append((i, pipe))
|
688 |
+
except ConnectionError as e:
|
689 |
+
logger.error(
|
690 |
+
"Encountered error while trying to get traceback for process %s: %s", i, e
|
691 |
+
)
|
692 |
+
|
693 |
+
# Wait for results.
|
694 |
+
for rank, pipe in pipes:
|
695 |
+
try:
|
696 |
+
# Wait for traceback
|
697 |
+
if pipe.poll(5):
|
698 |
+
if pipe.closed:
|
699 |
+
logger.info(
|
700 |
+
"Pipe closed for process %s, cannot retrieve traceback", rank
|
701 |
+
)
|
702 |
+
continue
|
703 |
+
|
704 |
+
traceback = pipe.recv()
|
705 |
+
logger.error(
|
706 |
+
"Process %s timed out with traceback: \n\n%s", rank, traceback
|
707 |
+
)
|
708 |
+
else:
|
709 |
+
logger.error(
|
710 |
+
"Could not retrieve traceback for timed out process: %s", rank
|
711 |
+
)
|
712 |
+
except ConnectionError as e:
|
713 |
+
logger.error(
|
714 |
+
"Encountered error while trying to get traceback for process %s: %s", rank, e
|
715 |
+
)
|
716 |
+
|
717 |
+
def _join_processes(self, fn) -> None:
|
718 |
+
timeout = get_timeout(self.id())
|
719 |
+
start_time = time.time()
|
720 |
+
subprocess_error = False
|
721 |
+
try:
|
722 |
+
while True:
|
723 |
+
# check to see if any subprocess exited with an error early.
|
724 |
+
for (i, p) in enumerate(self.processes):
|
725 |
+
# This is the exit code processes exit with if they
|
726 |
+
# encountered an exception.
|
727 |
+
if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE:
|
728 |
+
print(
|
729 |
+
f"Process {i} terminated with exit code {p.exitcode}, terminating remaining processes."
|
730 |
+
)
|
731 |
+
active_children = torch.multiprocessing.active_children()
|
732 |
+
for ac in active_children:
|
733 |
+
ac.terminate()
|
734 |
+
subprocess_error = True
|
735 |
+
break
|
736 |
+
if subprocess_error:
|
737 |
+
break
|
738 |
+
# All processes have joined cleanly if they all a valid exitcode
|
739 |
+
if all(p.exitcode is not None for p in self.processes):
|
740 |
+
break
|
741 |
+
# Check if we should time out the test. If so, we terminate each process.
|
742 |
+
elapsed = time.time() - start_time
|
743 |
+
if elapsed > timeout:
|
744 |
+
self._get_timedout_process_traceback()
|
745 |
+
print(
|
746 |
+
f"Timing out after {timeout} seconds and killing subprocesses."
|
747 |
+
)
|
748 |
+
for p in self.processes:
|
749 |
+
p.terminate()
|
750 |
+
break
|
751 |
+
# Sleep to avoid excessive busy polling.
|
752 |
+
time.sleep(0.1)
|
753 |
+
|
754 |
+
elapsed_time = time.time() - start_time
|
755 |
+
|
756 |
+
if fn in self.skip_return_code_checks:
|
757 |
+
self._check_no_test_errors(elapsed_time)
|
758 |
+
else:
|
759 |
+
self._check_return_codes(elapsed_time)
|
760 |
+
finally:
|
761 |
+
# Close all pipes
|
762 |
+
for pipe in self.pid_to_pipe.values():
|
763 |
+
pipe.close()
|
764 |
+
|
765 |
+
def _check_no_test_errors(self, elapsed_time) -> None:
|
766 |
+
"""
|
767 |
+
Checks that we didn't have any errors thrown in the child processes.
|
768 |
+
"""
|
769 |
+
for i, p in enumerate(self.processes):
|
770 |
+
if p.exitcode is None:
|
771 |
+
raise RuntimeError(
|
772 |
+
f"Process {i} timed out after {elapsed_time} seconds"
|
773 |
+
)
|
774 |
+
self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode)
|
775 |
+
|
776 |
+
def _check_return_codes(self, elapsed_time) -> None:
|
777 |
+
"""
|
778 |
+
Checks that the return codes of all spawned processes match, and skips
|
779 |
+
tests if they returned a return code indicating a skipping condition.
|
780 |
+
"""
|
781 |
+
# If no processes are spawned, there is nothing to check.
|
782 |
+
if not self.processes:
|
783 |
+
logger.warning("Note: no subprocesses were spawned, test was likely skipped.")
|
784 |
+
return
|
785 |
+
|
786 |
+
first_process = self.processes[0]
|
787 |
+
# first, we check if there are errors in actual processes
|
788 |
+
# (via TEST_ERROR_EXIT CODE), and raise an exception for those.
|
789 |
+
# the reason we do this is to attempt to raise a more helpful error
|
790 |
+
# message than "Process x terminated/timed out"
|
791 |
+
# TODO: we should pipe the exception of the failed subprocess here.
|
792 |
+
# Currently, the actual exception is displayed as a logging output.
|
793 |
+
errored_processes = [
|
794 |
+
(i, p)
|
795 |
+
for i, p in enumerate(self.processes)
|
796 |
+
if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE
|
797 |
+
]
|
798 |
+
if errored_processes:
|
799 |
+
error = ""
|
800 |
+
for i, process in errored_processes:
|
801 |
+
# Get error from pipe.
|
802 |
+
error_message = self.pid_to_pipe[process.pid].recv()
|
803 |
+
error += (
|
804 |
+
"Process {} exited with error code {} and exception:\n{}\n".format(
|
805 |
+
i, MultiProcessTestCase.TEST_ERROR_EXIT_CODE, error_message
|
806 |
+
)
|
807 |
+
)
|
808 |
+
|
809 |
+
raise RuntimeError(error)
|
810 |
+
# If no process exited uncleanly, we check for timeouts, and then ensure
|
811 |
+
# each process exited cleanly.
|
812 |
+
for i, p in enumerate(self.processes):
|
813 |
+
if p.exitcode is None:
|
814 |
+
raise RuntimeError(
|
815 |
+
f"Process {i} terminated or timed out after {elapsed_time} seconds"
|
816 |
+
)
|
817 |
+
self.assertEqual(
|
818 |
+
p.exitcode,
|
819 |
+
first_process.exitcode,
|
820 |
+
msg="Expect process {} exit code to match Process 0 exit code of {}, but got {}".format(
|
821 |
+
i, first_process.exitcode, p.exitcode
|
822 |
+
),
|
823 |
+
)
|
824 |
+
for skip in TEST_SKIPS.values():
|
825 |
+
if first_process.exitcode == skip.exit_code:
|
826 |
+
if IS_SANDCASTLE:
|
827 |
+
# Don't use unittest.skip to skip the test on sandcastle
|
828 |
+
# since it creates tasks for skipped tests assuming there
|
829 |
+
# is some follow-up needed. Instead just "pass" the test
|
830 |
+
# with an appropriate message.
|
831 |
+
logger.info(
|
832 |
+
"Skipping %s on sandcastle for the following reason: %s", self.id(), skip.message
|
833 |
+
)
|
834 |
+
return
|
835 |
+
else:
|
836 |
+
raise unittest.SkipTest(skip.message)
|
837 |
+
self.assertEqual(
|
838 |
+
first_process.exitcode,
|
839 |
+
0,
|
840 |
+
msg=f"Expected zero exit code but got {first_process.exitcode} for pid: {first_process.pid}",
|
841 |
+
)
|
842 |
+
|
843 |
+
@property
|
844 |
+
def is_master(self) -> bool:
|
845 |
+
return self.rank == 0
|
846 |
+
|
847 |
+
|
848 |
+
# Cannot use functools.cache as it requires python 3.9
|
849 |
+
EFA_PROBE_RESULT = None
|
850 |
+
|
851 |
+
|
852 |
+
def has_efa() -> bool:
|
853 |
+
"""
|
854 |
+
If shell command `fi_info -p efa -t FI_EP_RDM` returns exit code 0 then we assume that the machine has
|
855 |
+
Libfabric EFA interfaces and EFA software components installed,
|
856 |
+
see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html.
|
857 |
+
"""
|
858 |
+
global EFA_PROBE_RESULT
|
859 |
+
if EFA_PROBE_RESULT is not None:
|
860 |
+
return EFA_PROBE_RESULT
|
861 |
+
|
862 |
+
try:
|
863 |
+
EFA_PROBE_RESULT = (
|
864 |
+
subprocess.run(["fi_info", "-p", "efa", "-t", "FI_EP_RDM"], check=False).returncode == 0
|
865 |
+
)
|
866 |
+
except FileNotFoundError:
|
867 |
+
EFA_PROBE_RESULT = False
|
868 |
+
return EFA_PROBE_RESULT
|
869 |
+
|
870 |
+
|
871 |
+
def tp_transports():
|
872 |
+
"""
|
873 |
+
If the machine has Libfabric EFA interfaces and EFA software components installed it may cause
|
874 |
+
'RuntimeError: In operator() at tensorpipe/common/ibv.h:172 "": Operation not supported' if tensorpipe
|
875 |
+
uses InfiniBand transport, so we exclude it from tensorpipe transports,
|
876 |
+
see https://github.com/pytorch/pytorch/issues/73885 and https://github.com/pytorch/pytorch/issues/65022
|
877 |
+
"""
|
878 |
+
return ["shm", "uv"] if has_efa() else None
|
879 |
+
|
880 |
+
|
881 |
+
def spawn_threads_and_init_comms(
|
882 |
+
func=None, timeout=TIMEOUT_DEFAULT, world_size=DEFAULT_WORLD_SIZE
|
883 |
+
):
|
884 |
+
"""
|
885 |
+
Wrapper to use with a test method
|
886 |
+
"""
|
887 |
+
if func is None:
|
888 |
+
return partial(
|
889 |
+
spawn_threads_and_init_comms, timeout=timeout, world_size=world_size
|
890 |
+
)
|
891 |
+
|
892 |
+
|
893 |
+
def _run_test_method_with_multi_threads(world_size, callback):
|
894 |
+
world = _install_threaded_pg()
|
895 |
+
global_store = c10d.HashStore()
|
896 |
+
|
897 |
+
def world_is_valid():
|
898 |
+
return world == c10d.distributed_c10d._world
|
899 |
+
|
900 |
+
def worker(rank, world_pg, store):
|
901 |
+
c10d.init_process_group(
|
902 |
+
backend="threaded", rank=rank, world_size=world_size, store=store
|
903 |
+
)
|
904 |
+
try:
|
905 |
+
callback()
|
906 |
+
except BaseException as ex:
|
907 |
+
# Exceptions are handled in MultiThreadedTestCase
|
908 |
+
MultiThreadedTestCase.exception_queue.put((rank, sys.exc_info()))
|
909 |
+
ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads
|
910 |
+
finally:
|
911 |
+
if world_is_valid():
|
912 |
+
c10d.destroy_process_group()
|
913 |
+
|
914 |
+
threads = []
|
915 |
+
for rank in range(world_size):
|
916 |
+
t = threading.Thread(target=worker, args=(rank, world, global_store))
|
917 |
+
t.start()
|
918 |
+
threads.append(t)
|
919 |
+
|
920 |
+
return threads
|
921 |
+
|
922 |
+
|
923 |
+
@wraps(func)
|
924 |
+
def wrapper(self, *args, **kwargs):
|
925 |
+
# TODO: get test name from kwargs
|
926 |
+
torch._C._distributed_c10d._set_thread_isolation_mode(True)
|
927 |
+
try:
|
928 |
+
threads = _run_test_method_with_multi_threads(world_size, lambda: func(self, *args, **kwargs))
|
929 |
+
# join and error handling
|
930 |
+
MultiThreadedTestCase._join_threads(threads, func)
|
931 |
+
finally:
|
932 |
+
torch._C._distributed_c10d._set_thread_isolation_mode(False)
|
933 |
+
|
934 |
+
return wrapper
|
935 |
+
|
936 |
+
|
937 |
+
class MultiThreadedTestCase(TestCase):
|
938 |
+
"""
|
939 |
+
Test runner that runs all tests with the in-proc process group using
|
940 |
+
multiple threads with the threaded process group.
|
941 |
+
|
942 |
+
Each test spawns world_size threads and run the test method in each thread.
|
943 |
+
|
944 |
+
Difference from regular MultiProcess test runner:
|
945 |
+
Must explicitly defines SetUp and call self._spawn_threads() to run the tests.
|
946 |
+
Cannot use setUp / tearDown (must use perThreadSetup / perThreadShutdown)
|
947 |
+
to set up / tear down each thread when running each test.
|
948 |
+
No global state possible
|
949 |
+
How bad of a limitation is this?
|
950 |
+
"""
|
951 |
+
exception_queue = queue.Queue()
|
952 |
+
|
953 |
+
MAIN_THREAD_RANK = -1
|
954 |
+
|
955 |
+
def join_or_run(self, fn):
|
956 |
+
@wraps(fn)
|
957 |
+
def wrapper(self):
|
958 |
+
if self.rank == self.MAIN_THREAD_RANK:
|
959 |
+
self._join_threads(self.threads, fn)
|
960 |
+
else:
|
961 |
+
fn()
|
962 |
+
|
963 |
+
return types.MethodType(wrapper, self)
|
964 |
+
|
965 |
+
def __init__(self, method_name: str = "runTest") -> None:
|
966 |
+
super().__init__(method_name)
|
967 |
+
test_fn = getattr(self, method_name, None)
|
968 |
+
setattr(self, method_name, self.join_or_run(test_fn))
|
969 |
+
|
970 |
+
def perThreadSetUp(self):
|
971 |
+
# super().setUp() # TestCase.setUp() calls torch.manual_seed()
|
972 |
+
pass
|
973 |
+
|
974 |
+
def perThreadTearDown(self):
|
975 |
+
pass
|
976 |
+
|
977 |
+
def setUp(self) -> None:
|
978 |
+
"""
|
979 |
+
setUp only set up things in the main thread, if you want to configure things
|
980 |
+
in the spawned threads, use perThreadSetUp
|
981 |
+
"""
|
982 |
+
super().setUp()
|
983 |
+
self.rank = self.MAIN_THREAD_RANK
|
984 |
+
self.threads = []
|
985 |
+
# Show full C++ stacktraces when a Python error originating from C++ is raised.
|
986 |
+
os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"
|
987 |
+
|
988 |
+
def tearDown(self):
|
989 |
+
"""
|
990 |
+
tearDown only set up things in the main thread, if you want to configure things
|
991 |
+
in the spawned threads, use perThreadTearDown
|
992 |
+
"""
|
993 |
+
super().tearDown()
|
994 |
+
self.threads = []
|
995 |
+
|
996 |
+
def _spawn_threads(self):
|
997 |
+
"""
|
998 |
+
class method to spawn threads and run test, use this method in the SetUp of your TestCase
|
999 |
+
"""
|
1000 |
+
torch._C._distributed_c10d._set_thread_isolation_mode(True)
|
1001 |
+
test_name = self._current_test_name
|
1002 |
+
# for each test case, we need to create thread local world, and a global store
|
1003 |
+
world = _install_threaded_pg()
|
1004 |
+
self.__class__.global_store = c10d.HashStore()
|
1005 |
+
|
1006 |
+
def world_is_valid():
|
1007 |
+
return world == c10d.distributed_c10d._world
|
1008 |
+
|
1009 |
+
if not world_is_valid():
|
1010 |
+
raise RuntimeError("Invalid world")
|
1011 |
+
|
1012 |
+
for rank in range(self.world_size):
|
1013 |
+
t = threading.Thread(target=self.__class__._run, args=(test_name, rank, self.world_size))
|
1014 |
+
t.start()
|
1015 |
+
self.threads.append(t)
|
1016 |
+
|
1017 |
+
@classmethod
|
1018 |
+
def _run(cls, test_name, rank, world_size):
|
1019 |
+
self = cls(test_name)
|
1020 |
+
self.rank = rank
|
1021 |
+
|
1022 |
+
# precision/rel_tol is a thread-local setting since it may be overridden per test, need to make
|
1023 |
+
# every thread have the same value. This would be relevant when we use op db tests, where it
|
1024 |
+
# states need to be set, e.g. when using instantiate_device_type_tests()
|
1025 |
+
# TODO: figure out a better way to do this
|
1026 |
+
if hasattr(self, "_tls"):
|
1027 |
+
self._tls = threading.local()
|
1028 |
+
self._tls.precision = TestCase._precision
|
1029 |
+
self._tls.rel_tol = TestCase._rel_tol
|
1030 |
+
|
1031 |
+
self.run_test_with_threaded_pg(test_name, rank, world_size)
|
1032 |
+
|
1033 |
+
def run_test_with_threaded_pg(self, test_name, rank, world_size):
|
1034 |
+
"""
|
1035 |
+
Run the current test associated with `test_name` using the threaded process group.
|
1036 |
+
"""
|
1037 |
+
c10d.init_process_group(
|
1038 |
+
backend="threaded", rank=rank, world_size=world_size, store=self.__class__.global_store
|
1039 |
+
)
|
1040 |
+
self.perThreadSetUp()
|
1041 |
+
|
1042 |
+
try:
|
1043 |
+
getattr(self, test_name)()
|
1044 |
+
except BaseException as ex:
|
1045 |
+
self.exception_queue.put((rank, sys.exc_info()))
|
1046 |
+
ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads
|
1047 |
+
finally:
|
1048 |
+
c10d.destroy_process_group()
|
1049 |
+
self.perThreadTearDown()
|
1050 |
+
|
1051 |
+
|
1052 |
+
@classmethod
|
1053 |
+
def _join_threads(cls, threads, fn):
|
1054 |
+
timeout = TIMEOUT_DEFAULT
|
1055 |
+
try:
|
1056 |
+
for idx, thread in enumerate(threads):
|
1057 |
+
thread.join(max(0, timeout))
|
1058 |
+
if thread.is_alive():
|
1059 |
+
MultiThreadedTestCase.exception_queue.put(
|
1060 |
+
(
|
1061 |
+
idx,
|
1062 |
+
(
|
1063 |
+
TimeoutError,
|
1064 |
+
TimeoutError(
|
1065 |
+
f"Rank failed to join in under {timeout} seconds"
|
1066 |
+
),
|
1067 |
+
None,
|
1068 |
+
),
|
1069 |
+
)
|
1070 |
+
)
|
1071 |
+
ProcessLocalGroup.reset()
|
1072 |
+
failed_ranks = []
|
1073 |
+
while not cls.exception_queue.empty():
|
1074 |
+
failure = cls.exception_queue.get()
|
1075 |
+
failed_ranks.append(failure)
|
1076 |
+
finally:
|
1077 |
+
_uninstall_threaded_pg()
|
1078 |
+
torch._C._distributed_c10d._set_thread_isolation_mode(False)
|
1079 |
+
|
1080 |
+
cls._check_return_codes(failed_ranks, timeout, fn)
|
1081 |
+
|
1082 |
+
@classmethod
|
1083 |
+
def _check_return_codes(cls, failed_ranks, timeout, fn):
|
1084 |
+
# Print based on exceptions raised from threads
|
1085 |
+
# SkipTest: print info for each thread
|
1086 |
+
# TimeoutError: raise RuntimeError for any timed out thread
|
1087 |
+
# Normal Exception: print error for each thread that raises exception
|
1088 |
+
# and raise a RuntimeError
|
1089 |
+
error_msg = ""
|
1090 |
+
skip_code = -1
|
1091 |
+
for rank, exc_info in failed_ranks:
|
1092 |
+
exc = exc_info[1]
|
1093 |
+
if isinstance(exc, unittest.SkipTest):
|
1094 |
+
logger.info(
|
1095 |
+
"Thread %s skipping test %s for following reason: %s", rank, fn, str(exc)
|
1096 |
+
)
|
1097 |
+
if skip_code < 0:
|
1098 |
+
skip_code = TEST_SKIPS["generic"].exit_code
|
1099 |
+
elif isinstance(exc, TimeoutError):
|
1100 |
+
msg = f"Thread {rank} terminated or timed out after {timeout} seconds\n"
|
1101 |
+
logger.error(msg)
|
1102 |
+
raise RuntimeError(msg)
|
1103 |
+
elif isinstance(exc, Exception):
|
1104 |
+
msg = "".join(traceback.format_exception(*exc_info))
|
1105 |
+
logger.error(
|
1106 |
+
"Caught exception: \n%s exiting thread %s", msg, rank
|
1107 |
+
)
|
1108 |
+
error_msg += (
|
1109 |
+
f"Thread {rank} exited with exception:\n{msg}\n"
|
1110 |
+
)
|
1111 |
+
elif isinstance(exc, SystemExit):
|
1112 |
+
if type(exc.code) == int and skip_code < 0:
|
1113 |
+
skip_code = exc.code
|
1114 |
+
|
1115 |
+
# check exceptions
|
1116 |
+
if len(error_msg) > 0:
|
1117 |
+
raise RuntimeError(error_msg)
|
1118 |
+
# check skip
|
1119 |
+
if skip_code > 0:
|
1120 |
+
for skip in TEST_SKIPS.values():
|
1121 |
+
if skip_code == skip.exit_code:
|
1122 |
+
if IS_SANDCASTLE:
|
1123 |
+
# "pass" the test with an appropriate message.
|
1124 |
+
logger.info(
|
1125 |
+
"Skipping %s on sandcastle for the following reason: %s", fn, skip.message
|
1126 |
+
)
|
1127 |
+
return
|
1128 |
+
else:
|
1129 |
+
raise unittest.SkipTest(skip.message)
|
1130 |
+
|
1131 |
+
@property
|
1132 |
+
def world_size(self) -> int:
|
1133 |
+
return DEFAULT_WORLD_SIZE
|
1134 |
+
|
1135 |
+
@property
|
1136 |
+
def _current_test_name(self) -> str:
|
1137 |
+
# self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
|
1138 |
+
return self.id().split(".")[-1]
|
1139 |
+
|
1140 |
+
def assertEqualOnRank(self, x, y, msg=None, *, rank=0):
|
1141 |
+
"""
|
1142 |
+
The reason we have this util function instead of
|
1143 |
+
self.assertEqual is that all threads share one CPU RNG,
|
1144 |
+
so the assertion result is only reliable on rank 0
|
1145 |
+
"""
|
1146 |
+
if self.rank == rank:
|
1147 |
+
self.assertEqual(x, y, msg)
|
1148 |
+
|
1149 |
+
def assertNotEqualOnRank(self, x, y, msg=None, *, rank=0):
|
1150 |
+
if self.rank == rank:
|
1151 |
+
self.assertNotEqual(x, y)
|
1152 |
+
|
1153 |
+
|
1154 |
+
class SaveForwardInputsModule(nn.Module):
|
1155 |
+
def __init__(
|
1156 |
+
self,
|
1157 |
+
forward_inputs: Dict[nn.Module, torch.Tensor],
|
1158 |
+
cast_forward_inputs: bool,
|
1159 |
+
) -> None:
|
1160 |
+
super().__init__()
|
1161 |
+
self.l = nn.Linear(100, 100)
|
1162 |
+
self.forward_inputs = forward_inputs
|
1163 |
+
self.cast_forward_inputs = cast_forward_inputs
|
1164 |
+
|
1165 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
1166 |
+
self.forward_inputs[self] = x
|
1167 |
+
return self.l(x.to(self.l.weight.dtype) if self.cast_forward_inputs else x)
|
1168 |
+
|
1169 |
+
|
1170 |
+
class SaveForwardInputsModel(nn.Module):
|
1171 |
+
def __init__(
|
1172 |
+
self,
|
1173 |
+
forward_inputs: Dict[nn.Module, torch.Tensor],
|
1174 |
+
cast_forward_inputs: bool,
|
1175 |
+
) -> None:
|
1176 |
+
super().__init__()
|
1177 |
+
self.c1 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs)
|
1178 |
+
self.c2 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs)
|
1179 |
+
self.forward_inputs = forward_inputs
|
1180 |
+
|
1181 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
1182 |
+
self.forward_inputs[self] = x
|
1183 |
+
return self.c2(self.c1(x))
|
1184 |
+
|
1185 |
+
@contextmanager
|
1186 |
+
def _dynamo_dist_per_rank_init(rank, world_size, init_pg=True):
|
1187 |
+
# To avoid multiple inheritance from _dynamo.test_case.TestCase and MultiProcessTestCase,
|
1188 |
+
# Just manually implement the most important part of the dynamo behavior to reset/clear.
|
1189 |
+
torch.cuda.set_device(rank)
|
1190 |
+
os.environ['MASTER_ADDR'] = 'localhost'
|
1191 |
+
os.environ['MASTER_PORT'] = '6789'
|
1192 |
+
if init_pg:
|
1193 |
+
c10d.init_process_group("nccl", rank=rank, world_size=world_size)
|
1194 |
+
torch._dynamo.reset()
|
1195 |
+
torch._dynamo.utils.counters.clear()
|
1196 |
+
try:
|
1197 |
+
yield
|
1198 |
+
finally:
|
1199 |
+
torch._dynamo.reset()
|
1200 |
+
torch._dynamo.utils.counters.clear()
|
1201 |
+
if init_pg:
|
1202 |
+
c10d.destroy_process_group()
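A sketch of how this context manager is typically used from a per-rank test body (the helper name, model, and shapes below are illustrative assumptions); it presumes one CUDA device per rank and an NCCL-enabled build.

import torch


def example_rank_body(rank: int, world_size: int) -> None:  # hypothetical helper
    # Sets the CUDA device, initializes the NCCL process group, and resets
    # dynamo state before and after the wrapped body.
    with _dynamo_dist_per_rank_init(rank, world_size):
        model = torch.nn.Linear(8, 8, device=f"cuda:{rank}")
        ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])
        compiled = torch.compile(ddp_model)
        out = compiled(torch.randn(4, 8, device=f"cuda:{rank}"))
        out.sum().backward()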
|
1203 |
+
|
1204 |
+
|
1205 |
+
class DynamoDistributedSingleProcTestCase(torch._dynamo.test_case.TestCase):
|
1206 |
+
"""
|
1207 |
+
Test harness for single-process dynamo distributed tests,
|
1208 |
+
initializes dist process group.
|
1209 |
+
|
1210 |
+
Prefer this for simple tests, as it's easier to debug.
|
1211 |
+
"""
|
1212 |
+
|
1213 |
+
@classmethod
|
1214 |
+
def setUpClass(cls):
|
1215 |
+
super().setUpClass()
|
1216 |
+
# _exit_stack is set up in TestCase
|
1217 |
+
cls._exit_stack.enter_context(
|
1218 |
+
patch.dict(
|
1219 |
+
os.environ,
|
1220 |
+
{
|
1221 |
+
"MASTER_ADDR": "localhost",
|
1222 |
+
"MASTER_PORT": "12355",
|
1223 |
+
},
|
1224 |
+
)
|
1225 |
+
)
|
1226 |
+
cls.rank = 0
|
1227 |
+
cls.device = f"cuda:{cls.rank}"
|
1228 |
+
cls.device_ids = None if "cuda" in cls.device else [cls.rank]
|
1229 |
+
c10d.init_process_group("nccl", rank=cls.rank, world_size=1)
|
1230 |
+
|
1231 |
+
@classmethod
|
1232 |
+
def tearDownClass(cls):
|
1233 |
+
c10d.destroy_process_group()
|
1234 |
+
super().tearDownClass()
|
1235 |
+
|
1236 |
+
|
1237 |
+
class DynamoDistributedMultiProcTestCase(MultiProcessTestCase):
|
1238 |
+
"""
|
1239 |
+
Use this for tests that actually run on multiple GPUs.
|
1240 |
+
|
1241 |
+
Decorate tests with @skip_if_lt_x_gpu(ngpu)
|
1242 |
+
|
1243 |
+
Note: MultiProcTestCase spawns processes per test and is slow.
|
1244 |
+
Prefer MultiThreadedTestCase for most tests. Perhaps use this one
|
1245 |
+
sparingly for integration tests.
|
1246 |
+
"""
|
1247 |
+
def setUp(self):
|
1248 |
+
super().setUp()
|
1249 |
+
self._spawn_processes()
|
1250 |
+
|
1251 |
+
def tearDown(self):
|
1252 |
+
super().tearDown()
|
1253 |
+
try:
|
1254 |
+
os.remove(self.file_name)
|
1255 |
+
except OSError:
|
1256 |
+
pass
|
1257 |
+
|
1258 |
+
@property
|
1259 |
+
def world_size(self) -> int:
|
1260 |
+
return torch.cuda.device_count()
|
1261 |
+
|
1262 |
+
@classmethod
|
1263 |
+
def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None:
|
1264 |
+
# The rest is copypasta from MultiProcessTestCase._run
|
1265 |
+
self = cls(test_name)
|
1266 |
+
self.rank = rank
|
1267 |
+
self.file_name = file_name
|
1268 |
+
self.run_test(test_name, parent_pipe)
|
1269 |
+
|
1270 |
+
|
1271 |
+
# NOTE [test parametrization utils for native funcol migration]
|
1272 |
+
#
|
1273 |
+
# Between the time we switch to the native funcol by default and the time when
|
1274 |
+
# we are confident that we can remove the legacy implementation, we want to
|
1275 |
+
# ensure that the legacy funcol remains covered by unit tests. This is to
|
1276 |
+
# prepare for any potential (but unlikely) reverts. The following utilities
|
1277 |
+
# help achieve this goal.
|
1278 |
+
#
|
1279 |
+
# run_with_{native,legacy}_funcol - mark a test to run with only
|
1280 |
+
# {native,legacy} funcol. These decorators are for impl specific tests (e.g.
|
1281 |
+
# verifying generated code with FileCheck).
|
1282 |
+
#
|
1283 |
+
# run_with_both_funcol_impls - parametrize a test to run with both legacy and
|
1284 |
+
# native funcol.
|
1285 |
+
#
|
1286 |
+
# run_with_both_funcol_impls_with_arg - same as run_with_both_funcol_impls, but
|
1287 |
+
# passes `enable_native_funcol` to the test so impl specific checks can be
|
1288 |
+
# carried out.
|
1289 |
+
def with_native_funcol(use_native_funcol: bool, remove_arg: bool):
|
1290 |
+
import torch.distributed._functional_collectives_impl as funcol_impl
|
1291 |
+
|
1292 |
+
def decorator(fn):
|
1293 |
+
def inner(*args, **kwargs):
|
1294 |
+
if remove_arg:
|
1295 |
+
del kwargs["use_native_funcol"]
|
1296 |
+
with patch.object(funcol_impl, '_use_native_funcol', new=use_native_funcol):
|
1297 |
+
return fn(*args, **kwargs)
|
1298 |
+
|
1299 |
+
return inner
|
1300 |
+
|
1301 |
+
return decorator
|
1302 |
+
|
1303 |
+
|
1304 |
+
run_with_native_funcol = with_native_funcol(True, remove_arg=False)
|
1305 |
+
run_with_legacy_funcol = with_native_funcol(False, remove_arg=False)
|
1306 |
+
|
1307 |
+
|
1308 |
+
run_with_both_funcol_impls = parametrize(
|
1309 |
+
"use_native_funcol",
|
1310 |
+
[
|
1311 |
+
subtest(True, decorators=[with_native_funcol(True, remove_arg=True)]),
|
1312 |
+
subtest(False, decorators=[with_native_funcol(False, remove_arg=True)]),
|
1313 |
+
]
|
1314 |
+
)
|
1315 |
+
|
1316 |
+
run_with_both_funcol_impls_with_arg = parametrize(
|
1317 |
+
"use_native_funcol",
|
1318 |
+
[
|
1319 |
+
subtest(True, decorators=[with_native_funcol(True, remove_arg=False)]),
|
1320 |
+
subtest(False, decorators=[with_native_funcol(False, remove_arg=False)]),
|
1321 |
+
]
|
1322 |
+
)
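A sketch of how these helpers might be applied in a test file (the test class is an illustrative assumption): run_with_both_funcol_impls generates a legacy and a native subtest and strips the parametrized kwarg, while the _with_arg variant forwards use_native_funcol so the body can branch on it.

from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    run_tests,
    TestCase,
)


class ExampleFuncolTest(TestCase):  # hypothetical test class
    @run_with_both_funcol_impls
    def test_runs_under_both_impls(self):
        # Body runs twice: once patched to the legacy funcol, once to the native one.
        ...

    @run_with_both_funcol_impls_with_arg
    def test_knows_which_impl(self, use_native_funcol):
        # The flag is forwarded so impl-specific checks can branch on it.
        if use_native_funcol:
            ...


instantiate_parametrized_tests(ExampleFuncolTest)

if __name__ == "__main__":
    run_tests()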
|
venv/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py
ADDED
@@ -0,0 +1,1441 @@
1 |
+
# Owner(s): ["oncall: distributed"]
|
2 |
+
|
3 |
+
import contextlib
|
4 |
+
import itertools
|
5 |
+
import os
|
6 |
+
import re
|
7 |
+
import sys
|
8 |
+
import warnings
|
9 |
+
from abc import ABC, abstractmethod
|
10 |
+
from contextlib import nullcontext
|
11 |
+
from copy import deepcopy
|
12 |
+
from enum import auto, Enum
|
13 |
+
from functools import partial, wraps
|
14 |
+
from typing import (
|
15 |
+
Any,
|
16 |
+
Callable,
|
17 |
+
Dict,
|
18 |
+
List,
|
19 |
+
no_type_check,
|
20 |
+
Optional,
|
21 |
+
Tuple,
|
22 |
+
Type,
|
23 |
+
Union,
|
24 |
+
)
|
25 |
+
from unittest import mock
|
26 |
+
|
27 |
+
import torch
|
28 |
+
import torch.distributed as dist
|
29 |
+
import torch.nn as nn
|
30 |
+
import torch.nn.functional as F
|
31 |
+
from torch.distributed._composable.fsdp._fsdp_param_group import (
|
32 |
+
FSDPParamGroup,
|
33 |
+
RegisterPostBackwardFunction,
|
34 |
+
)
|
35 |
+
from torch.distributed._tensor import distribute_tensor, DTensor, Shard
|
36 |
+
from torch.distributed.fsdp import CPUOffload, FullyShardedDataParallel as FSDP
|
37 |
+
from torch.distributed.fsdp._common_utils import TrainingState
|
38 |
+
from torch.distributed.fsdp._init_utils import NO_RESHARD_AFTER_FORWARD_STRATEGIES
|
39 |
+
from torch.distributed.fsdp.fully_sharded_data_parallel import (
|
40 |
+
BackwardPrefetch,
|
41 |
+
MixedPrecision,
|
42 |
+
ShardingStrategy,
|
43 |
+
)
|
44 |
+
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
|
45 |
+
from torch.distributed.fsdp.wrap import always_wrap_policy, ModuleWrapPolicy, wrap
|
46 |
+
from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer
|
47 |
+
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
|
48 |
+
from torch.testing._internal.common_distributed import (
|
49 |
+
MultiProcessTestCase,
|
50 |
+
MultiThreadedTestCase,
|
51 |
+
TEST_SKIPS,
|
52 |
+
)
|
53 |
+
from torch.testing._internal.common_utils import FILE_SCHEMA, get_cycles_per_ms
|
54 |
+
from torch.utils._triton import has_triton
|
55 |
+
|
56 |
+
|
57 |
+
class FSDPInitMode(Enum):
|
58 |
+
# No FSDP wrapping
|
59 |
+
NO_FSDP = auto()
|
60 |
+
# FSDP recursive wrapping
|
61 |
+
RECURSIVE = auto()
|
62 |
+
# TODO: FSDP non-recursive wrapping
|
63 |
+
# NONRECURSIVE = auto()
|
64 |
+
|
65 |
+
|
66 |
+
class CUDAInitMode(Enum):
|
67 |
+
# Move model to CUDA before passing to the FSDP constructor
|
68 |
+
CUDA_BEFORE = auto()
|
69 |
+
# Move model to CUDA after passing to the FSDP constructor
|
70 |
+
CUDA_AFTER = auto()
|
71 |
+
# Keep on CPU
|
72 |
+
CUDA_NEVER = auto()
|
73 |
+
|
74 |
+
|
75 |
+
class FSDPTestModel(nn.Module, ABC):
|
76 |
+
"""This defines the interface expected from all models used commonly for
|
77 |
+
FSDP unit tests."""
|
78 |
+
|
79 |
+
@abstractmethod
|
80 |
+
def get_input(self, device) -> Tuple[torch.Tensor, ...]:
|
81 |
+
"""Returns an input for the model as as tuple."""
|
82 |
+
...
|
83 |
+
|
84 |
+
@abstractmethod
|
85 |
+
def get_loss(self, input, output) -> torch.Tensor:
|
86 |
+
"""Returns the loss given the input and output."""
|
87 |
+
...
|
88 |
+
|
89 |
+
@abstractmethod
|
90 |
+
def run_backward(self, loss) -> None:
|
91 |
+
"""Runs the backward pass (e.g. including ``loss.backward()``)."""
|
92 |
+
...
|
93 |
+
|
94 |
+
@staticmethod
|
95 |
+
@abstractmethod
|
96 |
+
def init(*args: Any, **kwargs: Any) -> nn.Module:
|
97 |
+
"""Initializes an instance of this model."""
|
98 |
+
...
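As a rough illustration of the interface above (the class and shapes below are hypothetical, not part of this file), a conforming model only needs to provide an input, a loss, a backward step, and a static init:

class TinyFSDPTestModel(FSDPTestModel):  # hypothetical example
    def __init__(self) -> None:
        super().__init__()
        self.lin = nn.Linear(8, 8)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.lin(x)

    def get_input(self, device) -> Tuple[torch.Tensor, ...]:
        return (torch.randn(4, 8, device=device),)

    def get_loss(self, input, output) -> torch.Tensor:
        return output.sum()

    def run_backward(self, loss) -> None:
        loss.backward()

    @staticmethod
    def init(*args: Any, **kwargs: Any) -> nn.Module:
        return TinyFSDPTestModel()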
|
99 |
+
|
100 |
+
|
101 |
+
def _assert_module_states(
|
102 |
+
model: nn.Module,
|
103 |
+
process_group: dist.ProcessGroup,
|
104 |
+
assert_fn: Callable,
|
105 |
+
):
|
106 |
+
"""
|
107 |
+
All-gathers module states across ranks and calls ``assert_fn`` on each pair
|
108 |
+
of corresponding states from rank 0 and a nonzero rank. For example, if
|
109 |
+
``assert_fn`` is ``self.assertEqual()``, then this checks that all module
|
110 |
+
states are equal across ranks.
|
111 |
+
"""
|
112 |
+
# Include names for debugging convenience
|
113 |
+
named_module_states = [
|
114 |
+
(param_name, param.detach().cpu())
|
115 |
+
for param_name, param in model.named_parameters()
|
116 |
+
]
|
117 |
+
named_module_states += [
|
118 |
+
(buffer_name, buffer.detach().cpu())
|
119 |
+
for buffer_name, buffer in model.named_buffers()
|
120 |
+
]
|
121 |
+
world_size = dist.get_world_size(process_group)
|
122 |
+
olist = [None for _ in range(world_size)]
|
123 |
+
dist.all_gather_object(olist, named_module_states, group=process_group)
|
124 |
+
rank0_states = olist[0]
|
125 |
+
assert rank0_states is not None # mypy
|
126 |
+
for state in olist[1:]:
|
127 |
+
assert state is not None # mypy
|
128 |
+
for (_, p1), (_, p2) in zip(rank0_states, state):
|
129 |
+
assert_fn(p1, p2)
|
130 |
+
|
131 |
+
|
132 |
+
def _zero_model(
|
133 |
+
model: nn.Module,
|
134 |
+
zero_buffers: bool = False,
|
135 |
+
summon_full=True,
|
136 |
+
):
|
137 |
+
"""Zeros the parameters and optionally buffers of ``model`` in place."""
|
138 |
+
ctx = FSDP.summon_full_params(model) if summon_full else nullcontext()
|
139 |
+
with ctx:
|
140 |
+
for param in model.parameters():
|
141 |
+
with torch.no_grad():
|
142 |
+
param.zero_()
|
143 |
+
if zero_buffers:
|
144 |
+
for buffer in model.buffers():
|
145 |
+
with torch.no_grad():
|
146 |
+
buffer.zero_()
|
147 |
+
|
148 |
+
|
149 |
+
def _get_state_dict(model, cpu_offload=False, half=False):
|
150 |
+
if not cpu_offload:
|
151 |
+
model = model.cuda()
|
152 |
+
if half:
|
153 |
+
model.half()
|
154 |
+
|
155 |
+
return model.state_dict()
|
156 |
+
|
157 |
+
|
158 |
+
def subtest_name(test_name_mapping, *args):
|
159 |
+
return "_".join(
|
160 |
+
[test_name_mapping[str(s)] if s is not None else "none" for s in args]
|
161 |
+
)
|
162 |
+
|
163 |
+
|
164 |
+
def _broadcast_state_dict(rank, state_dict):
|
165 |
+
# For non-FSDP roots, some parts of the model state on rank 0 may
|
166 |
+
# not be on CPU, so we move everything to CPU to avoid issues like:
|
167 |
+
# https://github.com/pytorch/pytorch/issues/77113.
|
168 |
+
for param_name, param in state_dict.items():
|
169 |
+
if param.device != torch.device("cpu"):
|
170 |
+
state_dict[param_name] = param.cpu()
|
171 |
+
|
172 |
+
olist = [state_dict if rank == 0 else None]
|
173 |
+
dist.broadcast_object_list(olist)
|
174 |
+
state_dict = olist[0]
|
175 |
+
# Ensure that the state is on CUDA
|
176 |
+
for param_name in state_dict.keys():
|
177 |
+
state_dict[param_name] = state_dict[param_name].cuda()
|
178 |
+
return state_dict
|
179 |
+
|
180 |
+
|
181 |
+
def get_full_params(model: nn.Module, recurse: bool = True):
|
182 |
+
"""
|
183 |
+
Returns the full unsharded parameters of ``model``. Any FSDP-managed
|
184 |
+
parameters offloaded to CPU are moved to GPU in the returned list.
|
185 |
+
|
186 |
+
Args:
|
187 |
+
recurse (bool): If ``False``, only unshards the parameters immediate to
|
188 |
+
``model``; if ``True``, recurses through the module hierarchy
|
189 |
+
rooted at ``model``.
|
190 |
+
"""
|
191 |
+
with FSDP.summon_full_params(model, recurse=recurse):
|
192 |
+
return deepcopy(list(model.parameters()))
|
193 |
+
|
194 |
+
|
195 |
+
def _maybe_cuda(model: nn.Module, move_to_cuda: bool):
|
196 |
+
return model.cuda() if move_to_cuda else model
|
197 |
+
|
198 |
+
|
199 |
+
def _maybe_wrap_fsdp(model: nn.Module, wrap_fsdp: bool, *args, **kwargs):
|
200 |
+
return model if not wrap_fsdp else FSDP(model, *args, **kwargs)
|
201 |
+
|
202 |
+
|
203 |
+
class DummyProcessGroup:
|
204 |
+
def __init__(self, rank: int, size: int):
|
205 |
+
self._rank = rank
|
206 |
+
self._size = size
|
207 |
+
|
208 |
+
def rank(self) -> int:
|
209 |
+
return self._rank
|
210 |
+
|
211 |
+
def size(self) -> int:
|
212 |
+
return self._size
|
213 |
+
|
214 |
+
def allreduce(self, *args, **kwargs):
|
215 |
+
dist_wait = mock.Mock()
|
216 |
+
|
217 |
+
def get_future():
|
218 |
+
future: torch.futures.Future = torch.futures.Future()
|
219 |
+
future.set_result(1)
|
220 |
+
return future
|
221 |
+
|
222 |
+
dist_wait.get_future = get_future
|
223 |
+
return dist_wait
|
224 |
+
|
225 |
+
|
226 |
+
class TransformerWithSharedParams(FSDPTestModel):
|
227 |
+
def __init__(
|
228 |
+
self,
|
229 |
+
group: dist.ProcessGroup,
|
230 |
+
cuda_init_mode: CUDAInitMode,
|
231 |
+
add_bn: bool,
|
232 |
+
deterministic: bool,
|
233 |
+
):
|
234 |
+
super().__init__()
|
235 |
+
self.rank = group.rank()
|
236 |
+
self.world_size = group.size()
|
237 |
+
if deterministic:
|
238 |
+
torch.manual_seed(0)
|
239 |
+
d_vocab = 23
|
240 |
+
d_model = 16
|
241 |
+
|
242 |
+
self.embed_tokens = nn.Embedding(d_vocab, d_model)
|
243 |
+
self.transformer = nn.Transformer(
|
244 |
+
d_model=d_model,
|
245 |
+
num_encoder_layers=2,
|
246 |
+
num_decoder_layers=2,
|
247 |
+
dim_feedforward=8,
|
248 |
+
dropout=0.1,
|
249 |
+
)
|
250 |
+
self.output_proj = nn.Linear(d_model, d_vocab)
|
251 |
+
|
252 |
+
# share the embedding and output projection weights
|
253 |
+
self.output_proj.weight = self.embed_tokens.weight
|
254 |
+
self.register_buffer(
|
255 |
+
"vocab_bias", self.embed_tokens.weight.new_ones((d_model,))
|
256 |
+
)
|
257 |
+
self.register_buffer(
|
258 |
+
"long_buffer",
|
259 |
+
torch.zeros_like(self.vocab_bias, dtype=torch.long),
|
260 |
+
) # type: ignore[arg-type]
|
261 |
+
|
262 |
+
self.bs = 2
|
263 |
+
self.bn = torch.nn.BatchNorm1d(self.bs) if add_bn else torch.nn.Identity()
|
264 |
+
if cuda_init_mode == CUDAInitMode.CUDA_BEFORE:
|
265 |
+
self = self.cuda()
|
266 |
+
if deterministic:
|
267 |
+
self.eval()
|
268 |
+
|
269 |
+
def get_input(self, device):
|
270 |
+
torch.manual_seed(1 + self.rank) # keep everything deterministic
|
271 |
+
src = torch.arange(12, device=device).view(6, self.bs) # T x B
|
272 |
+
tgt = torch.arange(self.bs * 4, device=device).view(4, self.bs) # T x B
|
273 |
+
return (src, tgt)
|
274 |
+
|
275 |
+
def forward(self, src_ids, tgt_ids):
|
276 |
+
src = self.embed_tokens(src_ids)
|
277 |
+
src = src + self.vocab_bias + self.long_buffer.type_as(src) # type: ignore[operator]
|
278 |
+
tgt = self.embed_tokens(tgt_ids)
|
279 |
+
tgt = self.bn(tgt)
|
280 |
+
x = self.transformer(src, tgt)
|
281 |
+
return self.output_proj(x)
|
282 |
+
|
283 |
+
def get_loss(self, input, output):
|
284 |
+
_, tgt = input
|
285 |
+
return nn.functional.cross_entropy(
|
286 |
+
output.view(-1, output.size(-1)), tgt.view(-1), reduction="sum"
|
287 |
+
)
|
288 |
+
|
289 |
+
def run_backward(self, loss):
|
290 |
+
loss.backward()
|
291 |
+
|
292 |
+
@staticmethod
|
293 |
+
def init(
|
294 |
+
group: dist.ProcessGroup,
|
295 |
+
fsdp_init_mode: FSDPInitMode,
|
296 |
+
cuda_init_mode: CUDAInitMode,
|
297 |
+
fsdp_kwargs: Optional[Dict[str, Any]] = None,
|
298 |
+
deterministic: bool = False,
|
299 |
+
add_bn: bool = True,
|
300 |
+
) -> Union[nn.Module, FSDP]:
|
301 |
+
"""
|
302 |
+
Initializes a :class:`TransformerWithSharedParams` instance.
|
303 |
+
|
304 |
+
Args:
|
305 |
+
fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
|
306 |
+
any modules with FSDP. If ``RECURSIVE``, then wraps with
|
307 |
+
top-level FSDP. By default, the top-level FSDP uses the
|
308 |
+
``ModuleWrapPolicy`` for encoder and decoder layers, but a
|
309 |
+
different auto wrap policy may be specified via
|
310 |
+
``fsdp_kwargs``.
|
311 |
+
cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
|
312 |
+
fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
|
313 |
+
forwarded to the FSDP constructor.
|
314 |
+
deterministic (bool): Whether to make the model deterministic
|
315 |
+
across constructions.
|
316 |
+
add_bn (bool): Whether to include batch norm in the model.
|
317 |
+
"""
|
318 |
+
|
319 |
+
if fsdp_kwargs is None:
|
320 |
+
fsdp_kwargs = {}
|
321 |
+
if fsdp_init_mode == FSDPInitMode.NO_FSDP:
|
322 |
+
if isinstance(group, tuple):
|
323 |
+
pg = group[0]
|
324 |
+
else:
|
325 |
+
pg = group
|
326 |
+
return TransformerWithSharedParams(
|
327 |
+
pg, cuda_init_mode, add_bn, deterministic
|
328 |
+
)
|
329 |
+
elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
|
330 |
+
# Default to the `ModuleWrapPolicy`
|
331 |
+
if "auto_wrap_policy" not in fsdp_kwargs:
|
332 |
+
auto_wrap_policy = ModuleWrapPolicy(
|
333 |
+
{
|
334 |
+
TransformerEncoderLayer,
|
335 |
+
TransformerDecoderLayer,
|
336 |
+
}
|
337 |
+
)
|
338 |
+
else:
|
339 |
+
auto_wrap_policy = fsdp_kwargs.pop("auto_wrap_policy")
|
340 |
+
|
341 |
+
if (
|
342 |
+
"sharding_strategy" in fsdp_kwargs
|
343 |
+
and fsdp_kwargs["sharding_strategy"]
|
344 |
+
in {ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2}
|
345 |
+
and not isinstance(group, tuple)
|
346 |
+
):
|
347 |
+
fsdp_pg = None
|
348 |
+
else:
|
349 |
+
fsdp_pg = group
|
350 |
+
|
351 |
+
if isinstance(group, tuple):
|
352 |
+
tformer_pg = group[0]
|
353 |
+
else:
|
354 |
+
tformer_pg = group
|
355 |
+
|
356 |
+
m = TransformerWithSharedParams(
|
357 |
+
tformer_pg, cuda_init_mode, add_bn, deterministic
|
358 |
+
)
|
359 |
+
fsdp_model = FSDP(
|
360 |
+
m,
|
361 |
+
fsdp_pg,
|
362 |
+
auto_wrap_policy=auto_wrap_policy,
|
363 |
+
**fsdp_kwargs,
|
364 |
+
)
|
365 |
+
if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
|
366 |
+
fsdp_model = fsdp_model.cuda()
|
367 |
+
return fsdp_model
|
368 |
+
raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
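A sketch of a typical call from a test method (the helper name, default process group, and kwargs shown are illustrative assumptions): build an FSDP-wrapped copy and an unwrapped reference from the same deterministic seed.

def _example_build_models():  # hypothetical helper
    group = dist.distributed_c10d._get_default_group()
    fsdp_model = TransformerWithSharedParams.init(
        group,
        FSDPInitMode.RECURSIVE,
        CUDAInitMode.CUDA_BEFORE,
        fsdp_kwargs={"sharding_strategy": ShardingStrategy.FULL_SHARD},
        deterministic=True,
    )
    ref_model = TransformerWithSharedParams.init(
        group,
        FSDPInitMode.NO_FSDP,
        CUDAInitMode.CUDA_BEFORE,
        deterministic=True,
    )
    return fsdp_model, ref_model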
|
369 |
+
|
370 |
+
def get_ignored_modules(self):
|
371 |
+
return [self.transformer]
|
372 |
+
|
373 |
+
|
374 |
+
class NestedWrappedModule(FSDPTestModel):
|
375 |
+
def __init__(
|
376 |
+
self,
|
377 |
+
group: dist.ProcessGroup,
|
378 |
+
wrap_fsdp: bool,
|
379 |
+
cuda_init_mode: CUDAInitMode,
|
380 |
+
deterministic: bool,
|
381 |
+
**fsdp_kwargs,
|
382 |
+
):
|
383 |
+
super().__init__()
|
384 |
+
self.rank = group.rank()
|
385 |
+
self.world_size = group.size()
|
386 |
+
move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE
|
387 |
+
|
388 |
+
def _maybe_wrap(layer):
|
389 |
+
if wrap_fsdp:
|
390 |
+
return FSDP(layer, group, **fsdp_kwargs)
|
391 |
+
return layer
|
392 |
+
|
393 |
+
if deterministic:
|
394 |
+
torch.manual_seed(0)
|
395 |
+
self.module = nn.Sequential(
|
396 |
+
_maybe_cuda(nn.Linear(8, 4), move_to_cuda),
|
397 |
+
_maybe_wrap(
|
398 |
+
nn.Sequential(
|
399 |
+
_maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)),
|
400 |
+
_maybe_cuda(nn.Linear(16, 16), move_to_cuda),
|
401 |
+
),
|
402 |
+
),
|
403 |
+
_maybe_wrap(_maybe_cuda(nn.Linear(16, 4), move_to_cuda)),
|
404 |
+
_maybe_cuda(nn.Linear(4, 8), move_to_cuda),
|
405 |
+
)
|
406 |
+
|
407 |
+
def get_input(self, device):
|
408 |
+
torch.manual_seed(1 + self.rank) # keep everything deterministic
|
409 |
+
return (torch.rand(4, 8, device=device),)
|
410 |
+
|
411 |
+
def forward(self, x):
|
412 |
+
return self.module(x)
|
413 |
+
|
414 |
+
def get_loss(self, input, output):
|
415 |
+
loss = output.sum()
|
416 |
+
return loss
|
417 |
+
|
418 |
+
def run_backward(self, loss):
|
419 |
+
loss.backward()
|
420 |
+
|
421 |
+
@staticmethod
|
422 |
+
def init(
|
423 |
+
group: dist.ProcessGroup,
|
424 |
+
fsdp_init_mode: FSDPInitMode,
|
425 |
+
cuda_init_mode: CUDAInitMode,
|
426 |
+
fsdp_kwargs: Optional[Dict[str, Any]] = None,
|
427 |
+
deterministic: bool = False,
|
428 |
+
) -> nn.Module:
|
429 |
+
"""
|
430 |
+
Initializes a :class:`NestedWrappedModule` instance.
|
431 |
+
|
432 |
+
Args:
|
433 |
+
fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
|
434 |
+
any modules with FSDP. If ``RECURSIVE``, then wraps some nested
|
435 |
+
modules with FSDP but not the top-level module. The model may
|
436 |
+
later be wrapped with a top-level FSDP external to this method
|
437 |
+
if desired.
|
438 |
+
cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
|
439 |
+
fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
|
440 |
+
forwarded to the FSDP constructor.
|
441 |
+
deterministic (bool): Whether to make the model deterministic
|
442 |
+
across constructions.
|
443 |
+
"""
|
444 |
+
if fsdp_kwargs is None:
|
445 |
+
fsdp_kwargs = {}
|
446 |
+
if fsdp_init_mode == FSDPInitMode.NO_FSDP:
|
447 |
+
return NestedWrappedModule(
|
448 |
+
group,
|
449 |
+
wrap_fsdp=False,
|
450 |
+
cuda_init_mode=cuda_init_mode,
|
451 |
+
deterministic=deterministic,
|
452 |
+
)
|
453 |
+
elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
|
454 |
+
# Does not wrap with top-level FSDP
|
455 |
+
fsdp_model = NestedWrappedModule(
|
456 |
+
group,
|
457 |
+
wrap_fsdp=True,
|
458 |
+
cuda_init_mode=cuda_init_mode,
|
459 |
+
deterministic=deterministic,
|
460 |
+
**fsdp_kwargs,
|
461 |
+
)
|
462 |
+
if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
|
463 |
+
fsdp_model = fsdp_model.cuda()
|
464 |
+
return fsdp_model
|
465 |
+
raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
|
466 |
+
|
467 |
+
|
468 |
+
class AlwaysWrapNestedWrappedModule(NestedWrappedModule):
|
469 |
+
@staticmethod
|
470 |
+
def init(
|
471 |
+
group: dist.ProcessGroup,
|
472 |
+
fsdp_init_mode: FSDPInitMode,
|
473 |
+
cuda_init_mode: CUDAInitMode,
|
474 |
+
fsdp_kwargs: Optional[Dict[str, Any]] = None,
|
475 |
+
deterministic: bool = False,
|
476 |
+
):
|
477 |
+
"""
|
478 |
+
Initializes a :class:`NestedWrappedModule` instance, but unlike
|
479 |
+
:meth:`NestedWrappedModule.init`, for the ``RECURSIVE`` init mode, this
|
480 |
+
wraps with top-level FSDP and the ``always_wrap_policy()`` auto wrap
|
481 |
+
policy.
|
482 |
+
"""
|
483 |
+
model = super(
|
484 |
+
AlwaysWrapNestedWrappedModule, AlwaysWrapNestedWrappedModule
|
485 |
+
).init(
|
486 |
+
group=group,
|
487 |
+
fsdp_init_mode=FSDPInitMode.NO_FSDP,
|
488 |
+
cuda_init_mode=cuda_init_mode,
|
489 |
+
fsdp_kwargs=fsdp_kwargs,
|
490 |
+
deterministic=deterministic,
|
491 |
+
)
|
492 |
+
if fsdp_init_mode == FSDPInitMode.NO_FSDP:
|
493 |
+
return model
|
494 |
+
elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
|
495 |
+
fsdp_kwargs = fsdp_kwargs or {}
|
496 |
+
fsdp_model = FSDP(model, auto_wrap_policy=always_wrap_policy, **fsdp_kwargs)
|
497 |
+
if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
|
498 |
+
fsdp_model = fsdp_model.cuda()
|
499 |
+
return fsdp_model
|
500 |
+
|
501 |
+
|
502 |
+
class NonUniformReqGradNWM(NestedWrappedModule):
|
503 |
+
def __init__(
|
504 |
+
self,
|
505 |
+
group: dist.ProcessGroup,
|
506 |
+
wrap_fsdp: bool,
|
507 |
+
cuda_init_mode: CUDAInitMode,
|
508 |
+
deterministic: bool,
|
509 |
+
**fsdp_kwargs,
|
510 |
+
):
|
511 |
+
super(NestedWrappedModule, self).__init__()
|
512 |
+
# This `__init__` only differs from `NestedWrappedModule.__init__` in that
|
513 |
+
# the last two `nn.Linear` layers are FSDP wrapped in a `nn.Sequential`
|
514 |
+
# container. This arrangement results in all elements of the last two parameters
|
515 |
+
# residing on a single rank. Freezing all parameters except those two allows us
|
516 |
+
# to verify that `ShardedGradScaler` accommodates situations where some ranks
|
517 |
+
# have no (non-zero sized) parameter shards.
|
518 |
+
self.rank = group.rank()
|
519 |
+
self.world_size = group.size()
|
520 |
+
move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE
|
521 |
+
|
522 |
+
def _maybe_wrap(layer):
|
523 |
+
if wrap_fsdp:
|
524 |
+
return FSDP(layer, group, **fsdp_kwargs)
|
525 |
+
return layer
|
526 |
+
|
527 |
+
if deterministic:
|
528 |
+
torch.manual_seed(0)
|
529 |
+
self.module = nn.Sequential(
|
530 |
+
_maybe_cuda(nn.Linear(8, 4), move_to_cuda),
|
531 |
+
_maybe_wrap(
|
532 |
+
nn.Sequential(
|
533 |
+
_maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)),
|
534 |
+
_maybe_cuda(nn.Linear(16, 16), move_to_cuda),
|
535 |
+
),
|
536 |
+
),
|
537 |
+
_maybe_wrap(
|
538 |
+
nn.Sequential(
|
539 |
+
_maybe_cuda(nn.Linear(16, 4), move_to_cuda),
|
540 |
+
_maybe_cuda(nn.Linear(4, 8), move_to_cuda),
|
541 |
+
),
|
542 |
+
),
|
543 |
+
)
|
544 |
+
|
545 |
+
@staticmethod
|
546 |
+
def _set_nonuniform_req_grad(model, req_grad_mask) -> None:
|
547 |
+
for n, p in model.named_parameters():
|
548 |
+
if not re.match(req_grad_mask, n):
|
549 |
+
p.requires_grad_(False)
|
550 |
+
|
551 |
+
@staticmethod
|
552 |
+
def init(
|
553 |
+
group: dist.ProcessGroup,
|
554 |
+
fsdp_init_mode: FSDPInitMode,
|
555 |
+
cuda_init_mode: CUDAInitMode,
|
556 |
+
fsdp_kwargs: Optional[Dict[str, Any]] = None,
|
557 |
+
deterministic: bool = False,
|
558 |
+
):
|
559 |
+
"""
|
560 |
+
Initializes a :class:`NestedWrappedModule` instance, but unlike
|
561 |
+
:meth:`NestedWrappedModule.init`, it wraps a second :class:`torch.nn.Sequential`
|
562 |
+
container to enable the desired non-uniform ``requires_grad``
|
563 |
+
``use_orig_params=True`` tests. For both ``RECURSIVE`` and ``NO_FSDP``
|
564 |
+
init modes, freezes all parameters except the last two to validate
|
565 |
+
``ShardedGradScaler`` support for ranks with no (non-zero sized) local shards in
|
566 |
+
FSDP ``use_orig_params=True`` mode.
|
567 |
+
"""
|
568 |
+
# The parameters that should remain unfrozen are in `module.2.1`. The regex
|
569 |
+
# pattern below matches the relevant parameter names both with and without
|
570 |
+
# an interstitial FSDP module indicator (`_fsdp_wrapped_module`) present.
|
571 |
+
req_grad_pattern = re.compile(r"module\.2.*\.1.*")
|
572 |
+
if fsdp_init_mode == FSDPInitMode.NO_FSDP:
|
573 |
+
ddp_model = NonUniformReqGradNWM(
|
574 |
+
group,
|
575 |
+
wrap_fsdp=False,
|
576 |
+
cuda_init_mode=cuda_init_mode,
|
577 |
+
deterministic=deterministic,
|
578 |
+
)
|
579 |
+
NonUniformReqGradNWM._set_nonuniform_req_grad(ddp_model, req_grad_pattern)
|
580 |
+
return ddp_model
|
581 |
+
elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
|
582 |
+
if fsdp_kwargs is None:
|
583 |
+
fsdp_kwargs = {}
|
584 |
+
fsdp_model = NonUniformReqGradNWM(
|
585 |
+
group,
|
586 |
+
wrap_fsdp=True,
|
587 |
+
cuda_init_mode=cuda_init_mode,
|
588 |
+
deterministic=deterministic,
|
589 |
+
**fsdp_kwargs,
|
590 |
+
)
|
591 |
+
if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
|
592 |
+
fsdp_model = fsdp_model.cuda()
|
593 |
+
NonUniformReqGradNWM._set_nonuniform_req_grad(fsdp_model, req_grad_pattern)
|
594 |
+
return fsdp_model
|
595 |
+
raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
|
596 |
+
|
597 |
+
|
598 |
+
class ModuleWithDelay(FSDPTestModel):
|
599 |
+
"""This class wraps a :class:`FSDPTestModel` to optionally add a delay
|
600 |
+
after computing the loss and/or before the gradient reduction."""
|
601 |
+
|
602 |
+
def __init__(
|
603 |
+
self,
|
604 |
+
module: nn.Module,
|
605 |
+
delay_after_loss_ms: int,
|
606 |
+
delay_before_reduction_ms: int,
|
607 |
+
):
|
608 |
+
super().__init__()
|
609 |
+
self.delay_after_loss_ms = delay_after_loss_ms
|
610 |
+
self.delay_before_reduction_ms = delay_before_reduction_ms
|
611 |
+
self.module = module
|
612 |
+
|
613 |
+
def get_input(self, device):
|
614 |
+
return self.module.get_input(device)
|
615 |
+
|
616 |
+
def forward(self, x):
|
617 |
+
return self.module(x)
|
618 |
+
|
619 |
+
def get_loss(self, input, output):
|
620 |
+
loss = self.module.get_loss(input, output)
|
621 |
+
if self.delay_after_loss_ms > 0:
|
622 |
+
torch.cuda._sleep(int(self.delay_after_loss_ms * get_cycles_per_ms()))
|
623 |
+
return loss
|
624 |
+
|
625 |
+
def run_backward(self, loss):
|
626 |
+
orig_reduce_scatter = torch.distributed.reduce_scatter_tensor
|
627 |
+
|
628 |
+
def _delayed_reduce_scatter(*args, **kwargs):
|
629 |
+
if self.delay_before_reduction_ms > 0:
|
630 |
+
torch.cuda._sleep(
|
631 |
+
int(self.delay_before_reduction_ms * get_cycles_per_ms())
|
632 |
+
)
|
633 |
+
return orig_reduce_scatter(*args, **kwargs)
|
634 |
+
|
635 |
+
with mock.patch(
|
636 |
+
"torch.distributed.reduce_scatter_tensor", _delayed_reduce_scatter
|
637 |
+
):
|
638 |
+
self.module.run_backward(loss)
|
639 |
+
|
640 |
+
@staticmethod
|
641 |
+
def init(
|
642 |
+
module_class: Type[FSDPTestModel],
|
643 |
+
*model_args: Any,
|
644 |
+
delay_after_loss_ms: int,
|
645 |
+
delay_before_reduction_ms: int,
|
646 |
+
**model_kwargs: Any,
|
647 |
+
):
|
648 |
+
"""
|
649 |
+
Args:
|
650 |
+
module_class (Type[FSDPTestModel]): Wrapped module class to which
|
651 |
+
to add delays.
|
652 |
+
model_args: Positional arguments forwarded to the ``module_class``
|
653 |
+
``init()``.
|
654 |
+
delay_after_loss_ms (int): Delay after computing the loss/before
|
655 |
+
the optimizer step (in ms).
|
656 |
+
delay_before_reduction_ms (int): Delay before reduce-scattering
|
657 |
+
gradients (in ms).
|
658 |
+
model_kwargs: Keyword arguments forwarded to the ``module_class``
|
659 |
+
``init()``.
|
660 |
+
"""
|
661 |
+
return ModuleWithDelay(
|
662 |
+
module_class.init(*model_args, **model_kwargs),
|
663 |
+
delay_after_loss_ms,
|
664 |
+
delay_before_reduction_ms,
|
665 |
+
)
|
666 |
+
|
667 |
+
|
668 |
+
class NestedWrappedModuleWithDelay(ModuleWithDelay):
|
669 |
+
@staticmethod
|
670 |
+
def init( # type: ignore[override]
|
671 |
+
group: dist.ProcessGroup,
|
672 |
+
fsdp_init_mode: FSDPInitMode,
|
673 |
+
cuda_init_mode: CUDAInitMode = CUDAInitMode.CUDA_AFTER,
|
674 |
+
fsdp_kwargs: Optional[Dict[str, Any]] = None,
|
675 |
+
deterministic: bool = False,
|
676 |
+
delay_after_loss_ms: int = 0,
|
677 |
+
delay_before_reduction_ms: int = 0,
|
678 |
+
):
|
679 |
+
return ModuleWithDelay.init(
|
680 |
+
NestedWrappedModule,
|
681 |
+
group=group,
|
682 |
+
fsdp_init_mode=fsdp_init_mode,
|
683 |
+
cuda_init_mode=cuda_init_mode,
|
684 |
+
fsdp_kwargs=fsdp_kwargs,
|
685 |
+
deterministic=deterministic,
|
686 |
+
delay_after_loss_ms=delay_after_loss_ms,
|
687 |
+
delay_before_reduction_ms=delay_before_reduction_ms,
|
688 |
+
)
|
689 |
+
|
690 |
+
|
691 |
+
class DummyDDP(nn.Module):
|
692 |
+
def __init__(self, module):
|
693 |
+
super().__init__()
|
694 |
+
self.module = module
|
695 |
+
|
696 |
+
def forward(self, *args, **kwargs):
|
697 |
+
return self.module(*args, **kwargs)
|
698 |
+
|
699 |
+
|
700 |
+
class MixtureOfExperts(NestedWrappedModule):
|
701 |
+
def __init__(
|
702 |
+
self,
|
703 |
+
group: dist.ProcessGroup,
|
704 |
+
wrap_fsdp: bool,
|
705 |
+
cuda_init_mode: CUDAInitMode,
|
706 |
+
delay_before_free_ms: int,
|
707 |
+
deterministic: bool,
|
708 |
+
**fsdp_kwargs,
|
709 |
+
):
|
710 |
+
super().__init__(
|
711 |
+
group=group,
|
712 |
+
wrap_fsdp=wrap_fsdp,
|
713 |
+
cuda_init_mode=cuda_init_mode,
|
714 |
+
deterministic=deterministic,
|
715 |
+
)
|
716 |
+
self.group = group
|
717 |
+
self.delay_before_free_ms = delay_before_free_ms
|
718 |
+
self.wrap_fsdp = wrap_fsdp
|
719 |
+
self.move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE
|
720 |
+
if deterministic:
|
721 |
+
# Give each rank different expert parameters
|
722 |
+
torch.manual_seed(42 + self.rank)
|
723 |
+
d_expert = 23
|
724 |
+
d_shared = 12
|
725 |
+
d_input = 8
|
726 |
+
expert = _maybe_cuda(nn.Linear(d_expert, d_shared), self.move_to_cuda)
|
727 |
+
|
728 |
+
self.num_expert_params = sum([p.numel() for p in expert.parameters()])
|
729 |
+
for p in expert.parameters():
|
730 |
+
p.expert = True # type: ignore[attr-defined]
|
731 |
+
|
732 |
+
if deterministic:
|
733 |
+
# Keep all other parameters the same across ranks
|
734 |
+
torch.manual_seed(0)
|
735 |
+
|
736 |
+
shared = _maybe_cuda(nn.Linear(d_shared, d_expert), self.move_to_cuda)
|
737 |
+
|
738 |
+
if wrap_fsdp:
|
739 |
+
# we create a process group of size 1 for the expert params
|
740 |
+
expert_group = torch.distributed.new_group(
|
741 |
+
[group.rank()]
|
742 |
+
) # world size 1 means no shard
|
743 |
+
expert = FSDP(expert, expert_group, **fsdp_kwargs) # type: ignore[assignment]
|
744 |
+
shared = FSDP(shared, group, **fsdp_kwargs) # type: ignore[assignment]
|
745 |
+
|
746 |
+
self.module = nn.Sequential(
|
747 |
+
_maybe_cuda(nn.Linear(d_input, d_shared), self.move_to_cuda),
|
748 |
+
shared,
|
749 |
+
expert,
|
750 |
+
_maybe_cuda(nn.Linear(d_shared, d_input), self.move_to_cuda),
|
751 |
+
)
|
752 |
+
|
753 |
+
def forward(self, x):
|
754 |
+
if self.delay_before_free_ms > 0:
|
755 |
+
expert = self.module[2]
|
756 |
+
if isinstance(expert, FSDP):
|
757 |
+
orig_reshard = torch.distributed.fsdp._runtime_utils._reshard
|
758 |
+
|
759 |
+
def _delayed_reshard(*args, **kwargs):
|
760 |
+
torch.cuda._sleep(
|
761 |
+
int(self.delay_before_free_ms * get_cycles_per_ms())
|
762 |
+
)
|
763 |
+
return orig_reshard(*args, **kwargs)
|
764 |
+
|
765 |
+
# This patch covers any `import torch..._reshard` uses.
|
766 |
+
with mock.patch(
|
767 |
+
"torch.distributed.fsdp._runtime_utils._reshard", _delayed_reshard
|
768 |
+
):
|
769 |
+
return self.module(x)
|
770 |
+
|
771 |
+
return self.module(x)
|
772 |
+
|
773 |
+
def run_backward(self, loss):
|
774 |
+
loss.backward()
|
775 |
+
# Manually reduce gradients if not wrapped in FullyShardedDataParallel
|
776 |
+
if not self.wrap_fsdp:
|
777 |
+
with torch.no_grad():
|
778 |
+
for p in self.parameters():
|
779 |
+
if hasattr(p, "expert"):
|
780 |
+
continue # these params don't need grad reduction
|
781 |
+
if p.grad is not None:
|
782 |
+
p.grad.div_(self.world_size)
|
783 |
+
torch.distributed.all_reduce(p.grad, group=self.group)
|
784 |
+
|
785 |
+
@staticmethod
|
786 |
+
def init(
|
787 |
+
group: dist.ProcessGroup,
|
788 |
+
fsdp_init_mode: FSDPInitMode,
|
789 |
+
cuda_init_mode: CUDAInitMode,
|
790 |
+
fsdp_kwargs: Optional[Dict[str, Any]] = None,
|
791 |
+
deterministic: bool = False,
|
792 |
+
delay_before_free_ms: int = 0,
|
793 |
+
):
|
794 |
+
"""
|
795 |
+
Initializes a :class:`MixtureOfExperts` instance.
|
796 |
+
|
797 |
+
Args:
|
798 |
+
fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
|
799 |
+
any modules with FSDP. If ``RECURSIVE``, then wraps some nested
|
800 |
+
modules with FSDP, including the expert and shared layers, but
|
801 |
+
not the top-level module. The model may later be wrapped with a
|
802 |
+
top-level FSDP external to this method if desired.
|
803 |
+
cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
|
804 |
+
fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
|
805 |
+
forwarded to the FSDP constructor.
|
806 |
+
deterministic (bool): Whether to make the model deterministic
|
807 |
+
across constructions.
|
808 |
+
delay_before_free_ms (int): Delay before resharding expert
|
809 |
+
parameters in the forward pass (in ms).
|
810 |
+
"""
|
811 |
+
if fsdp_kwargs is None:
|
812 |
+
fsdp_kwargs = {}
|
813 |
+
if fsdp_init_mode == FSDPInitMode.NO_FSDP:
|
814 |
+
return MixtureOfExperts(
|
815 |
+
group,
|
816 |
+
wrap_fsdp=False,
|
817 |
+
cuda_init_mode=cuda_init_mode,
|
818 |
+
delay_before_free_ms=delay_before_free_ms,
|
819 |
+
deterministic=deterministic,
|
820 |
+
)
|
821 |
+
elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
|
822 |
+
# Does not wrap with top-level FSDP
|
823 |
+
fsdp_model = MixtureOfExperts(
|
824 |
+
group,
|
825 |
+
wrap_fsdp=True,
|
826 |
+
cuda_init_mode=cuda_init_mode,
|
827 |
+
delay_before_free_ms=delay_before_free_ms,
|
828 |
+
deterministic=deterministic,
|
829 |
+
**fsdp_kwargs,
|
830 |
+
)
|
831 |
+
if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
|
832 |
+
fsdp_model = fsdp_model.cuda()
|
833 |
+
return fsdp_model
|
834 |
+
raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
|
835 |
+
|
836 |
+
|
837 |
+
class MLP(nn.Module):
|
838 |
+
def __init__(
|
839 |
+
self,
|
840 |
+
dim: int,
|
841 |
+
device: Optional[torch.device] = None,
|
842 |
+
with_buffer: bool = False,
|
843 |
+
dim_multiplier: int = 4,
|
844 |
+
):
|
845 |
+
super().__init__()
|
846 |
+
self.in_proj = nn.Linear(dim, dim_multiplier * dim, device=device)
|
847 |
+
self.out_proj = nn.Linear(dim_multiplier * dim, dim, device=device)
|
848 |
+
if with_buffer:
|
849 |
+
self.register_buffer("buffer", torch.randn((dim,), device=device))
|
850 |
+
else:
|
851 |
+
self.buffer = None
|
852 |
+
|
853 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
854 |
+
z = self.in_proj(x)
|
855 |
+
z = F.relu(z)
|
856 |
+
z = self.out_proj(z)
|
857 |
+
z = F.relu(z)
|
858 |
+
if self.buffer is not None:
|
859 |
+
z = z + self.buffer
|
860 |
+
return z
|
861 |
+
|
862 |
+
def reset_parameters(self):
|
863 |
+
if self.buffer is not None:
|
864 |
+
torch.nn.init.normal_(self.buffer)
|
865 |
+
|
866 |
+
|
867 |
+
class DoubleLinear(nn.Module):
|
868 |
+
"""
|
869 |
+
This can be used for returning multiple outputs from a module
|
870 |
+
(``use_second_linear=True``) or for having an unused module (``False``).
|
871 |
+
"""
|
872 |
+
|
873 |
+
def __init__(self, dim: int, use_second_linear: bool = True):
|
874 |
+
super().__init__()
|
875 |
+
self.lin1 = nn.Linear(dim, dim)
|
876 |
+
self.lin2 = nn.Linear(dim, dim)
|
877 |
+
self.relu = nn.ReLU()
|
878 |
+
self.use_second_linear = use_second_linear
|
879 |
+
|
880 |
+
def forward(
|
881 |
+
self, x: torch.Tensor
|
882 |
+
) -> Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
|
883 |
+
if self.use_second_linear:
|
884 |
+
return self.relu(self.lin1(x)), self.relu(self.lin2(x))
|
885 |
+
return self.relu(self.lin1(x))
|
886 |
+
|
887 |
+
|
888 |
+
@contextlib.contextmanager
|
889 |
+
def patch_all_gather(new_all_gather_into_tensor: Callable):
|
890 |
+
orig_all_gather = dist.all_gather_into_tensor
|
891 |
+
dist.all_gather_into_tensor = new_all_gather_into_tensor
|
892 |
+
try:
|
893 |
+
yield
|
894 |
+
finally:
|
895 |
+
dist.all_gather_into_tensor = orig_all_gather
|
896 |
+
|
897 |
+
|
898 |
+
@contextlib.contextmanager
|
899 |
+
def patch_reduce_scatter(new_reduce_scatter_tensor: Callable):
|
900 |
+
orig_reduce_scatter = dist.reduce_scatter_tensor
|
901 |
+
dist.reduce_scatter_tensor = new_reduce_scatter_tensor
|
902 |
+
try:
|
903 |
+
yield
|
904 |
+
finally:
|
905 |
+
dist.reduce_scatter_tensor = orig_reduce_scatter
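A sketch of how these patch helpers compose with reduce_scatter_with_assert (defined later in this file); the method name, assertion callback, dtype check, and model arguments below are illustrative assumptions.

def _example_patched_reduce_scatter_test(self, fsdp_model, inp):  # hypothetical test method
    orig_reduce_scatter = dist.reduce_scatter_tensor

    def assert_fp32_output(output: torch.Tensor) -> None:
        # Illustrative check applied to every reduce-scatter issued under the patch.
        assert output.dtype == torch.float32

    patched = partial(
        reduce_scatter_with_assert, self, orig_reduce_scatter, assert_fp32_output
    )
    with patch_reduce_scatter(patched):
        fsdp_model(inp).sum().backward()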
|
906 |
+
|
907 |
+
|
908 |
+
@no_type_check
|
909 |
+
@contextlib.contextmanager
|
910 |
+
def patch_unshard(new_unshard: Callable):
|
911 |
+
orig_unshard = FSDPParamGroup.unshard
|
912 |
+
FSDPParamGroup.unshard = new_unshard
|
913 |
+
try:
|
914 |
+
yield
|
915 |
+
finally:
|
916 |
+
FSDPParamGroup.unshard = orig_unshard
|
917 |
+
|
918 |
+
|
919 |
+
@no_type_check
|
920 |
+
@contextlib.contextmanager
|
921 |
+
def patch_post_backward(new_post_backward: Callable):
|
922 |
+
orig_post_backward = FSDPParamGroup.post_backward
|
923 |
+
FSDPParamGroup.post_backward = new_post_backward
|
924 |
+
try:
|
925 |
+
yield
|
926 |
+
finally:
|
927 |
+
FSDPParamGroup.post_backward = orig_post_backward
|
928 |
+
|
929 |
+
|
930 |
+
@no_type_check
|
931 |
+
@contextlib.contextmanager
|
932 |
+
def patch_register_post_backward_hook_backward(new_backward: Callable):
|
933 |
+
orig_backward = RegisterPostBackwardFunction.backward
|
934 |
+
RegisterPostBackwardFunction.backward = new_backward
|
935 |
+
try:
|
936 |
+
yield
|
937 |
+
finally:
|
938 |
+
RegisterPostBackwardFunction.backward = orig_backward
|
939 |
+
|
940 |
+
|
941 |
+
def reduce_scatter_with_assert(
|
942 |
+
cls,
|
943 |
+
orig_reduce_scatter: Callable,
|
944 |
+
assert_fn: Callable, # `assert_fn(output: Tensor)`
|
945 |
+
*args: Any,
|
946 |
+
**kwargs: Any,
|
947 |
+
):
|
948 |
+
if len(args) > 0:
|
949 |
+
output = args[0]
|
950 |
+
elif "output" in kwargs:
|
951 |
+
output = kwargs["output"]
|
952 |
+
else:
|
953 |
+
raise AssertionError(
|
954 |
+
f"Cannot get reduce-scatter output from\nargs: {args}\nkwargs: {kwargs}"
|
955 |
+
)
|
956 |
+
assert_fn(output)
|
957 |
+
return orig_reduce_scatter(*args, **kwargs)
|
958 |
+
|
959 |
+
|
960 |
+
def check_sharded_parity(
|
961 |
+
cls, # unit test class
|
962 |
+
replicated_module: nn.Module,
|
963 |
+
sharded_module: nn.Module,
|
964 |
+
prefixes_to_ignore: Tuple[str, ...] = (),
|
965 |
+
):
|
966 |
+
for (replicated_name, replicated_param), (sharded_name, sharded_param) in zip(
|
967 |
+
replicated_module.named_parameters(), sharded_module.named_parameters()
|
968 |
+
):
|
969 |
+
clean_sharded_name = sharded_name
|
970 |
+
for prefix in prefixes_to_ignore:
|
971 |
+
clean_sharded_name = clean_sharded_name.replace(prefix, "")
|
972 |
+
cls.assertEqual(replicated_name, clean_sharded_name)
|
973 |
+
cls.assertIsInstance(sharded_param, DTensor)
|
974 |
+
assert isinstance(sharded_param, DTensor) # mypy
|
975 |
+
mesh, placements = sharded_param.device_mesh, sharded_param.placements
|
976 |
+
if tuple(placements) == (Shard(0), Shard(0)):
|
977 |
+
raise AssertionError(
|
978 |
+
"FSDP's (Shard(0), Shard(0)) layout differs from distribute_tensor(), "
|
979 |
+
"so we cannot check for equality using it"
|
980 |
+
)
|
981 |
+
sharded_ref_param = distribute_tensor(replicated_param, mesh, placements)
|
982 |
+
cls.assertEqual(sharded_param.to_local(), sharded_ref_param.to_local())
|
983 |
+
if replicated_param.grad is None:
|
984 |
+
cls.assertIsNone(sharded_param.grad)
|
985 |
+
continue
|
986 |
+
cls.assertIsNotNone(sharded_param.grad)
|
987 |
+
sharded_ref_grad = distribute_tensor(replicated_param.grad, mesh, placements)
|
988 |
+
cls.assertIsInstance(sharded_param.grad, DTensor)
|
989 |
+
assert isinstance(sharded_param.grad, DTensor) # mypy
|
990 |
+
cls.assertEqual(sharded_param.grad.to_local(), sharded_ref_grad.to_local())
|
991 |
+
|
992 |
+
|
993 |
+
def run_subtests(
|
994 |
+
cls_inst,
|
995 |
+
subtest_config: Dict[str, List[Any]],
|
996 |
+
test_fn: Callable,
|
997 |
+
*test_args,
|
998 |
+
**test_kwargs: Any,
|
999 |
+
):
|
1000 |
+
"""
|
1001 |
+
Runs a test function given by ``test_fn`` as a subtest according to the
|
1002 |
+
configurations specified by ``subtest_config``. This amortizes the
|
1003 |
+
costly setup overhead (including process spawn and initializing the
|
1004 |
+
process group) over the subtests.
|
1005 |
+
|
1006 |
+
Args:
|
1007 |
+
subtest_config (Dict[str, List[Any]]): A mapping from subtest
|
1008 |
+
keyword argument name to a list of its possible values.
|
1009 |
+
test_fn (Callable): A callable that runs the actual test.
|
1010 |
+
test_args: Positional arguments to pass to ``test_fn``.
|
1011 |
+
test_kwargs: Keyword arguments to pass to ``test_fn``.
|
1012 |
+
"""
|
1013 |
+
# Convert the config mapping to a list to have a fixed order
|
1014 |
+
subtest_config_items: List[Tuple[str, List[Any]]] = list(subtest_config.items())
|
1015 |
+
subtest_config_keys: List[str] = [item[0] for item in subtest_config_items]
|
1016 |
+
subtest_config_values: List[List[Any]] = [item[1] for item in subtest_config_items]
|
1017 |
+
for values in itertools.product(*subtest_config_values):
|
1018 |
+
# Map keyword to chosen value
|
1019 |
+
subtest_kwargs = dict(zip(subtest_config_keys, values))
|
1020 |
+
with cls_inst.subTest(**subtest_kwargs):
|
1021 |
+
test_fn(*test_args, **test_kwargs, **subtest_kwargs)
|
1022 |
+
dist.barrier()
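A sketch of the intended calling pattern from an FSDPTest subclass defined below (the method names and config keys here are illustrative assumptions): every combination of the listed values runs as its own subTest inside the already-initialized process group.

def _example_test_body(self):  # hypothetical FSDPTest method
    self.run_subtests(
        {
            "cpu_offload": [
                CPUOffload(offload_params=False),
                CPUOffload(offload_params=True),
            ],
            "use_orig_params": [False, True],
        },
        self._example_subtest_impl,
    )

def _example_subtest_impl(self, cpu_offload: CPUOffload, use_orig_params: bool):
    # Called once per combination; keyword names must match the config keys above.
    ...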
|
1023 |
+
|
1024 |
+
|
1025 |
+
class FSDPTestMultiThread(MultiThreadedTestCase):
|
1026 |
+
@property
|
1027 |
+
def world_size(self):
|
1028 |
+
return torch.cuda.device_count() if torch.cuda.is_available() else 4
|
1029 |
+
|
1030 |
+
def setUp(self):
|
1031 |
+
super().setUp()
|
1032 |
+
self._spawn_threads()
|
1033 |
+
|
1034 |
+
def run_subtests(self, *args, **kwargs):
|
1035 |
+
return run_subtests(self, *args, **kwargs)
|
1036 |
+
|
1037 |
+
|
+class FSDPTest(MultiProcessTestCase):
+    def setUp(self):
+        super().setUp()
+        # Set TORCH_NCCL_DESYNC_DEBUG=0 to disable the NCCL `workCleanupLoop()`,
+        # which can cause unit test flakiness:
+        # https://github.com/pytorch/pytorch/issues/90848
+        os.environ["TORCH_NCCL_DESYNC_DEBUG"] = "0"
+        self._spawn_processes()
+
+    @property
+    def world_size(self):
+        return min(torch.cuda.device_count(), 8) if torch.cuda.is_available() else 4
+
+    @property
+    def process_group(self):
+        return dist.distributed_c10d._get_default_group()
+
+    @property
+    def init_method(self):
+        return f"{FILE_SCHEMA}{self.file_name}"
+
+    def _check_cpu_offload(self, fsdp_model, cpu_offload):
+        self.assertEqual(cpu_offload, fsdp_model.cpu_offload)
+
+    def _check_backward_prefetch(self, fsdp_model, backward_prefetch):
+        self.assertEqual(backward_prefetch, fsdp_model.backward_prefetch)
+
+    def _check_forward_prefetch(self, fsdp_model, forward_prefetch):
+        self.assertEqual(forward_prefetch, fsdp_model.forward_prefetch)
+
+    def run_subtests(self, *args, **kwargs):
+        return run_subtests(self, *args, **kwargs)
+
+    @classmethod
+    def _run(cls, rank, test_name, file_name, pipe):
+        self = cls(test_name)
+        self.rank = rank
+        self.file_name = file_name
+
+        print(f"dist init r={self.rank}, world={self.world_size}")
+
+        # Fall back to the gloo backend so that `init_process_group()` succeeds;
+        # the actual tests are skipped if there are not enough GPUs.
+        backend = "nccl" if torch.cuda.is_available() else "gloo"
+
+        try:
+            dist.init_process_group(
+                init_method=self.init_method,
+                backend=backend,
+                world_size=int(self.world_size),
+                rank=self.rank,
+            )
+        except RuntimeError as e:
+            if "recompile" in e.args[0]:
+                sys.exit(TEST_SKIPS["backend_unavailable"].exit_code)
+
+            raise
+
+        if torch.cuda.is_available() and torch.cuda.device_count():
+            torch.cuda.set_device(self.rank % torch.cuda.device_count())
+
+        # Execute barrier prior to running test to ensure that every process
+        # has finished initialization and that the following test
+        # immediately exiting due to a skip doesn't cause flakiness.
+        dist.barrier()
+
+        self.run_test(test_name, pipe)
+
+        dist.barrier()
+
+        dist.destroy_process_group()
+
+    def _train_for_several_steps(
+        self,
+        model: nn.Module,
+        num_steps: int,
+        autocast: bool,
+        lr: float = 0.01,
+        fsdp_cpu_offload: Optional[CPUOffload] = None,
+        save_model: bool = False,
+        mixed_precision: Optional[MixedPrecision] = None,
+        enable_sharded_grad_scaler: bool = False,
+        use_pure_fp16: bool = False,
+        sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None,
+    ):
+        cpu_offload_params = fsdp_cpu_offload and fsdp_cpu_offload.offload_params
+
+        model_device = next(model.parameters()).device
+        if sharded_grad_scaler_kwargs is None:
+            sharded_grad_scaler_kwargs = {}
+        sharded_grad_scaler = ShardedGradScaler(
+            enabled=enable_sharded_grad_scaler, **sharded_grad_scaler_kwargs
+        )
+        # Use SGD with momentum instead of Adam, since Adam is scale invariant,
+        # which makes it a poor choice for these tests
+        optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
+        for _ in range(num_steps):
+            optim.zero_grad()
+            with torch.cuda.amp.autocast(enabled=autocast):
+                # Inputs are always on CUDA regardless of CPU offloading or model.device
+                input = model.module.get_input(torch.device("cuda"))
+                if use_pure_fp16 or (mixed_precision and not isinstance(model, FSDP)):
+                    if isinstance(input, torch.Tensor):
+                        input = input.half()
+                    else:
+                        input = tuple(x.half() for x in input)
+                output = model(*input)
+                # Post-forward, if CPU offloading, model params should be on CPU.
+                if (
+                    cpu_offload_params
+                    and isinstance(model, FSDP)
+                    # If not resharding after forward, the parameters are still
+                    # exposed as unsharded views into the GPU flat parameter
+                    and model.sharding_strategy
+                    not in NO_RESHARD_AFTER_FORWARD_STRATEGIES
+                ):
+                    for p in model.parameters():
+                        # Params should always be on CPU
+                        self.assertEqual(p.device, torch.device("cpu"))
+
+            loss = model.module.get_loss(input, output).to(model_device)
+            loss = sharded_grad_scaler.scale(loss)
+
+            if not mixed_precision and not use_pure_fp16:
+                assert (
+                    loss.dtype == torch.float32
+                ), "loss data type should be float32, as the original \
+                    parameter data type is float32."
+            else:
+                if use_pure_fp16:
+                    self.assertEqual(loss.dtype, torch.float16)
+                # FSDP loss is fp16, DDP AMP loss is fp32
+                elif isinstance(model, FSDP):
+                    assert mixed_precision is not None  # mypy
+                    self.assertEqual(loss.dtype, mixed_precision.param_dtype)
+                else:
+                    self.assertEqual(loss.dtype, torch.float32)
+            model.module.run_backward(loss)
+            # Post-backward, if CPU offloading, model params should be on CPU.
+            if cpu_offload_params and isinstance(model, FSDP):
+                for p in model.parameters():
+                    # Params should always be on CPU
+                    self.assertEqual(p.device, torch.device("cpu"))
+            # Unscale the gradients and step
+            sharded_grad_scaler.step(optim)
+            # Update the scale factor
+            sharded_grad_scaler.update()
+            # If save_model, simulate a save + load round trip.
+            if save_model:
+                state_dict = {k: v.clone() for k, v in model.state_dict().items()}
+                # Zero the params; if the state_dict save/load did not work
+                # properly, this would break the parity test with DDP.
+                _zero_model(model)
+                model.load_state_dict(state_dict)
+
+        if isinstance(model, FSDP):
+            model._assert_state(TrainingState.IDLE)
+        return loss.detach()  # type: ignore[possibly-undefined]
+
+    def _test_fsdp_parity(
+        self,
+        model_class: Type[FSDPTestModel],
+        fsdp_init_mode: FSDPInitMode,
+        cuda_init_mode: CUDAInitMode,
+        ref_init_fn: Optional[Callable] = None,
+        num_iters: int = 2,
+        save_model: bool = True,
+        cpu_offload: CPUOffload = CPUOffload(),
+        backward_prefetch: Optional[BackwardPrefetch] = None,
+        sharding_strategy: Optional[ShardingStrategy] = None,
+        mixed_precision: Optional[MixedPrecision] = None,
+        forward_prefetch: bool = False,
+        use_orig_params: bool = False,
+        enable_sharded_grad_scaler: bool = False,
+        use_pure_fp16: bool = False,
+        init_kwargs: Optional[Dict[str, Any]] = None,
+        sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None,
+        **fsdp_kwargs,
+    ):
+        """
+        Tests FSDP training against a reference, which defaults to DDP but
+        may be customized with ``ref_init_fn``.
+
+        Args:
+            model_class (Type[FSDPTestModel]): A model class that inherits from
+                ``FSDPTestModel``, which defines the expected interface.
+            fsdp_init_mode (FSDPInitMode): The mode to initialize the
+                FSDP-wrapped model. This should not be ``NO_FSDP``.
+            ref_init_fn (Optional[Callable]): A callable to invoke that wraps a
+                non-wrapped model to construct the reference model, where this
+                wrapper should provide data parallel semantics. If ``None``,
+                then the callable defaults to the DDP constructor.
+        """
+        assert (
+            fsdp_init_mode != FSDPInitMode.NO_FSDP
+        ), "Expects an FSDP init mode that wraps with FSDP"
+        if init_kwargs is None:
+            init_kwargs = {}
+        lr = 1e-2
+        rank = self.process_group.rank()
+        # Establish reference behavior with DDP
+        model = model_class.init(
+            self.process_group,
+            FSDPInitMode.NO_FSDP,
+            CUDAInitMode.CUDA_BEFORE,
+            deterministic=True,
+            **init_kwargs,
+        )
+        if ref_init_fn is None:
+            ref_model = DDP(model, device_ids=[rank], output_device=rank)
+        else:
+            ref_model = ref_init_fn(model)
+        if use_pure_fp16:
+            ref_model = ref_model.half()
+        ref_loss = self._train_for_several_steps(
+            ref_model,
+            num_iters,
+            autocast=mixed_precision is not None,
+            lr=lr,
+            fsdp_cpu_offload=cpu_offload,
+            mixed_precision=mixed_precision,
+            enable_sharded_grad_scaler=enable_sharded_grad_scaler,
+            use_pure_fp16=use_pure_fp16,
+            sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs,
+        )
+        ddp_params = list(ref_model.parameters())
+        # Check against FSDP behavior
+        fsdp_kwargs.update(
+            {
+                "cpu_offload": cpu_offload,
+                "backward_prefetch": backward_prefetch,
+                "sharding_strategy": sharding_strategy,
+                "mixed_precision": mixed_precision,
+                "forward_prefetch": forward_prefetch,
+                "use_orig_params": use_orig_params,
+            }
+        )
+        try:
+            fsdp_model = model_class.init(
+                self.process_group,
+                fsdp_init_mode,
+                cuda_init_mode,
+                fsdp_kwargs,
+                deterministic=True,
+                **init_kwargs,
+            )
+        except Exception as e:
+            raise ValueError(f"Initializing {model_class} raised error {str(e)}") from e
+        if not isinstance(fsdp_model, FSDP):
+            # Enforce wrapping with top-level FSDP since the comparison assumes
+            # a data parallel reference, and some test models may not wrap
+            # themselves in their `init()` method
+            fsdp_model = FSDP(fsdp_model, self.process_group, **fsdp_kwargs)
+        if use_pure_fp16:
+            # Change the model parameter dtype after FSDP initialization
+            fsdp_model = fsdp_model.half()
+        if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
+            fsdp_model = fsdp_model.cuda()
+        offload_params = cpu_offload is not None and cpu_offload.offload_params
+        # Offloading parameters with `CUDA_AFTER` should raise an error during
+        # lazy initialization due to the parameter devices not being CPU;
+        # otherwise, all parameter devices should be CPU
+        expects_device_error = (
+            offload_params and cuda_init_mode == CUDAInitMode.CUDA_AFTER
+        )
+        expects_cpu_device = (
+            offload_params and cuda_init_mode != CUDAInitMode.CUDA_AFTER
+        )
+        if expects_cpu_device:
+            cpu_device = torch.device("cpu")
+            for param in fsdp_model.parameters():
+                self.assertEqual(param.device, cpu_device)
+        context = (
+            self.assertRaisesRegex(
+                RuntimeError,
+                "An FSDP-managed module with parameter CPU offloading enabled "
+                "has parameters on cuda",
+            )
+            if expects_device_error
+            else nullcontext()
+        )
+        with context:
+            fsdp_loss = self._train_for_several_steps(
+                fsdp_model,
+                num_iters,
+                autocast=False,
+                lr=lr,
+                fsdp_cpu_offload=cpu_offload,
+                save_model=save_model,
+                mixed_precision=mixed_precision,
+                enable_sharded_grad_scaler=enable_sharded_grad_scaler,
+                use_pure_fp16=use_pure_fp16,
+                sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs,
+            )
+        # No need to check for parameter and loss parity if expecting an error
+        if expects_device_error:
+            return
+        # Check parameter devices are CPU if offloading to CPU before calling
+        # `get_full_params()`, which will cast the parameters to FP32
+        if offload_params:
+            cpu_device = torch.device("cpu")
+            for param in fsdp_model.parameters():
+                self.assertEqual(param.device, cpu_device)
+            fsdp_loss = fsdp_loss.cuda()
+        fsdp_unsharded_params = get_full_params(fsdp_model)
+        # Do not check dtype since the reference DDP loss may not be the same
+        # dtype as the FSDP loss in the case of mixed precision
+        torch.testing.assert_close(ref_loss, fsdp_loss, check_dtype=False)
+        # Do not check for parameter parity if using mixed precision since (1)
+        # the DDP parameters are in FP16 (from `half()`) while the FSDP
+        # parameters are in FP32 (from `summon_full_params()`) and (2) DDP runs
+        # the optimizer in FP16 while FSDP runs it in FP32
+        # TODO: Disable checking the parameters for pure FP16 due to floating
+        # point inaccuracy. Note that this means that the backward pass is not
+        # checked: https://github.com/pytorch/pytorch/issues/90784
+        if mixed_precision is None and not use_pure_fp16:
+            self.assertEqual(
+                ddp_params,
+                fsdp_unsharded_params,
+                exact_device=True,
+                msg="FSDP did not match DDP",
+            )
+
+
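For illustration, a sketch of how a concrete test usually combines ``run_subtests`` with ``_test_fsdp_parity``. ``TransformerWithSharedParams`` is one of the ``FSDPTestModel`` implementations defined elsewhere in this module; the test class, test names, and the particular parameter sweep are hypothetical.

class ExampleParityTest(FSDPTest):
    @property
    def world_size(self) -> int:
        return 2

    def test_parity_against_ddp(self):
        # Sweep one FSDP knob; each combination compares FSDP training against
        # the DDP reference for ``num_iters`` steps.
        self.run_subtests(
            {"sharding_strategy": [None, ShardingStrategy.SHARD_GRAD_OP]},
            self._test_parity_against_ddp,
        )

    def _test_parity_against_ddp(self, sharding_strategy):
        self._test_fsdp_parity(
            TransformerWithSharedParams,
            FSDPInitMode.RECURSIVE,
            CUDAInitMode.CUDA_BEFORE,
            sharding_strategy=sharding_strategy,
        )

Each swept value is forwarded to ``_test_fsdp_parity``, which folds it into the FSDP constructor arguments before comparing losses and unsharded parameters against the DDP reference.
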
+def test_compiled_fsdp(compile_compute_on_module: Optional[type] = None):
+    def fully_shard_with_compiled_compute(*args, **kwargs):
+        # Compile ``module._call_impl`` to showcase how to include
+        # user-registered hooks
+        if compile_compute_on_module is None or isinstance(
+            args[0], compile_compute_on_module
+        ):
+            args[0].compile()
+        return torch.distributed._composable.fsdp.fully_shard(*args, **kwargs)  # type: ignore[operator]
+
+    class FullyShardPatch(Enum):
+        # Apply ``partial`` in order to use ``Enum.value``
+        EAGER = partial(torch.distributed._composable.fsdp.fully_shard)  # type: ignore[var-annotated, arg-type]
+        COMPILED_COMPUTE = partial(fully_shard_with_compiled_compute)  # type: ignore[arg-type]
+        # add FULL for tracing FSDP
+
+    def decorator(func):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            original_fully_shard = torch.distributed._composable.fsdp.fully_shard
+            for fully_shard_patch in FullyShardPatch:
+                if fully_shard_patch != FullyShardPatch.EAGER and not has_triton():
+                    warnings.warn("Inductor on GPU needs Triton and recent GPU arch")
+                    continue
+                imported_fully_shard = (
+                    f"{func.__module__}.{original_fully_shard.__name__}"
+                )
+                with mock.patch(
+                    imported_fully_shard,
+                    fully_shard_patch.value,
+                ):
+                    func(*args, **kwargs)
+                    torch.distributed.barrier()
+                # ``mock.patch.__exit__`` does not work correctly with multiple
+                # threads: thread 1 sets {func.__module__}.fully_shard while
+                # thread 2 reads {func.__module__}.fully_shard and assumes it is
+                # the original; hence we manually reset it after ``__exit__``
+                import_path, _ = mock._get_target(imported_fully_shard)  # type: ignore[attr-defined]
+                setattr(
+                    import_path(), original_fully_shard.__name__, original_fully_shard
+                )
+
+        return wrapper
+
+    return decorator
+
+
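For illustration, a sketch of applying the ``test_compiled_fsdp`` decorator so that a test body runs once per ``FullyShardPatch`` member with ``fully_shard`` patched in the test's own module; the test class, the module-level ``fully_shard`` import, and the test body are hypothetical assumptions.

from torch.distributed._composable.fsdp import fully_shard  # the name the decorator patches

class ExampleCompiledFSDPTest(FSDPTestMultiThread):
    @test_compiled_fsdp(compile_compute_on_module=nn.Linear)
    def test_linear_train_parity(self):
        # Inside this body, ``fully_shard`` resolves either to the eager
        # implementation or to the compiled-compute wrapper, depending on the
        # current ``FullyShardPatch`` iteration.
        model = nn.Linear(8, 8, device="cuda")
        fully_shard(model)
        ...  # hypothetical: run a training step and compare against a reference
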
+class SkipModule(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.lin = nn.Linear(10, 10, bias=False)
+
+    def forward(self, x):
+        return self.lin(x)
+
+
+class NestedLinear(nn.Module):
+    def __init__(self, fsdp_wrap):
+        super().__init__()
+        if fsdp_wrap:
+            self.nested_linear = wrap(nn.Linear(10, 10, bias=False).cuda())
+        else:
+            self.nested_linear = nn.Linear(10, 10, bias=False).cuda()
+
+    def forward(self, x):
+        return self.nested_linear(x)
+
+
+class SkipModel(nn.Module):
+    def __init__(self, double_nest):
+        super().__init__()
+        self.linear = nn.Linear(10, 10, bias=False).cuda()
+        self.linear_skip = SkipModule().cuda()
+        self.nested_linear = wrap(NestedLinear(fsdp_wrap=double_nest))
+
+    def forward(self, x):
+        x = self.linear(x)
+        x = self.linear_skip(x)
+        x = self.nested_linear(x)
+        return x
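For illustration: ``SkipModel`` and ``NestedLinear`` call ``wrap()`` in their constructors, so they are meant to be built inside an ``enable_wrap`` context (outside one, ``wrap()`` is a no-op). A minimal sketch, assuming a CUDA device and an initialized process group; the helper name is hypothetical.

from torch.distributed.fsdp.wrap import enable_wrap

def _build_skip_model(double_nest: bool = True) -> FSDP:
    # Inside ``enable_wrap``, the ``wrap()`` calls in SkipModel/NestedLinear
    # produce nested FSDP instances, while ``linear_skip`` stays unwrapped.
    with enable_wrap(wrapper_cls=FSDP):
        return wrap(SkipModel(double_nest=double_nest))
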
venv/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py
ADDED
@@ -0,0 +1,323 @@
+# mypy: ignore-errors
+
+# Torch
+import torch
+import torch.cuda
+import torch.jit
+import torch.jit._logging
+import torch.jit.frontend
+import torch.jit.quantized
+
+# Testing utils
+from torch.testing._internal.common_dtype import floating_and_complex_types_and
+from torch.testing._internal.common_utils import TestCase, \
+    freeze_rng_state, TemporaryFileName, enable_profiling_mode_for_profiling_tests, is_iterable_of_tensors
+from torch.testing._internal.common_utils import enable_profiling_mode  # noqa: F401
+
+# Standard library
+from itertools import chain
+from typing import List, Union
+from torch._C import TensorType
+
+import io
+
+def check_output_types(self, func, ref_outputs, args, kwargs):
+    graph = getattr(func, 'last_graph', None)
+    types = [o.type() for o in graph.outputs()]
+    self.assertTrue(len(types) == 1)
+    t = types[0]
+    torch._C._jit_assert_is_instance(ref_outputs, t)
+
+# Test names in this set are only checked for a single derivative
+nn_functional_single_grad = frozenset('test_nn_' + name for name in [
+    'pdist',
+    'multilabel_margin_loss',
+    'max_unpool3d',
+    'multi_margin_loss',
+    'binary_cross_entropy',
+    'binary_cross_entropy_size_average',
+    'ctc_loss',
+    'grid_sample',
+])
+
+def check_against_reference(self, func, reference_func, output_func, args, kwargs=None,
+                            allow_unused=True, check_types=True, no_grad=False, no_gradgrad=False):
+    """Verifies a function performs identically to some reference implementation.
+
+    Commonly, this is used to verify that a JIT implementation
+    (output_func) matches the behavior of the eager implementation
+    (reference_func).
+    """
+    kwargs = kwargs if kwargs else {}
+
+    def allSum(vs):
+        if isinstance(vs, torch.Tensor):
+            vs = (vs,)
+        return sum((i + 1) * v.sum().abs() if v.dtype.is_complex else (i + 1) * v.sum()
+                   for i, v in enumerate(vs)
+                   if v is not None and v.dtype in floating_and_complex_types_and(torch.half, torch.bfloat16))
+
+    def clone_tensor(t, preserve_requires_grad):
+        require_grad = preserve_requires_grad and t.requires_grad
+        return t.detach().clone().requires_grad_(require_grad)
+
+    def clone_inputs(preserve_requires_grad: bool):
+        inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = []
+
+        for arg in args:
+            if isinstance(arg, torch.Tensor):
+                inputs.append(clone_tensor(arg, preserve_requires_grad))
+            elif is_iterable_of_tensors(arg):
+                inputs.append([clone_tensor(t, preserve_requires_grad) for t in arg])
+            else:
+                inputs.append(arg)
+
+        return inputs
+
+    # Returns tensors in args that requires_grad, including tensors in TensorList args
+    def get_recording_tensors(args):
+        recording_tensors: List[torch.Tensor] = []
+
+        for arg in args:
+            if isinstance(arg, torch.Tensor) and arg.requires_grad:
+                recording_tensors.append(arg)
+            elif is_iterable_of_tensors(arg):
+                recording_tensors.extend(filter(lambda t: t.requires_grad, arg))
+
+        return recording_tensors
+
+    # test no gradients case
+    nograd_inputs = clone_inputs(preserve_requires_grad=False)
+    outputs = self.runAndSaveRNG(reference_func, nograd_inputs, kwargs)
+    with enable_profiling_mode_for_profiling_tests():
+        outputs_test = self.runAndSaveRNG(func, nograd_inputs, kwargs)
+    self.assertEqual(outputs, outputs_test)
+
+    if check_types:
+        check_output_types(self, func, outputs_test, nograd_inputs, kwargs)
+
+    if no_grad:
+        # skip grad tests
+        return
+
+    with enable_profiling_mode_for_profiling_tests():
+        # test single grad case
+        recording_inputs = clone_inputs(preserve_requires_grad=True)
+        recording_tensors = get_recording_tensors(recording_inputs)
+        outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs))
+        grads = torch.autograd.grad(allSum(outputs), recording_tensors,
+                                    allow_unused=allow_unused)
+        outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs))
+        grads_test = torch.autograd.grad(allSum(outputs_test), recording_tensors,
+                                         allow_unused=allow_unused)
+        self.assertEqual(outputs, outputs_test)
+        self.assertEqual(grads, grads_test)
+        # test the grad grad case
+        if self._testMethodName in nn_functional_single_grad or no_gradgrad:
+            return
+
+        outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs))
+        l1 = allSum(outputs)
+        grads = torch.autograd.grad(l1, recording_tensors, create_graph=True,
+                                    allow_unused=allow_unused)
+
+        l2 = (allSum(grads) * l1)
+        grads2 = torch.autograd.grad(l2, recording_tensors, allow_unused=allow_unused)
+        recording_inputs = clone_inputs(preserve_requires_grad=True)
+        recording_tensors = get_recording_tensors(recording_inputs)
+        outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs))
+        l1_test = allSum(outputs_test)
+        grads_test = torch.autograd.grad(
+            l1_test, recording_tensors, create_graph=True, allow_unused=allow_unused)
+
+        l2_test = (allSum(grads_test) * l1_test)
+        grads2_test = torch.autograd.grad(l2_test, recording_tensors, allow_unused=allow_unused)
+
+        self.assertEqual(outputs, outputs_test)
+        self.assertEqual(grads, grads_test)
+        for g2, g2_test in zip(grads2, grads2_test):
+            if g2 is None and g2_test is None:
+                continue
+            self.assertEqual(g2, g2_test, atol=5e-4, rtol=1e-4)
+
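For illustration, a sketch of how a test typically calls ``check_against_reference``, comparing a scripted function against its eager counterpart; ``self`` is assumed to be an instance of the ``JitCommonTestCase`` defined just below, and the function, shapes, and helper name are hypothetical.

def _example_check(self):
    def eager_fn(x, y):
        return (x * y).relu().sum()

    scripted_fn = torch.jit.script(eager_fn)
    args = (
        torch.randn(4, 4, requires_grad=True),
        torch.randn(4, 4, requires_grad=True),
    )
    # ``output_func`` may post-process the outputs before gradients are taken;
    # the identity function is the common case. ``check_types`` is disabled
    # because this plain scripted function does not record a ``last_graph``.
    check_against_reference(
        self, scripted_fn, eager_fn, lambda outputs: outputs, args,
        check_types=False,
    )
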
+class JitCommonTestCase(TestCase):
+    def createFunctionFromGraph(self, trace):
+        graph = trace if isinstance(trace, torch._C.Graph) else trace.graph()
+        return torch._C._create_function_from_graph("forward", graph)
+
+    def assertExportImport(self, trace, inputs):
+        m = self.createFunctionFromGraph(trace)
+        self.assertExportImportModule(m, inputs)
+
+    def assertExportImportModule(self, m, inputs):
+        m_import = self.getExportImportCopy(m)
+        a = self.runAndSaveRNG(m, inputs)
+        b = self.runAndSaveRNG(m_import, inputs)
+        self.assertEqual(a, b, "Results of original model and "
+                               "exported/imported version of model differed")
+
+    def runAndSaveRNG(self, func, inputs, kwargs=None):
+        kwargs = kwargs if kwargs else {}
+        with freeze_rng_state():
+            results = func(*inputs, **kwargs)
+        return results
+
+    def getExportImportCopy(self, m, also_test_file=True, map_location=None):
+        buffer = io.BytesIO()
+        torch.jit.save(m, buffer)
+        buffer.seek(0)
+        imported = torch.jit.load(buffer, map_location=map_location)
+
+        if not also_test_file:
+            return imported
+
+        with TemporaryFileName() as fname:
+            torch.jit.save(imported, fname)
+            return torch.jit.load(fname, map_location=map_location)
+
+    def autoDiffErrorMessage(self, should_autodiff_node, nodes_not_in_diff_graph,
+                             fusion_nodes_not_found, non_fusible_nodes_being_fused,
+                             fusion_nodes_found, nodes_in_diff_graph):
+        err_msg = "\nFailure in testing nodes' autodifferentiation. "
+        if should_autodiff_node:
+            err_msg += "One or more nodes were expected to be autodiffed, " \
+                       "but were not found in specified fusible/nonfusible " \
+                       "DifferentiableGraph groups. \nSpecifically:"
+            # The node is intended to appear in a differentiable graph but doesn't
+            diff_nodes_missing = []
+            # The node is intended to appear in a differentiable graph
+            # outside of a fusion group but instead is in a fusion group
+            diff_nodes_in_fusion = []
+            # The node is intended to appear in a fusion group but doesn't
+            fusion_nodes_missing = []
+            # The node is intended to appear in a fusion group but instead
+            # is just in an outer differentiable graph
+            fusion_nodes_in_diff = []
+            for node in nodes_not_in_diff_graph:
+                if node in non_fusible_nodes_being_fused:
+                    diff_nodes_in_fusion.append(node)
+                else:
+                    diff_nodes_missing.append(node)
+            for node in fusion_nodes_not_found:
+                if node in nodes_in_diff_graph:
+                    fusion_nodes_in_diff.append(node)
+                else:
+                    fusion_nodes_missing.append(node)
+            if len(diff_nodes_missing) > 0:
+                err_msg += f"\n {diff_nodes_missing} were not in one of the " \
+                           "DifferentiableGraphs when they were expected to be. " \
+                           "Did you intend for these nodes to be autodiffed? " \
+                           "If not, remove them from the list of nonfusible nodes."
+            if len(diff_nodes_in_fusion) > 0:
+                err_msg += f"\n {diff_nodes_in_fusion} were found in one of the FusionGroups " \
+                           "when they were expected to be just in a DifferentiableGraph. If it was " \
+                           "intended for these nodes to be in FusionGroups, reclassify these nodes as " \
+                           "fusible nodes. If these nodes were not intended to be fused, your " \
+                           "autodifferentiation logic might be wrong."
+            if len(fusion_nodes_missing) > 0:
+                err_msg += f"\n {fusion_nodes_missing} were not in one of the FusionGroups " \
+                           "of the DifferentiableGraphs when they were expected to be. " \
+                           "They were also not found in an outer DifferentiableGraph. Did you " \
+                           "intend for these nodes to be autodifferentiated? If not, you should " \
+                           "remove these nodes from the test's fusible nodes. Otherwise your " \
+                           "autodifferentiation logic might be wrong."
+            if len(fusion_nodes_in_diff) > 0:
+                err_msg += f"\n {fusion_nodes_in_diff} were not in one of the FusionGroups " \
+                           "of the DifferentiableGraphs when they were expected to be, " \
+                           "instead they were found just in an outer DifferentiableGraph. " \
+                           "Did you intend for these nodes to be fused? If not, you should " \
+                           "move these nodes into the test's nonfusible nodes. Otherwise your " \
+                           "autodifferentiation logic might be wrong."
+        else:
+            err_msg += "One or more nodes were not expected to be autodiffed " \
+                       "but were found in a DifferentiableGraph or in a FusionGroup " \
+                       "of a DifferentiableGraph. Did you intend for these nodes to be " \
+                       "autodiffed? If so, change this test to expect autodifferentiation. " \
+                       "\nSpecifically:"
+            if len(fusion_nodes_found) > 0:
+                err_msg += f"\n {fusion_nodes_found} were not expected to be in " \
+                           "one of the DifferentiableGraphs, but appeared in a FusionGroup " \
+                           "of a DifferentiableGraph. "
+            if len(nodes_in_diff_graph) > 0:
+                err_msg += f"\n {nodes_in_diff_graph} were not expected to " \
+                           "be in one of the DifferentiableGraphs but were."
+        return err_msg
+
+    def assertAutodiffNode(self, graph, should_autodiff_node, nonfusible_nodes, fusible_nodes):
+        diff_nodes = graph.findAllNodes('prim::DifferentiableGraph')
+        diff_subgraphs = [node.g('Subgraph') for node in diff_nodes]
+
+        # Note: currently no tests have fusible_nodes
+        fusion_nodes = list(chain.from_iterable([g.findAllNodes('prim::FusionGroup') for g in diff_subgraphs]))
+        fusion_subgraphs = [node.g('Subgraph') for node in fusion_nodes]
+
+        # For any non-fusible node, it must show up in one of the DifferentiableGraphs.
+        nodes_in_diff_graph = []
+        nodes_not_in_diff_graph = []
+        non_fusible_nodes_being_fused = []
+        for node in nonfusible_nodes:
+            if any(g.findNode(node) is not None for g in diff_subgraphs):
+                nodes_in_diff_graph.append(node)
+            else:
+                nodes_not_in_diff_graph.append(node)
+            if any(g.findNode(node) is not None for g in fusion_subgraphs):
+                non_fusible_nodes_being_fused.append(node)
+        found_all_nonfusible_nodes = len(nodes_in_diff_graph) == len(nonfusible_nodes)
+
+        # For any fusible node, it must show up in one of the FusionGroups in one of the DifferentiableGraphs.
+        fusion_nodes_found = []
+        fusion_nodes_not_found = []
+        for node in fusible_nodes:
+            if any(g.findNode(node) is not None for g in fusion_subgraphs):
+                fusion_nodes_found.append(node)
+            else:
+                fusion_nodes_not_found.append(node)
+        found_all_fusible_nodes = len(fusion_nodes_found) == len(fusible_nodes)
+
+        if should_autodiff_node is not None:
+            err_msg = self.autoDiffErrorMessage(should_autodiff_node,
+                                                nodes_not_in_diff_graph,
+                                                fusion_nodes_not_found,
+                                                non_fusible_nodes_being_fused,
+                                                fusion_nodes_found,
+                                                nodes_in_diff_graph)
+            self.assertEqual(should_autodiff_node,
+                             found_all_nonfusible_nodes and found_all_fusible_nodes, err_msg)
+
+    def checkShapeAnalysis(self, out_sizes: Union[List[int], List[List[int]]],
+                           traced_graph, assert_propagation, constant_prop=True):
+        # Re-propagate the input shapes provided by tracing
+        prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled()
+        for enable_test_mode in [True, False]:
+            # Here we test both allowing and disallowing substitution of complete
+            # shapes as constants; disallowing constants helps stress test the
+            # partial evaluation and substitution pipeline
+            torch._C._jit_set_symbolic_shapes_test_mode(enable_test_mode)
+            torch._C._jit_erase_non_input_shape_information(traced_graph)
+            if constant_prop:
+                torch._C._jit_pass_constant_propagation(traced_graph)
+            torch._C._jit_pass_propagate_shapes_on_graph(traced_graph)
+            # Add sizes to the default tensor type to avoid checking something out
+            # of scope and difficulties with the tracer leaving sizes in other
+            # parts of the tensor type
+            output = next(traced_graph.outputs()).type()
+
+            def test_type(type, actual_size):
+                sizes = type.symbolic_sizes()
+                out_type = TensorType.get().with_sizes(sizes)
+                actual_type = TensorType.get().with_sizes(actual_size)
+
+                # Always check that the actual shape is a subtype of the output type
+                self.assertTrue(actual_type.isSubtypeOf(out_type))
+
+                # Then, if the assertion flag is provided, check that shape
+                # analysis succeeded
+                if assert_propagation:
+                    self.assertEqual(out_type.sizes(), actual_size)
+
+            if output.isSubtypeOf(torch._C.TensorType.get()):
+                test_type(output, out_sizes)
+            else:
+                tuple_elements = output.elements()
+                for i in range(len(tuple_elements)):
+                    test_type(tuple_elements[i], out_sizes[i])
+
+        torch._C._jit_set_symbolic_shapes_test_mode(prev_symbolic_shapes_test_enabled)
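
For illustration, a sketch of a concrete test built on ``JitCommonTestCase`` that exercises ``assertExportImport`` on a traced function; the class and test names are hypothetical.

class ExampleJitTest(JitCommonTestCase):
    def test_traced_add_export_import(self):
        def fn(x, y):
            return x + y

        inputs = (torch.randn(3), torch.randn(3))
        traced = torch.jit.trace(fn, inputs)
        # Round-trips the graph through ``torch.jit.save``/``torch.jit.load``
        # (an in-memory buffer and a temporary file) and checks that the
        # reloaded function matches the original under a frozen RNG state.
        self.assertExportImport(traced.graph, inputs)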