applied-ai-018 committed on
Commit ff8ddfa (verified) · 1 parent: 81a734d

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py +0 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_nn.py +0 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/network1.py +8 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/network2.py +9 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__init__.py +0 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_test.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/pipe_with_ddp_test.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py +0 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py +96 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py +134 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py +64 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py +40 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__init__.py +0 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py +358 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py +42 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py +111 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py +733 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py +0 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py +64 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py +144 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py +473 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py +0 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py +0 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py +733 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipe_with_ddp_test.py +147 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipeline/__init__.py +0 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipeline/__pycache__/__init__.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_nn.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (193 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc ADDED
Binary file (594 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc ADDED
Binary file (618 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/network1.py ADDED
@@ -0,0 +1,8 @@
+import torch.nn as nn
+
+
+class Net(nn.Module):
+
+    def __init__(self):
+        super().__init__()
+        self.linear = nn.Linear(10, 20)
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/network2.py ADDED
@@ -0,0 +1,9 @@
+import torch.nn as nn
+
+
+class Net(nn.Module):
+
+    def __init__(self):
+        super().__init__()
+        self.linear = nn.Linear(10, 20)
+        self.relu = nn.ReLU()
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (200 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc ADDED
Binary file (1.3 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc ADDED
Binary file (3.67 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc ADDED
Binary file (19.5 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_test.cpython-310.pyc ADDED
Binary file (254 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc ADDED
Binary file (2.48 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc ADDED
Binary file (5.02 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc ADDED
Binary file (15.6 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/pipe_with_ddp_test.cpython-310.pyc ADDED
Binary file (4.77 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc ADDED
Binary file (4.97 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (207 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc ADDED
Binary file (1.83 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py ADDED
@@ -0,0 +1,96 @@
+import sys
+from functools import wraps, partial
+
+import torch
+import torch.distributed as dist
+from torch.distributed import rpc
+from torch.testing._internal.common_distributed import (
+    MultiProcessTestCase,
+    TEST_SKIPS,
+    tp_transports,
+)
+
+TEST_GPU_NUM = 4
+
+class ShardedTensorTestBase(MultiProcessTestCase):
+    @property
+    def world_size(self):
+        return TEST_GPU_NUM
+
+    def init_pg(self, backend="nccl"):
+        if backend not in ["nccl", "gloo", "mpi"]:
+            raise RuntimeError(f"Backend {backend} not supported!")
+
+        dist.init_process_group(
+            backend=backend,
+            world_size=self.world_size,
+            rank=self.rank,
+            init_method=f"file://{self.file_name}",
+        )
+
+        # set device for nccl pg for collectives
+        if backend == "nccl":
+            torch.cuda.set_device(self.rank)
+
+
+    def init_rpc(self):
+        rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports())
+        rpc_backend_options.init_method = f"file://{self.file_name}"
+        for rank in range(self.world_size):
+            rpc_backend_options.set_device_map(
+                f"worker{rank}", {rank: self.rank, self.rank: rank}
+            )
+
+        rpc.init_rpc(
+            name="worker%d" % self.rank,
+            rank=self.rank,
+            world_size=self.world_size,
+            rpc_backend_options=rpc_backend_options,
+        )
+
+    def init_comms(self, init_rpc=True, backend="nccl"):
+        if init_rpc:
+            self.init_rpc()
+        self.init_pg(backend=backend)
+
+    def destroy_comms(self, destroy_rpc=True):
+        # Wait for all ranks to reach here before starting shutdown.
+        dist.barrier()
+
+        if destroy_rpc:
+            rpc.shutdown()
+        dist.destroy_process_group()
+
+    def setUp(self) -> None:
+        super().setUp()
+        self._spawn_processes()
+
+    def assert_sharded_tensor_equal(self, st1, st2):
+        st1_local_shards = st1.local_shards()
+        st2_local_shards = st2.local_shards()
+        self.assertEqual(len(st1_local_shards), len(st2_local_shards))
+        for i, st1_local_shard in enumerate(st1_local_shards):
+            self.assertEqual(st1_local_shard.tensor, st2_local_shards[i].tensor)
+            self.assertEqual(st1_local_shard.metadata, st2_local_shards[i].metadata)
+
+        self.assertEqual(st1.metadata(), st2.metadata())
+        self.assertEqual(st1.sharding_spec(), st2.sharding_spec())
+        self.assertEqual(len(st1.remote_shards()), len(st2.remote_shards()))
+
+# wrapper to initialize comms (processgroup + rpc)
+def with_comms(func=None, init_rpc=True, backend="nccl"):
+    if func is None:
+        return partial(
+            with_comms,
+            init_rpc=init_rpc,
+            backend=backend,
+        )
+
+    @wraps(func)
+    def wrapper(self, *args, **kwargs):
+        if backend == "nccl" and torch.cuda.device_count() < self.world_size:
+            sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
+        self.init_comms(init_rpc=init_rpc, backend=backend)
+        func(self, *args, **kwargs)
+        self.destroy_comms(destroy_rpc=init_rpc)
+    return wrapper
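
For context, a minimal sketch of how this base class is typically used (the test class, tensor sizes, and sharding spec below are illustrative, not part of this commit): a test subclasses ShardedTensorTestBase and decorates each test with with_comms, which initializes the process group (and optionally RPC) before the test body runs and tears it down afterwards.

    # Illustrative usage sketch -- not one of the uploaded files.
    import torch
    from torch.distributed._shard import sharded_tensor
    from torch.distributed._shard.sharding_spec import ChunkShardingSpec
    from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
    from torch.testing._internal.distributed._shard.sharded_tensor import (
        ShardedTensorTestBase,
        with_comms,
    )

    class MyShardedTensorTest(ShardedTensorTestBase):
        @with_comms(init_rpc=False)  # process group only; skip RPC setup
        @skip_if_lt_x_gpu(4)
        def test_sharded_rand(self):
            spec = ChunkShardingSpec(
                dim=0,
                placements=[f"rank:{r}/cuda:{r}" for r in range(self.world_size)],
            )
            st = sharded_tensor.rand(spec, 12, 8)
            # With 4 ranks chunking dim 0, each rank holds exactly one local shard.
            self.assertEqual(len(st.local_shards()), 1)
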
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.45 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc ADDED
Binary file (3.05 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc ADDED
Binary file (1.9 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py ADDED
@@ -0,0 +1,134 @@
+import builtins
+
+import torch
+from torch.distributed._shard.sharding_spec import (
+    ChunkShardingSpec,
+    EnumerableShardingSpec,
+    ShardMetadata,
+)
+from torch.distributed._shard.sharding_spec._internals import (
+    get_chunked_dim_size,
+    get_split_size,
+)
+
+
+def generate_chunk_sharding_specs_for_test(sharding_dim):
+    return [
+        ChunkShardingSpec(
+            dim=sharding_dim,
+            placements=[
+                "rank:0/cuda:0",
+                "rank:1/cuda:1",
+                "rank:2/cuda:2",
+                "rank:3/cuda:3",
+            ],
+        ),
+        # Test different ordering. (Case 1)
+        ChunkShardingSpec(
+            dim=sharding_dim,
+            placements=[
+                "rank:2/cuda:2",
+                "rank:3/cuda:3",
+                "rank:0/cuda:0",
+                "rank:1/cuda:1",
+            ],
+        ),
+        # Test different ordering. (Case 2)
+        ChunkShardingSpec(
+            dim=sharding_dim,
+            placements=[
+                "rank:3/cuda:3",
+                "rank:0/cuda:0",
+                "rank:1/cuda:1",
+                "rank:2/cuda:2",
+            ],
+        ),
+    ]
+
+
+def generate_enumerable_sharding_specs_for_test():
+    return [
+        EnumerableShardingSpec(
+            [
+                ShardMetadata(
+                    shard_offsets=[0, 0],
+                    shard_sizes=[5, 5],
+                    placement="rank:0/cuda:0",
+                ),
+                ShardMetadata(
+                    shard_offsets=[5, 0],
+                    shard_sizes=[5, 5],
+                    placement="rank:1/cuda:1",
+                ),
+                ShardMetadata(
+                    shard_offsets=[0, 5],
+                    shard_sizes=[5, 5],
+                    placement="rank:2/cuda:2",
+                ),
+                ShardMetadata(
+                    shard_offsets=[5, 5],
+                    shard_sizes=[5, 5],
+                    placement="rank:3/cuda:3",
+                ),
+            ]
+        )
+    ]
+
+
+def generate_local_weight_sharding_params_for_test(
+    local_weight, sharded_dim, gpu_num, spec, rank
+):
+    """
+    Shard the local weight based on the given spec, so we can compare against
+    the one from sharded tensor.
+
+    Args:
+        local_weight: weight matrix to be sharded.
+        sharded_dim: The dimension which we shard on.
+        gpu_num: number of ranks.
+        spec: sharding spec.
+        rank: # of cuda process.
+
+    Returns:
+        start_pos: start position of sharded weight on the given rank.
+        chunk_size: chunk size of sharded weight on the given rank.
+    """
+    sharding_dim_size = local_weight.size(sharded_dim)
+    split_size = get_split_size(sharding_dim_size, gpu_num)
+    current_offsets = 0
+    start_pos = current_offsets
+    for idx, placement in enumerate(spec.placements):
+        chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
+        if rank == placement.rank():
+            start_pos = current_offsets
+            break
+        current_offsets += chunk_size
+    return start_pos, chunk_size
+
+
+def clone_module_parameter(module, param_name):
+    """
+    Clone a parameter from a given existing module.
+
+    Args:
+        module (:class:`torch.nn.Module`): Module whose parameter needs to be cloned.
+        param_name (str): Name of the parameter of ``module`` that needs to be cloned.
+
+    Returns: cloned tensor as :class:`torch.nn.Parameter`.
+    """
+    tensor = getattr(module, param_name)
+    return torch.nn.Parameter(tensor.detach().clone())
+
+def gen_binary_op_func(python_op, inplace=False):
+    src_lines = ['def f(lhs, rhs):']
+    if "torch" in python_op:
+        src_lines.append(f'    return {python_op}(lhs, rhs)\n')
+    elif inplace:
+        src_lines.append(f'    lhs {python_op}= rhs\n    return lhs\n')
+    else:
+        src_lines.append(f'    return lhs {python_op} rhs\n')
+
+    code_str = '\n'.join(src_lines)
+    g = {'torch': torch}
+    builtins.exec(code_str, g)
+    return g["f"]
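
As a rough illustration of what gen_binary_op_func produces (an editor's sketch; none of these lines are part of the commit): "+" yields an out-of-place add, "+" with inplace=True yields an augmented assignment that returns lhs, and an operator string containing "torch" is called through the torch namespace.

    # Illustrative usage sketch -- not one of the uploaded files.
    import torch
    from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
        gen_binary_op_func,
    )

    add = gen_binary_op_func("+")                 # f(lhs, rhs) -> lhs + rhs
    iadd = gen_binary_op_func("+", inplace=True)  # f(lhs, rhs) -> lhs += rhs; return lhs
    tadd = gen_binary_op_func("torch.add")        # f(lhs, rhs) -> torch.add(lhs, rhs)

    a, b = torch.ones(2, 2), torch.full((2, 2), 2.0)
    assert torch.equal(add(a, b), tadd(a, b))
    assert torch.equal(iadd(a.clone(), b), torch.full((2, 2), 3.0))
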
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py ADDED
@@ -0,0 +1,64 @@
+import copy
+import random
+import torch
+from torch.distributed._shard import sharded_tensor
+
+from torch.distributed._shard.sharding_spec import (
+    ChunkShardingSpec,
+)
+
+PLACEMENTS = [
+    "rank:0/cuda:0",
+    "rank:1/cuda:1",
+    "rank:2/cuda:2",
+    "rank:3/cuda:3",
+]
+
+DEFAULT_GPU_NUM = 4
+
+
+def _chunk_sharding_specs_list_for_test(sharding_dims, seed=0):
+    spec_list = []
+    for i in range(len(sharding_dims)):
+        random.Random(seed + i).shuffle(PLACEMENTS)
+        spec_list.append(
+            ChunkShardingSpec(
+                dim=sharding_dims[i],
+                placements=copy.deepcopy(PLACEMENTS),
+            )
+        )
+    return spec_list
+
+class MyShardedModel2(torch.nn.Module):
+    def __init__(
+        self,
+        spec=None,
+        group=None,
+        init_rrefs=True
+    ) -> None:
+        super().__init__()
+        if spec is not None:
+            self.sharded_tensor2 = sharded_tensor.rand(
+                spec, 10, 20, process_group=group, init_rrefs=init_rrefs
+            )
+        else:
+            self.sharded_tensor2 = None
+        self.random_tensor2 = torch.nn.Parameter(torch.rand(2, 2))
+
+
+class MyShardedModel1(torch.nn.Module):
+    def __init__(
+        self,
+        spec=None,
+        group=None,
+        init_rrefs=True
+    ) -> None:
+        super().__init__()
+        if spec is not None:
+            self.sharded_tensor1 = sharded_tensor.rand(
+                spec, 10, 20, process_group=group, init_rrefs=init_rrefs
+            )
+        else:
+            self.sharded_tensor1 = None
+        self.random_tensor1 = torch.nn.Parameter(torch.rand(2, 2))
+        self.submodule = MyShardedModel2(spec, group, init_rrefs)
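
A small usage sketch for the spec helper (illustrative, not part of the commit): each requested sharding dim gets its own ChunkShardingSpec, with PLACEMENTS shuffled by a seeded RNG so a fresh process with the same seed reproduces the same placement order.

    # Illustrative usage sketch -- not one of the uploaded files.
    from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
        _chunk_sharding_specs_list_for_test,
    )

    specs = _chunk_sharding_specs_list_for_test([0, 1], seed=7)
    assert len(specs) == 2
    assert specs[0].dim == 0 and specs[1].dim == 1
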
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py ADDED
@@ -0,0 +1,40 @@
+import torch
+import torch.nn as nn
+
+from torch.distributed._shard.sharded_tensor import ShardedTensor
+
+
+class SimpleMegatronLM(nn.Module):
+    def __init__(self, linear_size, rank=None, dtype=torch.float32):
+        super().__init__()
+        self.fc1 = nn.Linear(*linear_size[0], dtype=dtype)
+        self.gelu = nn.GELU()
+        self.fc2 = nn.Linear(*linear_size[1], dtype=dtype)
+        if rank is not None:
+            self.fc1.cuda(rank)
+            self.fc2.cuda(rank)
+
+    def forward(self, inp):
+        return self.fc2(self.gelu(self.fc1(inp)))
+
+    def get_weights(self):
+        if isinstance(self.fc1.weight, ShardedTensor):
+            weight1 = self.fc1.weight.local_tensor()
+        else:
+            weight1 = self.fc1.weight
+
+        if isinstance(self.fc2.weight, ShardedTensor):
+            weight2 = self.fc2.weight.local_tensor()
+        else:
+            weight2 = self.fc2.weight
+
+        return (weight1, weight2)
+
+    def get_biases(self):
+        return (self.fc1.bias, self.fc2.bias)
+
+    def get_weight_grads(self):
+        return (self.fc1.weight.grad, self.fc2.weight.grad)
+
+    def get_bias_grads(self):
+        return (self.fc1.bias.grad, self.fc2.bias.grad)
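
For orientation, a sketch of how SimpleMegatronLM is shaped (the sizes are the editor's example, not from the commit): linear_size is a pair of (in_features, out_features) tuples, one per linear layer, and get_weights() falls back to the local shard whenever a weight has been swapped out for a ShardedTensor.

    # Illustrative usage sketch -- not one of the uploaded files.
    import torch
    from torch.testing._internal.distributed._shard.test_common import SimpleMegatronLM

    model = SimpleMegatronLM(linear_size=((8, 16), (16, 8)))  # fc1: 8->16, fc2: 16->8
    out = model(torch.randn(4, 8))
    assert out.shape == (4, 8)
    weight1, weight2 = model.get_weights()  # plain tensors here; local shards once sharded
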
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (208 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc ADDED
Binary file (10.4 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py ADDED
@@ -0,0 +1,358 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
+
+import itertools
+import sys
+from functools import wraps
+from typing import (
+    Any,
+    Callable,
+    Iterator,
+    Tuple,
+    Dict,
+    List,
+    Sequence,
+    TypeVar,
+    cast,
+)
+
+import torch
+import torch.distributed as dist
+
+from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec
+from torch.testing._internal.common_distributed import (
+    MultiProcessTestCase,
+    MultiThreadedTestCase,
+    TEST_SKIPS,
+    skip_if_lt_x_gpu,
+)
+
+from torch.distributed._tensor import (
+    DeviceMesh,
+    Shard,
+    Replicate,
+    distribute_tensor,
+)
+from torch.distributed._tensor.placement_types import Placement
+
+DEVICE_TYPE = "cuda" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else "cpu"
+PG_BACKEND = "nccl" if DEVICE_TYPE == "cuda" else "gloo"
+
+NUM_DEVICES = 4
+
+# We use this as a proxy for "multiple GPUs exist"
+if torch.cuda.is_available() and torch.cuda.device_count() > 1:
+    # when we actually have multiple GPUs, relax the requirement to smaller counts.
+    NUM_DEVICES = min(NUM_DEVICES, torch.cuda.device_count())
+
+T = TypeVar("T")
+
+
+class MLPModule(torch.nn.Module):
+    def __init__(self, device):
+        super().__init__()
+        torch.manual_seed(5)
+        self.net1 = torch.nn.Linear(10, 16, device=device)
+        self.relu = torch.nn.ReLU()
+        self.net2 = torch.nn.Linear(16, 10, device=device)
+
+    def forward(self, x):
+        return self.net2(self.relu(self.net1(x)))
+
+    def reset_parameters(self):
+        self.net1.reset_parameters()
+        self.net2.reset_parameters()
+
+
+def skip_unless_torch_gpu(method: T) -> T:
+    """
+    Test decorator which skips the test unless there's a GPU available to torch.
+
+    >>> # xdoctest: +SKIP
+    >>> @skip_unless_torch_gpu
+    >>> def test_some_method(self) -> None:
+    >>>   ...
+    """
+    # The builtin @skip_if_no_gpu relies on os.environ['WORLD_SIZE'] being set.
+    return cast(T, skip_if_lt_x_gpu(NUM_DEVICES)(method))
+
+
+class DTensorTestBase(MultiProcessTestCase):
+    @property
+    def world_size(self) -> int:
+        return NUM_DEVICES
+
+    @property
+    def backend(self) -> str:
+        return PG_BACKEND
+
+    def build_device_mesh(self) -> DeviceMesh:
+        return DeviceMesh(DEVICE_TYPE, list(range(NUM_DEVICES)))
+
+    def init_pg(self) -> None:
+        if "nccl" in self.backend and torch.cuda.device_count() < self.world_size:
+            sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
+
+        if self.backend not in ["nccl", "gloo", "mpi", "cpu:gloo,cuda:nccl"]:
+            raise RuntimeError(f"Backend {self.backend} not supported!")
+
+        dist.init_process_group(
+            backend=self.backend,
+            world_size=self.world_size,
+            rank=self.rank,  # pyre-ignore[16]
+            init_method=f"file://{self.file_name}",  # pyre-ignore[16]
+        )
+
+        # set device for nccl pg for collectives
+        if "nccl" in self.backend:
+            torch.cuda.set_device(self.rank)
+
+    def destroy_pg(self) -> None:
+        # Wait for all ranks to reach here before starting shutdown.
+        # FIXME dist.barrier deadlocks with multiple threads and NCCL: https://github.com/pytorch/pytorch/issues/95895
+        # dist.all_reduce(torch.zeros((1,), device="cuda" if torch.cuda.is_available() else "cpu"))
+        # FIXME can't use the above all_reduce as it causes hangs on bionic and focal. It hangs:
+        #   test_dtensor.py -- DTensorMeshTest.test_dtensor_device_mesh_device_conversion
+        dist.barrier()
+        dist.destroy_process_group()
+
+    def setUp(self) -> None:
+        super().setUp()
+        self._spawn_processes()
+
+    # pyre-ignore[2]:
+    def _test_op(self, mesh: DeviceMesh, op_call, *args, **kwargs) -> None:
+        out = op_call(*args, **kwargs)
+        dtc = DTensorConverter(mesh, args, kwargs)
+        for d_args, d_kwargs in dtc:
+            # pyre can't find assertTrue anymore?
+            self.assertEqual(dtc.successful(), True)
+            d_out = op_call(*d_args, **d_kwargs)
+            self.assertEqual(d_out.full_tensor(), out)
+
+    def run_subtests(self, *args, **kwargs):
+        return run_subtests(self, *args, **kwargs)
+
+
+TestFunc = Callable[[object], object]
+
+# wrapper to initialize comms (processgroup)
+def with_comms(func: TestFunc) -> TestFunc:
+    assert func is not None
+
+    @wraps(func)  # pyre-ignore[6]
+    def wrapper(
+        self, *args: Tuple[object], **kwargs: Dict[str, Any]  # type: ignore[misc]
+    ) -> None:
+        # if backend not specified, and cuda available, then use nccl, else gloo
+        if torch.cuda.is_available() and torch.cuda.device_count() >= self.world_size:
+            self.device_type = "cuda"
+        else:
+            self.device_type = "cpu"
+
+        self.init_pg()
+        func(self, *args, **kwargs)  # type: ignore[misc]
+        self.destroy_pg()
+
+    return wrapper
+
+
+def run_subtests(
+    cls_inst,
+    subtest_config: Dict[str, List[Any]],
+    test_fn: Callable,
+    *test_args,
+    **test_kwargs: Any,
+):
+    """
+    Runs a test function given by ``test_fn`` as a subtest according to the
+    configurations specified by ``subtest_config``. This amortizes the
+    costly setup overhead (including process spawn and initializing the
+    process group) over the subtests.
+
+    Args:
+        subtest_config (Dict[str, List[Any]]): A mapping from subtest
+            keyword argument name to a list of its possible values.
+        test_fn (Callable): A callable that runs the actual test.
+        test_args: Positional arguments to pass to ``test_fn``.
+        test_kwargs: Keyword arguments to pass to ``test_fn``.
+    """
+    # Convert the config mapping to a list to have a fixed order
+    subtest_config_items: List[Tuple[str, List[Any]]] = list(subtest_config.items())
+    subtest_config_keys: List[str] = [item[0] for item in subtest_config_items]
+    subtest_config_values: List[List[Any]] = [item[1] for item in subtest_config_items]
+    for values in itertools.product(*subtest_config_values):
+        # Map keyword to chosen value
+        subtest_kwargs = dict(zip(subtest_config_keys, values))
+        with cls_inst.subTest(**subtest_kwargs):
+            test_fn(*test_args, **test_kwargs, **subtest_kwargs)
+        dist.barrier()
+
+
+class DTensorOpTestBase(MultiThreadedTestCase):
+    @property
+    def world_size(self) -> int:
+        return NUM_DEVICES
+
+    @property
+    def device_type(self) -> str:
+        return DEVICE_TYPE
+
+    def build_device_mesh(self):
+        return DeviceMesh(self.device_type, list(range(self.world_size)))
+
+    def setUp(self) -> None:
+        super().setUp()
+        self._spawn_threads()
+
+
+# This is a class for converting args/kwargs of an op into distributed args/kwargs
+class DTensorConverter:
+    def __init__(
+        self,
+        mesh: DeviceMesh,
+        args: Tuple[object, ...],
+        kwargs: Dict[str, object],
+    ) -> None:
+        self.hit = 0
+        self.miss = 0
+        self.mesh = mesh
+        self.args = args
+        self.kwargs = kwargs
+        flatten_args, flatten_args_spec = tree_flatten(args)
+        flatten_kwargs, flatten_kwargs_spec = tree_flatten(kwargs)
+
+        self.flatten_args: List[object] = flatten_args
+        self.flatten_args_spec: TreeSpec = flatten_args_spec
+        self.flatten_kwargs: List[object] = flatten_kwargs
+        self.flatten_kwargs_spec: TreeSpec = flatten_kwargs_spec
+
+        choices_for_args = []
+        for arg in self.flatten_args:
+            if isinstance(arg, torch.Tensor):
+                choices_for_args.append(self.gen_sharding_choices_for_arg(arg))
+
+        for arg in self.flatten_kwargs:
+            if isinstance(arg, torch.Tensor):
+                choices_for_args.append(self.gen_sharding_choices_for_arg(arg))
+
+        self.sharding_combs: Iterator[Sequence[Placement]] = iter(
+            itertools.product(*choices_for_args)
+        )
+
+    def successful(self) -> bool:
+        return self.hit > 0 and self.miss == 0
+
+    def is_supported_tensor(self, t: torch.Tensor) -> bool:
+        # TODO: dist tensor need to support quantized and sparse
+        # tensors, quantized tensor might be relatively easy, but
+        # sparse tensor have special layouts that we need to possibly
+        # deal with, until we are clear about them, we don't officially
+        # support them.
+        return not any(
+            [
+                t.is_sparse_csr,
+                t.is_sparse,
+                t.is_mkldnn,
+                t.is_quantized,
+                t.is_nested,
+                torch._is_functional_tensor(t),
+                t.is_neg(),
+                t.is_conj(),
+                t.device.type in ("lazy", "meta"),
+                # We need a way to test if a tensor is batched but there
+                # is no official API to do it
+                # torch._C._is_batched(t),
+            ]
+        )
+
+    def gen_sharding_choices_for_arg(
+        self, arg: torch.Tensor
+    ) -> Sequence[Placement]:
+        mesh_size = self.mesh.size()
+        sharding_choices: List[Placement] = [Replicate()]
+        # c10d collective does not support bool tensor
+        # for bool tensor we treat it as replicated
+        if arg.dtype != torch.bool:
+            # only generating choices with: replicate, or sharding
+            # evenly on a dimension that could be sharded
+            sharding_choices = sharding_choices + [
+                Shard(i)
+                for i, s in enumerate(arg.shape)
+                if s > 1 and s % mesh_size == 0
+            ]
+        # TODO: add multi mesh choices
+        # all_choices = itertools.product(
+        #     *(self.mesh.ndim * [sharding_choices])
+        # )
+        return sharding_choices
+
+    def __iter__(self) -> "DTensorConverter":
+        return self
+
+    def __next__(self) -> Tuple[Tuple[object, ...], Dict[str, object]]:
+        try:
+            next_sharding_choices = next(self.sharding_combs)
+            idx = 0
+
+            new_args: List[object] = []
+            for arg in self.flatten_args:
+                if isinstance(arg, torch.Tensor):
+                    new_args.append(
+                        self.to_dist_tensor(
+                            arg, self.mesh, [next_sharding_choices[idx]]
+                        )
+                    )
+                    idx += 1
+                else:
+                    new_args.append(arg)
+
+            new_kwargs: List[object] = []
+            for arg in self.flatten_kwargs:
+                if isinstance(arg, torch.Tensor):
+                    new_kwargs.append(
+                        self.to_dist_tensor(
+                            arg, self.mesh, [next_sharding_choices[idx]]
+                        )
+                    )
+                    idx += 1
+                else:
+                    new_kwargs.append(arg)
+
+            return (
+                tree_unflatten(new_args, self.flatten_args_spec),
+                tree_unflatten(new_kwargs, self.flatten_kwargs_spec),
+            )
+        except StopIteration as e:
+            raise StopIteration from e
+
+    def to_dist_tensor(
+        self, t: torch.Tensor, mesh: DeviceMesh, placements: List[Placement]
+    ) -> torch.Tensor:
+        if type(t) is torch.Tensor or type(t) is torch.nn.Parameter:
+            if self.is_supported_tensor(t):
+                self.hit += 1
+                if t.ndim == 0:
+                    # scalar tensor by default will be replicated
+                    r = distribute_tensor(t, mesh, [Replicate()] * mesh.ndim)
+                else:
+                    # distribute non-scalar tensors
+                    r = distribute_tensor(t, mesh, placements)
+                if type(t) is torch.nn.Parameter:
+                    r = torch.nn.Parameter(  # type: ignore[assignment]
+                        r, requires_grad=r.requires_grad
+                    )
+                return r
+            else:
+                self.miss += 1
+                return t
+        elif torch.overrides.is_tensor_like(t):
+            # Blindly converting tensor subclasses to dist tensor can cause
+            # unpredictable problems, we explicitly disable this conversion
+            # for now (i.e. we don't support DTensor holding tensor subclass
+            # until there's a strong reason later).
+            self.miss += 1
+            return t
+        else:
+            raise RuntimeError(
+                f"Trying to convert to DTensor, but got {type(t)}"
+            )
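
To show how these pieces fit together (an editor's sketch with an illustrative test name, not part of the commit): a DTensor test subclasses DTensorTestBase, wraps each test in with_comms so the process group exists, and can hand an op to _test_op, which uses DTensorConverter to re-run the call under every generated sharding of its tensor arguments and compare full_tensor() output against the eager result.

    # Illustrative usage sketch -- not one of the uploaded files.
    import torch
    from torch.testing._internal.distributed._tensor.common_dtensor import (
        DTensorTestBase,
        with_comms,
    )

    class MyDTensorOpTest(DTensorTestBase):
        @with_comms
        def test_add(self):
            mesh = self.build_device_mesh()
            a = torch.randn(4, 8)
            b = torch.randn(4, 8)
            # torch.add runs once eagerly, then once per sharding combination
            # (Replicate / Shard(i)) of a and b generated by DTensorConverter.
            self._test_op(mesh, torch.add, a, b)
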
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py ADDED
@@ -0,0 +1,42 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
+
+import os
+import shutil
+import tempfile
+from functools import wraps
+from typing import Any, Callable, Dict, Optional, Tuple
+
+import torch.distributed as dist
+
+
+def with_temp_dir(
+    func: Optional[Callable] = None,
+) -> Optional[Callable]:
+    """
+    Wrapper to initialize temp directory for distributed checkpoint.
+    """
+    assert func is not None
+
+    @wraps(func)
+    def wrapper(self, *args: Tuple[object], **kwargs: Dict[str, Any]) -> None:
+        # Only create temp_dir when rank is 0
+        if dist.get_rank() == 0:
+            temp_dir = tempfile.mkdtemp()
+            print(f"Using temp directory: {temp_dir}")
+        else:
+            temp_dir = ""
+        object_list = [temp_dir]
+
+        # Broadcast temp_dir to all the other ranks
+        os.sync()
+        dist.broadcast_object_list(object_list)
+        self.temp_dir = object_list[0]
+        os.sync()
+
+        try:
+            func(self, *args, **kwargs)
+        finally:
+            if dist.get_rank() == 0:
+                shutil.rmtree(self.temp_dir, ignore_errors=True)
+
+    return wrapper
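
A usage sketch for the decorator (illustrative; the test class and state dict are the editor's own, and the checkpoint call is only one plausible consumer): with_temp_dir assumes the default process group is already initialized, creates the directory on rank 0, and broadcasts the path so every rank sees the same self.temp_dir, which makes it a natural fit underneath a with_comms-style decorator.

    # Illustrative usage sketch -- not one of the uploaded files.
    import torch
    import torch.distributed.checkpoint as dcp
    from torch.testing._internal.distributed._tensor.common_dtensor import (
        DTensorTestBase,
        with_comms,
    )
    from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir

    class MyCheckpointTest(DTensorTestBase):
        @with_comms      # applied last, so the process group exists first ...
        @with_temp_dir   # ... then the shared temp dir is created and broadcast
        def test_save(self):
            dcp.save_state_dict(
                state_dict={"step": torch.tensor(1)},
                storage_writer=dcp.FileSystemWriter(self.temp_dir),
            )
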
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py ADDED
@@ -0,0 +1,111 @@
+# Owner(s): ["oncall: distributed"]
+
+import copy
+from itertools import chain
+from typing import Any, Dict
+
+import torch
+import torch.nn as nn
+
+from torch.distributed._sharded_tensor import ShardedTensor
+from torch.distributed._tensor import DTensor
+from torch.distributed.checkpoint._state_dict_utils import _gather_state_dict
+from torch.distributed.checkpoint.state_dict import (
+    PG,
+    set_state_dict,
+    STATE,
+    StateDictOptions,
+)
+
+
+class VerifyStateDictMixin:
+    def _compare_tensor(self, orig_tensor, dist_tensor):
+        if isinstance(dist_tensor, (DTensor, ShardedTensor)):
+            dist_tensor = _gather_state_dict({"mykey": dist_tensor}).pop("mykey")
+        self.assertTrue(isinstance(dist_tensor, torch.Tensor))
+        self.assertTrue(torch.allclose(orig_tensor, dist_tensor))
+
+    def _verify_msd(
+        self,
+        msd: Dict[str, Any],
+        dist_msd: Dict[str, Any],
+        options: StateDictOptions = StateDictOptions(),
+    ) -> None:
+        if not options.ignore_frozen_params:
+            self.assertEqual(len(msd), len(dist_msd))
+        for fqn, param in msd.items():
+            dist_param = dist_msd.get(fqn, None)
+            if not options.ignore_frozen_params:
+                self.assertIsNotNone(dist_param)
+                self._compare_tensor(param, dist_param)
+            elif dist_param is None:
+                self.assertFalse(param.requires_grad)
+
+    def _verify_osd(
+        self,
+        model: nn.Module,
+        optim: torch.optim.Optimizer,
+        osd: Dict[str, Any],
+        dist_osd: Dict[str, Any],
+    ) -> None:
+        params = list(chain.from_iterable(g["params"] for g in optim.param_groups))
+        param_pid_mapping = dict(zip(params, range(len(params))))
+        fqn_pid_mapping = {}
+        for fqn, param in model.named_parameters():
+            pid = param_pid_mapping[param]
+            fqn_pid_mapping[fqn] = pid
+            fqn_pid_mapping[pid] = fqn
+        # Check optimizer_state_dict state
+
+        self.assertEqual(len(osd[STATE]), len(dist_osd[STATE]))
+        for pid, states in osd[STATE].items():
+            fqn = fqn_pid_mapping[pid]
+            dist_states = dist_osd[STATE].get(fqn, None)
+            self.assertIsNotNone(dist_states, fqn)
+            self.assertEqual(len(states), len(dist_states))
+            for key, state in states.items():
+                dist_state = states.get(key, None)
+                self.assertIsNotNone(dist_state)
+                self._compare_tensor(state, dist_state)
+
+        # Check optimizer_state_dict param_group
+        old_dist_osd_pg = dist_osd[PG]
+        if len(osd[PG]) != len(dist_osd[PG]):
+            self.assertTrue(len(dist_osd[PG]) > len(osd[PG]))
+            new_pg = copy.deepcopy(dist_osd[PG][0])
+            new_pg["params"] = []
+            for dist_group in dist_osd[PG]:
+                new_pg["params"].extend(dist_group["params"])
+            dist_osd[PG] = [new_pg]
+
+        self.assertEqual(len(osd[PG]), len(dist_osd[PG]))
+        for group, dist_group in zip(osd[PG], dist_osd[PG]):
+            self.assertEqual(len(group), len(dist_group))
+            for key, value in group.items():
+                # Below doesn't work because param_groups can have None
+                # values.
+                # dist_value = dist_group.get(key, None)
+                # self.assertIsNotNone(dist_value, (dist_group, group))
+                dist_value = dist_group[key]
+                if key == "params":
+                    fqns = [fqn_pid_mapping[pid] for pid in value]
+                    self.assertEqual(sorted(fqns), sorted(dist_value))
+                else:
+                    self.assertEqual(value, dist_value)
+        dist_osd[PG] = old_dist_osd_pg
+
+    def _verify_osd_by_load(
+        self,
+        model: nn.Module,
+        optim: torch.optim.Optimizer,
+        new_optim: torch.optim.Optimizer,
+        dist_osd: Dict[str, Any],
+    ) -> None:
+        new_dist_osd = _gather_state_dict(dist_osd)
+        set_state_dict(
+            model,
+            optimizers=new_optim,
+            model_state_dict={},
+            optim_state_dict=new_dist_osd,
+        )
+        self.assertEqual(optim.state_dict(), new_optim.state_dict())
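
For orientation (an editor's sketch; the test class is hypothetical): the mixin only contributes the _verify_* helpers and expects to be mixed into a unittest-style distributed test case that supplies the assert* methods. For a plain, unsharded module the "distributed" model state dict is just the regular one, so the check reduces to an element-wise comparison.

    # Illustrative usage sketch -- not one of the uploaded files.
    import torch.nn as nn
    from torch.testing._internal.distributed._tensor.common_dtensor import (
        DTensorTestBase,
        with_comms,
    )
    from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin

    class MyStateDictTest(DTensorTestBase, VerifyStateDictMixin):
        @with_comms
        def test_identity(self):
            model = nn.Linear(4, 4)
            # Comparing a state dict against itself exercises the per-key
            # tensor comparison path in _verify_msd.
            self._verify_msd(model.state_dict(), dict(model.state_dict()))
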
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py ADDED
@@ -0,0 +1,733 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ import contextlib
4
+ import enum
5
+ import logging
6
+ import os
7
+ import threading
8
+ from typing import NamedTuple
9
+
10
+ import torch
11
+ import torch.distributed as dist
12
+ import torch.distributed.autograd as dist_autograd
13
+ import torch.nn as nn
14
+ from torch.distributed import rpc
15
+ from torch.distributed.nn import RemoteModule
16
+ from torch.nn.parallel import DistributedDataParallel
17
+ from torch.testing._internal.common_distributed import (
18
+ requires_gloo,
19
+ requires_nccl,
20
+ skip_if_lt_x_gpu,
21
+ skip_if_rocm,
22
+ )
23
+ from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init
24
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
25
+ RpcAgentTestFixture,
26
+ )
27
+
28
+
29
+ NUM_EM_ROW = 2
30
+ D_SPARSE = 3
31
+ D_DENSE = 2
32
+ D_HID = 3
33
+ D_OUT = 1
34
+ NUM_TRAINERS = 4
35
+ # Trainers + the master + the remote worker
36
+ WORLD_SIZE = NUM_TRAINERS + 2
37
+ TRAINER_RANKS = list(range(NUM_TRAINERS))
38
+ REMOTE_WORKER_RANK = TRAINER_RANKS[-1] + 1
39
+ MASTER_RANK = REMOTE_WORKER_RANK + 1
40
+
41
+
42
+ class DdpMode(enum.Enum):
43
+ # Don't apply DDP
44
+ NONE = enum.auto()
45
+ # Apply DDP to the top level nn.Module
46
+ OUTSIDE = enum.auto()
47
+ # Embed DDP inside the top level nn.Module
48
+ INSIDE = enum.auto()
49
+
50
+
51
+ def init_logger():
52
+ logger = logging.getLogger(__name__)
53
+ level = logging.DEBUG if "debug" in os.environ else logging.INFO
54
+ logger.setLevel(level)
55
+ console = logging.StreamHandler()
56
+ formatter = logging.Formatter(
57
+ "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s"
58
+ )
59
+ console.setFormatter(formatter)
60
+ console.setLevel(level)
61
+ # add the handlers to the logger
62
+ logger.addHandler(console)
63
+ logger.propagate = False
64
+ return logger
65
+
66
+
67
+ gLogger = init_logger()
68
+
69
+
70
+ class FeatureSet(NamedTuple):
71
+ """ A feature set has 2 types of features"""
72
+
73
+ dense_features: torch.Tensor
74
+ sparse_features: torch.LongTensor
75
+ values: torch.Tensor
76
+
77
+
78
+ def _call_method(method, rref, *args, **kwargs):
79
+ return method(rref.local_value(), *args, **kwargs)
80
+
81
+
82
+ def _remote_method(method, rref, *args, **kwargs):
83
+ args_tup = tuple([method, rref] + list(args))
84
+ return rpc.rpc_sync(rref.owner(), _call_method, args=args_tup, kwargs=kwargs)
85
+
86
+
87
+ def _remote_method_async(method, rref, *args, **kwargs):
88
+ args_tup = tuple([method, rref] + list(args))
89
+ return rpc.rpc_async(rref.owner(), _call_method, args=args_tup, kwargs=kwargs)
90
+
91
+
92
+ class RemoteEM(nn.Module):
93
+ def __init__(self, num_embeddings: int, embedding_dim: int):
94
+ gLogger.info("Initing RemoteEM with %s %s", num_embeddings, embedding_dim)
95
+ super().__init__()
96
+ init_em = [0.5] * embedding_dim
97
+ self.em = nn.EmbeddingBag(
98
+ num_embeddings,
99
+ embedding_dim,
100
+ _weight=torch.tensor([init_em] * num_embeddings),
101
+ )
102
+
103
+ def forward(self, input: torch.Tensor):
104
+ gLogger.debug("Running RemoteEM.forward() on: %s", input)
105
+ return self.em(input, offsets=torch.LongTensor(range(input.shape[0])))
106
+
107
+
108
+ # Return a linear module with predefined parameters.
109
+ def getLinear(d_in, d_out):
110
+ l = nn.Linear(d_in, d_out, bias=False)
111
+ w = torch.ones((d_out, d_in))
112
+ w[0][0] = -1
113
+ w.requires_grad_()
114
+ l.weight.data = w
115
+ return l
116
+
117
+
118
+ class RemoteNet(nn.Module):
119
+ def __init__(self, d_in: int, d_out: int):
120
+ gLogger.info("Initing RemoteNet with %s %s", d_in, d_out)
121
+ super().__init__()
122
+ self.fc = getLinear(d_in, d_out)
123
+ self.relu = nn.ReLU()
124
+
125
+ def forward(self, input: torch.Tensor):
126
+ gLogger.debug("Running RemoteNet.forward() on: %s", input)
127
+ return self.relu(self.fc(input))
128
+
129
+
130
+ class HybridModel(nn.Module):
131
+ def __init__(
132
+ self,
133
+ remote_em_rref: rpc.RRef,
134
+ remote_net_rref: rpc.RRef,
135
+ process_group_for_ddp: dist.ProcessGroup = None,
136
+ ):
137
+ super().__init__()
138
+ self.remote_em_rref = remote_em_rref
139
+ self.remote_net_rref = remote_net_rref
140
+ self.fc1 = getLinear(D_DENSE, D_DENSE)
141
+ self.fc2 = getLinear(D_HID, D_OUT)
142
+
143
+ self.non_ddp_params = tuple(self.fc1.parameters()) + tuple(
144
+ self.fc2.parameters()
145
+ )
146
+ self.ddp_params = ()
147
+
148
+ if process_group_for_ddp is not None:
149
+ self.non_ddp_params, self.ddp_params = (
150
+ tuple(self.fc1.parameters()),
151
+ tuple(self.fc2.parameters()),
152
+ )
153
+ gLogger.info("Use DDP for the second local net.")
154
+ self.fc2 = DistributedDataParallel(
155
+ self.fc2, check_reduction=True, process_group=process_group_for_ddp
156
+ )
157
+
158
+ gLogger.info(
159
+ "HybridModel has %s groups of parameters.", len(list(self.parameters()))
160
+ )
161
+
162
+ def forward(self, input: FeatureSet):
163
+ gLogger.debug("Running HybridModel.forward on %s", input)
164
+ sparse = _remote_method(
165
+ RemoteEM.forward, self.remote_em_rref, input.sparse_features
166
+ )
167
+ # The same size of mini batch.
168
+ assert sparse.shape[0] == input.dense_features.shape[0]
169
+ dense = self.fc1(input.dense_features)
170
+ x = torch.cat((dense, sparse), 1)
171
+ gLogger.debug("Concatenated feature: %s", x)
172
+ x = _remote_method(RemoteNet.forward, self.remote_net_rref, x)
173
+ return self.fc2(x)
174
+
175
+
176
+ class Trainer:
177
+ def __init__(
178
+ self,
179
+ remote_em_rref: rpc.RRef,
180
+ remote_net_rref: rpc.RRef,
181
+ ddp_mode: DdpMode,
182
+ rank: int,
183
+ ):
184
+ self.rank = rank
185
+ self.trainer_group = (
186
+ dist.new_group(TRAINER_RANKS)
187
+ if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE)
188
+ else None
189
+ )
190
+ self.remote_em_rref = remote_em_rref
191
+ self.remote_net_rref = remote_net_rref
192
+ self.hybrid_module = HybridModel(
193
+ self.remote_em_rref,
194
+ self.remote_net_rref,
195
+ self.trainer_group if ddp_mode in (DdpMode.INSIDE,) else None,
196
+ )
197
+ self.ddp_params, self.non_ddp_params = (
198
+ self.hybrid_module.ddp_params,
199
+ self.hybrid_module.non_ddp_params,
200
+ )
201
+ if ddp_mode == DdpMode.OUTSIDE:
202
+ gLogger.info("Wrapping the whole hybrid module into DDP.")
203
+ self.ddp_params += self.non_ddp_params
204
+ self.non_ddp_params = ()
205
+ self.hybrid_module = DistributedDataParallel(
206
+ self.hybrid_module,
207
+ check_reduction=True,
208
+ process_group=self.trainer_group,
209
+ )
210
+ gLogger.info(
211
+ "Succeeded in creating a HybridModel instance with "
212
+ "%s ddp params and %s other local params.",
213
+ len(self.ddp_params), len(self.non_ddp_params)
214
+ )
215
+
216
+ def destroy_pg(self):
217
+ if self.trainer_group:
218
+ dist.destroy_process_group(self.trainer_group)
219
+
220
+ def train_batch(
221
+ self,
222
+ mini_batch: FeatureSet,
223
+ trainer_has_less_inputs: bool,
224
+ simulate_uneven_inputs: bool,
225
+ ):
226
+ grads_dict = None
227
+
228
+ if not simulate_uneven_inputs:
229
+ input_batches = [mini_batch]
230
+ else:
231
+ # Split into microbatches, and trim to simulate uneven inputs.
232
+ dense_features = mini_batch.dense_features
233
+ sparse_features = mini_batch.sparse_features
234
+ values = mini_batch.values
235
+
236
+ dense_microbatch = torch.split(dense_features, 2)
237
+ sparse_microbatch = torch.split(sparse_features, 2)
238
+ values_microbatch = torch.split(values, 2)
239
+ batches = []
240
+ for d, s, v in zip(dense_microbatch, sparse_microbatch, values_microbatch):
241
+ feature_set = FeatureSet(dense_features=d, sparse_features=s, values=v)
242
+ batches.append(feature_set)
243
+
244
+ if trainer_has_less_inputs:
245
+ input_batches = batches[: len(batches) // 2]
246
+ gLogger.info(
247
+ "Trainer reduced input patches from %s "
248
+ "to %s to simulate uneven inputs.",
249
+ len(batches), len(input_batches)
250
+ )
251
+ else:
252
+ input_batches = batches
253
+
254
+ with self.hybrid_module.join() if simulate_uneven_inputs else contextlib.nullcontext():
255
+ for b in input_batches:
256
+ with dist_autograd.context() as context_id:
257
+ output = self.hybrid_module.forward(b)
258
+ loss = (output * mini_batch.values).sum()
259
+ dist_autograd.backward(context_id, [loss])
260
+ grads_dict = dist_autograd.get_gradients(context_id)
261
+ gLogger.info(
262
+ "Loss is %s for mini batch: %s. "
263
+ "Grads dict has %s entries: %s", loss, mini_batch, len(grads_dict), grads_dict
264
+ )
265
+ return (
266
+ tuple(grads_dict[param] for param in self.ddp_params),
267
+ tuple(grads_dict[param] for param in self.non_ddp_params),
268
+ )
269
+
270
+
271
+ def get_training_examples():
272
+ n = 16
273
+ training_examples = FeatureSet(
274
+ dense_features=torch.zeros((n, D_DENSE)),
275
+ sparse_features=torch.zeros(n, dtype=torch.long),
276
+ values=torch.zeros(n),
277
+ )
278
+ idx = 0
279
+ # Every example has another one that has exactly the same features but an
280
+ # opposite value. Therefore, their grads cancel each other in all-reduce.
281
+ for value in (-1, 1):
282
+ for x in (-1.0 * value, 1.0 * value):
283
+ for y in (1.0 * value, -1.0 * value):
284
+ for z in (0, 1):
285
+ training_examples.dense_features[idx, :] = torch.tensor((x, y))
286
+ training_examples.sparse_features[idx] = z
287
+ training_examples.values[idx] = value
288
+ idx += 1
289
+
290
+ # Split the examples among NUM_TRAINERS trainers
291
+ assert 0 == (n % NUM_TRAINERS)
292
+ examples_per_trainer = int(n / NUM_TRAINERS)
293
+ return [
294
+ FeatureSet(
295
+ dense_features=training_examples.dense_features[
296
+ start : start + examples_per_trainer, :
297
+ ],
298
+ sparse_features=training_examples.sparse_features[
299
+ start : start + examples_per_trainer
300
+ ],
301
+ values=training_examples.values[start : start + examples_per_trainer],
302
+ )
303
+ for start in range(0, n, examples_per_trainer)
304
+ ]
305
+
306
+
307
+ shutdown_signal = threading.Condition()
308
+
309
+
310
+ def set_shutdown_signal():
311
+ global shutdown_signal
312
+ with shutdown_signal:
313
+ shutdown_signal.notify()
314
+
315
+
316
+ class DdpUnderDistAutogradTest(RpcAgentTestFixture):
317
+ @property
318
+ def world_size(self) -> int:
319
+ return WORLD_SIZE
320
+
321
+ def remote_worker_name(self) -> str:
322
+ # The name has to be consistent with that in 'dist_init' decorator.
323
+ return f"worker{REMOTE_WORKER_RANK}"
324
+
325
+ def trainer_name(self, rank):
326
+ # The name has to be consistent with that in 'dist_init' decorator.
327
+ return f"worker{rank}"
328
+
329
+ def _remote_worker_process(self, ddp_mode):
330
+ gLogger.info("The remote worker is running.")
331
+ dist.init_process_group(
332
+ backend="gloo",
333
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
334
+ world_size=self.world_size,
335
+ rank=self.rank,
336
+ )
337
+
338
+ if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE):
339
+ # new_group needs to be called on ranks.
340
+ dist.new_group(TRAINER_RANKS)
341
+
342
+ global shutdown_signal
343
+ with shutdown_signal:
344
+ shutdown_signal.wait()
345
+ gLogger.info("Exiting remote worker.")
346
+ dist.destroy_process_group()
347
+
348
+ def _trainer_process(self, rank: int):
349
+ gLogger.info("Running the trainer #%s...", rank)
350
+ gLogger.info(
351
+ "Initing trainer process group by trainer #%s with ranks %s", rank, TRAINER_RANKS
352
+ )
353
+ dist.init_process_group(
354
+ backend="gloo",
355
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
356
+ world_size=self.world_size,
357
+ rank=self.rank,
358
+ )
359
+
360
+ gLogger.info("Waiting for shutdown signal on trainer #%s...", rank)
361
+
362
+ global shutdown_signal
363
+ with shutdown_signal:
364
+ shutdown_signal.wait()
365
+ gLogger.info("Exiting the trainer #%s...", rank)
366
+ dist.destroy_process_group()
367
+
368
+ def _master_process(self, ddp_mode: DdpMode, simulate_uneven_inputs: bool):
369
+ gLogger.info("Running the master process...")
370
+ dist.init_process_group(
371
+ backend="gloo",
372
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
373
+ world_size=self.world_size,
374
+ rank=self.rank,
375
+ )
376
+
377
+ remote_em_rref = rpc.remote(
378
+ self.remote_worker_name(), RemoteEM, args=(NUM_EM_ROW, D_SPARSE)
379
+ )
380
+ remote_net_rref = rpc.remote(
381
+ self.remote_worker_name(), RemoteNet, args=(D_DENSE + D_SPARSE, D_HID)
382
+ )
383
+ gLogger.info("Created remote rrefs on master")
384
+ self.do_test_on_master(
385
+ ddp_mode, simulate_uneven_inputs, remote_em_rref, remote_net_rref
386
+ )
387
+
388
+ def do_test_on_master(
389
+ self,
390
+ ddp_mode: DdpMode,
391
+ simulate_uneven_inputs: bool,
392
+ remote_em_rref: rpc.RRef,
393
+ remote_net_rref: rpc.RRef,
394
+ ):
395
+ if simulate_uneven_inputs:
396
+ gLogger.info(
397
+ "Running DDP + RPC test with simulating uneven inputs across trainers."
398
+ )
399
+
400
+ trainer_rrefs = []
401
+ for rank in TRAINER_RANKS:
402
+ trainer = self.trainer_name(rank)
403
+ trainer_rrefs.append(
404
+ rpc.remote(
405
+ trainer,
406
+ Trainer,
407
+ args=(remote_em_rref, remote_net_rref, ddp_mode, rank),
408
+ )
409
+ )
410
+
411
+ if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE):
412
+ # new_group needs to be called on ranks.
413
+ dist.new_group(TRAINER_RANKS)
414
+
415
+ training_examples = get_training_examples()
416
+ for _ in range(3):
417
+ futures = []
418
+ num_trainers = len(trainer_rrefs)
419
+ for idx, trainer_rref in enumerate(trainer_rrefs):
420
+ # Half the trainers will deplete inputs earlier than the rest.
421
+ trainer_has_less_inputs = (
422
+ simulate_uneven_inputs and idx < num_trainers // 2
423
+ )
424
+ futures.append(
425
+ _remote_method_async(
426
+ Trainer.train_batch,
427
+ trainer_rref,
428
+ training_examples[idx],
429
+ trainer_has_less_inputs,
430
+ simulate_uneven_inputs,
431
+ )
432
+ )
433
+
434
+ for future in futures:
435
+ ddp_grads, non_ddp_grads = future.wait()
436
+ # When there are uneven inputs, it is not necessary that grads
437
+ # cancel each other out, since some trainers contribute 0 grad.
438
+ if not simulate_uneven_inputs:
439
+ for grad in ddp_grads:
440
+ self.assertEqual(
441
+ grad,
442
+ torch.zeros_like(grad),
443
+ msg=f"The grad for any ddp parameter should be zeros, because "
444
+ "the training examples' grads cancel each other. Received "
445
+ f"gradient {grad}",
446
+ )
447
+ for grad in non_ddp_grads:
448
+ self.assertNotEqual(
449
+ grad,
450
+ torch.zeros_like(grad),
451
+ msg="The grad for any non-ddp parameter shouldn't be zeros",
452
+ )
453
+
454
+ # Destroy process groups
455
+ for idx, trainer_rref in enumerate(trainer_rrefs):
456
+ _remote_method_async(Trainer.destroy_pg, trainer_rref).wait()
457
+
458
+ # Send shutdown signals.
459
+ for rank in TRAINER_RANKS:
460
+ trainer = self.trainer_name(rank)
461
+ rpc.rpc_sync(trainer, set_shutdown_signal, args=())
462
+
463
+ rpc.rpc_sync(self.remote_worker_name(), set_shutdown_signal, args=())
464
+
465
+ def _do_test(self, ddp_mode, simulate_uneven_inputs=False):
466
+ if self.rank == MASTER_RANK:
467
+ self._master_process(ddp_mode, simulate_uneven_inputs)
468
+ elif self.rank == REMOTE_WORKER_RANK:
469
+ self._remote_worker_process(ddp_mode)
470
+ elif self.rank in TRAINER_RANKS:
471
+ self._trainer_process(self.rank)
472
+ else:
473
+ raise RuntimeError(f"Unknown process rank: {self.rank}")
474
+
475
+ @requires_gloo()
476
+ @dist_init
477
+ def test_backward_no_ddp(self):
478
+ self._do_test(DdpMode.NONE)
479
+
480
+ @requires_gloo()
481
+ @dist_init
482
+ def test_backward_ddp_outside(self):
483
+ self._do_test(DdpMode.OUTSIDE)
484
+
485
+ @requires_gloo()
486
+ @dist_init
487
+ def test_backward_ddp_outside_uneven_inputs(self):
488
+ self._do_test(DdpMode.OUTSIDE, simulate_uneven_inputs=True)
489
+
490
+ @requires_gloo()
491
+ @dist_init
492
+ def test_backward_ddp_inside(self):
493
+ self._do_test(DdpMode.INSIDE)
494
+
495
+
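The master-side orchestration above follows a simple fan-out/join pattern: kick off one asynchronous RPC per trainer, collect the returned futures, and wait on all of them before checking gradients. The sketch below illustrates that pattern with plain torch.distributed.rpc; it assumes rpc.init_rpc has already been called on every worker, and train_one_batch is a hypothetical stand-in for the Trainer.train_batch call made through _remote_method_async above.

import torch
import torch.distributed.rpc as rpc

def train_one_batch(batch):
    # Hypothetical trainer-side handler; the real tests invoke Trainer.train_batch on an RRef instead.
    return batch.sum()

def drive_trainers(trainer_names, batches):
    # Fan out: one async RPC per trainer; each call returns a Future immediately.
    futures = [
        rpc.rpc_async(name, train_one_batch, args=(batch,))
        for name, batch in zip(trainer_names, batches)
    ]
    # Join: block until every trainer has finished its batch.
    return [fut.wait() for fut in futures]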
496
+ # Common utils for both CPU and CUDA test suites
497
+ class CommonDdpComparisonTest(RpcAgentTestFixture):
498
+ @property
499
+ def world_size(self) -> int:
500
+ return NUM_TRAINERS
501
+
502
+ def trainer_name(self, rank):
503
+ # The name has to be consistent with the one in the 'dist_init' decorator.
504
+ return f"worker{rank}"
505
+
506
+ @staticmethod
507
+ def get_remote_grads(rref, context_id):
508
+ return dist_autograd.get_gradients(context_id)[rref.local_value().weight]
509
+
510
+
511
+ class DdpComparisonTest(CommonDdpComparisonTest):
512
+ def _run_test_ddp_comparision(self, simulate_uneven_inputs=False):
513
+ gLogger.info("Running trainer rank: %s", self.rank)
514
+ # Each trainer uses a different random seed. Otherwise, they are going
515
+ # to have exactly the same initial model parameters, input, and
516
+ # therefore grads. That means the grads will be the same before and
517
+ # after DDP's all-reduce.
518
+ torch.manual_seed(self.rank)
519
+ dist.init_process_group(
520
+ backend="gloo",
521
+ # Postfix file_name with "pg" since file_name is also used by RPC agent
522
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=f"{self.file_name}_pg"),
523
+ world_size=self.world_size,
524
+ rank=self.rank,
525
+ )
526
+ net = nn.Linear(2, 3)
527
+ ddp_net = DistributedDataParallel(net)
528
+
529
+ # Odd ranks join early if simulate_uneven_inputs.
530
+ num_inputs = 1
531
+ if simulate_uneven_inputs:
532
+ if self.rank % 2 == 0:
533
+ num_inputs += 2
534
+ inputs_list = [torch.rand((3, 2)) for _ in range(num_inputs)]
535
+
536
+ if simulate_uneven_inputs:
537
+ gLogger.info("Rank %s training with %s inputs.", self.rank, len(inputs_list))
538
+
539
+ # Use distributed autograd. The gradients will be in RPC context map.
540
+ grads_dict = {}
541
+ with ddp_net.join(simulate_uneven_inputs):
542
+ for i, inputs in enumerate(inputs_list):
543
+ with dist_autograd.context() as context_id:
544
+ loss = ddp_net(inputs).norm()
545
+ dist_autograd.backward(context_id, [loss])
546
+ grads_dict = dist_autograd.get_gradients(context_id)
547
+ gLogger.info("Trainer #%s got grad dict: %s", self.rank, grads_dict)
548
+
549
+ # Use local autograd. The gradients will be in each variable's '.grad'.
550
+ ddp_net.zero_grad()
551
+ loss = ddp_net(inputs).norm()
552
+ loss.backward()
553
+
554
+ # The gradients should be the same
555
+ for param in net.parameters():
556
+ self.assertTrue(
557
+ param in grads_dict,
558
+ msg=f"Param {param} is not in dist_auto grad dict {grads_dict} for iteration {i}",
559
+ )
560
+ self.assertEqual(
561
+ grads_dict[param],
562
+ param.grad,
563
+ msg=f"The grads for param {param} are different under local "
564
+ f"and dist autograd: {param.grad} \n---\n {grads_dict[param]} for iteration {i}",
565
+ )
566
+ dist.destroy_process_group()
567
+
568
+ @requires_gloo()
569
+ @dist_init
570
+ def test_ddp_comparison(self):
571
+ self._run_test_ddp_comparision()
572
+
573
+ @requires_gloo()
574
+ @dist_init
575
+ def test_ddp_comparison_uneven_inputs(self):
576
+ # test with simulating uneven inputs in DDP
577
+ self._run_test_ddp_comparision(simulate_uneven_inputs=True)
578
+
579
+ @requires_gloo()
580
+ @dist_init
581
+ def test_ddp_dist_autograd_sparse_grads(self):
582
+ # Each trainer uses a different random seed. Otherwise, they are going
583
+ # to have exactly the same initial model parameters, input, and
584
+ # therefore grads. That means the grads will be the same before and
585
+ # after DDP's all-reduce.
586
+ torch.manual_seed(self.rank)
587
+ dist.init_process_group(
588
+ backend="gloo",
589
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
590
+ world_size=self.world_size,
591
+ rank=self.rank,
592
+ )
593
+
594
+ model = nn.EmbeddingBag(10, 3, sparse=True)
595
+ ddp_model = DistributedDataParallel(model)
596
+
597
+ # Different inputs for each rank.
598
+ input = torch.LongTensor(10).random_(0, 10)
599
+ offsets = torch.LongTensor([0, 4])
600
+
601
+ # Run local.
602
+ loss = ddp_model(input, offsets).sum()
603
+ loss.backward()
604
+
605
+ with dist_autograd.context() as context_id:
606
+ loss = ddp_model(input, offsets).sum()
607
+ dist_autograd.backward(context_id, [loss])
608
+ grads_dict = dist_autograd.get_gradients(context_id)
609
+ self.assertEqual(1, len(grads_dict))
610
+ self.assertEqual(model.weight.grad, grads_dict[model.weight])
611
+
612
+ @requires_gloo()
613
+ @dist_init
614
+ def test_ddp_dist_autograd_local_vs_remote(self):
615
+ # Each trainer uses a different random seed. Otherwise, they are going
616
+ # to have exactly the same initial model parameters, input, and
617
+ # therefore grads. That means the grads will be the same before and
618
+ # after DDP's all-reduce.
619
+ torch.manual_seed(self.rank)
620
+ dist.init_process_group(
621
+ backend="gloo",
622
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
623
+ world_size=self.world_size,
624
+ rank=self.rank,
625
+ )
626
+
627
+ # Use two different remote device input strings, with and without the default
628
+ # device string "cpu", respectively.
629
+ for remote_device in ["worker0/cpu", "worker0"]:
630
+ remote_layer1 = RemoteModule(
631
+ remote_device=remote_device, module_cls=nn.Linear, args=(10, 5, False)
632
+ )
633
+ layer1 = nn.Linear(10, 5, False)
634
+ # Start with the same parameters for remote and local
635
+ layer1.weight = remote_layer1.module_rref.to_here().weight
636
+
637
+ # Run local case.
638
+ layer2 = nn.Linear(5, 1)
639
+ inputs = torch.rand((10, 10))
640
+ ddp_model = DistributedDataParallel(layer2)
641
+ loss = ddp_model(layer1(inputs)).sum()
642
+ loss.backward()
643
+
644
+ # Run remote case.
645
+ with dist_autograd.context() as context_id:
646
+ loss = ddp_model(remote_layer1(inputs)).sum()
647
+ dist_autograd.backward(context_id, [loss])
648
+ grads_dict = dist_autograd.get_gradients(context_id)
649
+ dist.barrier()
650
+ self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight])
651
+ self.assertEqual(
652
+ layer1.weight.grad,
653
+ rpc.rpc_sync(
654
+ "worker0",
655
+ CommonDdpComparisonTest.get_remote_grads,
656
+ args=(remote_layer1.module_rref, context_id),
657
+ ),
658
+ )
659
+
660
+
661
+ class CudaDdpComparisonTest(CommonDdpComparisonTest):
662
+ @skip_if_lt_x_gpu(NUM_TRAINERS)
663
+ @requires_nccl()
664
+ @dist_init
665
+ @skip_if_rocm
666
+ def test_ddp_dist_autograd_local_vs_remote_gpu(self):
667
+ # Each trainer uses a different random seed. Otherwise, they are going
668
+ # to have exactly the same initial model parameters, input, and
669
+ # therefore grads. That means the grads will be the same before and
670
+ # after DDP's all-reduce.
671
+ torch.manual_seed(self.rank)
672
+ dist.init_process_group(
673
+ backend="gloo",
674
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
675
+ world_size=self.world_size,
676
+ rank=self.rank,
677
+ )
678
+
679
+ remote_layer1 = RemoteModule(
680
+ remote_device="worker0/cpu", module_cls=nn.Linear, args=(10, 7, False)
681
+ )
682
+ layer1 = nn.Linear(10, 7, False)
683
+ # Start with the same parameters for remote and local
684
+ layer1.weight = remote_layer1.module_rref.to_here().weight
685
+
686
+ layer2 = nn.Linear(7, 5).cuda(self.rank)
687
+ ddp_layer2 = DistributedDataParallel(layer2, device_ids=[self.rank])
688
+
689
+ remote_layer3 = RemoteModule(
690
+ remote_device="worker0/cpu", module_cls=nn.Linear, args=(5, 3, False)
691
+ )
692
+ layer3 = nn.Linear(5, 3, False)
693
+ # Start with the same parameters for remote and local
694
+ layer3.weight = remote_layer3.module_rref.to_here().weight
695
+
696
+ layer4 = nn.Linear(3, 1).cuda(self.rank)
697
+ ddp_layer4 = DistributedDataParallel(layer4, device_ids=[self.rank])
698
+
699
+ # Run local case.
700
+ inputs = torch.rand((10, 10))
701
+ loss = ddp_layer4(
702
+ layer3(ddp_layer2(layer1(inputs).cuda(self.rank)).cpu()).cuda(self.rank)
703
+ ).sum()
704
+ loss.backward()
705
+
706
+ # Run remote case.
707
+ with dist_autograd.context() as context_id:
708
+ loss = ddp_layer4(
709
+ remote_layer3(
710
+ ddp_layer2(remote_layer1(inputs).cuda(self.rank)).cpu()
711
+ ).cuda(self.rank)
712
+ ).sum()
713
+ dist_autograd.backward(context_id, [loss])
714
+ grads_dict = dist_autograd.get_gradients(context_id)
715
+ dist.barrier()
716
+ self.assertEqual(
717
+ layer1.weight.grad,
718
+ rpc.rpc_sync(
719
+ "worker0",
720
+ CommonDdpComparisonTest.get_remote_grads,
721
+ args=(remote_layer1.module_rref, context_id),
722
+ ),
723
+ )
724
+ self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight])
725
+ self.assertEqual(
726
+ layer3.weight.grad,
727
+ rpc.rpc_sync(
728
+ "worker0",
729
+ CommonDdpComparisonTest.get_remote_grads,
730
+ args=(remote_layer3.module_rref, context_id),
731
+ ),
732
+ )
733
+ self.assertEqual(layer4.weight.grad, grads_dict[layer4.weight])
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py ADDED
@@ -0,0 +1,64 @@
1
+ from contextlib import contextmanager
2
+ from datetime import timedelta
3
+ from functools import (
4
+ partial,
5
+ wraps,
6
+ )
7
+
8
+ import torch.distributed as dist
9
+ import torch.distributed.distributed_c10d as c10d
10
+
11
+ class MockProcessGroup(dist.ProcessGroup):
12
+
13
+ def __init__(self, rank, world):
14
+ super().__init__(rank, world)
15
+
16
+ def getBackendName(self):
17
+ return "mock_process_group"
18
+
19
+ def create_mock_pg(prefix_store, rank, world_size, timeout):
20
+ return MockProcessGroup(rank, world_size)
21
+
22
+ dist.Backend.register_backend('mock_process_group', create_mock_pg)
23
+
24
+ def mock_init_dist(rank, world_size):
25
+ # !!! WARNING !!!
26
+ # Kids don't try this at home, this is a cute pile of hacks that
27
+ # depends on a small mountain of c10d internals
28
+ assert not dist.is_initialized()
29
+ store = dist.HashStore()
30
+ # Trick _store_based_barrier into believing everyone else already checked-in
31
+ # Zero is the group index
32
+ store.add(f"{c10d.STORE_BASED_BARRIER_PREFIX}:0", world_size - 1)
33
+ dist.init_process_group(
34
+ backend="mock_process_group",
35
+ rank=rank,
36
+ world_size=world_size,
37
+ store=store,
38
+ group_name="fake",
39
+ timeout=timedelta(seconds=1))
40
+
41
+ @contextmanager
42
+ def with_dist(rank=0, world_size=2):
43
+ """
44
+ Context manager that initializes c10d with a fake process group.
45
+ """
46
+ mock_init_dist(rank=rank, world_size=world_size)
47
+ try:
48
+ yield
49
+ finally:
50
+ dist.destroy_process_group()
51
+
52
+ def with_fake_comms(func=None, rank=0, world_size=2):
53
+ """
54
+ Function wrapper that inits a fake process group designed for testing.
55
+ Right now only querying the world size is available
56
+ """
57
+ if func is None:
58
+ return partial(with_fake_comms, rank=rank, world_size=world_size)
59
+
60
+ @wraps(func)
61
+ def wrapper(self, *args, **kwargs):
62
+ with with_dist(rank, world_size):
63
+ func(self, *args, **kwargs)
64
+ return wrapper
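A minimal usage sketch of the with_fake_comms decorator defined above, assuming the module is importable from the torch.testing._internal.distributed.distributed_utils path shown in this diff. The mock backend performs no real communication; per its docstring, only simple queries such as the world size are expected to work.

import unittest

import torch.distributed as dist
from torch.testing._internal.distributed.distributed_utils import with_fake_comms


class FakeCommsExample(unittest.TestCase):
    @with_fake_comms(rank=1, world_size=4)
    def test_world_size_is_visible(self):
        # c10d is initialized with the mock backend for the duration of the test.
        self.assertEqual(dist.get_world_size(), 4)


if __name__ == "__main__":
    unittest.main()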
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py ADDED
@@ -0,0 +1,144 @@
1
+ import torch.distributed as dist
2
+
3
+ from torch._C._distributed_c10d import (
4
+ _create_work_from_future,
5
+ AllgatherOptions,
6
+ AllreduceOptions,
7
+ BarrierOptions,
8
+ ReduceScatterOptions,
9
+ BroadcastOptions,
10
+ ScatterOptions,
11
+ AllToAllOptions
12
+ )
13
+ from torch.futures import Future
14
+
15
+ from typing import List
16
+ from torch import Tensor
17
+
18
+
19
+ def ret_work(ret):
20
+ fut = Future()
21
+ fut.set_result(ret)
22
+ return _create_work_from_future(fut)
23
+
24
+
25
+ class FakeProcessGroup(dist.ProcessGroup):
26
+ """
27
+ A fake process group (not related to FakeTensor) is a process group which
28
+ doesn't actually do any communication, it just hallucinates some
29
+ communication. You can run a single rank with a fake process group
30
+ without needing multiple processes (simulates per-rank behavior)
31
+
32
+ NOTE: This is not a real process group, and it would produce wrong results
33
+ for every collective. It should be used as a convinient tool when playing
34
+ with distributed but don't care about the actual data.
35
+ """
36
+ def __init__(self, rank, world_size):
37
+ super().__init__(rank, world_size)
38
+ self._rank = rank
39
+ self._world_size = world_size
40
+
41
+ def allreduce(self, tensor_list, opts=AllreduceOptions()):
42
+ return ret_work(tensor_list)
43
+
44
+ def allreduce_coalesced(self, tensor_list, opts=AllreduceOptions()):
45
+ return ret_work(tensor_list)
46
+
47
+ def allgather(self, output_tensors, input_tensor, opts=AllgatherOptions()):
48
+ # NOTE: in general it's not good form to try to make FakePG work with 'real data',
49
+ # but the reasoning here is that we want FakePG to work with DeviceMesh's init
50
+ # code that have the data validation, which makes it worth the tradeoff.
51
+ # In general user should use MTPG or normal PG for cases where they may care about
52
+ # real data from collectives
53
+ for chunk in output_tensors[0]:
54
+ chunk.copy_(input_tensor[0])
55
+ return ret_work(output_tensors)
56
+
57
+ def reduce_scatter(self, output_tensor, scatter_list, opts=ReduceScatterOptions()):
58
+ return ret_work(output_tensor)
59
+
60
+ def _allgather_base(self, output_tensor, input_tensor, opts=AllgatherOptions()):
61
+ # assume each rank have the same input tensor so we just copy to the results
62
+ # since it's not a real allgather, we simply make this copying logic to let
63
+ # some simple validation works (i.e. calling allgather to see if each rank have
64
+ # the same tensor or not)
65
+ # NOTE: in general it's not good form to try to make FakePG work with 'real data',
66
+ # but the reasoning here is that we want FakePG to work with DeviceMesh's init
67
+ # code that have the data validation, which makes it worth the tradeoff.
68
+ # In general user should use MTPG or normal PG for cases where they may care about
69
+ # real data from collectives
70
+ chunks = output_tensor.chunk(self._world_size)
71
+ for chunk in chunks:
72
+ chunk.copy_(input_tensor)
73
+ return ret_work(output_tensor)
74
+
75
+ def _reduce_scatter_base(self, output_tensor, input_tensor, opts=ReduceScatterOptions()):
76
+ return ret_work(output_tensor)
77
+
78
+ def barrier(self, opts=BarrierOptions()):
79
+ # it should be no-op for fake pg
80
+ pass
81
+
82
+ def broadcast(self, tensors: List[Tensor], opts=BroadcastOptions()):
83
+ return ret_work(tensors)
84
+
85
+ def scatter(
86
+ self,
87
+ output_tensors: List[Tensor],
88
+ input_tensors: List[List[Tensor]],
89
+ opts=ScatterOptions(),
90
+ ):
91
+ return ret_work(output_tensors)
92
+
93
+ def alltoall(
94
+ self,
95
+ output_tensors: List[Tensor],
96
+ input_tensors: List[Tensor],
97
+ opts=AllToAllOptions(),
98
+ ):
99
+ return ret_work(output_tensors)
100
+
101
+ def alltoall_base(
102
+ self,
103
+ output_tensor: Tensor,
104
+ input_tensor: Tensor,
105
+ output_split_sizes: List[int],
106
+ input_split_sizes: List[int],
107
+ opts=AllToAllOptions(),
108
+ ):
109
+ return ret_work(output_tensor)
110
+
111
+ def send(
112
+ self,
113
+ tensors: List[Tensor],
114
+ dstRank: int,
115
+ tag: int,
116
+ ):
117
+ return ret_work(None)
118
+
119
+ def recv(
120
+ self,
121
+ tensors: List[Tensor],
122
+ srcRank: int,
123
+ tag: int,
124
+ ):
125
+ return ret_work(tensors)
126
+
127
+ def getBackendName(self):
128
+ return "fake"
129
+
130
+ def __repr__(self):
131
+ return f"FakePG world_size:{self._world_size} rank:{self._rank}"
132
+
133
+
134
+ class FakeStore(dist.Store):
135
+ """
136
+ A fake store is a fake Key-Value store simply for initialization usage
137
+ the of fake process group, one can either use FakeStore or HashStore.
138
+ """
139
+ pass
140
+
141
+ def _create_fake_pg(prefix_store, rank, world_size, timeout):
142
+ return FakeProcessGroup(rank, world_size)
143
+
144
+ dist.Backend.register_backend("fake", _create_fake_pg, devices=['cpu', 'cuda'])
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py ADDED
@@ -0,0 +1,473 @@
1
+ import sys
2
+ import threading
3
+ from dataclasses import dataclass
4
+ from typing import Dict, List, Optional, Tuple, Union
5
+ from functools import partial, reduce
6
+
7
+ import torch
8
+ import torch.distributed as dist
9
+ import weakref
10
+ from torch._C._distributed_c10d import (
11
+ _create_work_from_future,
12
+ AllgatherOptions,
13
+ AllreduceOptions,
14
+ AllToAllOptions,
15
+ BarrierOptions,
16
+ BroadcastOptions,
17
+ ReduceScatterOptions,
18
+ ScatterOptions,
19
+ Store,
20
+ ReduceOp,
21
+ )
22
+ from torch.distributed.distributed_c10d import _CollOp, _store_based_barrier, P2POp
23
+ from torch.futures import Future
24
+ from torch.utils import _pytree as pytree
25
+
26
+ """
27
+ TODO:
28
+ Lots of missing collectives.
29
+ Collectives validation.
30
+ Make timeout robust by making collectives respect the test deadline.
31
+ Make tests robust by making collectives interruptible.
32
+ We need some synchronization around cleanup to ensure that timedout ranks don't cause spurious failures.
33
+
34
+ """
35
+
36
+
37
+ def flatten_list(lst):
38
+ return pytree.tree_leaves(lst)
39
+
40
+
41
+ def ret_work(ret):
42
+ fut = Future()
43
+ fut.set_result(ret)
44
+ return _create_work_from_future(fut)
45
+
46
+ def binop_reduce(tensors, op):
47
+ res = op(torch.stack(tensors), dim=0)
48
+ if isinstance(res, torch.Tensor):
49
+ return res
50
+ # min/max return a namedtuple
51
+ return res.values
52
+
53
+ def bitwise_reduce(tensors, op):
54
+ return reduce(op, tensors)
55
+
56
+ _reduce_ops = {
57
+ ReduceOp.SUM: partial(binop_reduce, op=torch.sum),
58
+ ReduceOp.AVG: partial(binop_reduce, op=torch.mean),
59
+ ReduceOp.PRODUCT: partial(binop_reduce, op=torch.prod),
60
+ ReduceOp.MIN: partial(binop_reduce, op=torch.min),
61
+ ReduceOp.MAX: partial(binop_reduce, op=torch.max),
62
+ ReduceOp.BAND: partial(bitwise_reduce, op=torch.bitwise_and),
63
+ ReduceOp.BOR: partial(bitwise_reduce, op=torch.bitwise_or),
64
+ ReduceOp.BXOR: partial(bitwise_reduce, op=torch.bitwise_xor),
65
+ }
66
+
67
+ class AllToAll:
68
+ @torch.no_grad()
69
+ def work(self, data):
70
+ world_size = len(data)
71
+ for dest_rank in range(world_size):
72
+ output_tensor_list, _ = data[dest_rank]
73
+ for src_rank in range(world_size):
74
+ _, input_tensor_list = data[src_rank]
75
+ output_tensor_list[src_rank].copy_(input_tensor_list[dest_rank])
76
+
77
+ class AllReduce:
78
+ def __init__(self, op):
79
+ if op.op not in _reduce_ops:
80
+ raise NotImplementedError(
81
+ f"AllReduce op {op.op} not supported on multithreaded pg for now."
82
+ )
83
+ self.op = op.op
84
+
85
+ @torch.no_grad()
86
+ def work(self, data):
87
+ for i in range(len(data[0])):
88
+ tensors = []
89
+ # use rank0 as the device for sum
90
+ rank_0_device = data[0][i].device
91
+ # collect all data to the list and make them
92
+ # all on rank 0 device
93
+ for src_rank in range(0, len(data)):
94
+ tensors.append(data[src_rank][i].to(rank_0_device))
95
+
96
+ # now mimic reduce across all ranks
97
+ res = _reduce_ops[self.op](tensors)
98
+
99
+ # copy all the reduced value to each rank
100
+ for src_rank in range(len(data)):
101
+ data[src_rank][i].copy_(res.to(data[src_rank][i].device))
102
+
103
+
104
+ class AllGather:
105
+ @torch.no_grad()
106
+ def work(self, data):
107
+ for src_rank in range(len(data)):
108
+ in_tensor_list = data[src_rank][1]
109
+ # Can't handle all_gather with multiple tensors
110
+ assert len(in_tensor_list) == 1
111
+ src_tensor = in_tensor_list[0]
112
+
113
+ for dest in data:
114
+ dest_tensor = dest[0][0][src_rank]
115
+ dest_tensor.copy_(src_tensor)
116
+
117
+
118
+ class Scatter:
119
+ def __init__(self, src):
120
+ self.src = src
121
+
122
+ @torch.no_grad()
123
+ def work(self, data):
124
+ src_in_tensor_list = data[self.src][1]
125
+ # Can't handle scatter with multiple input tensor list
126
+ assert len(src_in_tensor_list) == 1
127
+ src_in_tensors = src_in_tensor_list[0]
128
+
129
+ for rank, each_rank_data in enumerate(data):
130
+ out_tensor_list = each_rank_data[0]
131
+ # Can't handle scatter with multiple output tensor
132
+ assert len(out_tensor_list) == 1
133
+ dest_tensor = out_tensor_list[0]
134
+ dest_tensor.copy_(src_in_tensors[rank])
135
+
136
+
137
+ class Gather:
138
+ def __init__(self, dst):
139
+ self.dst = dst
140
+
141
+ @torch.no_grad()
142
+ def work(self, data):
143
+ # Can't handle gather with multiple tensor lists
144
+ assert len(data[self.dst][0]) == 1
145
+ out_tensor_list = data[self.dst][0][0]
146
+ for rank, each_rank_data in enumerate(data):
147
+ src_in_tensor_list = each_rank_data[1]
148
+ # Can't handle gather with multiple tensor lists
149
+ assert len(src_in_tensor_list) == 1
150
+ dest_tensor = out_tensor_list[rank]
151
+ dest_tensor.copy_(src_in_tensor_list[0])
152
+
153
+ class ReduceScatter:
154
+ def __init__(self, op):
155
+ if op != dist.ReduceOp.SUM:
156
+ raise NotImplementedError("ReduceScatter only supports SUM on threaded pg for now.")
157
+ self.op = op
158
+
159
+ @torch.no_grad()
160
+ def work(self, data):
161
+ start_reduction = [False for _ in range(len(data))]
162
+ for each_rank_data in data:
163
+ # Can't handle reduce_scatter with multiple scatter list
164
+ assert len(each_rank_data[1]) == 1
165
+ to_scatter = each_rank_data[1][0]
166
+ for i in range(len(to_scatter)):
167
+ dest_tensor_on_rank_i = data[i][0]
168
+ # Can't handle reduce_scatter with multiple output tensor
169
+ assert len(dest_tensor_on_rank_i) == 1
170
+ dst_tensor_device = dest_tensor_on_rank_i[0].device
171
+ if not start_reduction[i]:
172
+ dest_tensor_on_rank_i[0].copy_(to_scatter[i].to(dst_tensor_device))
173
+ start_reduction[i] = True
174
+ else:
175
+ dest_tensor_on_rank_i[0].add_(to_scatter[i].to(dst_tensor_device))
176
+
177
+ class Broadcast:
178
+ def __init__(self, src):
179
+ self.src = src
180
+
181
+ @torch.no_grad()
182
+ def work(self, data):
183
+ in_tensor_list = flatten_list(data[self.src])
184
+ for i in range(len(data)):
185
+ out_tensor_list = flatten_list(data[i])
186
+ for j in range(len(in_tensor_list)):
187
+ out_tensor_list[j].copy_(in_tensor_list[j])
188
+
189
+
190
+ class Collective:
191
+ def __init__(self, world_size, collective, pg):
192
+ self._world_size = world_size
193
+ self._collective = collective
194
+
195
+ self._start_cond = threading.Condition()
196
+ self._done_cond = threading.Condition()
197
+
198
+ self._data = [None] * world_size
199
+ self._count = 0
200
+ self._done = False
201
+
202
+ self._pg = pg
203
+
204
+ def join(self, rank, data):
205
+ with self._start_cond:
206
+ self._data[rank] = data
207
+ self._count += 1
208
+
209
+ # notify rank 0
210
+ if self._count == self._world_size:
211
+ if rank > 0:
212
+ self._start_cond.notify()
213
+
214
+ if rank == 0:
215
+ self._start_cond.wait_for(
216
+ lambda: self._count == self._world_size or self._pg._terminate.is_set()
217
+ )
218
+ # SystemExit is not a subclass of Exception but BaseException
219
+ # and can be distinguished from normal exception raised from program errors
220
+ # so that we can hide it from the exception queue
221
+ if self._pg._terminate.is_set():
222
+ sys.exit("Test termination event occurs.")
223
+
224
+ with self._done_cond:
225
+ # wait for rank 0 to finish
226
+ if rank > 0:
227
+ self._done_cond.wait_for(lambda: self._done or self._pg._terminate.is_set())
228
+ if self._pg._terminate.is_set():
229
+ sys.exit("Test termination event occurs.")
230
+ else:
231
+ # copy data around
232
+ self._collective.work(self._data)
233
+ self._done = True
234
+ self._done_cond.notify_all()
235
+ return ret_work(data)
236
+
237
+
238
+ class ProcessLocalGroup(dist.ProcessGroup):
239
+ _coll_lock = threading.Lock()
240
+ _cur_coll_on_pgs = {}
241
+
242
+ _terminate = threading.Event()
243
+
244
+ @classmethod
245
+ def _start_coll(cls, collective, pg):
246
+ with cls._coll_lock:
247
+ # pg_name is unique, we use that to record the mapping between pg and collective
248
+ if pg.pg_name not in cls._cur_coll_on_pgs:
249
+ cls._cur_coll_on_pgs[pg.pg_name] = Collective(pg.size(), collective, cls)
250
+ return cls._cur_coll_on_pgs[pg.pg_name]
251
+
252
+ @classmethod
253
+ def _end_coll(cls, collective, pg):
254
+ # This is racily called by all ranks, so only one will work
255
+ with cls._coll_lock:
256
+ if pg.pg_name in cls._cur_coll_on_pgs and cls._cur_coll_on_pgs[pg.pg_name] == collective:
257
+ cls._cur_coll_on_pgs.pop(pg.pg_name)
258
+
259
+ @classmethod
260
+ def exception_handle(cls, exc):
261
+ cls._terminate.set()
262
+ for coll in cls._cur_coll_on_pgs.values():
263
+ with coll._start_cond:
264
+ coll._start_cond.notify()
265
+ with coll._done_cond:
266
+ coll._done_cond.notify_all()
267
+
268
+ @classmethod
269
+ def reset(cls):
270
+ with cls._coll_lock:
271
+ cls._cur_coll_on_pgs = {}
272
+ cls._terminate.clear()
273
+
274
+ def alltoall(self, output_tensor_list, input_tensor_list, opts=AllToAllOptions()):
275
+ coll = ProcessLocalGroup._start_coll(AllToAll(), self)
276
+ res = coll.join(self._rank, (output_tensor_list, input_tensor_list))
277
+ ProcessLocalGroup._end_coll(coll, self)
278
+ return res
279
+
280
+ def allreduce(self, tensor_list, opts=AllreduceOptions()):
281
+ coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self)
282
+ res = coll.join(self._rank, tensor_list)
283
+ ProcessLocalGroup._end_coll(coll, self)
284
+ return res
285
+
286
+ def allreduce_coalesced(self, tensor_list, opts=AllreduceOptions()):
287
+ coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self)
288
+ res = coll.join(self._rank, tensor_list)
289
+ ProcessLocalGroup._end_coll(coll, self)
290
+ return res
291
+
292
+ def barrier(self, opts=BarrierOptions()):
293
+ return self.allreduce(tensor_list=[torch.ones(1)])
294
+
295
+ def allgather(self, output_tensors, input_tensor, opts=AllgatherOptions()):
296
+ coll = ProcessLocalGroup._start_coll(AllGather(), self)
297
+ res = coll.join(self._rank, (output_tensors, input_tensor))
298
+ ProcessLocalGroup._end_coll(coll, self)
299
+ return res
300
+
301
+ def _allgather_base(self, output_tensor, input_tensor, opts=AllgatherOptions()):
302
+ tensor_list = list(torch.chunk(output_tensor, self._world_size))
303
+ return self.allgather([tensor_list], [input_tensor], opts)
304
+
305
+ def broadcast(self, tensor_list, opts=BroadcastOptions()):
306
+ coll = ProcessLocalGroup._start_coll(Broadcast(opts.rootRank), self)
307
+ res = coll.join(self._rank, tensor_list)
308
+ ProcessLocalGroup._end_coll(coll, self)
309
+ return res
310
+
311
+ def scatter(self, output_tensors, input_tensors, opts=ScatterOptions()):
312
+ coll = ProcessLocalGroup._start_coll(Scatter(opts.rootRank), self)
313
+ res = coll.join(self._rank, (output_tensors, input_tensors))
314
+ ProcessLocalGroup._end_coll(coll, self)
315
+ return res
316
+
317
+ def gather(self, output_tensors, input_tensors, opts=ScatterOptions()):
318
+ coll = ProcessLocalGroup._start_coll(Gather(opts.rootRank), self)
319
+ res = coll.join(self._rank, (output_tensors, input_tensors))
320
+ ProcessLocalGroup._end_coll(coll, self)
321
+ return res
322
+
323
+ def reduce_scatter(self, output_tensor, scatter_list, opts=ReduceScatterOptions()):
324
+ coll = ProcessLocalGroup._start_coll(ReduceScatter(opts.reduceOp), self)
325
+ res = coll.join(self._rank, (output_tensor, scatter_list))
326
+ ProcessLocalGroup._end_coll(coll, self)
327
+ return res
328
+
329
+ def _reduce_scatter_base(self, output_tensor, input_tensor, opts=AllgatherOptions()):
330
+ tensor_list = list(torch.chunk(input_tensor, self._world_size))
331
+ return self.reduce_scatter([output_tensor], [tensor_list], opts)
332
+
333
+ def allgather_into_tensor_coalesced(self, output_tensor_list, input_tensor_list):
334
+ res = None
335
+ for o_t, i_t in zip(output_tensor_list, input_tensor_list):
336
+ res = self._allgather_base(o_t, i_t)
337
+ return res
338
+
339
+ def __init__(self, rank, world_size):
340
+ super().__init__(rank, world_size)
341
+ self._rank = rank
342
+ self._world_size = world_size
343
+ world = dist.distributed_c10d._world
344
+ if isinstance(world, ThreadLocalWorld):
345
+ world = world._get_world()
346
+ self._world = weakref.ref(world)
347
+ self._ctx = torch.autograd.set_multithreading_enabled(False)
348
+
349
+ def size(self):
350
+ return self._world_size
351
+
352
+ @property
353
+ def pg_name(self):
354
+ """
355
+ return the global registered name of the current pg in the world
356
+ """
357
+ return self._world().pg_names[self]
358
+
359
+ def getBackendName(self):
360
+ return "threaded"
361
+
362
+ def __repr__(self):
363
+ return f"ThreadedPG world_size:{self._world_size} rank:{self._rank}"
364
+
365
+
366
+ def _create_threaded_pg(prefix_store, rank, world_size, timeout):
367
+ pg = ProcessLocalGroup(rank, world_size)
368
+ # https://github.com/pytorch/pytorch/pull/103033 changed store based barrier to optional
369
+ # When device mesh involves sub groups while store based barrier is not enabled in c10d,
370
+ # even though threaded pg actual collectives are assumed to be single threaded,
371
+ # different threads may be initializing different groups,
372
+ # leading to race conditions.
373
+ # For example, if we have a mesh of [[0, 1], [2, 3]], the sub groups
374
+ # (dim 0 and 1) would be initialized in different threads independently.
375
+ # In this case we can no longer rely on class or global variables
376
+ # but have to rely on store based barrier to make sure each group
377
+ # is ready separately before we can invoke collectives in any of the groups.
378
+
379
+ # the prefix store is already per group so we pass an empty name here
380
+ _store_based_barrier(rank, prefix_store, "", world_size, timeout)
381
+ return pg
382
+
383
+
384
+ dist.Backend.register_backend("threaded", _create_threaded_pg)
385
+
386
+
387
+ @dataclass
388
+ class WorldData:
389
+ default_pg: dist.ProcessGroup
390
+ pg_map: Dict[dist.ProcessGroup, Tuple[str, Optional[Store]]]
391
+ pg_names: Dict[dist.ProcessGroup, str]
392
+ pg_group_ranks: Dict[dist.ProcessGroup, Dict[int, int]]
393
+ pg_backend_config: Dict[dist.ProcessGroup, str]
394
+ group_count: int
395
+ tags_to_pg: Dict[str, List[dist.ProcessGroup]]
396
+ pg_to_tag: Dict[dist.ProcessGroup, str]
397
+ pg_coalesce_state: Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]]
398
+ pg_default_device: Dict[dist.ProcessGroup, torch.device]
399
+
400
+
401
+ class ThreadLocalWorld:
402
+ _world = threading.local()
403
+
404
+ def _get_world(self) -> WorldData:
405
+ if not hasattr(ThreadLocalWorld._world, "world"):
406
+ ThreadLocalWorld._world.world = WorldData(None, {}, {}, {}, {}, 0, {}, {}, {}, {})
407
+ return ThreadLocalWorld._world.world
408
+
409
+ @property
410
+ def default_pg(self):
411
+ return self._get_world().default_pg
412
+
413
+ @default_pg.setter
414
+ def default_pg(self, value):
415
+ self._get_world().default_pg = value
416
+
417
+ @property
418
+ def pg_map(self):
419
+ return self._get_world().pg_map
420
+
421
+ @property
422
+ def pg_names(self):
423
+ return self._get_world().pg_names
424
+
425
+ @property
426
+ def pg_group_ranks(self):
427
+ return self._get_world().pg_group_ranks
428
+
429
+ @property
430
+ def pg_backend_config(self):
431
+ return self._get_world().pg_backend_config
432
+
433
+ @property
434
+ def group_count(self) -> int:
435
+ return self._get_world().group_count
436
+
437
+ @group_count.setter
438
+ def group_count(self, value):
439
+ self._get_world().group_count = value
440
+
441
+ @property
442
+ def tags_to_pg(self):
443
+ return self._get_world().tags_to_pg
444
+
445
+ @property
446
+ def pg_to_tag(self):
447
+ return self._get_world().pg_to_tag
448
+
449
+ @property
450
+ def pg_coalesce_state(self) -> Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]]:
451
+ return self._get_world().pg_coalesce_state
452
+
453
+ @property
454
+ def pg_default_device(self) -> Dict[dist.ProcessGroup, torch.device]:
455
+ return self._get_world().pg_default_device
456
+
457
+
458
+ _old_pg_world = None
459
+ _ctx_manager = None
460
+
461
+
462
+ def _install_threaded_pg():
463
+ global _old_pg_world
464
+ global _ctx_manager
465
+ _old_pg_world = dist.distributed_c10d._world
466
+ dist.distributed_c10d._world = ThreadLocalWorld()
467
+ _ctx_manager = torch.autograd.set_multithreading_enabled(False)
468
+
469
+ return dist.distributed_c10d._world
470
+
471
+
472
+ def _uninstall_threaded_pg():
473
+ dist.distributed_c10d._world = _old_pg_world
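The reduction table near the top of this file is ordinary tensor math; the threading machinery only decides which thread runs it. As a quick, self-contained illustration of what a SUM or BAND allreduce over three ranks boils down to (plain tensors standing in for the per-rank data lists):

import torch
from functools import reduce

# Per-rank tensors that a SUM allreduce would see (rank r holds a tensor full of r).
per_rank = [torch.full((2,), float(r)) for r in range(3)]

# ReduceOp.SUM path: stack along a new rank dimension and reduce over it.
summed = torch.sum(torch.stack(per_rank), dim=0)   # tensor([3., 3.])

# ReduceOp.BAND path: fold a bitwise op across the ranks (integer tensors only).
masks = [torch.tensor([0b1111]), torch.tensor([0b1010]), torch.tensor([0b0110])]
banded = reduce(torch.bitwise_and, masks)          # tensor([2]), i.e. 0b0010

# Every rank then copies the reduced result back in place, mimicking AllReduce.work.
for t in per_rank:
    t.copy_(summed)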
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (203 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (207 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc ADDED
Binary file (21.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py ADDED
@@ -0,0 +1,733 @@
1
+ #!/usr/bin/python3
2
+ import enum
3
+ from typing import Tuple
4
+
5
+ import torch
6
+ import torch.distributed.rpc as rpc
7
+ import torch.testing._internal.dist_utils as dist_utils
8
+ from torch import Tensor, nn
9
+ from torch._jit_internal import Future
10
+ from torch.distributed.nn import RemoteModule
11
+ from torch.distributed.nn.api.remote_module import _REMOTE_MODULE_PICKLED_ATTRIBUTES
12
+ from torch.distributed.nn.api.remote_module import _RemoteModule
13
+ from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
14
+ from torch.testing._internal.common_utils import TemporaryFileName
15
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
16
+ RpcAgentTestFixture,
17
+ )
18
+
19
+
20
+ _PARAM_VAL = torch.nn.Parameter(torch.ones(1))
21
+
22
+
23
+ # RPC handler for querying the device on the destination worker.
24
+ def remote_device(module_rref):
25
+ for param in module_rref.local_value().parameters():
26
+ return param.device
27
+
28
+
29
+ # RPC handler for querying __dict__ on the destination worker.
30
+ def remote_module_attributes(remote_module):
31
+ return remote_module.__dict__
32
+
33
+
34
+ # RPC handler for running forward on the destination worker.
35
+ def remote_forward(remote_module, args):
36
+ return remote_module.forward(*args)
37
+
38
+ # RPC handler for running forward_async on the destination worker.
39
+ def remote_forward_async(remote_module, args):
40
+ # Since a future cannot be pickled and sent over the RPC layer,
41
+ # we have to wait here and behave just like the synchronous ``forward``.
42
+ return remote_module.forward_async(*args).wait()
43
+
44
+ # RPC handler for getting training mode on the destination worker.
45
+ def get_remote_training_arg(module_rref):
46
+ return module_rref.local_value().training
47
+
48
+ class ModuleCreationMode(enum.Enum):
49
+ MODULE_CTOR_WITH_INTERFACE = "module_ctor_with_interface"
50
+ MODULE_CTOR = "module_ctor"
51
+
52
+
53
+ @torch.jit.interface
54
+ class MyModuleInterface:
55
+ def forward(
56
+ self, tensor: Tensor, number: int, word: str = "default"
57
+ ) -> Tuple[str, int, Tensor]:
58
+ # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
59
+ pass
60
+
61
+
62
+ @torch.jit.interface
63
+ class RemoteMyModuleInterface:
64
+ def forward(
65
+ self, tensor: Tensor, number: int, word: str = "default"
66
+ ) -> Tuple[str, int, Tensor]:
67
+ # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
68
+ pass
69
+
70
+ def forward_async(
71
+ self, tensor: Tensor, number: int, word: str = "default"
72
+ ) -> Future[Tuple[str, int, Tensor]]:
73
+ pass
74
+
75
+
76
+ class MyModule(nn.Module):
77
+ def __init__(self, first_arg, first_kwarg=-1):
78
+ super().__init__()
79
+ self.param1 = _PARAM_VAL
80
+
81
+ def forward(
82
+ self, tensor: Tensor, number: int, word: str = "default"
83
+ ) -> Tuple[str, int, Tensor]:
84
+ return word, number, tensor
85
+
86
+
87
+ class BadModule:
88
+ def __init__(self, first_arg, first_kwarg=-1):
89
+ pass
90
+
91
+
92
+ def create_scripted_module(first_arg, first_kwarg=-1):
93
+ module = MyModule(first_arg, first_kwarg=first_kwarg)
94
+ scripted_module = torch.jit.script(module)
95
+ return scripted_module
96
+
97
+
98
+ # Common utils for both CPU and CUDA test suites
99
+ class CommonRemoteModuleTest(RpcAgentTestFixture):
100
+ @property
101
+ def world_size(self): # Override setting in RpcAgentTestFixture
102
+ return 2
103
+
104
+ @staticmethod
105
+ def _create_remote_module_iter(remote_device, modes=None):
106
+ if modes is None:
107
+ modes = ModuleCreationMode.__members__.values()
108
+
109
+ args = (1,)
110
+ kwargs = dict(first_kwarg=2)
111
+
112
+ if ModuleCreationMode.MODULE_CTOR in modes:
113
+ remote_module = RemoteModule(remote_device, MyModule, args, kwargs)
114
+ yield remote_module
115
+
116
+ if ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE in modes:
117
+ remote_module = _RemoteModule(
118
+ remote_device,
119
+ create_scripted_module,
120
+ args,
121
+ kwargs,
122
+ _module_interface_cls=MyModuleInterface,
123
+ )
124
+ scripted_remote_module = torch.jit.script(remote_module)
125
+ yield scripted_remote_module
126
+
127
+
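The helper above exercises RemoteModule in two flavors: the plain constructor and the scripted _RemoteModule path. Outside of this fixture, the plain-constructor usage these tests rely on looks roughly like the sketch below; it assumes rpc.init_rpc has already been called on this process and that a peer named "worker1" is part of the same RPC group.

import torch
from torch import nn
import torch.distributed.rpc as rpc
from torch.distributed.nn import RemoteModule

# Place an nn.Linear(20, 30) on worker1's CPU; construction happens remotely.
remote_linear = RemoteModule("worker1/cpu", nn.Linear, args=(20, 30))

out = remote_linear.forward(torch.randn(4, 20))         # synchronous remote forward
fut = remote_linear.forward_async(torch.randn(4, 20))   # returns a Future
out_async = fut.wait()

param_rrefs = remote_linear.remote_parameters()         # RRefs to the remote parameters

rpc.shutdown()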
128
+ class RemoteModuleTest(CommonRemoteModuleTest):
129
+ @dist_utils.dist_init
130
+ def test_bad_module(self):
131
+ if self.rank != 0:
132
+ return
133
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
134
+ remote_device = f"{dst_worker_name}/cpu"
135
+ args = (1,)
136
+ kwargs = dict(first_kwarg=2)
137
+
138
+ with self.assertRaisesRegex(
139
+ ValueError,
140
+ r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
141
+ ):
142
+ RemoteModule(remote_device, BadModule, args, kwargs).forward()
143
+
144
+ with self.assertRaisesRegex(
145
+ ValueError,
146
+ r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
147
+ ):
148
+ RemoteModule(remote_device, BadModule, args, kwargs).forward()
149
+
150
+
151
+ @dist_utils.dist_init
152
+ def test_forward_async(self):
153
+ if self.rank != 0:
154
+ return
155
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
156
+ args = (torch.ones(1), 2, "3")
157
+ for remote_module in self._create_remote_module_iter(dst_worker_name):
158
+ ret_fut = remote_module.forward_async(*args)
159
+ ret = ret_fut.wait()
160
+ self.assertEqual(ret, tuple(reversed(args)))
161
+
162
+ @dist_utils.dist_init
163
+ def test_forward_async_script(self):
164
+ if self.rank != 0:
165
+ return
166
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
167
+
168
+ scripted_remote_module = next(
169
+ self._create_remote_module_iter(
170
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
171
+ )
172
+ )
173
+
174
+ @torch.jit.script
175
+ def run_forward_async(scripted_remote_module: RemoteMyModuleInterface):
176
+ ret_fut = scripted_remote_module.forward_async(torch.ones(1), 2, "3")
177
+ ret = ret_fut.wait()
178
+ return ret
179
+
180
+ ret = run_forward_async(scripted_remote_module)
181
+
182
+ self.assertEqual(ret, ("3", 2, torch.ones(1)))
183
+
184
+ @dist_utils.dist_init
185
+ def test_forward_sync(self):
186
+ if self.rank != 0:
187
+ return
188
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
189
+ args = (torch.ones(1), 2, "3")
190
+ for remote_module in self._create_remote_module_iter(dst_worker_name):
191
+ ret = remote_module.forward(*args)
192
+ self.assertEqual(ret, tuple(reversed(args)))
193
+
194
+ @dist_utils.dist_init
195
+ def test_forward_sync_script(self):
196
+ if self.rank != 0:
197
+ return
198
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
199
+
200
+ scripted_remote_module = next(
201
+ self._create_remote_module_iter(
202
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
203
+ )
204
+ )
205
+
206
+ @torch.jit.script
207
+ def run_forward(scripted_remote_module: MyModuleInterface):
208
+ ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
209
+ return ret
210
+
211
+ ret = run_forward(scripted_remote_module)
212
+
213
+ self.assertEqual(ret, ("3", 2, torch.ones(1)))
214
+
215
+ @dist_utils.dist_init
216
+ def test_forward_with_kwargs(self):
217
+ if self.rank != 0:
218
+ return
219
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
220
+ args = (torch.ones(1), 2)
221
+ kwargs = dict(word="3")
222
+ # Only test Python nn.Module, because script module methods don't support taking kwargs.
223
+ for remote_module in self._create_remote_module_iter(
224
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
225
+ ):
226
+ ret_fut = remote_module.forward_async(*args, **kwargs)
227
+ ret = ret_fut.wait()
228
+ self.assertEqual(ret, tuple(reversed(args + ("3",))))
229
+
230
+ ret = remote_module.forward(*args, **kwargs)
231
+ self.assertEqual(ret, tuple(reversed(args + ("3",))))
232
+
233
+ @dist_utils.dist_init
234
+ def test_remote_parameters(self):
235
+ if self.rank != 0:
236
+ return
237
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
238
+
239
+ # Only test Python nn.Module, because script module methods don't support ``remote_parameters``.
240
+ for remote_module in self._create_remote_module_iter(
241
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
242
+ ):
243
+ param_rrefs = remote_module.remote_parameters()
244
+ self.assertEqual(len(param_rrefs), 1)
245
+ self.assertTrue(torch.equal(param_rrefs[0].to_here(), _PARAM_VAL))
246
+
247
+ @dist_utils.dist_init
248
+ def test_get_module_rref(self):
249
+ if self.rank != 0:
250
+ return
251
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
252
+
253
+ # Only test Python nn.Module, because script module methods don't support ``get_module_rref``.
254
+ for remote_module in self._create_remote_module_iter(
255
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
256
+ ):
257
+ rref = remote_module.get_module_rref()
258
+ self.assertEqual(rref, remote_module.module_rref)
259
+ for param in rref.to_here().parameters():
260
+ self.assertTrue(torch.equal(param, _PARAM_VAL))
261
+
262
+ @dist_utils.dist_init
263
+ def test_train_eval(self):
264
+ if self.rank != 0:
265
+ return
266
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
267
+
268
+ for remote_module in self._create_remote_module_iter(
269
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
270
+ ):
271
+ remote_module.train()
272
+ ret1 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),))
273
+ self.assertEqual(ret1, True)
274
+
275
+ remote_module.eval()
276
+ ret2 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),))
277
+ self.assertEqual(ret2, False)
278
+
279
+ @dist_utils.dist_init
280
+ def test_unsupported_methods(self):
281
+ if self.rank != 0:
282
+ return
283
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
284
+
285
+ for remote_module in self._create_remote_module_iter(
286
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
287
+ ):
288
+ with self.assertRaisesRegex(
289
+ ValueError, r"Method ``register_buffer`` not supported for RemoteModule"
290
+ ):
291
+ remote_module.register_buffer("buffer", torch.ones(5))
292
+ with self.assertRaisesRegex(
293
+ ValueError,
294
+ r"Method ``register_parameter`` not supported for RemoteModule",
295
+ ):
296
+ remote_module.register_parameter(
297
+ "param", torch.nn.Parameter(torch.ones(1))
298
+ )
299
+ with self.assertRaisesRegex(
300
+ ValueError, r"Method ``add_module`` not supported for RemoteModule"
301
+ ):
302
+ remote_module.add_module("empty", None)
303
+
304
+ with self.assertRaisesRegex(
305
+ ValueError, r"Method ``apply`` not supported for RemoteModule"
306
+ ):
307
+ fn = torch.rand((3, 3), requires_grad=False)
308
+ remote_module.apply(fn)
309
+
310
+ with self.assertRaisesRegex(
311
+ ValueError, r"Method ``cuda`` not supported for RemoteModule"
312
+ ):
313
+ remote_module.cuda()
314
+ with self.assertRaisesRegex(
315
+ ValueError, r"Method ``cpu`` not supported for RemoteModule"
316
+ ):
317
+ remote_module.cpu()
318
+ with self.assertRaisesRegex(
319
+ ValueError, r"Method ``type`` not supported for RemoteModule"
320
+ ):
321
+ remote_module.type(torch.FloatTensor)
322
+ with self.assertRaisesRegex(
323
+ ValueError, r"Method ``float`` not supported for RemoteModule"
324
+ ):
325
+ remote_module.float()
326
+ with self.assertRaisesRegex(
327
+ ValueError, r"Method ``double`` not supported for RemoteModule"
328
+ ):
329
+ remote_module.double()
330
+ with self.assertRaisesRegex(
331
+ ValueError, r"Method ``bfloat16`` not supported for RemoteModule"
332
+ ):
333
+ remote_module.bfloat16()
334
+ with self.assertRaisesRegex(
335
+ ValueError, r"Method ``to`` not supported for RemoteModule"
336
+ ):
337
+ remote_module.to("cpu", dtype=torch.int32)
338
+
339
+ def hook(module, grad_input, grad_output):
340
+ pass
341
+
342
+ with self.assertRaisesRegex(
343
+ ValueError,
344
+ r"Method ``register_backward_hook`` not supported for RemoteModule",
345
+ ):
346
+ remote_module.register_backward_hook(hook)
347
+ with self.assertRaisesRegex(
348
+ ValueError,
349
+ r"Method ``register_forward_pre_hook`` not supported for RemoteModule",
350
+ ):
351
+ remote_module.register_forward_pre_hook(hook)
352
+ with self.assertRaisesRegex(
353
+ ValueError,
354
+ r"Method ``register_forward_hook`` not supported for RemoteModule",
355
+ ):
356
+ remote_module.register_forward_hook(hook)
357
+
358
+ with self.assertRaisesRegex(
359
+ ValueError, r"Method ``state_dict`` not supported for RemoteModule"
360
+ ):
361
+ remote_module.state_dict()
362
+ with self.assertRaisesRegex(
363
+ ValueError, r"Method ``load_state_dict`` not supported for RemoteModule"
364
+ ):
365
+ remote_module.load_state_dict({})
366
+
367
+ with self.assertRaisesRegex(
368
+ ValueError,
369
+ r"Method ``parameters`` not supported for RemoteModule. Please use ``remote_parameters`` instead.",
370
+ ):
371
+ remote_module.parameters()
372
+ with self.assertRaisesRegex(
373
+ ValueError,
374
+ r"Method ``named_parameters`` not supported for RemoteModule",
375
+ ):
376
+ remote_module.named_parameters()
377
+ with self.assertRaisesRegex(
378
+ ValueError, r"Method ``buffers`` not supported for RemoteModule"
379
+ ):
380
+ remote_module.buffers()
381
+ with self.assertRaisesRegex(
382
+ ValueError, r"Method ``named_buffers`` not supported for RemoteModule"
383
+ ):
384
+ remote_module.named_buffers()
385
+ with self.assertRaisesRegex(
386
+ ValueError, r"Method ``children`` not supported for RemoteModule"
387
+ ):
388
+ remote_module.children()
389
+ with self.assertRaisesRegex(
390
+ ValueError, r"Method ``named_children`` not supported for RemoteModule"
391
+ ):
392
+ remote_module.named_children()
393
+ with self.assertRaisesRegex(
394
+ ValueError, r"Method ``modules`` not supported for RemoteModule"
395
+ ):
396
+ remote_module.modules()
397
+ with self.assertRaisesRegex(
398
+ ValueError, r"Method ``named_modules`` not supported for RemoteModule"
399
+ ):
400
+ remote_module.named_modules()
401
+
402
+ with self.assertRaisesRegex(
403
+ ValueError, r"Method ``requires_grad_`` not supported for RemoteModule"
404
+ ):
405
+ remote_module.requires_grad_()
406
+ with self.assertRaisesRegex(
407
+ ValueError, r"Method ``zero_grad`` not supported for RemoteModule"
408
+ ):
409
+ remote_module.zero_grad()
410
+ with self.assertRaisesRegex(
411
+ ValueError, r"Method ``share_memory`` not supported for RemoteModule"
412
+ ):
413
+ remote_module.share_memory()
414
+ with self.assertRaisesRegex(
415
+ ValueError, r"Method ``extra_repr`` not supported for RemoteModule"
416
+ ):
417
+ remote_module.extra_repr()
418
+
419
+ @dist_utils.dist_init
420
+ def test_send_remote_module_with_a_new_attribute_not_pickled_over_the_wire(self):
421
+ if self.rank != 0:
422
+ return
423
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
424
+
425
+ # If a new attribute is added to this RemoteModule after initialization,
426
+ # and the module is then sent over the wire by RPC,
427
+ # this new field will not be pickled, because it's not specified in _REMOTE_MODULE_PICKLED_ATTRIBUTES.
428
+ # Note that adding a new attribute outside the constructor should rarely happen.
429
+ # If a new attribute is added to the RemoteModule constructor,
430
+ # there is a sanity check that forces developers to add this attribute to either
431
+ # _REMOTE_MODULE_PICKLED_ATTRIBUTES or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING.
432
+ for remote_module in self._create_remote_module_iter(
433
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
434
+ ):
435
+ new_attr_name = "new_attr"
436
+ setattr(remote_module, new_attr_name, 1)
437
+
438
+ attrs = rpc.rpc_sync(
439
+ dst_worker_name, remote_module_attributes, (remote_module,)
440
+ )
441
+ self.assertNotIn(new_attr_name, attrs)
442
+
443
+ @dist_utils.dist_init
444
+ def test_remote_module_py_pickle_not_supported(self):
445
+ if self.rank != 0:
446
+ return
447
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
448
+
449
+ for remote_module in self._create_remote_module_iter(
450
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
451
+ ):
452
+ with TemporaryFileName() as fname:
453
+ with self.assertRaisesRegex(
454
+ RuntimeError,
455
+ "Cannot pickle RemoteModule in python pickler. RemoteModule can only be pickled when using RPC",
456
+ ):
457
+ torch.save(remote_module, fname)
458
+
459
+ @dist_utils.dist_init
460
+ def test_remote_module_py_pickle_not_supported_script(self):
461
+ if self.rank != 0:
462
+ return
463
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
464
+
465
+ for remote_module in self._create_remote_module_iter(
466
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
467
+ ):
468
+ with TemporaryFileName() as fname:
469
+ with self.assertRaisesRegex(torch.jit.Error, "can only be pickled when using RPC"):
470
+ torch.save(remote_module, fname)
471
+
472
+
473
+ class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest):
474
+ @property
475
+ def world_size(self): # Override setting in CommonRemoteModuleTest
476
+ return 3
477
+
478
+ @dist_utils.dist_init
479
+ def test_send_remote_module_over_the_wire(self):
480
+ if self.rank != 0:
481
+ return
482
+ dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
483
+ dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
484
+
485
+ # Unpickled attributes include both the inherent attributes of RemoteModule
486
+ # (not inherited from the superclass) and two installed methods.
487
+ expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES)
488
+ expected_unpickled_attrs.append("forward_async")
489
+ expected_unpickled_attrs.append("forward")
490
+
491
+ # Create a remote module on worker1 and then pass it to worker2 over the RPC layer.
492
+ for remote_module in self._create_remote_module_iter(
493
+ dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR]
494
+ ):
495
+ # Test querying some simple attributes from worker2.
496
+ attrs = rpc.rpc_sync(
497
+ dst_worker2_name, remote_module_attributes, (remote_module,)
498
+ )
499
+ self.assertListEqual(list(attrs.keys()), expected_unpickled_attrs)
500
+ self.assertEqual(attrs["on"], "worker1")
501
+ self.assertEqual(attrs["device"], "cpu")
502
+ self.assertFalse(attrs["is_device_map_set"])
503
+ self.assertFalse(attrs["is_scriptable"])
504
+
505
+ # Test that the methods installed on worker1 can be invoked by worker2 over the RPC layer.
507
+ # NOTE: In practice a remote module should be stored directly on the worker that runs ``forward`` or ``forward_async``,
508
+ # rather than have another worker initiate forward over the RPC layer.
508
+             args = (torch.ones(1), 2, "3")
+             ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args))
+             self.assertEqual(ret1, tuple(reversed(args)))
+             ret2 = rpc.rpc_sync(
+                 dst_worker2_name, remote_forward_async, (remote_module, args)
+             )
+             self.assertEqual(ret2, tuple(reversed(args)))
+
+     @dist_utils.dist_init
+     def test_send_remote_module_over_the_wire_script_not_supported(self):
+         if self.rank != 0:
+             return
+         dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+         dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
+
+         # Unpickled attributes include both the inherent attributes of RemoteModule
+         # (not inherited from the superclass) and two installed methods.
+         expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES)
+         expected_unpickled_attrs.append("forward_async")
+         expected_unpickled_attrs.append("forward")
+
+         with self.assertRaisesRegex(
+             RuntimeError, "Passing a script RemoteModule over RPC is not supported."
+         ):
+             # Create a remote module on worker1 and then pass it to worker2 over the RPC layer.
+             for remote_module in self._create_remote_module_iter(
+                 dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
+             ):
+                 # Test querying some simple attributes from worker2.
+                 attrs = rpc.rpc_sync(
+                     dst_worker2_name, remote_module_attributes, (remote_module,)
+                 )
+
+     @dist_utils.dist_init
+     def test_create_remote_module_from_module_rref(self):
+         if self.rank != 0:
+             return
+         dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+         dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
+
+         # Create a remote module on worker1 and then pass its `module_rref` to worker2 over the RPC layer.
+         for remote_module in self._create_remote_module_iter(
+             dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR]
+         ):
+             remote_module2 = rpc.rpc_sync(
+                 dst_worker2_name,
+                 RemoteModule.init_from_module_rref,
+                 (dst_worker2_name, remote_module.get_module_rref()),
+             )
+
+             args = (torch.ones(1), 2, "3")
+             ret1 = rpc.rpc_sync(
+                 dst_worker1_name, remote_forward, (remote_module, args)
+             )
+             ret2 = rpc.rpc_sync(
+                 dst_worker2_name, remote_forward, (remote_module2, args)
+             )
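+             # remote_module2 is constructed from remote_module's module_rref, so both wrap the
+             # same underlying module on worker1 and should return identical results for the same input.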
+             self.assertEqual(ret1, ret2)
+
+
+ class CudaRemoteModuleTest(CommonRemoteModuleTest):
+     @skip_if_lt_x_gpu(1)
+     @dist_utils.dist_init
+     def test_valid_device(self):
+         if self.rank != 0:
+             return
+         dst_rank = (self.rank + 1) % self.world_size
+         dst_worker_name = dist_utils.worker_name(dst_rank)
+
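+         # A remote device can be specified as "<workername>/<device>" or "rank:<rank>/<device>";
+         # both forms are exercised below.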
+         for remote_module in self._create_remote_module_iter(
+             f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR]
+         ):
+             device = rpc.rpc_sync(
+                 dst_worker_name, remote_device, (remote_module.module_rref,)
+             )
+             self.assertEqual(device.type, "cuda")
+             self.assertEqual(device.index, 0)
+
+         # Test rank works as well.
+         for remote_module in self._create_remote_module_iter(
+             f"rank:{dst_rank}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR]
+         ):
+             device = rpc.rpc_sync(
+                 dst_worker_name, remote_device, (remote_module.module_rref,)
+             )
+             self.assertEqual(device.type, "cuda")
+             self.assertEqual(device.index, 0)
+
+     @skip_if_lt_x_gpu(1)
+     @dist_utils.dist_init
+     def test_invalid_devices(self):
+         if self.rank != 0:
+             return
+         dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+         with self.assertRaisesRegex(
+             RuntimeError,
+             r"Expected one of .+ device type at start of device string",
+         ):
+             [
+                 m.forward()
+                 for m in self._create_remote_module_iter(
+                     f"{dst_worker_name}/foo",
+                     modes=[ModuleCreationMode.MODULE_CTOR],
+                 )
+             ]
+
+         with self.assertRaisesRegex(
+             RuntimeError, r"CUDA error: invalid device ordinal"
+         ):
+             [
+                 m.forward()
+                 for m in self._create_remote_module_iter(
+                     f"{dst_worker_name}/cuda:100",
+                     modes=[ModuleCreationMode.MODULE_CTOR],
+                 )
+             ]
+
+         with self.assertRaisesRegex(RuntimeError, r"Invalid device string: 'cpu2'"):
+             [
+                 m.forward()
+                 for m in self._create_remote_module_iter(
+                     f"{dst_worker_name}/cpu2",
+                     modes=[ModuleCreationMode.MODULE_CTOR],
+                 )
+             ]
+
+         with self.assertRaisesRegex(RuntimeError, r"Device string must not be empty"):
+             [
+                 m.forward()
+                 for m in self._create_remote_module_iter(
+                     f"{dst_worker_name}/",
+                     modes=[ModuleCreationMode.MODULE_CTOR],
+                 )
+             ]
+
+         with self.assertRaisesRegex(
+             ValueError,
+             r"Could not parse remote_device: worker1/cuda:0/cuda:1. The valid format is '<workername>/<device>'",
+         ):
+             [
+                 m.forward()
+                 for m in self._create_remote_module_iter(
+                     f"{dst_worker_name}/cuda:0/cuda:1",
+                     modes=[ModuleCreationMode.MODULE_CTOR],
+                 )
+             ]
+
+         with self.assertRaisesRegex(
+             ValueError,
+             r"Could not parse remote_device: /. The valid format is '<workername>/<device>'",
+         ):
+             [
+                 m.forward()
+                 for m in self._create_remote_module_iter(
+                     "/",
+                     modes=[ModuleCreationMode.MODULE_CTOR],
+                 )
+             ]
+
+         with self.assertRaisesRegex(
+             ValueError,
+             r"Could not parse remote_device: /cuda:0. The valid format is '<workername>/<device>'",
+         ):
+             [
+                 m.forward()
+                 for m in self._create_remote_module_iter(
+                     "/cuda:0",
+                     modes=[ModuleCreationMode.MODULE_CTOR],
+                 )
+             ]
+
+     @skip_if_lt_x_gpu(1)
+     @dist_utils.dist_init
+     def test_input_moved_to_cuda_device(self):
+         if self.rank != 0:
+             return
+         dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+         # These two CPU tensors (in args and kwargs) should be implicitly moved to an appropriate cuda device.
+         t1 = torch.ones(1)
+         args = (t1, 2)
+         t2 = t1 * 2
+         kwargs = dict(word=t2)
+
+         # Only test Python nn.Module, because script module methods don't support taking kwargs.
+         for remote_module in self._create_remote_module_iter(
+             f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR]
+         ):
+             ret_fut = remote_module.forward_async(*args, **kwargs)
+             ret = ret_fut.wait()
+             self.assertEqual(ret, tuple(reversed(args + (t2,))))
+             # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
+             self.assertEqual(ret[0].device.type, "cpu")
+             self.assertEqual(ret[2].device.type, "cpu")
+
+             ret = remote_module.forward(*args, **kwargs)
+             self.assertEqual(ret, tuple(reversed(args + (t2,))))
+             # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
+             self.assertEqual(ret[0].device.type, "cpu")
+             self.assertEqual(ret[2].device.type, "cpu")
+
+     @skip_if_lt_x_gpu(1)
+     @dist_utils.dist_init
+     def test_input_moved_to_cuda_device_script(self):
+         if self.rank != 0:
+             return
+         dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+         scripted_remote_module = next(
+             self._create_remote_module_iter(
+                 f"{dst_worker_name}/cuda:0",
+                 modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE],
+             )
+         )
+
+         @torch.jit.script
+         def run_forward(scripted_remote_module: MyModuleInterface):
+             ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
+             return ret
+
+         ret = run_forward(scripted_remote_module)
+
+         self.assertEqual(ret, ("3", 2, torch.ones(1)))
+         # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
+         self.assertEqual(ret[2].device.type, "cpu")
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipe_with_ddp_test.py ADDED
@@ -0,0 +1,147 @@
+ import torch
+ import torch.distributed as dist
+
+ from torch import nn
+ from torch.nn.parallel import DistributedDataParallel
+ from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
+     RpcAgentTestFixture,
+ )
+ from torch.testing._internal.common_distributed import (
+     requires_gloo,
+     requires_nccl,
+     skip_if_lt_x_gpu,
+     skip_if_rocm,
+ )
+ from torch.distributed.pipeline.sync import Pipe
+
+ class PipeWithDDPTest(RpcAgentTestFixture):
+     @property
+     def world_size(self) -> int:
+         return 2
+
+     @skip_if_lt_x_gpu(4)
+     @requires_nccl()
+     @dist_init
+     @skip_if_rocm
+     def test_basic_nccl_ckpt_never(self):
+         self._run_basic_test("nccl", "never")
+
+     @skip_if_lt_x_gpu(4)
+     @requires_nccl()
+     @dist_init
+     @skip_if_rocm
+     def test_basic_nccl_ckpt_never_find_unused(self):
+         self._run_basic_test("nccl", "never", find_unused_parameters=True)
+
+     @skip_if_lt_x_gpu(4)
+     @requires_nccl()
+     @dist_init
+     @skip_if_rocm
+     def test_basic_nccl_ckpt_always(self):
+         self._run_basic_test("nccl", "always", static_graph=True)
+
+     @skip_if_lt_x_gpu(4)
+     @requires_nccl()
+     @dist_init
+     @skip_if_rocm
+     def test_basic_nccl_ckpt_except_last(self):
+         self._run_basic_test("nccl", "except_last", static_graph=True)
+
+     @skip_if_lt_x_gpu(4)
+     @requires_gloo()
+     @dist_init
+     @skip_if_rocm
+     def test_basic_gloo_ckpt_never(self):
+         self._run_basic_test("gloo", "never")
+
+     @skip_if_lt_x_gpu(4)
+     @requires_gloo()
+     @dist_init
+     @skip_if_rocm
+     def test_basic_gloo_ckpt_never_find_unused(self):
+         self._run_basic_test("gloo", "never", find_unused_parameters=True)
+
+     @skip_if_lt_x_gpu(4)
+     @requires_gloo()
+     @dist_init
+     @skip_if_rocm
+     def test_basic_gloo_ckpt_always(self):
+         self._run_basic_test("gloo", "always", static_graph=True)
+
+     @skip_if_lt_x_gpu(4)
+     @requires_gloo()
+     @dist_init
+     @skip_if_rocm
+     def test_basic_gloo_ckpt_except_last(self):
+         self._run_basic_test("gloo", "except_last", static_graph=True)
+
+     def _run_basic_test(self, backend, checkpoint, find_unused_parameters=False, static_graph=False):
+         dist.init_process_group(
+             backend=backend,
+             init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
+             world_size=self.world_size,
+             rank=self.rank,
+         )
+
+         # Use 4 GPUs: one replica of the pipe spans GPUs 0 and 1, the other spans
+         # GPUs 2 and 3, and the two replicas are synchronized via DDP.
+         fc1 = nn.Linear(16, 8, bias=False).cuda(2 * self.rank)
+
+         class MyModule(nn.Module):
+             def __init__(self, device):
+                 super().__init__()
+                 self.fc2 = nn.Linear(8, 4, bias=False).cuda(device)
+                 self.fc3 = nn.Linear(4, 2, bias=False).cuda(device)
+
+             def forward(self, inp):
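+                 # When find_unused_parameters is True, fc3 is skipped so that its parameters
+                 # receive no gradient, exercising DDP's unused-parameter handling.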
+                 if find_unused_parameters:
+                     return self.fc2(inp)
+                 else:
+                     return self.fc3(self.fc2(inp))
+
+         layer2 = MyModule(2 * self.rank + 1)
+         model = nn.Sequential(
+             fc1,
+             layer2
+         )
+         model = Pipe(model, chunks=2, checkpoint=checkpoint)
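+         # Wrap the pipelined model with DDP so that gradients of the two pipeline replicas
+         # are synchronized across ranks.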
+         model = DistributedDataParallel(
+             model,
+             find_unused_parameters=find_unused_parameters,
+             static_graph=static_graph,
+         )
+
+         # Ensure inputs are different across ranks to verify that gradient
+         # sync indeed occurs.
+         model_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1)
+         out = model(model_input).local_value()
+         out.sum().backward()
+
+         # Run forward again for find_unused_parameters to trigger any potential errors.
+         if find_unused_parameters:
+             # Ensure inputs are different across ranks to verify that gradient
+             # sync indeed occurs.
+             unused_param_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1)
+             model(unused_param_input).local_value().sum().backward()
+
+         # Run a few more iterations of fwd + bwd to ensure gradient synchronization
+         # occurs properly across iterations via delay_all_reduce/bucketized allreduce.
+         for _ in range(3):
+             model_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1)
+             out = model(model_input).local_value()
+             out.sum().backward()
+
+         # Check grads
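+         # Gather each layer's gradient from both ranks; DDP should have kept the two replicas
+         # in sync, so the gathered copies must match.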
+         output = [torch.empty_like(fc1.weight.grad), torch.empty_like(fc1.weight.grad)]
+         dist.all_gather(output, fc1.weight.grad)
+         self.assertEqual(output[0], output[1])
+
+         output = [torch.empty_like(layer2.fc2.weight.grad), torch.empty_like(layer2.fc2.weight.grad)]
+         dist.all_gather(output, layer2.fc2.weight.grad)
+         self.assertEqual(output[0], output[1])
+
+         if not find_unused_parameters:
+             output = [torch.empty_like(layer2.fc3.weight.grad), torch.empty_like(layer2.fc3.weight.grad)]
+             dist.all_gather(output, layer2.fc3.weight.grad)
+             self.assertEqual(output[0], output[1])
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipeline/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipeline/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (209 Bytes).