applied-ai-018 committed
Commit 43345d4 · verified · 1 Parent(s): 5b794ca

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__init__.py +12 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__pycache__/__init__.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__init__.py +9 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/__init__.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/_common.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/binary_cmp.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/misc_ops.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/tensor_ops.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/_common.py +107 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py +68 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py +12 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py +215 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__init__.py +12 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/__init__.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/_internals.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/api.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/chunk_sharding_spec.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/_internals.py +209 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/api.py +242 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py +202 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__init__.py +0 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/__init__.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/_common.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding_bag.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py +349 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py +293 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py +476 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__init__.py +12 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/__init__.py +3 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/join.py +346 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/averagers.py +120 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/utils.py +72 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/__init__.py +77 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/__init__.py +0 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__init__.py +41 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/local_elastic_agent.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py +954 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/local_elastic_agent.py +339 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/__init__.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/api.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/handlers.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/api.py +112 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py +235 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py +873 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py +153 -0
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (321 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (1.06 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc ADDED
Binary file (1.22 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__init__.py ADDED
@@ -0,0 +1,12 @@
+# Keep old package for BC purposes, this file should be removed once
+# everything moves to the `torch.distributed.checkpoint` package.
+import sys
+import torch
+import warnings
+
+from torch.distributed.checkpoint import *  # noqa: F403
+warnings.warn(
+    "torch.distributed._shard.checkpoint will be deprecated, use torch.distributed.checkpoint instead",
+    DeprecationWarning
+)
+sys.modules['torch.distributed._shard.checkpoint'] = torch.distributed.checkpoint
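A minimal sketch (illustrative only, not part of this commit) of what the BC shim above implies for callers: importing the old path triggers the DeprecationWarning and re-points the old module name at the new package.

    import sys
    import torch.distributed._shard.checkpoint  # noqa: F401  # runs the shim, emits DeprecationWarning
    import torch.distributed.checkpoint

    # The shim rebinds the old module name in sys.modules to the new package.
    assert sys.modules["torch.distributed._shard.checkpoint"] is torch.distributed.checkpoint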
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (536 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__init__.py ADDED
@@ -0,0 +1,9 @@
+import torch.distributed._shard.sharded_tensor._ops.misc_ops
+import torch.distributed._shard.sharded_tensor._ops.tensor_ops
+
+from .binary_cmp import equal, allclose
+from .init import kaiming_uniform_, normal_, uniform_, constant_
+
+# Import all ChunkShardingSpec ops
+from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding import sharded_embedding
+from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding_bag import sharded_embedding_bag
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (758 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/_common.cpython-310.pyc ADDED
Binary file (4.25 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/binary_cmp.cpython-310.pyc ADDED
Binary file (1.98 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/misc_ops.cpython-310.pyc ADDED
Binary file (534 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/tensor_ops.cpython-310.pyc ADDED
Binary file (5.56 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/_common.py ADDED
@@ -0,0 +1,107 @@
+import functools
+from torch.distributed._shard.sharded_tensor import (
+    _sharded_op_impl,
+    Shard,
+    ShardedTensor,
+)
+from torch.distributed._shard.common_op_utils import _basic_validation
+
+def _sharded_op_common(op, early_stop_func, extra_check):
+    """
+    Inject sharded tensor op registration with common logics executed before
+    different behaviors are done on either local shards or a local tensor.
+
+    Example::
+        >>> # xdoctest: +SKIP("Undefined variables")
+        >>> op = torch.transpose
+        >>> @_sharded_op_impl(op)
+        >>> @_sharded_op_common(op, early_stop_func, extra_check)
+        >>> def sharded_tensor_op(types, args, kwargs, process_group):
+        >>>   ...
+        >>>
+        >>> st = sharded_tensor.rand(32, 16)
+        >>> st.transpose(1, 2)
+        >>> # This will call '_sharded_op_common'
+
+    Args:
+        op: The op to be registered and applied to all shards of the st.
+        early_stop_func (Callable, optional): the func for early stop.
+            Default: if ``None``, no early stop.
+        extra_check (Callable, optional): the func for extra condition check.
+            Default: if ``None``, no extra check.
+
+    Return:
+        func (Callable): Torch function for which we want to provide a sharded
+            implementation (ex: torch.transpose)
+    """
+    def decorator_sharded_func(wrapped_func):
+        @functools.wraps(wrapped_func)
+        def wrapper(types, args=(), kwargs=None, pg=None):
+            _basic_validation(op, args, kwargs)
+
+            st = args[0]
+            if kwargs is None:
+                kwargs = {}
+            if extra_check:
+                extra_check(*args, **kwargs)
+            if early_stop_func:
+                early_stop = early_stop_func(*args, **kwargs)
+                if early_stop:
+                    return st
+            return wrapped_func(types, args, kwargs, pg)
+
+        return wrapper
+
+    return decorator_sharded_func
+
+def _register_sharded_op_on_local_shards(
+    op, early_stop_func=None, extra_check=None, customized_func=None
+):
+    """
+    Handles ``__torch_function__`` dispatch for ops which are performed on
+    each shard of the sharded tensor such as elementwise op like
+    ``torch.nn.functional.gelu`` or ``torch.nn.functional.relu``.
+
+    For more complicated ops, a customized func can be used to generate
+    the new shards and sharded tensor size.
+
+    This function expects that the original ShardingSpec for the ShardedTensor
+    is preserved irrespective of whether or not a customized function is used.
+
+    Args:
+        op: The op to be registered and applied to all shards of the st.
+        early_stop_func (Callable, optional): the func for early stop.
+            Default: if ``None``, no early stop.
+        extra_check (Callable, optional): the func for extra condition check.
+            Default: if ``None``, no extra check.
+        customized_func (Callable, optional): the func for customized logic
+            to generate new shards and sharded tensor size.
+            Default: if ``None``, we simply lower to the real op call with
+                all local shards of the st.
+
+    Return:
+        func (Callable): registered implementation for sharded op for
+            ``__torch_function__`` dispatch.
+    """
+    @_sharded_op_impl(op)
+    @_sharded_op_common(op, early_stop_func, extra_check)
+    def sharded_tensor_op_on_local_shards(types, args=(), kwargs=None, pg=None):
+        st = args[0]
+        st_metadata = st.metadata()
+        local_shards = st.local_shards()
+        local_shards_new = []
+        if customized_func:
+            local_shards_new, st_metadata = customized_func(args, kwargs, pg)
+        else:
+            for local_shard in local_shards:
+                args = (local_shard.tensor, *args[1:])
+                local_shards_new.append(
+                    Shard(op(*args, **kwargs), local_shard.metadata)
+                )
+        return ShardedTensor._init_from_local_shards_and_global_metadata(
+            local_shards_new,
+            st_metadata,
+            process_group=pg,
+            init_rrefs=st._init_rrefs,
+            sharding_spec=st.sharding_spec()
+        )
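A hedged usage sketch of the registration helper above (not part of this diff); the GELU example follows the docstring's own suggestion for elementwise ops.

    import torch
    from torch.distributed._shard.sharded_tensor._ops._common import (
        _register_sharded_op_on_local_shards,
    )

    # After this call, invoking torch.nn.functional.gelu on a ShardedTensor
    # applies GELU to every local shard and rewraps the results with the
    # original metadata and sharding spec.
    _register_sharded_op_on_local_shards(torch.nn.functional.gelu)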
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py ADDED
@@ -0,0 +1,68 @@
+import torch
+import torch.distributed as dist
+import torch.distributed.distributed_c10d as distributed_c10d
+from torch.distributed._shard.sharded_tensor import (
+    ShardedTensor,
+    _sharded_op_impl
+)
+
+def _communicate_result(result, pg):
+    # Gather results from all ranks.
+    if result:
+        result_tensor = torch.ones(1, device=torch.device(torch.cuda.current_device()))
+    else:
+        result_tensor = torch.zeros(1, device=torch.device(torch.cuda.current_device()))
+
+    dist.all_reduce(result_tensor, group=pg)
+
+    expected_result = torch.ones(1, device=torch.device(torch.cuda.current_device())) * dist.get_world_size(pg)
+
+    return torch.equal(result_tensor, expected_result)
+
+def binary_cmp(cmp_fun, types, args, kwargs=None, process_group=None):
+    if len(args) != 2:
+        raise ValueError(f'Expected two arguments for torch.{cmp_fun.__name__}')
+
+    result = True
+    st1 = args[0]
+    st2 = args[1]
+    if not (isinstance(st1, ShardedTensor) and isinstance(st2, ShardedTensor)):
+        raise TypeError(f'Both arguments to torch.{cmp_fun.__name__} need to be of type ShardedTensor')
+
+    # Verify same PG
+    if st1._process_group != st2._process_group:
+        return False
+
+    if distributed_c10d._rank_not_in_group(st1._process_group) or distributed_c10d._rank_not_in_group(st2._process_group):
+        return distributed_c10d._rank_not_in_group(st1._process_group) == distributed_c10d._rank_not_in_group(st2._process_group)
+
+    # Verify metadata
+    if st1.metadata() != st2.metadata():
+        return _communicate_result(False, st1._process_group)
+
+    # Verify number of local shards
+    st1_local_shards = st1.local_shards()
+    st2_local_shards = st2.local_shards()
+    if len(st1_local_shards) != len(st2_local_shards):
+        return _communicate_result(False, st1._process_group)
+
+    # kwargs must be dict-like
+    if kwargs is None:
+        kwargs = {}
+    # Verify each local shard
+    for idx in range(len(st1_local_shards)):
+        if st1_local_shards[idx].metadata != st2_local_shards[idx].metadata:
+            return _communicate_result(False, st1._process_group)
+        if not cmp_fun(st1_local_shards[idx].tensor, st2_local_shards[idx].tensor, **kwargs):
+            return _communicate_result(False, st1._process_group)
+
+
+    return _communicate_result(True, st1._process_group)
+
+@_sharded_op_impl(torch.equal)
+def equal(types, args, kwargs, process_group):
+    return binary_cmp(torch.equal, types, args, kwargs, process_group)
+
+@_sharded_op_impl(torch.allclose)
+def allclose(types, args, kwargs, process_group):
+    return binary_cmp(torch.allclose, types, args, kwargs, process_group)
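A hedged sketch of how the comparison handlers above are reached (st_a and st_b are assumed, already-built ShardedTensors on an initialized process group; not part of this diff):

    import torch

    def sharded_tensors_equal(st_a, st_b) -> bool:
        # torch.equal is intercepted by @_sharded_op_impl(torch.equal): each rank
        # compares its local shards and the per-rank results are all-reduced so
        # every rank returns the same answer.
        return torch.equal(st_a, st_b)

    def sharded_tensors_close(st_a, st_b, rtol=1e-5, atol=1e-8) -> bool:
        # torch.allclose dispatches the same way; rtol/atol are forwarded to the
        # per-shard torch.allclose calls through kwargs.
        return torch.allclose(st_a, st_b, rtol=rtol, atol=atol)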
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py ADDED
@@ -0,0 +1,12 @@
+import torch
+from torch.distributed._shard.sharded_tensor import (
+    _sharded_op_impl,
+)
+
+# This is used by `_apply()` within module.py to set new
+# parameters after apply a certain method, we should follow
+# the future behavior of overwriting the existing tensor
+# instead of doing in-place change using `.data = `.
+@_sharded_op_impl(torch._has_compatible_shallow_copy_type)
+def tensor_has_compatible_shallow_copy_type(types, args=(), kwargs=None, pg=None):
+    return False
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py ADDED
@@ -0,0 +1,215 @@
+import copy
+import torch
+from torch.distributed._shard.sharded_tensor import (
+    _sharded_op_impl,
+    Shard,
+    ShardedTensor,
+)
+from ._common import (
+    _register_sharded_op_on_local_shards,
+)
+from torch.distributed._shard.common_op_utils import _register_default_op
+
+
+# Tensor properties access
+_register_default_op(torch.Tensor.shape.__get__, _sharded_op_impl)  # type: ignore[attr-defined]
+_register_default_op(torch.Tensor.dtype.__get__, _sharded_op_impl)  # type: ignore[attr-defined]
+_register_default_op(torch.Tensor.layout.__get__, _sharded_op_impl)  # type: ignore[attr-defined]
+_register_default_op(torch.Tensor.size, _sharded_op_impl)
+_register_default_op(torch.Tensor.dim, _sharded_op_impl)
+_register_default_op(torch.Tensor.ndim.__get__, _sharded_op_impl)  # type: ignore[attr-defined]
+_register_default_op(torch.Tensor.is_contiguous, _sharded_op_impl)
+_register_default_op(torch.Tensor.contiguous, _sharded_op_impl)
+_register_default_op(torch.Tensor.is_floating_point, _sharded_op_impl)
+
+# __reduce_ex__ to dispatch to get_state/set_state
+_register_default_op(torch.Tensor.__reduce_ex__, _sharded_op_impl)
+
+# autograd related properties
+_register_default_op(torch.Tensor.requires_grad.__get__, _sharded_op_impl)  # type: ignore[attr-defined]
+# TODO: set grad with a ShardedTensor that consists of all local grads
+_register_default_op(torch.Tensor.grad.__get__, _sharded_op_impl)  # type: ignore[union-attr]
+_register_default_op(torch.Tensor.grad_fn.__get__, _sharded_op_impl)  # type: ignore[union-attr]
+_register_default_op(torch.Tensor.is_leaf.__get__, _sharded_op_impl)  # type: ignore[attr-defined]
+
+# device property is ambiguous as from a global prospective,
+# ShardedTensor.device consists of multiple devices (might even across hosts)
+# We choose to return the current device of the local tensor to represent
+# the device property on each rank
+@_sharded_op_impl(torch.Tensor.device.__get__)
+def tensor_device(types, args=(), kwargs=None, pg=None):
+    self_st = args[0]
+    # Validate types
+    if not isinstance(self_st, ShardedTensor):
+        raise TypeError("input needs to be a ShardedTensor")
+    dev: torch.device
+    if self_st._local_shards:
+        dev = self_st._local_shards[0].tensor.device
+    elif pg and pg._get_backend_name() == "gloo":
+        dev = torch.device("cpu")
+    else:
+        dev = torch.device(torch.cuda.current_device())
+    return dev
+
+@_sharded_op_impl(torch.Tensor.is_meta.__get__)  # type: ignore[attr-defined]
+def st_is_meta(types, args=(), kwargs=None, pg=None):
+    return args[0].local_tensor().is_meta
+
+
+def sharded_type_as_check(*args, **kwargs):
+    """
+    Perform extra checks for the sharded_type_as op such as the input needs to
+    be either a Tensor or ShardedTensor.
+
+    Args: same as ``torch.Tensor.type_as``.
+
+    Return: None
+    """
+    if len(args) < 2:
+        raise ValueError("Needs to give a tensor to cast type as!")
+    if not isinstance(args[1], torch.Tensor) and not isinstance(args[1], ShardedTensor):
+        raise ValueError("Needs to give a Tensor or ShardedTensor to cast type as!")
+
+
+def same_dtype(*args, **kwargs):
+    """
+    When the dtype is the same, return the original ShardedTensor.
+
+    Args: same as ``torch.Tensor.type_as``.
+
+    Return (bool): Whether to return early or not.
+    """
+    return args[0].dtype == args[1].dtype
+
+
+def sharded_type_as(args, kwargs, pg):
+    """
+    Handles ``__torch_function__`` dispatch for the ``torch.Tensor.type_as`` op.
+
+    Args: same as ``torch.Tensor.type_as``.
+
+    Return:
+        new_local_shards (List[Shard]): Local shards for the new sharded tensor.
+        st_meta (ShardedTensorMetadata): Metadata of the new sharded tensor.
+    """
+    st = args[0]
+    tensor = args[1]
+    if isinstance(tensor, ShardedTensor):
+        tensor = tensor.local_tensor()
+    new_local_shards = []
+    for shard in st.local_shards():
+        new_local_shards.append(Shard(shard.tensor.type_as(tensor), shard.metadata))
+    st_meta = copy.deepcopy(st._metadata)
+    st_meta.tensor_properties.dtype = tensor.dtype
+    return new_local_shards, st_meta
+
+
+_register_sharded_op_on_local_shards(
+    torch.Tensor.type_as,
+    early_stop_func=same_dtype,
+    extra_check=sharded_type_as_check,
+    customized_func=sharded_type_as,
+)
+
+
+def sharded_deepcopy(args, kwargs, pg):
+    # NOTE: we directly implement deepcopy magic method
+    # instead of using the default tensor.__deepcopy__
+    # and implement clone(). This is because the default
+    # tensor deepcopy copies every attribute, but the
+    # process_group in ShardedTensor cannot be deep copied.
+    self_st = args[0]
+    new_local_shards = copy.deepcopy(self_st.local_shards())
+    new_metadata = copy.deepcopy(self_st.metadata())
+    return new_local_shards, new_metadata
+
+
+_register_sharded_op_on_local_shards(
+    torch.Tensor.__deepcopy__,
+    customized_func=sharded_deepcopy,
+)
+
+
+@_sharded_op_impl(torch.Tensor.copy_)
+def sharded_inplace_copy(types, args, kwargs, pg):
+    # NOTE: inplace op don't need to rewrap
+    kwargs = {} if kwargs is None else kwargs
+    self_st = args[0]
+    new_st = args[1]
+    nonblocking = kwargs.get("non_blocking", False)
+    for local_shard, new_shard in zip(self_st.local_shards(), new_st.local_shards()):
+        if local_shard.metadata != new_shard.metadata:
+            raise RuntimeError(
+                "inplace copy can only happen between two ShardedTensor with same metadata!"
+            )
+    for local_shard, new_shard in zip(self_st.local_shards(), new_st.local_shards()):
+        local_shard.tensor.copy_(new_shard.tensor, nonblocking)
+
+    return self_st
+
+
+def sharded_clone(args, kwargs, pg):
+    self_st = args[0]
+    desire_memory_format = kwargs.get("memory_format", None)
+    if desire_memory_format and desire_memory_format != torch.preserve_format:
+        raise RuntimeError("Only support torch.preserve_format for ShardedTensor!")
+    cloned_local_shards = [
+        Shard(
+            local_shard.tensor.clone(memory_format=desire_memory_format),
+            metadata=copy.deepcopy(local_shard.metadata),
+        )
+        for local_shard in self_st.local_shards()
+    ]
+    new_metadata = copy.deepcopy(self_st.metadata())
+    return cloned_local_shards, new_metadata
+
+
+_register_sharded_op_on_local_shards(
+    torch.Tensor.clone,
+    customized_func=sharded_clone,
+)
+
+
+def sharded_detach(args, kwargs, pg):
+    self_st = args[0]
+    detached_local_shards = [
+        Shard(
+            local_shard.tensor.detach(),
+            metadata=copy.deepcopy(local_shard.metadata),
+        )
+        for local_shard in self_st.local_shards()
+    ]
+    new_metadata = copy.deepcopy(self_st.metadata())
+    new_metadata.tensor_properties.requires_grad = False
+    return detached_local_shards, new_metadata
+
+
+_register_sharded_op_on_local_shards(
+    torch.Tensor.detach,
+    customized_func=sharded_detach,
+)
+
+
+@_sharded_op_impl(torch.Tensor.requires_grad_)
+def tensor_requires_grad_set(types, args=(), kwargs=None, pg=None):
+    self_st = args[0]
+    # Validate types
+    if not isinstance(self_st, ShardedTensor):
+        raise TypeError("input needs to be a ShardedTensor")
+
+    if kwargs is None:
+        kwargs = {}
+
+    requires_grad = args[1] if len(args) > 1 else kwargs.get("requires_grad", True)
+    if requires_grad == self_st.requires_grad:
+        return self_st
+
+    for local_shard in self_st.local_shards():
+        local_shard.tensor.requires_grad_(requires_grad)
+
+    # update the wrapper class property
+    with torch._C.DisableTorchFunctionSubclass():
+        self_st.requires_grad_(requires_grad)
+    # update the metadata in the meanwhile
+    self_st._metadata.tensor_properties.requires_grad = requires_grad
+    return self_st
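With the registrations above, common Tensor properties and methods work directly on a ShardedTensor. A hedged sketch (st is assumed to be an existing ShardedTensor; not part of this diff):

    from torch.distributed._shard.sharded_tensor import ShardedTensor

    def inspect_and_copy(st: ShardedTensor):
        # Property/default ops registered above resolve per rank, no collectives.
        print(st.shape, st.dtype, st.ndim, st.device)
        # Per-shard ops: each local shard is processed and rewrapped with the same
        # sharding spec; detach() also clears requires_grad in the new metadata.
        detached = st.detach()
        cloned = st.clone()  # only torch.preserve_format is supported
        return detached, cloned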
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__init__.py ADDED
@@ -0,0 +1,12 @@
+from .api import (
+    DevicePlacementSpec,
+    EnumerableShardingSpec,
+    PlacementSpec,
+    ShardingSpec,
+    _infer_sharding_spec_from_shards_metadata,
+)
+from .chunk_sharding_spec import (
+    ChunkShardingSpec as ChunkShardingSpec,
+)
+
+from torch.distributed._shard.metadata import ShardMetadata
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (527 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/_internals.cpython-310.pyc ADDED
Binary file (6.01 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/api.cpython-310.pyc ADDED
Binary file (9.23 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/chunk_sharding_spec.cpython-310.pyc ADDED
Binary file (5.76 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/_internals.py ADDED
@@ -0,0 +1,209 @@
+from typing import List, Optional, Tuple
+
+from torch.distributed._shard.metadata import ShardMetadata
+
+
+def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):
+    """
+    Checks if two shards overlap.
+    """
+
+    # For each dim of each shard, check if one shard resides on the other
+    # end of second shard with respect to that dim. As an example for a 2D
+    # shard, we would check if one shard is above or on the left of the
+    # other shard.
+    ndims = len(shard1.shard_offsets)
+    for i in range(ndims):
+        if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_sizes[i]:
+            return False
+        if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_sizes[i]:
+            return False
+
+    return True
+
+
+def _find_nd_overlapping_shards(
+    shards: List[ShardMetadata], sharded_dims: List[int]
+) -> Optional[Tuple[int, int]]:
+    # Each rank has len(sharded_dims) tuples. Each tuple represent the
+    # [begin, end] (inclusive) pair of that dimension.
+    shard_intervals = [
+        [
+            (s.shard_offsets[dim], s.shard_offsets[dim] + s.shard_sizes[dim] - 1)
+            for dim in sharded_dims
+        ]
+        for s in shards
+    ]
+
+    for i in range(len(shards)):
+        shard_i = shard_intervals[i]
+        for j in range(i + 1, len(shards)):
+            shard_j = shard_intervals[j]
+            # For each dim of each shard, check if one shard resides on the other
+            # end of second shard with respect to that dim. As an example for a 2D
+            # shard, we would check if one shard is above or on the left of the
+            # other shard.
+            overlap = True
+            for interval_i, interval_j in zip(shard_i, shard_j):
+                if interval_i[0] > interval_j[1] or interval_j[0] > interval_i[1]:
+                    overlap = False
+                    break
+            if overlap:
+                return (i, j)
+    return None
+
+
+def _find_1d_overlapping_shards(
+    shards: List[ShardMetadata], dim: int
+) -> Optional[Tuple[int, int]]:
+    # (begin, end, index_in_shards). Begin and end are inclusive.
+    intervals = [
+        (s.shard_offsets[dim], s.shard_offsets[dim] + s.shard_sizes[dim] - 1, i)
+        for i, s in enumerate(shards)
+    ]
+    intervals.sort()
+    for i in range(len(shards) - 1):
+        if intervals[i][1] >= intervals[i + 1][0]:
+            return (intervals[i][2], intervals[i + 1][2])
+    return None
+
+
+def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]):
+    """
+    Ensures none of the shards overlap with each other.
+
+    Args:
+        shards(List[ShardMetadata]): List of :class:`ShardMetadata` objects representing
+            each shard.
+    Raises:
+        ``ValueError`` if there's overlap in any two shards.
+    """
+    if not shards or len(shards) == 1:
+        return
+
+    sharded_dims: List[int] = []
+    for dim in range(len(shards[0].shard_offsets)):
+        for i in range(1, len(shards)):
+            if (
+                shards[i].shard_offsets[dim] != shards[0].shard_offsets[dim] or
+                shards[i].shard_sizes[dim] != shards[0].shard_sizes[dim]
+            ):
+                sharded_dims.append(dim)
+                break
+
+    pair: Optional[Tuple[int, int]] = None
+    if len(sharded_dims) == 0:
+        # All shards are the same, all dims are not partitioned. Choose any 2.
+        pair = (0, 1)
+    elif len(sharded_dims) == 1:
+        # Shards are partitioned over only one dimension. Overlap can be found
+        # using a O(nlogn) overlapping interval algorithm.
+        pair = _find_1d_overlapping_shards(shards, sharded_dims[0])
+    else:
+        # Shards are partitioned over more than one dimension. Fall back to
+        # pair-wise check. Even though O(nlogn) algorithms (line sweep) exist
+        # for 2D overlap, the implementation is not trivial and may not justify
+        # the time saving in most cases.
+        pair = _find_nd_overlapping_shards(shards, sharded_dims)
+
+    if pair:
+        raise ValueError(f'Shards {shards[pair[0]]} and {shards[pair[1]]} overlap')
+
+
+def check_tensor(shards_metadata, tensor_dims) -> None:
+    """
+    Checks if the shards_metadata is compatible with the provided tensor dims.
+
+    Args:
+        shards_metadata(List[ShardMetadata]): List of :class:`ShardMetadata`
+            objects representing each shard of the tensor.
+        tensor_dims(Sequence of int): Dimensions of tensor to verify
+    Raises:
+        ``ValueError`` if not compatible.
+    """
+
+    # If the tensor's volume matches the total volume of all shards and
+    # all shard boundaries are within tensor dims, we have a compatible
+    # sharding spec for this tensor. Note that we have already verified
+    # we don't have overlapping shards.
+    tensor_rank = len(tensor_dims)
+    shards_rank = len(shards_metadata[0].shard_offsets)
+    if tensor_rank != shards_rank:
+        raise ValueError(f'Rank of tensor is {tensor_rank}, but shards rank is {shards_rank}')
+
+    total_shard_volume = 0
+    for shard in shards_metadata:
+        shard_volume = 1
+        for i, shard_length in enumerate(shard.shard_sizes):
+            shard_volume *= shard_length
+            if shard.shard_offsets[i] + shard.shard_sizes[i] > tensor_dims[i]:
+                raise ValueError(
+                    f'Shard offset {shard.shard_offsets[i]} and length '
+                    f'{shard.shard_sizes[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}')
+        total_shard_volume += shard_volume
+
+    tensor_volume = 1
+    for size in tensor_dims:
+        tensor_volume *= size
+
+    if total_shard_volume != tensor_volume:
+        # TODO: Can we improve this error message to point out the gaps?
+        raise ValueError(
+            f'Total volume of shards: {total_shard_volume} '
+            f'does not match tensor volume: {tensor_volume}, in other words '
+            f'all the individual shards do not cover the entire tensor')
+
+def get_split_size(dim_size, chunks):
+    """
+    Computes the split size inline with ``torch.chunk``
+
+    Args:
+        dim_size(int): Size of the dimension being chunked.
+        chunks(int): Number of chunks to create for ``dim_size``.
+
+    Returns:
+        An int indicating the split size to use.
+    """
+    return (dim_size + chunks - 1) // chunks
+
+def get_chunked_dim_size(dim_size, split_size, idx):
+    """
+    Computes the dim size of the chunk for provided ``idx`` given ``dim_size``
+    and ``split_size``.
+
+    Args:
+        dim_size(int): Size of the dimension being chunked.
+        split_size(int): The chunk size for each chunk of ``dim_size``.
+        idx(int): The index of chunk whose dim size is being requested.
+
+    Returns:
+        An int indicating the dim size of the chunk.
+    """
+    return max(min(dim_size, split_size * (idx + 1)) - split_size * idx, 0)
+
+def get_chunk_sharding_params(sharding_dim_size, world_size, spec, rank):
+    """
+    Generate the start pos and offset length for the current rank for
+    chunk sharding.
+
+    Args:
+        sharding_dim_size(int): The dimension length which we shard on.
+        world_size(int): number of ranks.
+        spec (:class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec`):
+            sharding spec.
+        rank(int): # of cuda process.
+
+    Returns:
+        start_pos(int): start position of sharded tensor on the given rank.
+        chunk_size(int): chunk size of sharded tensor on the given rank.
+    """
+    split_size = get_split_size(sharding_dim_size, world_size)
+    current_offsets = 0
+    start_pos = current_offsets
+    for idx, placement in enumerate(spec.placements):
+        chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
+        if rank == placement.rank():
+            start_pos = current_offsets
+            break
+        current_offsets += chunk_size
+    return start_pos, chunk_size  # type: ignore[possibly-undefined]
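The two chunking helpers above follow torch.chunk semantics. A standalone restatement of the arithmetic for illustration only (hedged; the real helpers live in this private module):

    def split_size(dim_size: int, chunks: int) -> int:
        # ceil(dim_size / chunks), as get_split_size computes it
        return (dim_size + chunks - 1) // chunks

    def chunked_dim_size(dim_size: int, split: int, idx: int) -> int:
        # length of chunk `idx`; trailing chunks may be smaller or empty
        return max(min(dim_size, split * (idx + 1)) - split * idx, 0)

    # Sharding a dimension of length 10 across 4 ranks yields chunks of 3, 3, 3, 1.
    assert split_size(10, 4) == 3
    assert [chunked_dim_size(10, 3, i) for i in range(4)] == [3, 3, 3, 1]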
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/api.py ADDED
@@ -0,0 +1,242 @@
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+import functools
+from typing import Callable, Dict, List, TYPE_CHECKING
+
+import torch
+
+from ._internals import (
+    check_tensor,
+    get_chunked_dim_size,
+    get_split_size,
+    validate_non_overlapping_shards_metadata
+)
+from torch.distributed._shard.metadata import ShardMetadata
+
+import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta
+from torch.distributed._shard.op_registry_utils import _decorator_func
+
+if TYPE_CHECKING:
+    # Only include ShardedTensor when do type checking, exclude it
+    # from run-time to resolve circular dependency.
+    from torch.distributed._shard.sharded_tensor import ShardedTensor
+
+class PlacementSpec(ABC):  # noqa: B024
+    """
+    Base class representing the placement of an entity. Subclasses of this
+    class can be used to specify customized placements which might not be
+    covered by existing APIs.
+    """
+    pass
+
+
+@dataclass
+class DevicePlacementSpec(PlacementSpec):
+    """
+    Associates placement of an entity with a single device.
+
+    Args:
+        device(:class:`torch.distributed._remote_device`): The device to place the entity on.
+    """
+
+    device: torch.distributed._remote_device
+
+    def __post_init__(self):
+        if not isinstance(self.device, torch.distributed._remote_device):
+            self.device = torch.distributed._remote_device(self.device)
+
+class ShardingSpec(ABC):
+    """
+    Base class representing sharding specifications.
+    """
+    @abstractmethod
+    def build_metadata(self,
+                       tensor_sizes: torch.Size,
+                       tensor_properties: sharded_tensor_meta.TensorProperties,
+                       ) -> sharded_tensor_meta.ShardedTensorMetadata:
+        """
+        Given a global tensor size, define how to shard a tensor like this shape
+        across ranks, return ShardedTensorMetadata
+        Args:
+            tensor_sizes (:class:`torch.Size`):
+                The tensor shape to shard on, a `torch.Size` object that represents the
+                tensor shape to be sharded according to the ShardingSpec.
+            tensor_properties(:class:`torch.distributed._shard.sharded_tensor.TensorProperties):
+                Tensor properties used to create a ShardedTensor.
+        Returns:
+            A :class:`ShardedTensorMetadata` object that encodes the information about
+            the layout of the ShardedTensor and its properties.
+        """
+
+    @abstractmethod
+    def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor":
+        """
+        Given a global tensor on src_rank, shard this tensor
+        across ranks within the process group, return a ShardedTensor.
+        Args:
+            tensor (:class:`torch.Tensor`): Tensor needs to be sharded.
+        Keyword args:
+            src_rank (int, optional): The source rank which is used as the ground truth of
+                the data for the parameter that would be sharded and scattered
+                across the rest of the ranks.
+                Default: 0.
+            process_group (ProcessGroup, optional): The process group to work on. If None,
+                the default process group will be used.
+        Returns:
+            A :class:`ShardedTensor` sharded from the given tensor.
+        """
+
+# Ops customized for a particular ShardingSpec.
+_CUSTOM_SHARDING_SPEC_OPS: Dict[str, Dict[Callable, Callable]] = {}
+
+def _has_custom_op(sharding_spec, op):
+    """
+    Returns whether or not the ShardingSpec has a custom op implementation.
+    """
+    class_name = type(sharding_spec).__qualname__
+    return class_name in _CUSTOM_SHARDING_SPEC_OPS and op in _CUSTOM_SHARDING_SPEC_OPS[class_name]
+
+def _dispatch_custom_op(sharding_spec, op: Callable, types, args, kwargs, process_group):
+    """
+    Calls the custom op for this ShardingSpec if it exists.
+    """
+    class_name = type(sharding_spec).__qualname__
+    if not _has_custom_op(sharding_spec, op):
+        raise RuntimeError(f'Custom op: {op} not registered for {class_name}')
+    func = _CUSTOM_SHARDING_SPEC_OPS[class_name][op]
+    return func(types, args, kwargs, process_group)
+
+def custom_sharding_spec_op(sharding_spec_class, func):
+    """
+    Decorator to allow custom registration of ops.
+    Args:
+        sharding_spec_class(type): The ShardingSpec for which we need to add this custom op.
+        func(Callable): The op to override (ex: torch.bmm)
+    """
+    class_name = sharding_spec_class.__qualname__
+    if class_name not in _CUSTOM_SHARDING_SPEC_OPS:
+        _CUSTOM_SHARDING_SPEC_OPS[class_name] = {}
+    return functools.partial(
+        _decorator_func,
+        op=func,
+        op_table=_CUSTOM_SHARDING_SPEC_OPS[class_name]
+    )
+
+
+@dataclass
+class EnumerableShardingSpec(ShardingSpec):
+    """
+    This is a type of PlacementSpec that allows users to specify a generic
+    sharding scheme by enumerating exactly how each shard is laid out.
+
+    Args:
+        shards(List[ShardMetadata]): List of :class:`ShardMetadata` objects representing
+            each shard. Note that none of the shards should overlap.
+    """
+
+    shards: List[ShardMetadata]
+
+    def __post_init__(self):
+        if len(self.shards) == 0:
+            raise ValueError(f'Empty shard list provided: {self.shards}')
+
+        # Validate each shard has same rank.
+        rank = -1
+        for shard in self.shards:
+            if rank != -1 and rank != len(shard.shard_offsets):
+                raise ValueError(f'Found inconsistent ranks for shards: {rank} and {len(shard.shard_offsets)}')
+            rank = len(shard.shard_offsets)
+
+        validate_non_overlapping_shards_metadata(self.shards)
+
+    def build_metadata(self,
+                       tensor_sizes: torch.Size,
+                       tensor_properties: sharded_tensor_meta.TensorProperties,
+                       ) -> sharded_tensor_meta.ShardedTensorMetadata:
+        # check if shards form a valid tensor
+        check_tensor(self.shards, tensor_sizes)
+        return sharded_tensor_meta.ShardedTensorMetadata(
+            self.shards,
+            tensor_sizes,
+            tensor_properties
+        )
+
+    def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor":
+        # TODO: figure out a generic and efficient way to scatter the shards for EnumerableShardingSpec
+        raise NotImplementedError("EnumerableShardingSpec.shard not implemented yet!")
+
+
+def _infer_sharding_spec_from_shards_metadata(shards_metadata):
+    """
+    Infer the sharding spec from the metadata of each shard of a ShardedTensor.
+    If the tensor is sharded only on one dimension, we can then verify whether it's
+    a ChunkShardingSpec or not. The way to verify it is to first get the total length
+    and perform a chunk sharding with the given placements to see if we can have the
+    same chunk size as the given shards_metadata. If not, we assume it's enum sharded.
+
+    Args:
+        shards_metadata (List[ShardMetadata]): List of Metadata of local shards.
+
+    Returns:
+        A :class:`torch.distributed._shard.sharding_spec.ShardingSpec` object of sharding
+            spec for one sharded tensor.
+    """
+    placements = []
+    chunk_sharding_dim = None
+    chunk_offset_list = []
+    shard_size_list = []
+    shard_offset_list = []
+    # collect local shard metadatas from the global sharded_tensor_metadata
+    for shard_metadata in shards_metadata:  # type: ignore[attr-defined]
+        placements.append(shard_metadata.placement)
+        local_offsets = shard_metadata.shard_offsets
+        chunk_offset_list.append(sum(local_offsets))
+        shard_size_list.append(shard_metadata.shard_sizes)
+        shard_offset_list.append(shard_metadata.shard_offsets)
+        shard_dims = [idx for idx, e in enumerate(local_offsets) if e != 0]
+        # If the offset is [0, 0, ..., 0] (all zeros),
+        # we cannot decide whether how the tensor is sharded.
+        if len(shard_dims) == 0:
+            continue
+        # If the offset is [0, N, .,0, M, 0, .., 0],
+        # we are sure it's sharded by more than one dimension.
+        if len(shard_dims) != 1:
+            chunk_sharding_dim = None
+            break
+        # If the offset is [0, 0, .,0, M, 0, .., 0], aka, it's sharded by just
+        # one dimension, we need to make sure all ranks share the same dimension.
+        if not chunk_sharding_dim:
+            chunk_sharding_dim = shard_dims[0]
+        elif chunk_sharding_dim != shard_dims[0]:
+            chunk_sharding_dim = None
+            break
+
+    if chunk_sharding_dim is not None:
+        # Ensure we infer the correct placement order from offsets
+        placements = [
+            x for _, x in sorted(zip(chunk_offset_list, placements), key=lambda e: e[0])
+        ]
+
+        from .chunk_sharding_spec import ChunkShardingSpec
+        chunk_spec = ChunkShardingSpec(
+            dim=chunk_sharding_dim,
+            placements=placements,
+        )
+
+        shard_sizes = sorted([x[chunk_sharding_dim] for x in shard_size_list])
+        shard_total_length = sum(shard_sizes)
+        shard_offsets = sorted([x[chunk_sharding_dim] for x in shard_offset_list])
+
+        chunks = len(placements)
+        split_size = get_split_size(shard_total_length, chunks)
+        chunk_shard_sizes = sorted(
+            [
+                get_chunked_dim_size(shard_total_length, split_size, idx)
+                for idx in range(chunks)
+            ]
+        )
+        # Should match ChunkShardingSpec offsets calculation
+        chunk_shard_offsets = [split_size * idx for idx in range(chunks)]
+        if shard_sizes == chunk_shard_sizes and shard_offsets == chunk_shard_offsets:
+            return chunk_spec
+    return EnumerableShardingSpec(shards_metadata)
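A hedged sketch of the custom-op registration path above (the handler name and body are illustrative only; the op choice follows the docstring's own ``torch.bmm`` example, and chunk_sharding_spec_ops below uses the same decorator):

    import torch
    from torch.distributed._shard.sharding_spec import ChunkShardingSpec
    from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op

    @custom_sharding_spec_op(ChunkShardingSpec, torch.bmm)
    def my_chunk_sharded_bmm(types, args=(), kwargs=None, pg=None):
        # _dispatch_custom_op routes torch.bmm here whenever the ShardedTensor
        # argument carries a ChunkShardingSpec.
        raise NotImplementedError("illustrative placeholder")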
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py ADDED
@@ -0,0 +1,202 @@
+from dataclasses import dataclass
+import torch
+import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta
+from torch.distributed._shard.metadata import ShardMetadata
+from torch.distributed._shard.sharded_tensor.shard import Shard
+from torch.distributed._shard.sharded_tensor.utils import (
+    _parse_and_validate_remote_device
+)
+from torch.distributed._shard._utils import narrow_tensor
+import torch.distributed as dist
+import torch.distributed.distributed_c10d as distributed_c10d
+from typing import List, Union, TYPE_CHECKING
+from ._internals import (
+    get_chunked_dim_size,
+    get_split_size,
+)
+
+from .api import ShardingSpec
+
+if TYPE_CHECKING:
+    # Only include ShardedTensor when do type checking, exclude it
+    # from run-time to resolve circular dependency.
+    from torch.distributed._shard.sharded_tensor import ShardedTensor
+
+@dataclass
+class ChunkShardingSpec(ShardingSpec):
+    """
+    This is a type of PlacementSpec that defines the placement as being sharded
+    across multiple devices. In particular, it represents sharding a Tensor
+    along a single dimension into equal chunks (similar to :meth:`torch.chunk`).
+
+    The semantics of how a tensor is partitioned is inline with
+    :meth:`torch.chunk`, where ``dim`` in torch.chunk corresponds to the
+    specified ``dim`` and ``chunks`` in torch.chunk is the number of elements
+    in the placement specified.
+
+    Args:
+        dim (int or str):
+            The dimension to shard on, could be an integer representing the
+            dimension or a string in case of named tensors where dimensions are
+            named. Note that named tensor support is not added yet.
+        placement(List[Union[_remote_device, str]]):
+            Specifies the placement of each shard of the Tensor. The size of
+            the list represents the number of shards to be created. This could
+            be a list of
+            :class:`torch.distributed._remote_device`'s. This list
+            could also contain a string which represents remote
+            device as accepted by
+            :class:`torch.distributed._remote_device`
+    """
+
+    ShardingDim = Union[int, str]
+
+    dim: ShardingDim
+    placements: List[Union[torch.distributed._remote_device, str]]
+
+    def __post_init__(self):
+        self._verify_dim(self.dim)
+        for i, remote_device in enumerate(self.placements):
+            if not isinstance(remote_device, torch.distributed._remote_device):
+                self.placements[i] = torch.distributed._remote_device(remote_device)
+
+    @staticmethod
+    def _verify_dim(dim):
+        # Validate the sharding spec.
+        # TODO: support named dimension
+        if isinstance(dim, str):
+            raise NotImplementedError(
+                "ChunkShardingSpec does not support named dimension yet!"
+            )
+
+        if not isinstance(dim, int):
+            raise ValueError(
+                f"Sharding dim needs to be an integer, found: {dim}"
+            )
+
+    def build_metadata(self,
+                       tensor_sizes: torch.Size,
+                       tensor_properties: sharded_tensor_meta.TensorProperties,
+                       ) -> sharded_tensor_meta.ShardedTensorMetadata:
+        tensor_num_dim = len(tensor_sizes)
+
+        self._verify_dim(self.dim)
+        if self.dim >= tensor_num_dim or self.dim < -tensor_num_dim:  # type: ignore[operator]
+            raise ValueError(f"Invalid sharding dim: {self.dim}")
+
+        shards_metadata = []
+        sharding_dim_size = tensor_sizes[self.dim]  # type: ignore[index]
+        chunks = len(self.placements)
+        split_size = get_split_size(sharding_dim_size, chunks)
+        for idx, placement in enumerate(self.placements):
+            # generate ShardMetadata for each placement device
+            chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
+            shard_size = list(tensor_sizes)
+            current_offsets = [0] * tensor_num_dim
+            current_offsets[self.dim] = split_size * idx  # type: ignore[index]
+            shard_size[self.dim] = chunked_dim_size  # type: ignore[index]
+
+            shard_metadata = ShardMetadata(
+                shard_offsets=current_offsets,
+                shard_sizes=shard_size,
+                placement=placement,
+            )
+            shards_metadata.append(shard_metadata)
+
+        return sharded_tensor_meta.ShardedTensorMetadata(
+            shards_metadata,
+            tensor_sizes,
+            tensor_properties
+        )
+
+
+    def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor":
+        """
+        Args:
+            src_rank: group rank relative to ``process_group``
+
+            N.B. If ``process_group`` is None, ``src_rank`` is a global rank.
+        """
+        # relative imports to avoid circular dependency
+        from torch.distributed._shard.sharded_tensor import (
+            ShardedTensor
+        )
+        tensor_properties = sharded_tensor_meta.TensorProperties(
+            dtype=tensor.dtype,
+            layout=tensor.layout,
+            requires_grad=tensor.requires_grad,
+            memory_format=torch.contiguous_format,
+            pin_memory=tensor.is_pinned()
+        )
+        current_rank = dist.get_rank(process_group)
+        tensor_meta = self.build_metadata(tensor.size(), tensor_properties)
+        local_shards = []
+        local_tensor = None
+        local_metadata = None
+        tensors_to_scatter = [None] * dist.get_world_size(process_group)
+
+        sharding_dim_size = tensor.size()[self.dim]  # type: ignore[index]
+        chunks = len(self.placements)
+        split_size = get_split_size(sharding_dim_size, chunks)
+        scatter_shape = list(tensor.size())
+        scatter_shape[self.dim] = split_size  # type: ignore[index]
+
+        for shard_meta in tensor_meta.shards_metadata:
+            rank, device = _parse_and_validate_remote_device(process_group, shard_meta.placement)
+            if current_rank == src_rank:
+                # Reshape to get shard for this rank and we don't want autograd
+                # recording here for the narrow op and 'local_shard' should be a
+                # leaf variable in the autograd graph.
+                narrowed_tensor = narrow_tensor(tensor, shard_meta)
+                if shard_meta.shard_sizes[self.dim] < split_size:  # type: ignore[index]
+                    # for the last shard that might be smaller to other shards
+                    # resize the narrowed tensor to the same size and use it for
+                    # the scatter collective as dist.scatter requires same size
+                    # inputs on every rank
+                    tensor_to_scatter = narrowed_tensor.detach().clone().resize_(scatter_shape)
+                else:
+                    tensor_to_scatter = narrowed_tensor.detach().clone().contiguous()
+
+                tensors_to_scatter[rank] = tensor_to_scatter
+
+            if current_rank == rank:
+                local_tensor = torch.empty(
+                    scatter_shape, dtype=tensor.dtype, layout=tensor.layout, device=device)
+                local_metadata = shard_meta
+
+        # each rank should have local_tensor and local_metadata initialized if we build
+        # the metadata list in a correct way.
+        assert local_tensor is not None
+        assert local_metadata is not None
+
+        # Scatter the shards to all ranks in the pg
+        # scatter takes the global rank as ``src``
+        src_for_scatter = src_rank
+        if process_group is not None and process_group is not distributed_c10d._get_default_group():
+            src_for_scatter = distributed_c10d.get_global_rank(process_group, src_for_scatter)
+
+        dist.scatter(
+            local_tensor,
+            scatter_list=tensors_to_scatter if current_rank == src_rank else None,
+            src=src_for_scatter,
+            group=process_group
+        )
+
+        if list(local_tensor.size()) != local_metadata.shard_sizes:
+            # detach again after receiving to ensure local shards remain a leaf node
+            local_tensor = local_tensor.resize_(local_metadata.shard_sizes).detach()
+
+        # Sync requires_grad to local_shard.
+        local_tensor.requires_grad = tensor.requires_grad
+
+        local_shards.append(Shard(tensor=local_tensor, metadata=local_metadata))
+
+        st = ShardedTensor._init_from_local_shards_and_global_metadata(
+            local_shards,
+            tensor_meta,
+            process_group=process_group)
+
+        # Manually set sharding_spec
+        st._sharding_spec = self
+
+        return st
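A hedged usage sketch for the spec above (assumes torch.distributed is already initialized with two ranks and one CUDA device per rank; not part of this diff):

    import torch
    from torch.distributed._shard.sharding_spec import ChunkShardingSpec

    spec = ChunkShardingSpec(
        dim=0,  # shard along rows, torch.chunk-style
        placements=["rank:0/cuda:0", "rank:1/cuda:1"],  # strings are coerced to _remote_device
    )
    # On src_rank the tensor is narrowed per shard and scattered; every rank ends
    # up holding one local Shard wrapped in a ShardedTensor carrying this spec.
    full = torch.rand(10, 4)
    st = spec.shard(full, src_rank=0)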
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (235 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/_common.cpython-310.pyc ADDED
Binary file (11 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding.cpython-310.pyc ADDED
Binary file (9.44 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding_bag.cpython-310.pyc ADDED
Binary file (14.2 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import torch
3
+ import torch.distributed as dist
4
+ from torch.distributed._shard.sharded_tensor import ShardedTensor
5
+ from torch.distributed._shard.sharded_tensor._ops._common import _sharded_op_common
6
+ from torch.distributed._shard.sharding_spec import ChunkShardingSpec
7
+ from torch.distributed._shard.sharding_spec._internals import (
8
+ get_chunk_sharding_params,
9
+ get_chunked_dim_size,
10
+ get_split_size,
11
+ )
12
+ from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
13
+ from torch.distributed.nn.functional import (
14
+ _all_gather_base,
15
+ all_reduce,
16
+ all_to_all_single,
17
+ )
18
+
19
+
20
+ def _chunk_sharding_spec_check(spec, op):
21
+ """
22
+ For the given op implementation check if the sharding spec is ChunkShardingSpec.
23
+ """
24
+ if not isinstance(spec, ChunkShardingSpec):
25
+ raise NotImplementedError(
26
+ f"Only ChunkShardingSpec supported for '{op.__name__}'."
27
+ )
28
+
29
+
30
+ def _register_sharded_op_on_local_tensor(
31
+ op, early_stop_func=None, extra_check=None, customized_func=None
32
+ ):
33
+ """
34
+ Handles ``__torch_function__`` dispatch for ops which are performed on
35
+ the single local tensor of the sharded tensor such as op like
36
+ ``torch.nn.functional.softmax`` or ``torch.Tensor.view``.
37
+
38
+ For more complicated ops, a customized func can be used to generate
39
+ the new local tensor, sharding spec and sharded tensor size.
40
+
41
+ Args:
42
+ op: The op to be registered and applied to all shards of the st.
43
+ early_stop_func (Callable, optional): the func for early stop.
44
+ Default: if ``None``, no early stop.
45
+ extra_check (Callable, optional): the func for extra condition check.
46
+ Default: if ``None``, no extra check.
47
+ customized_func (Callable, optional): the func for customized logic
48
+ to generate the new local tensor, sharding spec and sharded tensor size.
49
+ Default: if ``None``, we simply lower to the real op call with
50
+ the single local tensor of the st.
51
+
52
+ Return:
53
+ func (Callable): registered implementation for sharded op for
54
+ ``__torch_function__`` dispatch.
55
+ """
56
+
57
+ @custom_sharding_spec_op(ChunkShardingSpec, op)
58
+ @_sharded_op_common(op, early_stop_func, extra_check)
59
+ def sharded_tensor_op_on_local_tensor(types, args=(), kwargs=None, pg=None):
60
+ st = args[0]
61
+ sharding_spec = st.sharding_spec()
62
+ if len(st.local_shards()) != 1:
63
+ raise TypeError(
64
+ f"torch function '{op.__name__}', with args: {args} and "
65
+ f"kwargs: {kwargs} only supported for single local tensor!"
66
+ )
67
+ st_size = st.size()
68
+ if customized_func:
69
+ local_tensor, sharding_spec, st_size = customized_func(args, kwargs, pg)
70
+ else:
71
+ args = (st.local_tensor(), *args[1:])
72
+ local_tensor = op(*args, **kwargs)
73
+ return ShardedTensor._init_from_local_tensor(
74
+ local_tensor.contiguous(),
75
+ sharding_spec,
76
+ st_size, # type: ignore[arg-type]
77
+ process_group=pg,
78
+ init_rrefs=st._init_rrefs,
79
+ )
80
+
81
+
82
+ def _handle_col_wise_sharding_base(
83
+ op_func,
84
+ col_dim,
85
+ input,
86
+ world_size,
87
+ weight,
88
+ local_shard,
89
+ pg,
90
+ gathered_inputs,
91
+ mode=None,
92
+ gathered_per_sample_weights=None,
93
+ gathered_offsets=None,
94
+ padding_idx=None,
95
+ ):
96
+ """
97
+ For col-wise sharding of the weight, much of the logic is common,
98
+ so we extract it into this function:
99
+ Step 1. Gather the input from each rank.
100
+ Step 2. Perform the op on the concatenated tensor.
101
+ Step 3. Distribute results to each rank with column rearrangement.
102
+ Step 4. Concatenate all results from all ranks.
103
+
104
+ Args:
105
+ op_func: operator which is applied to the input tensor.
106
+ col_dim: dim of result tensor after the operation.
107
+ input: tensor to be applied op on.
108
+ world_size: number of ranks.
109
+ weight: sharded weight tensor.
110
+ local_shard: col-wise sharded weight tensor.
111
+ pg: process group.
112
+ gathered_inputs: list of inputs from all ranks. If specified, we
113
+ don't need to communicate with each rank any more.
114
+ mode: aggregation mode of EmbeddingBag.
115
+ gathered_per_sample_weights: per_sample_weights across all ranks.
116
+ gathered_offsets: offsets across all ranks.
117
+ padding_idx: If specified, the entries at padding_idx do
118
+ not contribute to the gradient; therefore, the embedding
119
+ vector at padding_idx is not updated during training,
120
+ i.e. it remains as a fixed “pad”.
121
+ Note that the embedding vector at padding_idx is
122
+ excluded from the reduction.
123
+
124
+ Return: final result of input being applied with the op.
125
+ """
126
+ # run the operator's function for all the inputs.
127
+ results = []
128
+ for i, inp in enumerate(gathered_inputs):
129
+ if op_func == torch.nn.functional.embedding_bag:
130
+ result = op_func(
131
+ inp,
132
+ local_shard,
133
+ offsets=gathered_offsets[i] if gathered_offsets is not None else None,
134
+ mode=mode,
135
+ per_sample_weights=gathered_per_sample_weights[i]
136
+ if gathered_per_sample_weights is not None
137
+ else None,
138
+ padding_idx=padding_idx,
139
+ )
140
+ elif op_func == torch.nn.functional.embedding:
141
+ result = op_func(
142
+ inp,
143
+ local_shard,
144
+ padding_idx=padding_idx,
145
+ )
146
+ else:
147
+ result = op_func(inp, local_shard)
148
+ results.append(torch.transpose(result, 0, col_dim))
149
+
150
+ # Distribute results to each rank with col rearrangement.
151
+ output = _result_distribute_with_col_rearrange(
152
+ results, input, world_size, weight, pg
153
+ )
154
+
155
+ # transpose the output and return result.
156
+ return torch.transpose(output, 0, col_dim)
157
+
158
+
159
+ def _result_distribute_with_col_rearrange(results, input, world_size, weight, pg):
160
+ """
161
+ For col-wise sharding of weight, we need to distribute
162
+ results to each rank. We do them in this function.
163
+ Note that, if the index in the Sharding Spec is not equal to
164
+ the rank number, we need to do the rearrangement based on the
165
+ order given by the Sharding Spec (placement).
166
+
167
+ Args:
168
+ results: results from ops applied to inputs from all ranks.
169
+ We need to distribute them back to their original ranks.
170
+ input: tensor to be applied op to.
171
+ world_size: number of ranks.
172
+ weight: sharded weight tensor.
173
+ pg: process group.
174
+
175
+ Return: column rearranged result.
176
+ """
177
+ # Process results and outputs for all2all.
178
+ sharding_dim = weight._sharding_spec.dim
179
+ sharding_dim_size = weight.size(sharding_dim)
180
+ dims = list(results[0].size())
181
+ dims[0] = sharding_dim_size
182
+ combined_results = torch.cat(results)
183
+ output = torch.empty(
184
+ *dims, device=combined_results.device, dtype=combined_results.dtype
185
+ )
186
+
187
+ # Compute output splits
188
+ split_size = get_split_size(sharding_dim_size, world_size)
189
+ output_split_sizes = [0] * world_size
190
+ for idx, placement in enumerate(weight._sharding_spec.placements):
191
+ output_split_sizes[placement.rank()] = get_chunked_dim_size(
192
+ sharding_dim_size, split_size, idx
193
+ )
194
+
195
+ # distribute the outputs using all2all.
196
+ output = all_to_all_single(
197
+ output, combined_results, output_split_sizes=output_split_sizes, group=pg
198
+ )
199
+
200
+ # Check if we need to rearrange columns appropriately for output.
201
+ rearrange_columns = any(
202
+ idx != placement.rank()
203
+ for idx, placement in enumerate(weight._sharding_spec.placements)
204
+ )
205
+ if not rearrange_columns:
206
+ return output
207
+
208
+ indices = []
209
+ for placement in weight._sharding_spec.placements:
210
+ dim_size = output_split_sizes[placement.rank()]
211
+ start = sum(
212
+ [
213
+ split_size if i < placement.rank() else 0
214
+ for i, split_size in enumerate(output_split_sizes)
215
+ ]
216
+ )
217
+ indices += list(range(start, start + dim_size))
218
+
219
+ return output.index_select(0, torch.tensor(indices, device=output.device))
220
+
221
+
222
+ def _handle_max_norm_col_wise(
223
+ max_norm,
224
+ norm_type,
225
+ local_shard,
226
+ input,
227
+ world_size,
228
+ gathered_inputs,
229
+ pg,
230
+ ):
231
+ """
232
+ For col-wise sharding of weight, we need to aggregate the
233
+ norm across all ranks before we can perform the proper re-norm.
234
+ Note that, the max_norm logic is only applied to the embedding
235
+ indices that are looked up and not the whole shard.
236
+
237
+ Args:
238
+ max_norm: If given, each embedding vector with norm larger
239
+ than max_norm is renormalized to have norm max_norm.
240
+ Note: this will modify weight in-place.
241
+ norm_type: The p in the p-norm to compute for the max_norm option.
242
+ local_shard: col-wise shared local weight used for lookup.
243
+ input: tensor to be applied op to.
244
+ world_size: number of ranks.
245
+ gathered_inputs: list of inputs from all ranks.
246
+ pg: process group.
247
+
248
+ Return:
249
+ local_shard_norm_renormed: local_shard re-normed to max_norm if the norm is larger
250
+ than it.
251
+
252
+ """
253
+ norm_type = norm_type if norm_type is not None else 2.0
254
+ unique_inp = torch.unique(torch.cat(gathered_inputs))
255
+ local_shard_sum = torch.sum(
256
+ torch.pow(torch.abs(local_shard), norm_type), dim=1, dtype=local_shard.dtype
257
+ )
258
+ # For col-wise sharding, we need to first aggregate the powered sum
259
+ # from each rank and then calculate the norm.
260
+ local_shard_sum = all_reduce(local_shard_sum, group=pg)
261
+ local_shard_norm = torch.pow(local_shard_sum, 1.0 / norm_type)
262
+ max_norm_tensor = torch.full(
263
+ (local_shard.size(0),),
264
+ float("inf"),
265
+ dtype=local_shard.dtype,
266
+ device=input.device,
267
+ )
268
+ max_norm_tensor[unique_inp] = max_norm
269
+ local_shard_t = local_shard.t().contiguous()
270
+ normalized_tensor = torch.where(
271
+ local_shard_norm > max_norm_tensor, max_norm_tensor, local_shard_norm
272
+ )
273
+ # Make sure divisor is not zero.
274
+ local_shard_norm[local_shard_norm == 0.0] = 1.0
275
+ local_shard_norm_renormed = (
276
+ torch.div(torch.mul(local_shard_t, normalized_tensor), local_shard_norm)
277
+ .t()
278
+ .contiguous()
279
+ )
280
+ return local_shard_norm_renormed
281
+
282
+
283
+ def _all_gather_base_input(input, pg):
284
+ """
285
+ Use _all_gather_base to get a concatenated input from each rank.
286
+
287
+ Args:
288
+ input: tensor to be applied op on.
289
+ pg: process group.
290
+
291
+ Returns:
292
+ gathered_inputs: input gathered from each rank and concat by dim 0.
293
+ """
294
+ # allgather the inputs first.
295
+ gather_inp_size = list(input.size())
296
+ gather_inp_size[0] = input.size(0) * dist.get_world_size(pg)
297
+ gather_inp = torch.empty(gather_inp_size, device=input.device, dtype=input.dtype)
298
+ return _all_gather_base(gather_inp, input, group=pg)
299
+
300
+
301
+ def _handle_row_wise_mask(gather_inp, padding_idx, weight, world_size, rank):
302
+ """
303
+ Mask the input for embedding look-up for IDs which are not stored
304
+ on the current rank. This function also adjusts the ``padding_idx``
305
+ so that it is only used on the rank where the corresponding row is
306
+ stored.
307
+
308
+ Note that, with ``max_norm`` flag on, only weights of rows being
309
+ looked up will be re-normed. So we need an extra row for masked ID
310
+ so that it does not affect the final result and ``max_norm``.
311
+
312
+ Args:
313
+ gather_inp: tensor to be applied op on gathered from all ranks.
314
+ padding_idx: If specified, the entries at padding_idx do
315
+ not contribute to the gradient; therefore, the embedding
316
+ vector at padding_idx is not updated during training,
317
+ i.e. it remains as a fixed “pad”.
318
+ Note that the embedding vector at padding_idx is
319
+ excluded from the reduction.
320
+ weight: weight tensor of Embedding look-up table.
321
+ world_size: number of ranks.
322
+ rank: rank of the current CUDA process.
323
+
324
+ Returns:
325
+ lookup_input: Tensor of masked input.
326
+ padding_idx: adjusted padding_idx.
327
+ padding_row: The extra row we used during lookup so that
328
+ looking up does not affect ``max_norm``.
329
+ """
330
+ (start_pos, chunk_size) = get_chunk_sharding_params(
331
+ weight.size(0), world_size, weight._sharding_spec, rank
332
+ )
333
+ mask = (gather_inp < start_pos) | (gather_inp >= start_pos + chunk_size)
334
+ lookup_input = gather_inp.clone() - start_pos
335
+ lookup_input[mask] = chunk_size
336
+ if (
337
+ padding_idx is not None
338
+ and padding_idx >= start_pos
339
+ and padding_idx < (start_pos + chunk_size)
340
+ ):
341
+ padding_idx = padding_idx - start_pos
342
+ else:
343
+ padding_idx = None
344
+
345
+ # When max_norm is set, it will only re-norm the row being looked up.
346
+ padding_row = torch.zeros(
347
+ 1, weight.size(1), device=gather_inp.device, dtype=weight.dtype
348
+ )
349
+ return lookup_input, padding_idx, padding_row
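
To make the column-rearrangement step in ``_result_distribute_with_col_rearrange`` easier to follow, here is a single-process sketch of just the index math; the placement order and sizes are assumed example values, and the helpers from ``_internals`` are emulated inline rather than imported:

import torch

# Assumed example: a sharding dim of size 10 split across 4 ranks whose
# placements in the spec are out of rank order: [rank 2, rank 0, rank 3, rank 1].
placement_ranks = [2, 0, 3, 1]
sharding_dim_size, world_size = 10, 4

# Emulate get_split_size / get_chunked_dim_size for this toy case.
split_size = -(-sharding_dim_size // world_size)  # ceil division -> 3
output_split_sizes = [0] * world_size
for idx, rank in enumerate(placement_ranks):
    output_split_sizes[rank] = max(0, min(split_size, sharding_dim_size - split_size * idx))

# Same index construction as the function above: walk placements in spec order
# and pick out each rank's contiguous block from the rank-ordered output.
indices = []
for rank in placement_ranks:
    dim_size = output_split_sizes[rank]
    start = sum(s if i < rank else 0 for i, s in enumerate(output_split_sizes))
    indices += list(range(start, start + dim_size))

output = torch.arange(sharding_dim_size)
print(output.index_select(0, torch.tensor(indices)))
# tensor([4, 5, 6, 0, 1, 2, 7, 8, 9, 3])
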
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py ADDED
@@ -0,0 +1,293 @@
1
+
2
+ import torch
3
+ import torch.distributed as dist
4
+ from torch.distributed._shard.sharded_tensor import ShardedTensor
5
+ from torch.distributed._shard.sharding_spec import ChunkShardingSpec
6
+ from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
7
+ from torch.distributed.nn.functional import all_gather, reduce_scatter
8
+
9
+ from ._common import (
10
+ _all_gather_base_input,
11
+ _handle_col_wise_sharding_base,
12
+ _handle_max_norm_col_wise,
13
+ _handle_row_wise_mask,
14
+ )
15
+
16
+
17
+ @custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.embedding)
18
+ def sharded_embedding(types, args, kwargs, pg):
19
+ """
20
+ Handles ``__torch_function__`` dispatch for ``torch.nn.functional.embedding``.
21
+ This method computes a sharded embedding lookup and has the following limitations:
22
+
23
+ 1. Supports only sharding of ``weight``.
24
+ 2. Supports only ``ChunkShardingSpec``.
25
+ 3. Supports only a single local shard per rank.
26
+ 4. Supports all specs except for scale_grad_by_freq, sparse, etc.
27
+
28
+ Based on the dimension that the weight is sharded on, there are two
29
+ algorithms:
30
+
31
+ ROWWISE SHARDING
32
+ ================
33
+ For row-wise sharding the weight is sharded on dimension 0.
34
+
35
+ The overall algorithm can be best explained with an example. Let's assume
36
+ the dims for input are (4 x 6) and W are (10 x 17) and W is sharded across
37
+ 4 GPUs creating 3 shards of (3 x 17) and 1 shard of (1 x 17).
38
+ The algorithm is as follows:
39
+
40
+ 1. First the input is all gathered to all ranks, since this is SPMD and
41
+ input is actually sharded across all ranks. The inputs then become
42
+ 4 (4 x 6) tensors on each rank. For example, if the given input is
43
+ tensor([[6, 5, 2, 9, 6, 3],
44
+ [3, 1, 2, 4, 7, 6],
45
+ [4, 0, 4, 9, 8, 9],
46
+ [8, 6, 6, 4, 6, 1]])
47
+ on rank 0.
48
+ Then on every rank, we will have this tensor.
49
+ If input itself is already replicated, no all-gather will be done.
50
+ 2. Next, we mask the IDs which are not stored on that rank.
52
+ For example, on rank 0 we store IDs [0, 1, 2]. We only keep the IDs
53
+ inside that set. The rest of them will be masked to an extra row.
53
+ The masked matrix will be used for embedding look up and is like:
54
+ tensor([[4, 4, 2, 4, 4, 4],
55
+ [4, 1, 2, 4, 4, 4],
56
+ [4, 0, 4, 4, 4, 4],
57
+ [4, 4, 4, 4, 4, 1]])
58
+ The reason for having an extra row (i.e., the number 4 in the example) is
59
+ that when max_norm is specified, only weights that have been looked up
60
+ are re-normed, so masking IDs whose embeddings are not stored on the
61
+ current rank to an extra row ensures max_norm still works as expected.
62
+ 3. If max_norm is specified, the extra row guarantees that the mask ID will
63
+ not affect the behavior of weight re-norm.
64
+
65
+ COLWISE SHARDING
66
+ ================
67
+ For col-wise sharding the weight is sharded on dimension 1.
68
+
69
+ The overall algorithm can be best explained with an example. Let's assume
70
+ the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across
71
+ 4 GPUs creating 3 shards of (16 x 5) and 1 shard of (16 x 2).
72
+ The algorithm is as follows:
73
+
74
+ 1. First the input is broadcasted to all ranks, since this is SPMD we
75
+ actually do an all_gather for all the inputs resulting in 4 (4 x 6)
76
+ inputs on each rank.
77
+ 2. Next we perform a local embedding lookup operation by applying each
78
+ input (4 x 6) with the local shard (16 x 5) ((16 x 2) for the last).
79
+ This results in 4 (5 x 6 x 4) ((2 x 6 x 4) for the last) matrices
80
+ on each rank. We transpose dim 0 and dim 2.
81
+ 3. Next, we concat these 4 matrices and perform an all2all to share the
82
+ appropriate (5 x 6 x 4) or (2 x 6 x 4) matrices to each rank.
83
+ 4. Now, each rank receives a (17 x 6 x 4) matrix which is basically the
84
+ size of the result we need.
85
+ 5. If placements are not in order any appropriate rearrangement of columns
86
+ is done for the (17 x 6 x 4) matrix and finally we transpose the
87
+ dim 0 and dim 2 again.
88
+ 6. If max_norm is specified, we manually sum up the norm and renorm. Because
89
+ the renorm must be in place, we need to override the local_shard to mimic
90
+ this behavior.
91
+ """
92
+ # Validate input params
93
+ _validate_embedding_param(args, kwargs)
94
+
95
+ input = args[0]
96
+ weight = args[1]
97
+ max_norm = kwargs.get("max_norm")
98
+ norm_type = kwargs.get("norm_type")
99
+ padding_idx = kwargs.get("padding_idx")
100
+
101
+ local_shard = weight.local_tensor().contiguous()
102
+ sharding_dim = weight._sharding_spec.dim
103
+ world_size = dist.get_world_size(pg)
104
+ rank = dist.get_rank(pg)
105
+
106
+ if sharding_dim == 1:
107
+ output, local_shard = _handle_col_wise_sharding(
108
+ input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg
109
+ )
110
+ weight.local_shards()[0].tensor = local_shard
111
+ return output
112
+ elif sharding_dim == 0:
113
+ return _handle_row_wise_sharding(
114
+ input,
115
+ world_size,
116
+ weight,
117
+ local_shard,
118
+ max_norm,
119
+ norm_type,
120
+ padding_idx,
121
+ rank,
122
+ pg,
123
+ )
124
+ else:
125
+ raise RuntimeError(
126
+ f"nn.Embedding weight sharded on dim {sharding_dim} not supported!"
127
+ )
128
+
129
+
130
+ def _validate_embedding_param(args, kwargs):
131
+ """
132
+ Validate input params of sharded embedding op.
133
+
134
+ Args:
135
+ input: list of ID used for lookup.
136
+ weight: sharded weight tensor.
137
+ kwargs: same as normal Embedding.
138
+
139
+ Return: None.
140
+ """
141
+
142
+ input = args[0]
143
+ weight = args[1]
144
+ max_norm = kwargs.get("max_norm")
145
+ scale_grad_by_freq = kwargs.get("scale_grad_by_freq")
146
+ sparse = kwargs.get("sparse")
147
+
148
+ # Validate types
149
+ if not isinstance(input, torch.Tensor):
150
+ raise TypeError("input need to be torch.Tensor")
151
+ if not isinstance(weight, ShardedTensor):
152
+ raise TypeError("weight needs to be ShardedTensor")
153
+ weight_size = weight.size()
154
+ if len(weight_size) != 2:
155
+ raise ValueError("Weight needs to have exactly 2 dims")
156
+ if int(torch.min(input).item()) < 0:
157
+ raise ValueError(
158
+ "Index out of range in Input %d %d",
159
+ int(torch.min(input).item()),
160
+ weight_size[1],
161
+ )
162
+ if int(torch.max(input).item()) >= weight_size[0]:
163
+ raise ValueError(
164
+ "Index out of range in Input %d %d",
165
+ int(torch.max(input).item()),
166
+ weight_size[1],
167
+ )
168
+ if scale_grad_by_freq:
169
+ raise RuntimeError(
170
+ 'nn.Embedding weight sharded with flag on "scale_grad_by_freq" not supported!'
171
+ )
172
+ if sparse:
173
+ raise RuntimeError(
174
+ 'nn.Embedding weight sharded with flag on "sparse" not supported!'
175
+ )
176
+ if max_norm and max_norm <= 0.0:
177
+ raise ValueError('"max_norm" must be larger than zero!')
178
+
179
+ if not isinstance(weight._sharding_spec, ChunkShardingSpec):
180
+ raise ValueError("Only ChunkShardingSpec supported for ShardedTensor ops!")
181
+ if len(weight.local_shards()) != 1:
182
+ raise ValueError("Only one local shard supported!")
183
+
184
+
185
+ def _handle_col_wise_sharding(
186
+ input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg
187
+ ):
188
+ """
189
+ Entry-point function to handle the logic of col-wise sharding of weight
190
+ for embedding. (Detailed explanations of the logic can be found in
191
+ the comment for sharded_embedding.)
192
+
193
+ Args:
194
+ input: list of ID used for lookup and aggregation.
195
+ world_size: number of ranks.
196
+ weight: sharded weight tensor.
197
+ local_shard: col-wise shared local weight used for lookup.
198
+ max_norm: If given, each embedding vector with norm larger
199
+ than max_norm is renormalized to have norm max_norm.
200
+ Note: this will modify weight in-place.
201
+ norm_type: The p in the p-norm to compute for the max_norm option.
202
+ padding_idx: If specified, the entries at padding_idx do
203
+ not contribute to the gradient; therefore, the embedding
204
+ vector at padding_idx is not updated during training,
205
+ i.e. it remains as a fixed “pad”.
206
+ pg: process group.
207
+
208
+ Returns: final result of lookup.
209
+ """
210
+ # allgather the inputs first for non Replicated Tensor.
211
+ gathered_inputs = all_gather(input, group=pg)
212
+
213
+ if max_norm is not None:
214
+ # max_norm changes the weight in-place
215
+ local_shard = _handle_max_norm_col_wise(
216
+ max_norm, norm_type, local_shard, input, world_size, gathered_inputs, pg
217
+ )
218
+
219
+ output = _handle_col_wise_sharding_base(
220
+ torch.nn.functional.embedding,
221
+ len(input.size()),
222
+ input,
223
+ world_size,
224
+ weight,
225
+ local_shard,
226
+ pg,
227
+ gathered_inputs,
228
+ padding_idx=padding_idx,
229
+ )
230
+ return (output, local_shard)
231
+
232
+
233
+ def _handle_row_wise_sharding(
234
+ input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, rank, pg
235
+ ):
236
+ """
237
+ Entry-point function to handle the logic of row-wise sharding of weight
238
+ for embedding. (Detailed explanations of the logic can be found in
239
+ the comment for sharded_embedding.)
240
+
241
+ Args:
242
+ input: list of ID used for lookup and aggregation.
243
+ world_size: number of ranks.
244
+ weight: sharded weight tensor.
245
+ local_shard: row-wise shared local weight used for lookup.
246
+ max_norm: If given, each embedding vector with norm larger
247
+ than max_norm is renormalized to have norm max_norm.
248
+ Note: this will modify weight in-place.
249
+ norm_type: The p in the p-norm to compute for the max_norm option.
250
+ padding_idx: If specified, the entries at padding_idx do
251
+ not contribute to the gradient; therefore, the embedding
252
+ vector at padding_idx is not updated during training,
253
+ i.e. it remains as a fixed “pad”.
254
+ rank: rank of the current CUDA process.
255
+ pg: process group.
256
+
257
+ Returns: final result of lookup.
258
+ """
259
+ # allgather the inputs first for non Replicated Tensor.
260
+ gather_inp = _all_gather_base_input(input, pg)
261
+
262
+ # Mask the input according to sharding spec.
263
+ lookup_input, padding_idx, padding_row = _handle_row_wise_mask(
264
+ gather_inp, padding_idx, weight, world_size, rank
265
+ )
266
+
267
+ # When input is a large tensor, the value of weight is changed.
268
+ # This is a workaround for now. GH issue: #81717
269
+ if max_norm is not None:
270
+ torch.nn.functional.embedding(
271
+ torch.unique(lookup_input)[:-1],
272
+ local_shard,
273
+ padding_idx=padding_idx,
274
+ max_norm=max_norm,
275
+ norm_type=norm_type,
276
+ )
277
+ max_norm = None
278
+
279
+ local_input_embeddings = torch.nn.functional.embedding(
280
+ lookup_input,
281
+ torch.cat([local_shard, padding_row]),
282
+ padding_idx=padding_idx,
283
+ max_norm=max_norm,
284
+ norm_type=norm_type,
285
+ )
286
+
287
+ # TODO: Make the result a PartialTensor.
288
+ local_shards = local_input_embeddings.chunk(pg.size())
289
+ return reduce_scatter(
290
+ torch.empty_like(local_shards[0]),
291
+ list(local_shards),
292
+ group=pg,
293
+ )
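
As a concrete illustration of the row-wise masking that ``_handle_row_wise_sharding`` relies on (see ``_handle_row_wise_mask`` in ``_common.py``), the following single-process sketch reproduces the masking arithmetic on a toy input; ``start_pos``, ``chunk_size`` and the IDs are assumed example values:

import torch

# Assumed toy setup: this "rank" stores embedding rows [3, 4, 5, 6, 7],
# i.e. start_pos = 3 and chunk_size = 5, out of a 10-row embedding table.
start_pos, chunk_size = 3, 5
gather_inp = torch.tensor([[6, 5, 2, 9], [3, 1, 2, 4]])

# IDs outside [start_pos, start_pos + chunk_size) are mapped to an extra row
# (index == chunk_size) appended to the local shard, exactly as in
# _handle_row_wise_mask above.
mask = (gather_inp < start_pos) | (gather_inp >= start_pos + chunk_size)
lookup_input = gather_inp.clone() - start_pos
lookup_input[mask] = chunk_size

print(lookup_input)
# tensor([[3, 2, 5, 5],
#         [0, 5, 5, 1]])
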
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py ADDED
@@ -0,0 +1,476 @@
1
+
2
+ from typing import cast, List
3
+
4
+ import torch
5
+ import torch.distributed as dist
6
+ from torch._C._distributed_c10d import ReduceOp
7
+ from torch.distributed._shard.sharded_tensor import ShardedTensor
8
+ from torch.distributed._shard.sharding_spec import ChunkShardingSpec
9
+ from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
10
+ from torch.distributed.nn.functional import all_gather, reduce_scatter
11
+
12
+ from ._common import (
13
+ _all_gather_base_input,
14
+ _handle_col_wise_sharding_base,
15
+ _handle_max_norm_col_wise,
16
+ _handle_row_wise_mask,
17
+ )
18
+
19
+
20
+ @custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.embedding_bag)
21
+ def sharded_embedding_bag(types, args, kwargs, pg):
22
+ """
23
+ Handles ``__torch_function__`` dispatch for ``torch.nn.functional.embedding_bag``.
24
+ This method computes a sharded embedding bag aggregation and has the following limitations:
25
+
26
+ 1. Supports only sharding of ``weight``.
27
+ 2. Supports only ``ChunkShardingSpec``.
28
+ 3. Supports only a single local shard per rank.
29
+ 4. Supports all specs except for scale_grad_by_freq, sparse, etc.
30
+
31
+ Based on the dimension that the weight is sharded on, there are two
32
+ algorithms:
33
+
34
+ ROWWISE SHARDING
35
+ ================
36
+ For row-wise sharding the weight is sharded on dimension 0.
37
+
38
+ The overall algorithm can be best explained with an example. Let's assume
39
+ the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across
40
+ 4 GPUs creating 4 shard of (4 x 17).
41
+ The algorithm is as follows:
42
+
43
+ 1. First the input is all gathered to all ranks, since this is SPMD and
44
+ input is actually sharded across all ranks. The inputs then become a
45
+ 4 (4 x 6) tensor on each rank. For example if the given input is
46
+ tensor([[6, 5, 2, 9, 6, 3],
47
+ [3, 1, 2, 4, 7, 6],
48
+ [4, 0, 4, 9, 8, 9],
49
+ [8, 6, 6, 4, 6, 1]])
50
+ on rank 0.
51
+ Then on every rank, we will have this tensor.
52
+ If input itself is already replicated, no all-gather will be done.
53
+ 2. Next, we mask the ID which are not stored on that rank.
54
+ For example on rank 0, we store ID [0, 1, 2]. We only keep the ID
55
+ inside the set of numbers. The rest of them will be masked to an extra row.
56
+ The masked matrix will be used for embedding look up and is like:
57
+ tensor([[4, 4, 2, 4, 4, 4],
58
+ [4, 1, 2, 4, 4, 4],
59
+ [4, 0, 4, 4, 4, 4],
60
+ [4, 4, 4, 4, 4, 1]])
61
+ 3. If ``max_norm`` is specified, the extra row guarantees that the mask ID will
62
+ not affect the behavior of weigh re-norm.
63
+ 4. The example above only happens in one rank and each rank does a very similar thing.
64
+ For "Mean" mode we need to divide by either column size (2D) or the interval length
65
+ defined by the offset (excluding the row specified in ``padding_idx``).
66
+ We also need to mask the unexisting row to neg Inf so that negative value does not
67
+ gets wiped out in the "Max" mode.
68
+
69
+ COLWISE SHARDING
70
+ ================
71
+ For col-wise sharding the weight is sharded on dimension 1.
72
+
73
+ The overall algorithm can be best explained with an example. Let's assume
74
+ the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across
75
+ 4 GPUs creating 3 shards of (16 x 5) and 1 shard of (16 x 2).
76
+ The algorithm is as follows:
77
+
78
+ 1. First the input is broadcasted to all ranks, since this is SPMD we
79
+ actually do an all_gather for all the inputs resulting in 4 (4 x 6)
80
+ inputs on each rank.
81
+ 2. Next we perform local embedding bag operation under the given mode by
82
+ apply each input (4 x 6) with the local shard (16 x 5) ((16 x 2) for the last).
83
+ This results in 4 (5 x 4) ((2 x 4) for the last) matrices on each rank.
84
+ We transpose the aggregation result.
85
+ 3. Next, we concatenate these 4 matrices and perform an all2all to share the
86
+ appropriate (5 x 4) or (2 x 4) matrices to each rank.
87
+ 4. Now, each rank receives a (17 x 4) matrix which is basically the
88
+ size of the result we need.
89
+ 5. If placements are not in order any appropriate rearrangement of columns
90
+ are done for the (17 x 4) matrix and finally we transpose the output again.
91
+ 6. If max_norm is specified, we manually sum up the norm and renorm. Because
92
+ the renorm must be in place, we need to override the local_shard to mimic
93
+ this behavior.
94
+ """
95
+ # Validate input params
96
+ _validate_embedding_bag_param(args, kwargs)
97
+
98
+ input = args[0]
99
+ weight = args[1]
100
+ offsets = kwargs.get("offsets")
101
+ per_sample_weights = kwargs.get("per_sample_weights")
102
+ mode = kwargs.get("mode")
103
+ max_norm = kwargs.get("max_norm")
104
+ norm_type = kwargs.get("norm_type")
105
+ include_last_offset = kwargs.get("include_last_offset")
106
+ padding_idx = kwargs.get("padding_idx")
107
+
108
+ local_shard = weight.local_tensor().contiguous()
109
+ sharding_dim = weight._sharding_spec.dim
110
+ world_size = dist.get_world_size(pg)
111
+ rank = dist.get_rank(pg)
112
+ if include_last_offset:
113
+ offsets = offsets[:-1]
114
+
115
+ if sharding_dim == 1:
116
+ output, local_shard = _handle_col_wise_sharding(
117
+ input,
118
+ world_size,
119
+ weight,
120
+ local_shard,
121
+ offsets,
122
+ per_sample_weights,
123
+ mode,
124
+ max_norm,
125
+ norm_type,
126
+ padding_idx,
127
+ pg,
128
+ )
129
+ weight.local_shards()[0].tensor = local_shard
130
+ return output
131
+ elif sharding_dim == 0:
132
+ return _handle_row_wise_sharding(
133
+ input,
134
+ world_size,
135
+ weight,
136
+ local_shard,
137
+ offsets,
138
+ per_sample_weights,
139
+ mode,
140
+ max_norm,
141
+ norm_type,
142
+ padding_idx,
143
+ rank,
144
+ pg,
145
+ )
146
+ else:
147
+ raise RuntimeError(
148
+ f"nn.EmbeddingBag weight sharded on dim {sharding_dim} not supported!"
149
+ )
150
+
151
+
152
+ def _validate_embedding_bag_param(args, kwargs):
153
+ """
154
+ Validate input params of sharded embeddingBag op.
155
+
156
+ Args:
157
+ input: list of ID used for lookup and aggregation.
158
+ weight: sharded weight tensor.
159
+ kwargs: same as normal EmbeddingBag.
160
+
161
+ Return: None.
162
+ """
163
+
164
+ input = args[0]
165
+ weight = args[1]
166
+ offsets = kwargs.get("offsets")
167
+ per_sample_weights = kwargs.get("per_sample_weights")
168
+ mode = kwargs.get("mode")
169
+ max_norm = kwargs.get("max_norm")
170
+ scale_grad_by_freq = kwargs.get("scale_grad_by_freq")
171
+ sparse = kwargs.get("sparse")
172
+ include_last_offset = kwargs.get("include_last_offset")
173
+
174
+ # Validate types
175
+ if not isinstance(input, torch.Tensor):
176
+ raise TypeError("input need to be torch.Tensor")
177
+ if offsets is not None and not isinstance(offsets, torch.Tensor):
178
+ raise TypeError("offsets need to be torch.Tensor")
179
+ if per_sample_weights is not None and not isinstance(
180
+ per_sample_weights, torch.Tensor
181
+ ):
182
+ raise TypeError("per_sample_weights need to be torch.Tensor")
183
+ if not isinstance(weight, ShardedTensor):
184
+ raise TypeError("weight needs to be ShardedTensor")
185
+ if len(input.size()) > 2:
186
+ raise ValueError("Input more than 2 dims not supported")
187
+ weight_size = weight.size()
188
+ if len(weight_size) != 2:
189
+ raise ValueError("Weight needs to have exactly 2 dims")
190
+ if int(torch.min(input).item()) < 0:
191
+ raise ValueError(
192
+ "Index out of range in Input %d %d",
193
+ int(torch.min(input).item()),
194
+ weight_size[1],
195
+ )
196
+ if int(torch.max(input).item()) >= weight_size[0]:
197
+ raise ValueError(
198
+ "Index out of range in Input %d %d",
199
+ int(torch.max(input).item()),
200
+ weight_size[1],
201
+ )
202
+ if offsets is not None and len(input.size()) != 1:
203
+ raise ValueError("Input dimension needs to be exactly 1 dim")
204
+ if len(input.size()) == 1 and offsets is None:
205
+ raise ValueError("offsets is required for 1D input")
206
+ if per_sample_weights is not None and per_sample_weights.size() != input.size():
207
+ raise ValueError(
208
+ f"per_sample_weights size {per_sample_weights.size()} not equal to input size {input.size()}"
209
+ )
210
+ if mode is None:
211
+ mode = "mean"
212
+ if mode not in ["sum", "mean", "max"]:
213
+ raise ValueError(f"mode '{mode}' is not supported")
214
+ if scale_grad_by_freq:
215
+ raise RuntimeError(
216
+ 'nn.Embedding weight sharded with flag on "scale_grad_by_freq" not supported!'
217
+ )
218
+ if sparse:
219
+ raise RuntimeError(
220
+ 'nn.Embedding weight sharded with flag on "sparse" not supported!'
221
+ )
222
+ if include_last_offset and offsets is None:
223
+ raise ValueError('offsets is required for flag "include_last_offset"!')
224
+ if include_last_offset and cast(List[int], offsets)[-1] != input.size(0):
225
+ raise ValueError(
226
+ 'offsets need to have the input size in the end when the flag "include_last_offset" is on!'
227
+ )
228
+
229
+ if max_norm and max_norm <= 0.0:
230
+ raise ValueError('"max_norm" must be larger than zero!')
231
+
232
+ if not isinstance(weight._sharding_spec, ChunkShardingSpec):
233
+ raise ValueError("Only ChunkShardingSpec supported for ShardedTensor ops!")
234
+ if len(weight.local_shards()) != 1:
235
+ raise ValueError("Only one local shard supported!")
236
+
237
+
238
+ def _handle_col_wise_sharding(
239
+ input,
240
+ world_size,
241
+ weight,
242
+ local_shard,
243
+ offsets,
244
+ per_sample_weights,
245
+ mode,
246
+ max_norm,
247
+ norm_type,
248
+ padding_idx,
249
+ pg,
250
+ ):
251
+ """
252
+ Entry-point function to handle the logic of col-wise sharding of weight
253
+ for embeddingBag. (Detailed explanations of the logic can be found in
254
+ the comment for sharded_embedding_bag.)
255
+
256
+ Args:
257
+ input: list of ID used for lookup and aggregation.
258
+ world_size: number of ranks.
259
+ weight: sharded weight tensor.
260
+ local_shard: col-wise shared local weight used for lookup.
261
+ offsets: list of start positions of each bag for 1D input.
262
+ per_sample_weights: weights for weighted sum mode.
263
+ mode: aggregation method of each bag.
264
+ max_norm: If given, each embedding vector with norm larger
265
+ than max_norm is renormalized to have norm max_norm.
266
+ Note: this will modify weight in-place.
267
+ norm_type: The p in the p-norm to compute for the max_norm option.
268
+ padding_idx: If specified, the entries at padding_idx do
269
+ not contribute to the gradient; therefore, the embedding
270
+ vector at padding_idx is not updated during training,
271
+ i.e. it remains as a fixed “pad”.
272
+ Note that the embedding vector at padding_idx is
273
+ excluded from the reduction.
274
+ pg: process group.
275
+
276
+ Return:
277
+ output: final result of lookup and aggregation.
278
+ local_shard: col-wise shared local weight used for lookup.
279
+ If max_norm, this will be the renormed weight.
280
+ """
281
+ # allgather the special input of embedding bag first.
282
+ (
283
+ gathered_inputs,
284
+ gathered_per_sample_weights,
285
+ gathered_offsets,
286
+ ) = _all_gather_embedding_bag_input(input, per_sample_weights, offsets, pg)
287
+
288
+ if max_norm is not None:
289
+ # max_norm changes the weight in-place
290
+ local_shard = _handle_max_norm_col_wise(
291
+ max_norm, norm_type, local_shard, input, world_size, gathered_inputs, pg
292
+ )
293
+
294
+ output = _handle_col_wise_sharding_base(
295
+ torch.nn.functional.embedding_bag,
296
+ 1,
297
+ input,
298
+ world_size,
299
+ weight,
300
+ local_shard,
301
+ pg,
302
+ gathered_inputs,
303
+ mode=mode,
304
+ gathered_per_sample_weights=gathered_per_sample_weights,
305
+ gathered_offsets=gathered_offsets,
306
+ padding_idx=padding_idx,
307
+ )
308
+ return (output, local_shard)
309
+
310
+
311
+ def _handle_row_wise_sharding(
312
+ input,
313
+ world_size,
314
+ weight,
315
+ local_shard,
316
+ offsets,
317
+ per_sample_weights,
318
+ mode,
319
+ max_norm,
320
+ norm_type,
321
+ padding_idx,
322
+ rank,
323
+ pg,
324
+ ):
325
+ """
326
+ Entry-point function to handle the logic of row-wise sharding of weight
327
+ for embeddingBag. (Detailed explanations of the logic can be found in
328
+ the comment for sharded_embedding_bag.)
329
+
330
+ Args:
331
+ input: list of ID used for lookup and aggregation.
332
+ world_size: number of ranks.
333
+ weight: sharded weight tensor.
334
+ local_shard: row-wise shared local weight used for lookup.
335
+ offsets: list of start positions of each bag for 1D input.
336
+ per_sample_weights: weights for weighted sum mode.
337
+ mode: aggregation method of each bag.
338
+ max_norm: If given, each embedding vector with norm larger
339
+ than max_norm is renormalized to have norm max_norm.
340
+ Note: this will modify weight in-place.
341
+ norm_type: The p in the p-norm to compute for the max_norm option.
342
+ padding_idx: If specified, the entries at padding_idx do
343
+ not contribute to the gradient; therefore, the embedding
344
+ vector at padding_idx is not updated during training,
345
+ i.e. it remains as a fixed “pad”.
346
+ Note that the embedding vector at padding_idx is
347
+ excluded from the reduction.
348
+ rank: # of cuda process.
349
+ pg: process group.
350
+
351
+ Returns:
352
+ gathered_output: final result of lookup and aggregation.
353
+ """
354
+ if input.dim() > 1 and per_sample_weights is None:
355
+ # allgather the inputs first for non Replicated Tensor.
356
+ gather_inp = _all_gather_base_input(input, pg)
357
+ else:
358
+ (
359
+ gathered_inputs,
360
+ gathered_per_sample_weights,
361
+ gathered_offsets,
362
+ ) = _all_gather_embedding_bag_input(input, per_sample_weights, offsets, pg)
363
+ cat_dim = 0 if input.dim() != 1 else -1
364
+ gather_inp = torch.cat(gathered_inputs, dim=cat_dim)
365
+ if per_sample_weights is not None:
366
+ per_sample_weights = torch.cat(gathered_per_sample_weights, dim=cat_dim)
367
+ offset_add = 0 if input.dim() > 1 else input.size(0)
368
+ if offsets is not None:
369
+ offsets_list = torch.cat(
370
+ [gathered_offsets[i] + (offset_add * i) for i in range(pg.size())],
371
+ dim=cat_dim,
372
+ )
373
+
374
+ # Mask the input according to sharding spec.
375
+ lookup_input, padding_local, padding_row = _handle_row_wise_mask(
376
+ gather_inp, padding_idx, weight, world_size, rank
377
+ )
378
+ if mode == "max":
379
+ padding_row[:] = -float("Inf")
380
+
381
+ # When input is a large tensor, the value of weight is changed.
382
+ # This is a walk-around for now. GH issue: #81717.
383
+ if max_norm is not None:
384
+ torch.nn.functional.embedding_bag(
385
+ torch.unique(lookup_input)[:-1],
386
+ local_shard,
387
+ offsets=torch.tensor([0], device=local_shard.device, dtype=torch.long),
388
+ mode=mode,
389
+ per_sample_weights=None,
390
+ max_norm=max_norm,
391
+ norm_type=norm_type,
392
+ padding_idx=padding_local,
393
+ )
394
+ max_norm = None
395
+ result = torch.nn.functional.embedding_bag(
396
+ lookup_input,
397
+ torch.cat([local_shard, padding_row]),
398
+ offsets=offsets_list if offsets is not None else offsets, # type: ignore[possibly-undefined]
399
+ mode=mode if mode != "mean" else "sum",
400
+ per_sample_weights=per_sample_weights,
401
+ max_norm=max_norm,
402
+ norm_type=norm_type,
403
+ padding_idx=padding_local,
404
+ )
405
+
406
+ op = ReduceOp.SUM if mode != "max" else ReduceOp.MAX
407
+ # TODO: Make the result a PartialTensor and move the logic below there.
408
+ local_shards = result.chunk(pg.size())
409
+ result = reduce_scatter(
410
+ torch.empty_like(local_shards[0]),
411
+ list(local_shards),
412
+ op=op,
413
+ group=pg,
414
+ )
415
+
416
+ # For Mean, we cannot do the division until very end because the sum of means
417
+ # not equal to the mean of sum. (Divisor is different)
418
+ if mode == "mean":
419
+ if input.dim() > 1:
420
+ padding_idx = padding_idx if padding_idx is not None else -1
421
+ split_sizes = torch.sum(
422
+ torch.ne(input, padding_idx), dim=-1, dtype=local_shard.dtype
423
+ )
424
+ else:
425
+ split_sizes = torch.cat(
426
+ (
427
+ offsets[1 : offsets.size(0)] - offsets[0:-1],
428
+ (input.size(0) - offsets[-1]).unsqueeze(0),
429
+ ),
430
+ dim=-1,
431
+ )
432
+ return torch.div(result, split_sizes.unsqueeze(1))
433
+
434
+ # Return the appropriate local result.
435
+ return result
436
+
437
+
438
+ def _all_gather_embedding_bag_input(input, per_sample_weights, offsets, pg):
439
+ """
440
+ In case we need to gather input and all other parameters of embeddingBag
441
+ ops, we need to stack all input together to perform ``all_gather``
442
+ collective communication just once.
443
+
444
+ Note that since offsets does not share the same size as input and
445
+ is always smaller than input, we resize it during the communication.
446
+
447
+ Args:
448
+ input: tensor to be applied op on.
449
+ per_sample_weights: weights for weighted sum mode.
450
+ offsets: when input is 1D. offsets determines the starting
451
+ index position of each bag (sequence) in input.
452
+ pg: process group.
453
+
454
+ Returns:
455
+ gathered_inputs: list of input tensor gathered from each rank.
456
+ gathered_per_sample_weights: list of per_sample_weights from each rank.
457
+ gathered_offsets: list of offsets from each rank.
458
+ """
459
+ input_to_gather = [input]
460
+ if per_sample_weights is not None:
461
+ input_to_gather.append(per_sample_weights)
462
+ if offsets is not None:
463
+ input_to_gather.append(offsets.clone().resize_(input.size()))
464
+ gathered_inputs = all_gather(torch.stack(input_to_gather), group=pg)
465
+
466
+ gathered_per_sample_weights = None
467
+ if per_sample_weights is not None:
468
+ gathered_per_sample_weights = [t[1] for t in gathered_inputs]
469
+ gathered_offsets = None
470
+ if offsets is not None:
471
+ idx = 2 if per_sample_weights is not None else 1
472
+ gathered_offsets = [
473
+ t[idx].resize_(offsets.size()).to(offsets.dtype) for t in gathered_inputs
474
+ ]
475
+ gathered_inputs = [t[0].to(input.dtype) for t in gathered_inputs]
476
+ return gathered_inputs, gathered_per_sample_weights, gathered_offsets
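
To clarify the mean-mode division at the end of ``_handle_row_wise_sharding`` above, here is a local sketch of how the per-bag divisors are derived from ``offsets`` for a 1-D input; the values are assumed example values:

import torch

# Assumed example: a 1-D input of 8 IDs grouped into 3 bags by offsets.
ids = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 3, 5])

# Interval lengths per bag: differences between consecutive offsets, plus the
# tail bag that runs to the end of the input (mirrors the split_sizes logic
# in _handle_row_wise_sharding).
split_sizes = torch.cat(
    (offsets[1:] - offsets[:-1], (ids.size(0) - offsets[-1]).unsqueeze(0)),
    dim=-1,
)
print(split_sizes)  # tensor([3, 2, 3])

# A summed result of shape (num_bags, embedding_dim) is then divided row-wise
# by these counts to turn "sum" into "mean".
summed = torch.ones(3, 4)
mean = torch.div(summed, split_sizes.unsqueeze(1))
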
llmeval-env/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__init__.py ADDED
@@ -0,0 +1,12 @@
1
+ # Keep old package for BC purposes, this file should be removed once
2
+ # everything moves to the `torch.distributed._shard` package.
3
+ import sys
4
+ import torch
5
+ import warnings
6
+
7
+ from torch.distributed._shard.sharded_tensor import * # noqa: F403
8
+ warnings.warn(
9
+ "torch.distributed._sharded_tensor will be deprecated, use torch.distributed._shard.sharded_tensor instead",
10
+ DeprecationWarning
11
+ )
12
+ sys.modules['torch.distributed._sharded_tensor'] = torch.distributed._shard.sharded_tensor
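
The shim above keeps old imports working by registering the new package under the legacy module name in ``sys.modules``. A minimal sketch of the same pattern with made-up module names (``old_pkg`` and ``new_pkg`` are purely illustrative, not part of this diff):

import sys
import types
import warnings

# Hypothetical stand-in for the real package being re-exported.
new_pkg = types.ModuleType("new_pkg")
new_pkg.answer = 42

warnings.warn(
    "old_pkg will be deprecated, use new_pkg instead", DeprecationWarning
)
# Any later `import old_pkg` resolves to this module object via sys.modules.
sys.modules["old_pkg"] = new_pkg

import old_pkg  # noqa: E402
assert old_pkg.answer == 42
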
llmeval-env/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (566 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from .join import Join
2
+ from .join import Joinable
3
+ from .join import JoinHook
llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/join.py ADDED
@@ -0,0 +1,346 @@
1
+ import warnings
2
+ from abc import ABC, abstractmethod
3
+ from types import TracebackType
4
+ from typing import Any, List, NamedTuple, Optional, Type
5
+
6
+ import torch
7
+ import torch.distributed as dist
8
+
9
+ __all__ = ['JoinHook', 'Joinable', 'Join']
10
+
11
+ class JoinHook:
12
+ r"""
13
+ This defines a join hook, which provides two entry points in the join context manager.
14
+
15
+ Entry points: a main hook, which is called repeatedly while there exists a non-joined
16
+ process, and a post-hook, which is called once all processes have joined.
17
+
18
+ To implement a join hook for the generic join context manager, define a
19
+ class that inherits from :class:`JoinHook` and override ``main_hook()`` and
20
+ ``post_hook()`` as appropriate.
21
+ """
22
+
23
+ def main_hook(self) -> None:
24
+ r"""Call this hook while there exists a non-joined process to shadow collective communications in a training iteration.
25
+
26
+ A training iteration consists of one forward pass, backward pass, and optimizer step.
27
+ """
28
+ ...
29
+
30
+ def post_hook(self, is_last_joiner: bool) -> None:
31
+ r"""
32
+ Call hook after all processes have joined.
33
+
34
+ It is passed an additional ``bool`` argument ``is_last_joiner``, which indicates if the rank is one of the last to join.
35
+
36
+ Arguments:
37
+ is_last_joiner (bool): ``True`` if the rank is one of the last to
38
+ join; ``False`` otherwise.
39
+ """
40
+ ...
41
+
42
+
43
+ class Joinable(ABC):
44
+ r"""
45
+ This defines an abstract base class for joinable classes.
46
+
47
+ A joinable class
48
+ (inheriting from :class:`Joinable`) should implement :meth:`join_hook`,
49
+ which returns a :class:`JoinHook` instance, in addition to
50
+ :meth:`join_device` and :meth:`join_process_group` that return device and
51
+ process group information, respectively.
52
+ """
53
+
54
+ @abstractmethod
55
+ def __init__(self):
56
+ super().__init__()
57
+ self._join_config = _JoinConfig.construct_disabled_join_config()
58
+
59
+ @abstractmethod
60
+ def join_hook(self, **kwargs) -> JoinHook:
61
+ r"""
62
+ Return a :class:`JoinHook` instance for the given :class:`Joinable`.
63
+
64
+ Arguments:
65
+ kwargs (dict): a :class:`dict` containing any keyword arguments
66
+ to modify the behavior of the join hook at run time; all
67
+ :class:`Joinable` instances sharing the same join context
68
+ manager are forwarded the same value for ``kwargs``.
69
+ """
70
+ ...
71
+
72
+ @property
73
+ @abstractmethod
74
+ def join_device(self) -> torch.device:
75
+ r"""Return the device from which to perform collective communications needed by the join context manager."""
76
+ ...
77
+
78
+ @property
79
+ @abstractmethod
80
+ def join_process_group(self) -> Any:
81
+ r"""Returns the process group for the collective communications needed by the join context manager itself."""
82
+ ...
83
+
84
+
85
+ class _JoinConfig(NamedTuple):
86
+ r"""This includes all fields needed from a :class:`Joinable` instance for the join context manager side."""
87
+
88
+ enable: bool
89
+ throw_on_early_termination: bool
90
+ is_first_joinable: bool
91
+
92
+ @staticmethod
93
+ def construct_disabled_join_config():
94
+ r"""Return a :class:`_JoinConfig` instance indicating that join-related logic should be disabled.
95
+
96
+ e.g. if the caller is not in a join context manager.
97
+ """
98
+ return _JoinConfig(
99
+ enable=False,
100
+ throw_on_early_termination=False,
101
+ is_first_joinable=False
102
+ )
103
+
104
+
105
+
106
+ class Join:
107
+ r"""
108
+ This class defines the generic join context manager, which allows custom hooks to be called after a process joins.
109
+
110
+ These hooks should shadow the
111
+ collective communications of non-joined processes to prevent hanging and
112
+ erroring and to ensure algorithmic correctness. Refer to :class:`JoinHook`
113
+ for details about the hook definition.
114
+
115
+ .. warning::
116
+ The context manager requires each participating :class:`Joinable` to
117
+ call the method :meth:`notify_join_context()` before its own per-
118
+ iteration collective communications to ensure correctness.
119
+
120
+ .. warning::
121
+ The context manager requires that all ``process_group`` attributes in
122
+ the :class:`JoinHook` objects are the same. If there are multiple
123
+ :class:`JoinHook` objects, then the ``device`` of the first is used.
124
+ The process group and device information is used for checking for non-
125
+ joined processes and for notifying processes to throw an exception if
126
+ ``throw_on_early_termination`` is enabled, both of which use an all-
127
+ reduce.
128
+
129
+ Arguments:
130
+ joinables (List[Joinable]): a list of the participating
131
+ :class:`Joinable` s; their hooks are iterated over in the given
132
+ order.
133
+
134
+ enable (bool): a flag enabling uneven input detection; setting to
135
+ ``False`` disables the context manager's functionality and should
136
+ only be set when the user knows the inputs will not be uneven
137
+ (default: ``True``).
138
+
139
+ throw_on_early_termination (bool): a flag controlling whether to throw an
140
+ exception upon detecting uneven inputs (default: ``False``).
141
+
142
+ Example::
143
+
144
+ >>> import os
145
+ >>> import torch
146
+ >>> import torch.distributed as dist
147
+ >>> import torch.multiprocessing as mp
148
+ >>> # xdoctest: +SKIP
149
+ >>> import torch.nn.parallel.DistributedDataParallel as DDP
150
+ >>> import torch.distributed.optim.ZeroRedundancyOptimizer as ZeRO
151
+ >>> from torch.distributed.algorithms.join import Join
152
+ >>>
153
+ >>> # On each spawned worker
154
+ >>> def worker(rank):
155
+ >>> dist.init_process_group("nccl", rank=rank, world_size=2)
156
+ >>> model = DDP(torch.nn.Linear(1, 1).to(rank), device_ids=[rank])
157
+ >>> optim = ZeRO(model.parameters(), torch.optim.Adam, lr=0.01)
158
+ >>> # Rank 1 gets one more input than rank 0
159
+ >>> inputs = [torch.tensor([1.]).to(rank) for _ in range(10 + rank)]
160
+ >>> with Join([model, optim]):
161
+ >>> for input in inputs:
162
+ >>> loss = model(input).sum()
163
+ >>> loss.backward()
164
+ >>> optim.step()
165
+ >>> # All ranks reach here without hanging/erroring
166
+ """
167
+
168
+ def __init__(
169
+ self,
170
+ joinables: List[Joinable],
171
+ enable: bool = True,
172
+ throw_on_early_termination: bool = False,
173
+ **kwargs,
174
+ ):
175
+ if len(joinables) == 0:
176
+ raise ValueError("The join context manager requires at least one joinable")
177
+ self._joinables = joinables
178
+ self._join_hooks = [joinable.join_hook(**kwargs) for joinable in self._joinables]
179
+ self._enable = enable
180
+ self._throw_on_early_termination = throw_on_early_termination
181
+ self._set_joinable_configs()
182
+ self._extract_dist_info()
183
+
184
+ def _set_joinable_configs(self) -> None:
185
+ r"""Set the :class:`_JoinConfig` of each participating :class:`Joinable`."""
186
+ assert len(self._joinables) > 0
187
+ is_first_joinable = True
188
+ for joinable in self._joinables:
189
+ joinable._join_config = _JoinConfig(
190
+ enable=self._enable,
191
+ throw_on_early_termination=self._throw_on_early_termination,
192
+ is_first_joinable=is_first_joinable
193
+ )
194
+ is_first_joinable = False
195
+
196
+ def _extract_dist_info(self) -> None:
197
+ r"""
198
+ Extract the process group and device information from the joinables.
199
+
200
+ If there are multiple joinables, then the context manager uses the
201
+ first specified device.
202
+
203
+ Preconditions:
204
+ ``self._joinables`` is not ``None`` and is non-empty.
205
+
206
+ Raises:
207
+ ValueError
208
+ If there are multiple conflicting ``process_group`` attributes
209
+ among the ``Joinable`` objects.
210
+ """
211
+ process_group = None
212
+ device = None
213
+ for joinable in self._joinables:
214
+ if process_group is None:
215
+ process_group = joinable.join_process_group
216
+ elif process_group != joinable.join_process_group:
217
+ raise ValueError("Using join context manager with multiple process groups")
218
+ if device is None:
219
+ device = joinable.join_device
220
+ self._process_group = process_group
221
+ self._rank = dist.get_rank(self._process_group)
222
+ self._device = device
223
+
224
+ def __enter__(self):
225
+ ...
226
+
227
+ def __exit__(
228
+ self,
229
+ type: Optional[Type[BaseException]],
230
+ value: Optional[BaseException],
231
+ traceback: Optional[TracebackType]
232
+ ):
233
+ r"""
234
+ Repeatedly runs the main hooks until all processes join; then, runs the post-hooks.
235
+
236
+ Raises:
237
+ RuntimeError
238
+ If ``throw_on_early_termination=True``.
239
+ """
240
+ if not self._enable or type:
241
+ return # propagate the exception directly if one was raised
242
+
243
+ all_procs_joined = False
244
+ is_last_joiner = True
245
+
246
+ i = 0
247
+ WARN_THRESHOLD = 1000
248
+ warnings.simplefilter("once")
249
+
250
+ while not all_procs_joined:
251
+ if i > WARN_THRESHOLD:
252
+ warnings.warn(
253
+ "Detected uneven input skew of greater than "
254
+ f"{WARN_THRESHOLD}. This means that rank "
255
+ f"{self._rank} has at least {WARN_THRESHOLD} "
256
+ f"fewer inputs than other currently-active ranks. "
257
+ "This level of skew could lead to performance "
258
+ "degradation during training."
259
+ )
260
+ # Shadow the all-reduce in non-joined processes
261
+ num_nonjoined_procs = self._get_num_nonjoined_procs()
262
+ if num_nonjoined_procs == 0:
263
+ all_procs_joined = True
264
+ else:
265
+ if self._throw_on_early_termination:
266
+ self._notify_procs_to_terminate()
267
+
268
+ # Run main hooks
269
+ for join_hook in self._join_hooks:
270
+ join_hook.main_hook()
271
+
272
+ is_last_joiner = False
273
+ i += 1
274
+
275
+ # Run post-hooks
276
+ for join_hook in self._join_hooks:
277
+ join_hook.post_hook(is_last_joiner)
278
+
279
+ def _get_num_nonjoined_procs(self):
280
+ r"""Return the number of non-joined processes by shadowing an all-reduce in the non-joined processes."""
281
+ num_nonjoined_procs = torch.zeros(1, device=self._device)
282
+ dist.all_reduce(num_nonjoined_procs, group=self._process_group)
283
+ return num_nonjoined_procs.item()
284
+
285
+ def _notify_procs_to_terminate(self):
286
+ r"""Schedule an all-reduce to notify non-joined processes to terminate.
287
+
288
+ Also raise a ``RuntimeError`` indicating that the current process has exhausted its inputs.
289
+ """
290
+ ones = torch.ones(1, device=self._device)
291
+ dist.all_reduce(ones, group=self._process_group)
292
+ raise RuntimeError(f"Rank {self._rank} exhausted all inputs.")
293
+
294
+ @staticmethod
295
+ def notify_join_context(joinable: Joinable):
296
+ r"""
297
+ Notifies the join context manager that the calling process has not yet joined.
298
+
299
+ Then, if ``throw_on_early_termination=True``, checks if uneven inputs have been detected
300
+ (i.e. if one process has already joined) and throws an exception if so.
301
+
302
+ This method should be called from a :class:`Joinable` object before
303
+ its per-iteration collective communications. For example, this should
304
+ be called at the beginning of the forward pass in
305
+ :class:`DistributedDataParallel`.
306
+
307
+ Only the first :class:`Joinable` object passed into the context
308
+ manager performs the collective communications in this method, and
309
+ for the others, this method is vacuous.
310
+
311
+ Arguments:
312
+ joinable (Joinable): the :class:`Joinable` object calling this
313
+ method.
314
+
315
+ Returns:
316
+ An async work handle for the all-reduce meant to notify the context
317
+ manager that the process has not yet joined if ``joinable`` is the
318
+ first one passed into the context manager; ``None`` otherwise.
319
+ """
320
+ assert hasattr(joinable, "_join_config"), \
321
+ f"Check that the {type(joinable)} constructor calls the " \
322
+ "``Joinable`` constructor"
323
+
324
+ join_config = joinable._join_config
325
+ # First joinable is responsible for the collective communications
326
+ if not join_config.is_first_joinable or not join_config.enable:
327
+ return None
328
+
329
+ device = joinable.join_device
330
+ process_group = joinable.join_process_group
331
+
332
+ # Schedule an all-reduce to indicate that the caller has not yet joined
333
+ ones = torch.ones(1, device=device)
334
+ work = dist.all_reduce(ones, group=process_group, async_op=True)
335
+
336
+ if join_config.throw_on_early_termination:
337
+ # Check if uneven inputs have been detected
338
+ zeros = torch.zeros(1, device=device)
339
+ dist.all_reduce(zeros, group=process_group)
340
+ should_throw = zeros.item()
341
+ if should_throw:
342
+ raise RuntimeError(
343
+ "Detected at least one rank that exhausted inputs. "
344
+ "Throwing across all ranks."
345
+ )
346
+ return work
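Taken together, the two collectives above implement a simple counting scheme: each process that still has inputs contributes ``1`` to an all-reduce via ``notify_join_context``, while joined processes shadow the same all-reduce with ``0`` in ``_get_num_nonjoined_procs``, so every rank reads back the number of non-joined processes. A minimal sketch (not part of the diff above; the per-rank contributions are a made-up example) of that arithmetic::

    # Hypothetical per-rank contributions: ranks 0, 1, and 3 still have inputs
    # (they contribute 1); rank 2 has joined and shadows the all-reduce with 0.
    contributions = [1, 1, 0, 1]
    # Every rank would read back the same sum from the all-reduce.
    num_nonjoined_procs = sum(contributions)
    print(num_nonjoined_procs)  # 3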
llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/averagers.py ADDED
@@ -0,0 +1,120 @@
1
+ import warnings
2
+ from abc import ABC, abstractmethod
3
+ from typing import Union, Iterable, Dict
4
+ import torch
5
+ import torch.distributed as dist
6
+ import torch.distributed.algorithms.model_averaging.utils as utils
7
+
8
+ __all__ = ['ModelAverager', 'PeriodicModelAverager']
9
+
10
+ class ModelAverager(ABC):
11
+ r"""Base class for all model averagers.
12
+
13
+ Args:
14
+ process_group: The process group to be used for all-reduce.
15
+ If ``None``, the default process group, which
16
+ is created by :func:`torch.distributed.init_process_group`,
17
+ will be used. (default: ``None``)
18
+ """
19
+
20
+ def __init__(self, process_group=None):
21
+ self.process_group = (
22
+ process_group if process_group is not None else dist.group.WORLD
23
+ )
24
+ self.step = 0
25
+
26
+ @abstractmethod
27
+ def average_parameters(self, params):
28
+ raise NotImplementedError
29
+
30
+
31
+ class PeriodicModelAverager(ModelAverager):
32
+ r"""
33
+ Averages parameters periodically after the warm-up stage.
34
+
35
+ This can be used for running `post-local SGD <https://arxiv.org/abs/1808.07217>`_,
36
+ by running :class:`~torch.nn.DistributedDataParallel` (DDP)
37
+ using the subgroups created by :meth:`~torch.distributed.new_subgroups`.
38
+
39
+ Args:
40
+ period (int): The number of steps per model averaging.
41
+ Usually the period should be greater than ``1`` to reduce the communication cost.
42
+ Otherwise, only DDP needs to be used.
43
+ warmup_steps (int): The number of warm-up steps. During this stage,
44
+ model averaging is skipped.
45
+ process_group: The process group to be used for all-reduce.
46
+ If ``None``, the default process group, which
47
+ is created by :func:`torch.distributed.init_process_group`,
48
+ will be used. (default: ``None``)
49
+
50
+ Example::
51
+
52
+ >>> # xdoctest: +SKIP("undefined variables")
53
+ >>> import torch
54
+ >>> import torch.distributed as dist
55
+ >>> import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD
56
+ >>> import torch.distributed.algorithms.model_averaging.averagers as averagers
57
+ >>> import torch.nn as nn
58
+ >>>
59
+ >>> dist.init_process_group("nccl", rank=rank, world_size=16)
60
+ >>> torch.cuda.set_device(rank)
61
+ >>> module = nn.Linear(1, 1, bias=False).cuda()
62
+ >>> model = nn.parallel.DistributedDataParallel(
63
+ >>> module, device_ids=[rank], output_device=rank
64
+ >>> )
65
+ >>> # Register a post-localSGD communication hook.
66
+ >>> state = PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100)
67
+ >>> model.register_comm_hook(state, post_localSGD_hook)
68
+ >>>
69
+ >>> # In the first 100 steps, run global gradient averaging like normal DDP at every step.
70
+ >>> # After 100 steps, run model averaging every 4 steps.
71
+ >>> # Note that ``warmup_steps`` must be the same as ``start_localSGD_iter`` used in ``PostLocalSGDState``.
72
+ >>> averager = averagers.PeriodicModelAverager(period=4, warmup_steps=100)
73
+ >>> for step in range(0, 200):
74
+ >>> optimizer.zero_grad()
75
+ >>> loss = loss_fn(output, labels)
76
+ >>> loss.backward()
77
+ >>> optimizer.step()
78
+ >>> # Will average model parameters globally every 4 steps. Thus,
79
+ >>> # inter-node communication only occurs every 4 iterations after
80
+ >>> # the initial ``warmup_steps`` period.
81
+ >>> averager.average_parameters(model.parameters())
82
+ """
83
+
84
+ def __init__(
85
+ self,
86
+ period,
87
+ warmup_steps=0,
88
+ process_group=None
89
+ ):
90
+ super().__init__(process_group)
91
+ if warmup_steps < 0:
92
+ raise ValueError("Arg ``warmup_steps`` must be a non-negative number.")
93
+ self.warmup_steps = warmup_steps
94
+ if period < 1:
95
+ raise ValueError("Arg ``period`` must be a positive value.")
96
+ elif period == 1:
97
+ warnings.warn(
98
+ "When period is 1, no need to use model averaging because the communication cost "
99
+ "of all-reducing parameters will be no less than the cost of all-reducing gradients "
100
+ "by DistributedDataParallel in the backward pass. Therefore, only "
101
+ "DistributedDataParallel should be used for this case."
102
+ )
103
+ self.period = period
104
+
105
+ def average_parameters(self, params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]]):
106
+ """
107
+ Averages parameters or parameter groups of an optimizer if ``step`` is no less than ``warmup_steps``.
108
+
109
+ Can be divided by ``period``, where ``step`` is increased by 1
110
+ at each iteration in the training loop.
111
+ Args:
112
+ params: The parameters of a model or parameter groups of an optimizer.
113
+
114
+ """
115
+ if (
116
+ self.step >= self.warmup_steps
117
+ and (self.step - self.warmup_steps) % self.period == 0
118
+ ):
119
+ utils.average_parameters_or_parameter_groups(params, self.process_group)
120
+ self.step += 1
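To make the scheduling above concrete, here is a minimal, self-contained sketch (not part of the diff; ``warmup_steps=100`` and ``period=4`` are assumed example values) that reproduces the gating condition used by ``PeriodicModelAverager.average_parameters``::

    # Mirror of the condition in average_parameters: skip the warm-up stage,
    # then average every `period` steps.
    warmup_steps, period = 100, 4

    def should_average(step: int) -> bool:
        return step >= warmup_steps and (step - warmup_steps) % period == 0

    print([s for s in range(120) if should_average(s)])  # [100, 104, 108, 112, 116]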
llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/utils.py ADDED
@@ -0,0 +1,72 @@
1
+ # flake8: noqa C101
2
+ import itertools
3
+ from typing import Union, Iterable, Dict, Iterator
4
+
5
+ import torch
6
+ import torch.distributed as dist
7
+ # The two imports below are not always available depending on the
8
+ # USE_DISTRIBUTED compile flag. Make sure they raise import error
9
+ # if we're trying to use them.
10
+ from torch.distributed import ProcessGroup, group
11
+
12
+ __all__ = ["average_parameters", "get_params_to_average", "average_parameters_or_parameter_groups"]
13
+
14
+ def average_parameters(
15
+ params: Iterator[torch.nn.Parameter], process_group: ProcessGroup
16
+ ):
17
+ """
18
+ Averages all the given parameters.
19
+
20
+ For allreduce efficiency, all the parameters are flattened into a contiguous buffer.
21
+ Thus, it requires extra memory of the same size as the given parameters.
22
+ """
23
+ group_to_use = process_group if process_group is not None else group.WORLD
24
+ # Do not update any parameter if not in the process group.
25
+ if dist._rank_not_in_group(group_to_use):
26
+ return
27
+
28
+ params_it1, params_it2 = itertools.tee(params)
29
+ # If the input parameters have different data types,
30
+ # packing these parameters will trigger an implicit type up-casting.
31
+ # The original parameter data types will be restored during the subsequent unpacking.
32
+ flat_params = torch.cat([p.data.reshape(-1) for p in params_it1])
33
+ flat_params /= dist.get_world_size(group_to_use)
34
+ # Make sure the allreduce will not conflict with any other ongoing process group.
35
+ if torch.cuda.is_available():
36
+ torch.cuda.synchronize()
37
+ dist.all_reduce(flat_params, group=group_to_use)
38
+
39
+ offset = 0
40
+ for p in params_it2:
41
+ p.data = flat_params[offset : offset + p.numel()].view_as(p).type_as(p)
42
+ offset += p.numel()
43
+
44
+
45
+ def get_params_to_average(params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]]):
46
+ """
47
+ Return a list of parameters that need to be averaged.
48
+
49
+ This filters out the parameters that do not contain any gradients.
50
+ Args:
51
+ params: The parameters of a model or parameter groups of an optimizer.
52
+ """
53
+ filtered_params = []
54
+ for param in params:
55
+ if isinstance(param, torch.nn.Parameter):
56
+ # model.parameters() input
57
+ param_data = param
58
+ if param_data.grad is not None:
59
+ filtered_params.append(param_data)
60
+ elif isinstance(param, dict):
61
+ # optimizer.param_groups input
62
+ for param_data in param["params"]:
63
+ if param_data.grad is not None:
64
+ filtered_params.append(param_data)
65
+ else:
66
+ raise NotImplementedError(f"Parameter input of type {type(param)} is not supported")
67
+ return filtered_params
68
+
69
+
70
+ def average_parameters_or_parameter_groups(params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]], process_group: ProcessGroup):
71
+ """Averages parameters of a model or parameter groups of an optimizer."""
72
+ average_parameters(iter(get_params_to_average(params)), process_group)
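The flatten/unflatten pattern that ``average_parameters`` documents above can be illustrated without an initialized process group. A minimal single-process sketch (not part of the diff; dividing by a placeholder ``world_size`` stands in for the omitted ``dist.all_reduce``) of how parameters of mixed dtypes are packed into one buffer and then restored::

    import itertools
    import torch

    # Two parameters with different dtypes; torch.cat up-casts to float64.
    params = [
        torch.nn.Parameter(torch.ones(3)),                                 # float32
        torch.nn.Parameter(torch.full((2, 2), 2.0, dtype=torch.float64)),  # float64
    ]
    world_size = 1  # placeholder; the real code calls dist.all_reduce after the division

    it1, it2 = itertools.tee(iter(params))
    flat = torch.cat([p.data.reshape(-1) for p in it1])
    flat /= world_size

    offset = 0
    for p in it2:
        # Restore each parameter's shape and original dtype from the flat buffer.
        p.data = flat[offset: offset + p.numel()].view_as(p).type_as(p)
        offset += p.numel()

    print([p.dtype for p in params])  # [torch.float32, torch.float64]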
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/__init__.py ADDED
@@ -0,0 +1,77 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ """
10
+
11
+ Torchelastic agent and user worker failover contract:
12
+
13
+ **TL;DR;**:
14
+
15
+ * TE (torchelastic) expects user workers to finish within a 5 minute drift of each other
16
+ * It is better to design a DDP app to fail for all workers, rather than for a single one.
17
+ * TE does not synchronize the number of restarts between agents
18
+ * TE re-rendezvous does not trigger a restart decrease
19
+ * When a single agent finishes its job (successfully or not), it will close the rendezvous.
20
+ If other agents still have workers in progress, they will be terminated.
21
+ * Based on the above, scale down does not work if at least one agent finishes the job.
22
+ * When agents detect a scale up, they will not decrease ``max_restarts``
23
+
24
+
25
+ In general TE (torchelastic) can launch arbitrary user code, but some
26
+ clarification is needed around what failover mechanism torchelastic
27
+ provides and what failover mechanism it expects from user workers.
28
+
29
+ Torchelastic currently supports DDP style applications. That means that
30
+ TE expects *ALL* workers to finish at approximately the same time. In practice,
31
+ it is nearly impossible to guarantee that all workers in an arbitrary
32
+ DDP application finish at the same time, so TE provides a finalization barrier
33
+ that waits up to TIMEOUT (5 minutes) for worker finalization.
34
+
35
+ **Worker Failure**
36
+
37
+ When a worker fails, TE will check the number of restarts
38
+ available; if there are more than 0 restarts left, TE will start a new rendezvous
39
+ round and restart the worker process. The new rendezvous round will cause other
40
+ TE agents to terminate their workers.
41
+
42
+ .. note:: TE agents do not synchronize restarts between themselves.
43
+ When a single agent performs a restart, it will trigger a local ``max_restarts``
44
+ decrease; other agents will not decrease their ``max_restarts``.
45
+
46
+
47
+ A single worker failure can cause the whole cluster to fail:
48
+ If a single worker is constantly failing, it will cause the TE agent
49
+ ``max_restarts`` to go to zero. This will cause an agent to finish its
50
+ work and close the rendezvous. If there are any other workers on different
51
+ agents, they will be terminated.
52
+
53
+
54
+ **Re-Rendezvous**
55
+
56
+ Re-rendezvous occurs when TE agents detect a new node
57
+ trying to join the cluster. TE will not decrease ``max_restarts``. TE agents
58
+ will terminate their workers and start a new rendezvous round.
59
+
60
+ Note about DynamicRendezvous (etcd-v2, c10d-experimental): If the rendezvous
61
+ already has max_nodes, the new node won't be added to the wait list right
62
+ away since there is no need to tear down a rendezvous that is already fully
63
+ utilized. The new node will wait until its timeout (600 secs by default)
64
+ and periodically check the number of participants. If the number becomes
65
+ less than max_nodes, it will be added to the wait list; otherwise, it will time out after 600 secs.
66
+
67
+ *Scale up event*. When a scale up event happens, the torchelastic rendezvous
68
+ will detect that there are new nodes trying to join. The torchelastic agent
69
+ will stop all workers and perform a re-rendezvous. Note: when a scale up event
70
+ happens, *``max_restarts``* will *not* decrease.
71
+
72
+ *Scale down event*. When a scale down event happens, the rendezvous will not
73
+ notify the torchelastic agent about it. If the TE agent is launched with ``max_restarts=0``,
74
+ it relies on the underlying scheduler to handle the job restart. If ``max_restarts>0``,
75
+ the TE agent will terminate its workers and start a new rdzv round, which is a *scale up event*.
76
+
77
+ """
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__init__.py ADDED
@@ -0,0 +1,41 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ """
10
+ The elastic agent is the control plane of torchelastic.
11
+
12
+ It is a process that launches and manages underlying worker processes.
13
+ The agent is responsible for:
14
+
15
+ 1. Working with distributed torch: the workers are started with all the
16
+ necessary information to successfully and trivially call
17
+ ``torch.distributed.init_process_group()``.
18
+
19
+ 2. Fault tolerance: monitors workers and upon detecting worker failures
20
+ or unhealthiness, tears down all workers and restarts everyone.
21
+
22
+ 3. Elasticity: Reacts to membership changes and restarts workers with the new
23
+ members.
24
+
25
+ The simplest agents are deployed per node and work with local processes.
26
+ A more advanced agent can launch and manage workers remotely. Agents can
27
+ be completely decentralized, making decisions based on the workers they manage,
28
+ or they can be coordinated, communicating with other agents (that manage workers
29
+ in the same job) to make a collective decision.
30
+ """
31
+
32
+ from .api import ( # noqa: F401
33
+ ElasticAgent,
34
+ RunResult,
35
+ SimpleElasticAgent,
36
+ Worker,
37
+ WorkerGroup,
38
+ WorkerSpec,
39
+ WorkerState,
40
+ )
41
+ from .local_elastic_agent import TORCHELASTIC_ENABLE_FILE_TIMER, TORCHELASTIC_TIMER_FILE
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/local_elastic_agent.cpython-310.pyc ADDED
Binary file (11.1 kB).
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py ADDED
@@ -0,0 +1,954 @@
1
+ # mypy: ignore-errors
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ import abc
10
+ import functools
11
+ import json
12
+ import os
13
+ import signal
14
+ import socket
15
+ import time
16
+ import traceback
17
+ import warnings
18
+ from contextlib import closing
19
+ from dataclasses import dataclass, field
20
+ from enum import Enum
21
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
22
+
23
+ import torch.distributed.elastic.rendezvous as rdzv
24
+ import torch.distributed.elastic.utils.store as store_util
25
+ from torch.distributed.elastic.rendezvous import RendezvousGracefulExitError
26
+ from torch.distributed import Store
27
+ from torch.distributed.elastic.events import Event, EventSource, record
28
+ from torch.distributed.elastic.metrics import prof, put_metric
29
+ from torch.distributed.elastic.multiprocessing import (
30
+ ProcessFailure,
31
+ SignalException,
32
+ )
33
+ from torch.distributed.elastic.utils.logging import get_logger
34
+
35
+ __all__ = [
36
+ "WorkerSpec",
37
+ "Worker",
38
+ "WorkerState",
39
+ "WorkerGroup",
40
+ "RunResult",
41
+ "ElasticAgent",
42
+ "SimpleElasticAgent",
43
+ ]
44
+ _TERMINAL_STATE_SYNC_ID = "torchelastic/agent/terminal_state"
45
+
46
+ DEFAULT_ROLE = "default"
47
+ log = get_logger(__name__)
48
+
49
+
50
+ @dataclass
51
+ class WorkerSpec:
52
+ """Blueprint information about a particular type of worker.
53
+
54
+ For a given role, there must only exist a single worker spec.
55
+ Worker spec is expected to be homogeneous across all nodes (machines),
56
+ that is, each node runs the same number of workers for a particular spec.
57
+
58
+ Args:
59
+ role: user-defined role for the workers with this spec
60
+ local_world_size: number of local workers to run
61
+ fn: (deprecated use entrypoint instead)
62
+ entrypoint: worker function or command
63
+ args: arguments to pass to ``entrypoint``
64
+ rdzv_handler: handles rdzv for this set of workers
65
+ max_restarts: number of max retries for the workers
66
+ monitor_interval: monitor status of workers every ``n`` seconds
67
+ master_port: fixed port to run the c10d store on rank 0
68
+ if not specified then a random free port will be chosen
69
+ master_addr: fixed master_addr to run the c10d store on rank 0
70
+ if not specified then the hostname of the agent on rank 0 will be chosen
71
+ redirects: redirect std streams to a file,
72
+ selectively redirect for a particular
73
+ local rank by passing a map
74
+ tee: tees the specified std stream(s) to console + file,
75
+ selectively tee for a particular local rank by passing a map,
76
+ takes precedence over ``redirects`` settings.
77
+
78
+ """
79
+
80
+ role: str
81
+ local_world_size: int
82
+ rdzv_handler: rdzv.RendezvousHandler
83
+ fn: Optional[Callable] = None
84
+ # TODO @kiuk - make entrypoint a required field
85
+ entrypoint: Union[Callable, str, None] = None
86
+ args: Tuple = ()
87
+ max_restarts: int = 3
88
+ monitor_interval: float = 30.0
89
+ master_port: Optional[int] = None
90
+ master_addr: Optional[str] = None
91
+ local_addr: Optional[str] = None
92
+
93
+ def __post_init__(self):
94
+ assert self.local_world_size > 0
95
+ assert self.monitor_interval > 0
96
+
97
+ if self.fn:
98
+ warnings.warn(
99
+ "WorkerSpec.fn will be deprecated,"
100
+ " please use WorkerSpec.entrypoint instead",
101
+ category=DeprecationWarning,
102
+ )
103
+ self.entrypoint = self.fn
104
+ assert self.entrypoint
105
+
106
+ def get_entrypoint_name(self):
107
+ """Get the entry point name.
108
+
109
+ If the entrypoint is a function (e.g. ``Callable``) returns its ``__qualname__``
110
+ else if the entrypoint is a binary (e.g. ``str``), returns the binary name.
111
+ """
112
+ if isinstance(self.entrypoint, str):
113
+ return os.path.basename(self.entrypoint)
114
+ else:
115
+ assert self.entrypoint is not None
116
+ return self.entrypoint.__qualname__
117
+
118
+
119
+ class Worker:
120
+ """A worker instance.
121
+
122
+ Contrast this with ``WorkerSpec`` that represents the specifications of a
123
+ worker. A ``Worker`` is created from a ``WorkerSpec``. A ``Worker`` is to
124
+ a ``WorkerSpec`` as an object is to a class.
125
+
126
+ The ``id`` of the worker is interpreted
127
+ by the specific implementation of ``ElasticAgent``. For a local
128
+ agent, it could be the ``pid (int)`` of the worker, for a remote
129
+ agent it could be encoded as ``host:port (string)``.
130
+
131
+ Args:
132
+ id (Any): uniquely identifies a worker (interpreted by the agent)
133
+ local_rank (int): local rank of the worker
134
+ global_rank (int): global rank of the worker
135
+ role_rank (int): rank of the worker across all workers that have the same role
136
+ world_size (int): number of workers (globally)
137
+ role_world_size (int): number of workers that have the same role
138
+ """
139
+
140
+ __slots__ = [
141
+ "id",
142
+ "local_rank",
143
+ "global_rank",
144
+ "role_rank",
145
+ "world_size",
146
+ "role_world_size",
147
+ ]
148
+
149
+ def __init__(
150
+ self,
151
+ local_rank: int,
152
+ global_rank: int = -1,
153
+ role_rank: int = -1,
154
+ world_size: int = -1,
155
+ role_world_size: int = -1,
156
+ ):
157
+ # unique identifier for this worker
158
+ self.id: Any = None
159
+
160
+ # rank of the worker among workers with the same role being monitored
161
+ # by the same ``agent`` instance.
162
+ self.local_rank: int = local_rank
163
+
164
+ # rank of the worker among all the workers across all roles
165
+ # across all ``agent`` instances.
166
+ # Global rank is not stable between re-rendezvous.
167
+ self.global_rank: int = global_rank
168
+
169
+ # rank of the worker among all the workers with the same role
170
+ # across all ``agent`` instances.
171
+ # Role rank is not stable between re-rendezvous.
172
+ self.role_rank: int = role_rank
173
+
174
+ # total number of workers (globally). Due to elasticity
175
+ # the world size may change between re-rendezvous.
176
+ self.world_size: int = world_size
177
+
178
+ # total number of workers that share the same role. Due to elasticity
179
+ # the role world size may change between re-rendezvous.
180
+ self.role_world_size: int = role_world_size
181
+
182
+ def __str__(self):
183
+ return (
184
+ f"local_rank={self.local_rank},global_rank={self.global_rank}"
185
+ f",role_rank={self.role_rank},world_size={self.world_size}"
186
+ f",role_world_size={self.role_world_size}"
187
+ )
188
+
189
+ def __repr__(self):
190
+ return str(self)
191
+
192
+
193
+ class WorkerState(str, Enum):
194
+ """A state of the ``WorkerGroup``.
195
+
196
+ Workers in a worker group change state as a unit. If a single worker
197
+ in a worker group fails the entire set is considered failed::
198
+
199
+ UNKNOWN - agent lost track of worker group state, unrecoverable
200
+ INIT - worker group object created not yet started
201
+ HEALTHY - workers running and healthy
202
+ UNHEALTHY - workers running and unhealthy
203
+ STOPPED - workers stopped (interrupted) by the agent
204
+ SUCCEEDED - workers finished running (exit 0)
205
+ FAILED - workers failed to successfully finish (exit !0)
206
+
207
+
208
+ A worker group starts from an initial ``INIT`` state,
209
+ then progresses to ``HEALTHY`` or ``UNHEALTHY`` states,
210
+ and finally reaches a terminal ``SUCCEEDED`` or ``FAILED`` state.
211
+
212
+ Worker groups can be interrupted and temporarily put into ``STOPPED`` state
213
+ by the agent. Workers in ``STOPPED`` state are scheduled to be restarted
214
+ in the near future by the agent. Some examples of workers being put into
215
+ ``STOPPED`` state are:
216
+
217
+ 1. Worker group failure|unhealthy observed
218
+ 2. Membership change detected
219
+
220
+ When an action (start, stop, rdzv, retry, etc.) on a worker group fails
221
+ and results in the action being partially applied to the worker group,
222
+ the state will be ``UNKNOWN``. Typically this happens on uncaught/unhandled
223
+ exceptions during state change events on the agent. The agent is not
224
+ expected to recover worker groups in ``UNKNOWN`` state and is better off
225
+ self terminating and allowing the job manager to retry the node.
226
+ """
227
+
228
+ UNKNOWN = "UNKNOWN"
229
+ INIT = "INIT"
230
+ HEALTHY = "HEALTHY"
231
+ UNHEALTHY = "UNHEALTHY"
232
+ STOPPED = "STOPPED"
233
+ SUCCEEDED = "SUCCEEDED"
234
+ FAILED = "FAILED"
235
+
236
+ @staticmethod
237
+ def is_running(state: "WorkerState") -> bool:
238
+ """Return the state of the Worker.
239
+
240
+ Returns:
241
+ True if the worker state represents workers still running
242
+ (e.g. that the process exists but not necessarily healthy).
243
+ """
244
+ return state in {WorkerState.HEALTHY, WorkerState.UNHEALTHY}
245
+
246
+
247
+ class WorkerGroup:
248
+ """A set of ``Worker`` instances.
249
+
250
+ The class defines a set of ``Worker`` instances for the given ``WorkerSpec`` managed by ``ElasticAgent``. Whether the worker
251
+ group contains cross instance workers or not depends on the implementation of the agent.
252
+ """
253
+
254
+ __slots__ = ["spec", "workers", "store", "group_rank", "group_world_size", "state"]
255
+
256
+ def __init__(self, spec: WorkerSpec):
257
+ self.spec = spec
258
+ self.workers = [Worker(local_rank=i) for i in range(self.spec.local_world_size)]
259
+
260
+ # assigned after rdzv
261
+ self.store = None
262
+ self.group_rank = None
263
+ self.group_world_size = None
264
+
265
+ self.state = WorkerState.INIT
266
+
267
+
268
+ class _RoleInstanceInfo:
269
+ """The class is used by the agent to exchange the information with other agents.
270
+
271
+ The information is used to determine the rank of the workers that the agent
272
+ manages in heterogeneous environments, where different agents can have
273
+ different numbers of workers.
274
+ """
275
+
276
+ __slots__ = ["role", "rank", "local_world_size"]
277
+
278
+ def __init__(self, role: str, rank: int, local_world_size: int):
279
+ r"""Initialize the agent class instance.
280
+
281
+ Args:
282
+ role (str): user-defined role for the workers with this spec
283
+ rank (int): the rank of the agent
284
+ local_world_size (int): number of local workers to run
285
+ """
286
+ self.role = role
287
+ self.rank = rank
288
+ self.local_world_size = local_world_size
289
+
290
+ def serialize(self) -> bytes:
291
+ dict_data = {
292
+ "role": self.role,
293
+ "rank": self.rank,
294
+ "local_world_size": self.local_world_size,
295
+ }
296
+ return json.dumps(dict_data).encode(encoding="UTF-8")
297
+
298
+ @staticmethod
299
+ def deserialize(data: bytes):
300
+ dict_data = json.loads(data.decode(encoding="UTF-8"))
301
+ return _RoleInstanceInfo(
302
+ dict_data["role"], dict_data["rank"], dict_data["local_world_size"]
303
+ )
304
+
305
+ @staticmethod
306
+ def compare(obj1, obj2) -> int:
307
+ if obj1.role == obj2.role:
308
+ return obj1.rank - obj2.rank
309
+ elif obj1.role > obj2.role:
310
+ return 1
311
+ else:
312
+ return -1
313
+
314
+ @staticmethod
315
+ def find_role_boundaries(roles_infos: List, role: str) -> Tuple[int, int]:
316
+ start_idx, end_idx = -1, -1
317
+ for idx, role_info in enumerate(roles_infos):
318
+ if role_info.role == role:
319
+ if start_idx == -1:
320
+ start_idx = idx
321
+ end_idx = idx
322
+ return (start_idx, end_idx)
323
+
324
+
325
+ @dataclass
326
+ class RunResult:
327
+ """Return results of the worker executions.
328
+
329
+ Run results follow an "all-or-nothing" policy where the run is successful if and
330
+ only if ALL local workers managed by this agent complete successfully.
331
+
332
+ If the result is successful (e.g. ``is_failed() = False``) then the ``return_values``
333
+ field contains the outputs (return values) of the workers managed by THIS agent mapped
334
+ by their GLOBAL ranks. That is ``result.return_values[0]`` is the return value of
335
+ global rank 0.
336
+
337
+ .. note:: ``return_values`` are only meaningful for when the worker entrypoint
338
+ is a function. Workers specified as a binary entrypoint do not canonically
339
+ have a return value and the ``return_values`` field is meaningless and
340
+ may be empty.
341
+
342
+ If ``is_failed()`` returns ``True`` then the ``failures`` field contains the
343
+ failure information, again, mapped by the GLOBAL rank of the worker that failed.
344
+
345
+ The keys in ``return_values`` and ``failures`` are mutually exclusive, that is,
346
+ a worker's final state can only be one of: succeeded, failed. Workers intentionally
347
+ terminated by the agent according to the agent's restart policy, are not represented
348
+ in either ``return_values`` nor ``failures``.
349
+ """
350
+
351
+ state: WorkerState
352
+ return_values: Dict[int, Any] = field(default_factory=dict)
353
+ failures: Dict[int, ProcessFailure] = field(default_factory=dict)
354
+
355
+ def is_failed(self) -> bool:
356
+ return self.state == WorkerState.FAILED
357
+
358
+
359
+ def _get_socket_with_port() -> socket.socket:
360
+ """Return a free port on localhost.
361
+
362
+ The free port is "reserved" by binding a temporary socket on it.
363
+ Close the socket before passing the port to the entity that
364
+ requires it. Usage example::
365
+
366
+ sock = _get_socket_with_port()
367
+ with closing(sock):
368
+ port = sock.getsockname()[1]
369
+ sock.close()
370
+ # there is still a race-condition that some other process
371
+ # may grab this port before func() runs
372
+ func(port)
373
+ """
374
+ addrs = socket.getaddrinfo(
375
+ host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM
376
+ )
377
+ for addr in addrs:
378
+ family, type, proto, _, _ = addr
379
+ s = socket.socket(family, type, proto)
380
+ try:
381
+ s.bind(("localhost", 0))
382
+ s.listen(0)
383
+ return s
384
+ except OSError as e:
385
+ s.close()
386
+ log.info("Socket creation attempt failed.", exc_info=e)
387
+ raise RuntimeError("Failed to create a socket")
388
+
389
+
390
+ def _get_fq_hostname() -> str:
391
+ return socket.getfqdn(socket.gethostname())
392
+
393
+
394
+ class ElasticAgent(abc.ABC):
395
+ """An agent process responsible for managing one or more worker processes.
396
+
397
+ The worker processes are assumed to be regular distributed PyTorch scripts.
398
+ When the worker process is created by the agent, the agent provides the
399
+ necessary information for the worker processes to properly initialize
400
+ a torch process group.
401
+
402
+ The exact deployment topology and ratio of agent-to-worker is dependent
403
+ on the specific implementation of the agent and the user's job placement
404
+ preferences. For instance, to run a distributed training job on GPU with
405
+ 8 trainers (one per GPU) one can:
406
+
407
+ 1. Use 8 x single GPU instances, place an agent per instance, managing
408
+ 1 worker per agent.
409
+ 2. Use 4 x double GPU instances, place an agent per instance, managing
410
+ 2 workers per agent.
411
+ 3. Use 2 x quad GPU instances, place an agent per instance, managing
412
+ 4 workers per agent.
413
+ 4. Use 1 x 8 GPU instance, place an agent per instance, managing
414
+ 8 workers per agent.
415
+
416
+ Usage
417
+ ::
418
+
419
+ group_result = agent.run()
420
+ if group_result.is_failed():
421
+ # workers failed
422
+ failure = group_result.failures[0]
423
+ log.exception("worker 0 failed with exit code : %s", failure.exit_code)
424
+ else:
425
+ return group_result.return_values[0] # return rank 0's results
426
+
427
+ """
428
+
429
+ @abc.abstractmethod
430
+ def run(self, role: str = DEFAULT_ROLE) -> RunResult:
431
+ """Run the agent.
432
+
433
+ Supports retrying the worker group on failures up to ``max_restarts``.
434
+
435
+ Returns:
436
+ The result of the execution, containing the return values or
437
+ failure details for each worker mapped by the worker's global rank.
438
+
439
+ Raises:
440
+ Exception - any other failures NOT related to worker process
441
+ """
442
+ raise NotImplementedError()
443
+
444
+ @abc.abstractmethod
445
+ def get_worker_group(self, role: str = DEFAULT_ROLE) -> WorkerGroup:
446
+ """Return the ``WorkerGroup`` for the given ``role``.
447
+
448
+ Note that the worker group is a mutable object and hence in a
449
+ multi-threaded/process environment it may change state.
450
+ Implementors are encouraged (but not required) to return
451
+ a defensive read-only copy.
452
+ """
453
+ raise NotImplementedError()
454
+
455
+
456
+ class SimpleElasticAgent(ElasticAgent):
457
+ """An ``ElasticAgent`` that manages one particular type of worker role.
458
+
459
+ An ``ElasticAgent`` that manages workers (``WorkerGroup``) for a single ``WorkerSpec``
460
+ such as one particular type of worker role.
461
+ """
462
+
463
+ def __init__(self, spec: WorkerSpec, exit_barrier_timeout: float = 300):
464
+ self._worker_group = WorkerGroup(spec)
465
+ self._remaining_restarts = self._worker_group.spec.max_restarts
466
+ self._store = None
467
+ self._exit_barrier_timeout = exit_barrier_timeout
468
+ self._total_execution_time = 0
469
+
470
+ def get_worker_group(self, role: str = DEFAULT_ROLE) -> WorkerGroup:
471
+ return self._worker_group
472
+
473
+ @abc.abstractmethod
474
+ def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
475
+ r"""Start ``worker_group.spec.local_world_size`` number of workers.
476
+
477
+ This is according to worker spec for the worker group .
478
+ Returns a map of ``local_rank`` to worker ``id``.
479
+ """
480
+ raise NotImplementedError()
481
+
482
+ @abc.abstractmethod
483
+ def _stop_workers(self, worker_group: WorkerGroup) -> None:
484
+ r"""Stop all workers in the given worker group.
485
+
486
+ Implementors must deal with workers in all states defined by
487
+ ``WorkerState``. That is, it must gracefully handle stopping
488
+ non-existent workers, unhealthy (stuck) workers, etc.
489
+ """
490
+ raise NotImplementedError()
491
+
492
+ @abc.abstractmethod
493
+ def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:
494
+ r"""Check on the workers for the ``worker_group``.
495
+
496
+ This function also returns the new state of the worker group.
497
+ """
498
+ raise NotImplementedError()
499
+
500
+ @abc.abstractmethod
501
+ def _shutdown(self, death_sig: signal.Signals = signal.SIGTERM) -> None:
502
+ """Clean up any resources that were allocated during the agent's work.
503
+
504
+ Args:
505
+ death_sig: Signal to send to the child process, SIGTERM is default
506
+ """
507
+ raise NotImplementedError()
508
+
509
+ @staticmethod
510
+ def _set_master_addr_port(
511
+ store: Store,
512
+ master_addr: Optional[str],
513
+ master_port: Optional[int],
514
+ local_addr: Optional[str],
515
+ ):
516
+ if master_port is None:
517
+ sock = _get_socket_with_port()
518
+ with closing(sock):
519
+ master_port = sock.getsockname()[1]
520
+
521
+ if master_addr is None:
522
+ # If the user specified an address for the local node, use it as the master addr
523
+ if local_addr:
524
+ master_addr = local_addr
525
+ else:
526
+ master_addr = _get_fq_hostname()
527
+
528
+ store.set("MASTER_ADDR", master_addr.encode(encoding="UTF-8"))
529
+ store.set("MASTER_PORT", str(master_port).encode(encoding="UTF-8"))
530
+
531
+ @staticmethod
532
+ def _get_master_addr_port(store: Store) -> Tuple[str, int]:
533
+ master_addr = store.get("MASTER_ADDR").decode(encoding="UTF-8")
534
+ master_port = int(store.get("MASTER_PORT").decode(encoding="UTF-8"))
535
+ return (master_addr, master_port)
536
+
537
+ # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
538
+ # `torch.distributed.elastic.metrics.prof`.
539
+ @prof
540
+ def _rendezvous(self, worker_group: WorkerGroup) -> None:
541
+ r"""Run rendezvous for the workers specified by the worker spec.
542
+
543
+ Assigns workers a new global rank and world size.
544
+ Updates the rendezvous store for the worker group.
545
+ """
546
+ spec = worker_group.spec
547
+
548
+ store, group_rank, group_world_size = spec.rdzv_handler.next_rendezvous()
549
+ self._store = store
550
+
551
+ workers = self._assign_worker_ranks(store, group_rank, group_world_size, spec)
552
+ worker_group.workers = workers
553
+ worker_group.store = store
554
+ worker_group.group_rank = group_rank
555
+ worker_group.group_world_size = group_world_size
556
+
557
+ if group_rank == 0:
558
+ self._set_master_addr_port(
559
+ store,
560
+ spec.master_addr,
561
+ spec.master_port,
562
+ spec.local_addr,
563
+ )
564
+
565
+ master_addr, master_port = self._get_master_addr_port(store)
566
+ restart_count = spec.max_restarts - self._remaining_restarts
567
+
568
+ log.info(
569
+ "[%(role)s] Rendezvous complete for workers. Result:\n"
570
+ " restart_count=%(restart_count)s\n"
571
+ " master_addr=%(master_addr)s\n"
572
+ " master_port=%(master_port)s\n"
573
+ " group_rank=%(group_rank)s\n"
574
+ " group_world_size=%(group_world_size)s\n"
575
+ " local_ranks=%(local_ranks)s\n"
576
+ " role_ranks=%(role_ranks)s\n"
577
+ " global_ranks=%(global_ranks)s\n"
578
+ " role_world_sizes=%(role_world_sizes)s\n"
579
+ " global_world_sizes=%(global_world_sizes)s\n",
580
+ {
581
+ "role": spec.role,
582
+ "restart_count": restart_count,
583
+ "master_addr": master_addr,
584
+ "master_port": master_port,
585
+ "group_rank": group_rank,
586
+ "group_world_size": group_world_size,
587
+ "local_ranks": [worker.local_rank for worker in workers],
588
+ "role_ranks": [worker.role_rank for worker in workers],
589
+ "global_ranks": [worker.global_rank for worker in workers],
590
+ "role_world_sizes": [worker.role_world_size for worker in workers],
591
+ "global_world_sizes": [worker.world_size for worker in workers]
592
+ }
593
+ )
594
+
595
+ def _get_ranks(
596
+ self,
597
+ role_infos: List[_RoleInstanceInfo],
598
+ role_idx: int,
599
+ start_idx: int = 0,
600
+ end_idx: int = -1,
601
+ ) -> Tuple[int, List[int]]:
602
+ if end_idx == -1:
603
+ end_idx = len(role_infos)
604
+ prefix_sum = 0
605
+ total_sum = 0
606
+ for idx in range(start_idx, end_idx):
607
+ if role_idx > idx:
608
+ prefix_sum += role_infos[idx].local_world_size
609
+ total_sum += role_infos[idx].local_world_size
610
+ return (
611
+ total_sum,
612
+ list(range(prefix_sum, prefix_sum + role_infos[role_idx].local_world_size)),
613
+ )
614
+
615
+ # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
616
+ # `torch.distributed.elastic.metrics.prof`.
617
+ @prof
618
+ def _assign_worker_ranks(
619
+ self, store, group_rank: int, group_world_size: int, spec: WorkerSpec
620
+ ) -> List[Worker]:
621
+ """Determine proper ranks for worker processes.
622
+
623
+ The rank assignment is done according to the following algorithm:
624
+
625
+ 1. Each agent writes its configuration (group_rank, group_world_size,
626
+ num_workers) to the common store.
627
+ 2. Each agent retrieves the configuration for all agents
628
+ and performs a two-level sort using role and rank.
629
+ 3. Determine the global rank: the global rank of the workers for the current
630
+ agent is computed as an offset into the infos array up to the group_rank of the agent.
631
+ The offset is computed as the sum of the local_world_size of all agents that
632
+ have a rank less than the group_rank. The workers would have the ranks:
633
+ [offset, offset+local_world_size)
634
+ 4. Determine the role rank: the role rank is determined using the algorithm
635
+ in point 3, with the exception that the offset starts from the first
636
+ agent that has the same role as the current one and has the minimum group rank.
637
+ """
638
+ role_infos = self._share_and_gather(store, group_rank, group_world_size, spec)
639
+ my_role_info = role_infos[group_rank]
640
+ worker_world_size, worker_global_ranks = self._get_ranks(role_infos, group_rank)
641
+ role_infos = sorted(
642
+ role_infos, key=functools.cmp_to_key(_RoleInstanceInfo.compare)
643
+ )
644
+ role_start_idx, role_end_idx = _RoleInstanceInfo.find_role_boundaries(
645
+ role_infos, my_role_info.role
646
+ )
647
+ role_pos = next(
648
+ idx
649
+ for idx, role_info in enumerate(role_infos)
650
+ if _RoleInstanceInfo.compare(role_info, my_role_info) == 0
651
+ )
652
+ role_world_size, role_ranks = self._get_ranks(
653
+ role_infos, role_pos, role_start_idx, role_end_idx + 1
654
+ )
655
+ workers = []
656
+ for ind in range(spec.local_world_size):
657
+ worker = Worker(
658
+ local_rank=ind,
659
+ global_rank=worker_global_ranks[ind],
660
+ role_rank=role_ranks[ind],
661
+ world_size=worker_world_size,
662
+ role_world_size=role_world_size,
663
+ )
664
+ workers.append(worker)
665
+ return workers
666
+
667
+ def _share_and_gather(
668
+ self, store, group_rank: int, group_world_size: int, spec: WorkerSpec
669
+ ) -> List:
670
+ agent_role_info = _RoleInstanceInfo(
671
+ spec.role, group_rank, spec.local_world_size
672
+ )
673
+ key_prefix = "torchelastic/role_info"
674
+ agent_config_enc = agent_role_info.serialize()
675
+ role_infos_bytes = store_util.synchronize(
676
+ store, agent_config_enc, group_rank, group_world_size, key_prefix
677
+ )
678
+ role_infos = [
679
+ _RoleInstanceInfo.deserialize(role_info_bytes)
680
+ for role_info_bytes in role_infos_bytes
681
+ ]
682
+ return role_infos
683
+
684
+ # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
685
+ # `torch.distributed.elastic.metrics.prof`.
686
+ @prof
687
+ def _initialize_workers(self, worker_group: WorkerGroup) -> None:
688
+ r"""Start a fresh set of workers for the worker_group.
689
+
690
+ Essentially, a rendezvous followed by a ``start_workers``.
691
+ The caller should first call ``_stop_workers()`` to stop running workers
692
+ prior to calling this method.
693
+
694
+ Optimistically sets the state of the worker group that
695
+ just started as ``HEALTHY`` and delegates the actual monitoring
696
+ of state to the ``_monitor_workers()`` method.
697
+ """
698
+ role = worker_group.spec.role
699
+ log.info("[%s] Rendezvous'ing worker group", role)
700
+
701
+ # TODO after stopping workers, wait at least monitor_interval*2 for
702
+ # workers on different nodes to fail on a collective op before waiting
703
+ # on the rdzv barrier, this way we ensure that nodes enter rdzv
704
+ # at around the same time and reduce false positive rdzv timeout errors
705
+ self._rendezvous(worker_group)
706
+
707
+ log.info("[%s] Starting worker group", role)
708
+ worker_ids = self._start_workers(worker_group)
709
+ for local_rank, w_id in worker_ids.items():
710
+ worker = worker_group.workers[local_rank]
711
+ worker.id = w_id
712
+
713
+ worker_group.state = WorkerState.HEALTHY
714
+
715
+ # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
716
+ # `torch.distributed.elastic.metrics.prof`.
717
+ @prof
718
+ def _restart_workers(self, worker_group: WorkerGroup) -> None:
719
+ """Restart (stops, rendezvous, starts) all local workers in the group."""
720
+ role = worker_group.spec.role
721
+ log.info("[%s] Stopping worker group", role)
722
+ self._stop_workers(worker_group)
723
+ worker_group.state = WorkerState.STOPPED
724
+ self._initialize_workers(worker_group)
725
+
726
+ # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
727
+ # `torch.distributed.elastic.metrics.prof`.
728
+ @prof
729
+ def run(self, role: str = DEFAULT_ROLE) -> RunResult:
730
+ start_time = time.monotonic()
731
+ shutdown_called: bool = False
732
+ try:
733
+ result = self._invoke_run(role)
734
+ self._total_execution_time = int(time.monotonic() - start_time)
735
+ self._record_metrics(result)
736
+ self._record_worker_events(result)
737
+ return result
738
+ except RendezvousGracefulExitError as e:
739
+ log.info("Rendezvous gracefully exited: %s", e)
740
+ except SignalException as e:
741
+ log.warning("Received %s death signal, shutting down workers", e.sigval)
742
+ self._shutdown(e.sigval)
743
+ shutdown_called = True
744
+ raise
745
+ finally:
746
+ if not shutdown_called:
747
+ self._shutdown()
748
+ # record the execution time in case there were any exceptions during run.
749
+ self._total_execution_time = int(time.monotonic() - start_time)
750
+
751
+ def get_event_failed(self) -> Event:
752
+ return self._construct_event(
753
+ state="FAILED",
754
+ source=EventSource.AGENT,
755
+ raw_error=traceback.format_exc(),
756
+ )
757
+
758
+ def get_event_succeeded(self) -> Event:
759
+ return self._construct_event(
760
+ state="SUCCEEDED",
761
+ source=EventSource.AGENT,
762
+ )
763
+
764
+ def _record_worker_events(self, result: RunResult) -> None:
765
+ for worker in self._worker_group.workers:
766
+ failure = result.failures.get(worker.global_rank)
767
+ state: str = self._get_worker_state(worker, result)
768
+ raw_error = json.dumps(failure.error_file_data) if failure else None
769
+ record(self._construct_event(state, EventSource.WORKER, worker, raw_error))
770
+
771
+ def _get_worker_state(self, worker: Worker, result: RunResult) -> str:
772
+ failure = result.failures.get(worker.global_rank)
773
+ if result.state in {WorkerState.UNHEALTHY, WorkerState.FAILED} and not failure:
774
+ # The worker got terminated by the torchelastic agent via SIGTERM signal
775
+ return "TERMINATED"
776
+ elif failure or worker.global_rank in result.return_values:
777
+ return result.state.value
778
+ else:
779
+ raise ValueError(f"Unknown worker: {worker.global_rank}")
780
+
781
+ def _construct_event(
782
+ self,
783
+ state: str,
784
+ source: EventSource,
785
+ worker: Optional[Worker] = None,
786
+ raw_error: Optional[str] = None,
787
+ ) -> Event:
788
+ wg = self._worker_group
789
+ spec = wg.spec
790
+ md = {
791
+ "group_world_size": wg.group_world_size,
792
+ "entry_point": spec.get_entrypoint_name(),
793
+ }
794
+ if worker:
795
+ md["local_rank"] = (worker.local_rank,)
796
+ md["role_rank"] = (worker.role_rank,)
797
+ md["role_world_size"] = (worker.role_world_size,)
798
+ global_rank = worker.global_rank
799
+ worker_id = str(worker.id)
800
+ else:
801
+ global_rank = None
802
+ worker_id = None
803
+ md_str = json.dumps(md)
804
+ metadata = {
805
+ "run_id": spec.rdzv_handler.get_run_id(),
806
+ "global_rank": global_rank,
807
+ "group_rank": wg.group_rank,
808
+ "worker_id": worker_id,
809
+ "role": spec.role,
810
+ "hostname": _get_fq_hostname(),
811
+ "state": state,
812
+ "total_run_time": self._total_execution_time,
813
+ "rdzv_backend": spec.rdzv_handler.get_backend(),
814
+ "raw_error": raw_error,
815
+ "metadata": md_str,
816
+ "agent_restarts": spec.max_restarts - self._remaining_restarts,
817
+ }
818
+ return Event(
819
+ f"torchelastic.worker.status.{state}", source=source, metadata=metadata
820
+ )
821
+
822
+ def _record_metrics(self, group_results: RunResult):
823
+ is_failed = group_results.is_failed()
824
+ self._record_flakiness_metric(is_failed)
825
+ spec = self._worker_group.spec
826
+ restarts_happened = self._remaining_restarts != spec.max_restarts
827
+ put_metric(f"workers.{spec.role}.run_total", 1)
828
+ self._record_metric_with_condition(
829
+ "run_success_with_retries", not is_failed and restarts_happened
830
+ )
831
+ self._record_metric_with_condition(
832
+ "run_success_no_retries", not is_failed and not restarts_happened
833
+ )
834
+ self._record_metric_with_condition(
835
+ "run_failed_with_retries", is_failed and restarts_happened
836
+ )
837
+ self._record_metric_with_condition(
838
+ "run_failed_no_retries", is_failed and not restarts_happened
839
+ )
840
+
841
+ def _record_metric_with_condition(self, metric_name, condition):
842
+ spec = self._worker_group.spec
843
+ if condition:
844
+ put_metric(f"workers.{spec.role}.{metric_name}", 1)
845
+ else:
846
+ put_metric(f"workers.{spec.role}.{metric_name}", 0)
847
+
848
+ def _record_flakiness_metric(self, is_failed: bool = False):
849
+ if is_failed:
850
+ flakiness = 100.0
851
+ else:
852
+ spec = self._worker_group.spec
853
+ flakiness = 100.0 - 100.0 * (self._remaining_restarts + 1) / (
854
+ spec.max_restarts + 1
855
+ )
856
+ spec = self._worker_group.spec
857
+
858
+ put_metric(f"workers.{spec.role}.flakiness", int(flakiness))
859
+
860
+ def _invoke_run(self, role: str = DEFAULT_ROLE) -> RunResult:
861
+ # NOTE: currently only works for a single role
862
+
863
+ spec = self._worker_group.spec
864
+ role = spec.role
865
+
866
+ log.info(
867
+ "[%s] starting workers for entrypoint: %s", role, spec.get_entrypoint_name()
868
+ )
869
+
870
+ self._initialize_workers(self._worker_group)
871
+ monitor_interval = spec.monitor_interval
872
+ rdzv_handler = spec.rdzv_handler
873
+
874
+ while True:
875
+ assert self._worker_group.state != WorkerState.INIT
876
+ time.sleep(monitor_interval)
877
+ run_result = self._monitor_workers(self._worker_group)
878
+ state = run_result.state
879
+ self._worker_group.state = state
880
+
881
+ put_metric(f"workers.{role}.remaining_restarts", self._remaining_restarts)
882
+ put_metric(f"workers.{role}.{state.name.lower()}", 1)
883
+
884
+ if state == WorkerState.SUCCEEDED:
885
+ log.info(
886
+ "[%s] worker group successfully finished."
887
+ " Waiting %s seconds for other agents to finish.",
888
+ role, self._exit_barrier_timeout
889
+ )
890
+ self._exit_barrier()
891
+ return run_result
892
+ elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED}:
893
+ if self._remaining_restarts > 0:
894
+ log.info(
895
+ "[%s] Worker group %s. "
896
+ "%s/%s attempts left;"
897
+ " will restart worker group",
898
+ role, state.name, self._remaining_restarts, spec.max_restarts
899
+ )
900
+ self._remaining_restarts -= 1
901
+ self._restart_workers(self._worker_group)
902
+ else:
903
+ self._stop_workers(self._worker_group)
904
+ self._worker_group.state = WorkerState.FAILED
905
+ return run_result
906
+ elif state == WorkerState.HEALTHY:
907
+ # membership changes do not count as retries
908
+ num_nodes_waiting = rdzv_handler.num_nodes_waiting()
909
+ group_rank = self._worker_group.group_rank
910
+ if num_nodes_waiting > 0:
911
+ log.info(
912
+ "[%s] Detected %s "
913
+ "new nodes from group_rank=%s; "
914
+ "will restart worker group",
915
+ role, num_nodes_waiting, group_rank
916
+ )
917
+ self._restart_workers(self._worker_group)
918
+ else:
919
+ raise Exception(f"[{role}] Worker group in {state.name} state")
920
+
921
+ def _exit_barrier(self):
922
+ """
923
+ Define a barrier that keeps the agent process alive until all workers finish.
924
+
925
+ Wait for ``exit_barrier_timeout`` seconds for all agents to finish
926
+ executing their local workers (either successfully or not). This
927
+ acts as a safety guard against user scripts that terminate at different
928
+ times.
929
+ """
930
+ log.info(
931
+ "Local worker group finished (%s). "
932
+ "Waiting %s seconds for other agents to finish",
933
+ self._worker_group.state, self._exit_barrier_timeout
934
+ )
935
+ start = time.time()
936
+ try:
937
+ store_util.barrier(
938
+ self._store,
939
+ self._worker_group.group_rank,
940
+ self._worker_group.group_world_size,
941
+ key_prefix=_TERMINAL_STATE_SYNC_ID,
942
+ barrier_timeout=self._exit_barrier_timeout,
943
+ )
944
+ log.info(
945
+ "Done waiting for other agents. Elapsed: %s seconds", time.time() - start
946
+ )
947
+ except SignalException as e:
948
+ log.warning("Got termination signal: %s", e.sigval)
949
+ raise
950
+ except Exception:
951
+ log.exception(
952
+ "Error waiting on exit barrier. Elapsed: %s seconds",
953
+ time.time() - start
954
+ )
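As a concrete illustration of the rank-assignment algorithm documented in ``_assign_worker_ranks`` above, here is a minimal, self-contained sketch (not part of the diff; the three-agent layout is a made-up example) of the offset computation that ``_get_ranks`` performs::

    # Each tuple is (role, group_rank, local_world_size), as exchanged via _RoleInstanceInfo.
    role_infos = [("trainer", 0, 4), ("trainer", 1, 2), ("evaluator", 2, 1)]

    def get_ranks(infos, role_idx, start_idx=0, end_idx=None):
        # Offset = sum of local_world_size of all agents before role_idx;
        # total = sum over the whole slice (the worker world size).
        if end_idx is None:
            end_idx = len(infos)
        prefix = sum(infos[i][2] for i in range(start_idx, end_idx) if i < role_idx)
        total = sum(infos[i][2] for i in range(start_idx, end_idx))
        return total, list(range(prefix, prefix + infos[role_idx][2]))

    print(get_ranks(role_infos, 1))  # (7, [4, 5]) -> agent 1's workers get global ranks 4 and 5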
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/local_elastic_agent.py ADDED
@@ -0,0 +1,339 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+
10
+ import json
11
+ import os
12
+ import signal
13
+ import socket
14
+ from string import Template
15
+ import uuid
16
+ from typing import Any, Dict, Optional, Tuple
17
+
18
+ import torch.distributed.elastic.timer as timer
19
+ from torch.distributed.elastic import events
20
+
21
+ from torch.distributed.elastic.agent.server.api import (
22
+ RunResult,
23
+ SimpleElasticAgent,
24
+ WorkerGroup,
25
+ WorkerSpec,
26
+ WorkerState,
27
+ )
28
+ from torch.distributed.elastic.events.api import EventMetadataValue
29
+ from torch.distributed.elastic.metrics.api import prof
30
+ from torch.distributed.elastic.multiprocessing import PContext, start_processes, LogsSpecs
31
+ from torch.distributed.elastic.utils import macros
32
+ from torch.distributed.elastic.utils.logging import get_logger
33
+
34
+ log = get_logger(__name__)
35
+
36
+ __all__ = [
37
+ "LocalElasticAgent",
38
+ "TORCHELASTIC_ENABLE_FILE_TIMER",
39
+ "TORCHELASTIC_TIMER_FILE",
40
+ ]
41
+
42
+ TORCHELASTIC_ENABLE_FILE_TIMER = "TORCHELASTIC_ENABLE_FILE_TIMER"
43
+ TORCHELASTIC_TIMER_FILE = "TORCHELASTIC_TIMER_FILE"
44
+
45
+ class LocalElasticAgent(SimpleElasticAgent):
46
+ """An implementation of :py:class:`torchelastic.agent.server.ElasticAgent` that handles host-local workers.
47
+
48
+ This agent is deployed per host and is configured to spawn ``n`` workers.
49
+ When using GPUs, ``n`` maps to the number of GPUs available on the host.
50
+
51
+ The local agent does not communicate to other local agents deployed on
52
+ other hosts, even if the workers may communicate inter-host. The worker id
53
+ is interpreted to be a local process. The agent starts and stops all worker
54
+ processes as a single unit.
55
+
56
+
57
+ The worker function and argument passed to the worker function must be
58
+ python multiprocessing compatible. To pass multiprocessing data structures
59
+ to the workers you may create the data structure in the same multiprocessing
60
+ context as the specified ``start_method`` and pass it as a function argument.
61
+
62
+ The ``exit_barrier_timeout`` specifies the amount of time (in seconds) to wait
63
+ for other agents to finish. This acts as a safety net to handle cases where
64
+ workers finish at different times, to prevent agents from viewing workers
65
+ that finished early as a scale-down event. It is strongly advised that the
66
+ user code deal with ensuring that workers are terminated in a synchronous
67
+ manner rather than relying on the exit_barrier_timeout.
68
+
69
+ A named pipe based watchdog can be enabled in ```LocalElasticAgent``` if an
70
+ environment variable ``TORCHELASTIC_ENABLE_FILE_TIMER`` with value 1 has
71
+ been defined in the ```LocalElasticAgent``` process.
72
+ Optionally, another environment variable ```TORCHELASTIC_TIMER_FILE```
73
+ can be set with a unique file name for the named pipe. If the environment
74
+ variable ```TORCHELASTIC_TIMER_FILE``` is not set, ```LocalElasticAgent```
75
+ will internally create a unique file name and set it to the environment
76
+ variable ```TORCHELASTIC_TIMER_FILE```, and this environment variable will
77
+ be propagated to the worker processes to allow them to connect to the same
78
+ named pipe that ```LocalElasticAgent``` uses.
79
+
80
+ Logs are written to the specified log directory. Each log line will be by default
81
+ prefixed by ``[${role_name}${local_rank}]:`` (e.g. ``[trainer0]: foobar``).
82
+ Log prefixes can be customized by passing a `template string
83
+ <https://docs.python.org/3/library/string.html#template-strings>`_ as the
84
+ ``log_line_prefix_template`` argument.
85
+ The following macros (identifiers) are substituted at runtime:
86
+ ``${role_name}, ${local_rank}, ${rank}``. For example, to prefix each log line with
87
+ global rank instead of the local rank, set ``log_line_prefix_template = "[${rank}]:"``.
88
+
89
+
90
+ Example launching function
91
+
92
+ ::
93
+
94
+ def trainer(args) -> str:
95
+ return "do train"
96
+
97
+ def main():
98
+ start_method="spawn"
99
+ shared_queue= multiprocessing.get_context(start_method).Queue()
100
+ spec = WorkerSpec(
101
+ role="trainer",
102
+ local_world_size=nproc_per_process,
103
+ entrypoint=trainer,
104
+ args=("foobar",),
105
+ ...<OTHER_PARAMS...>)
106
+ agent = LocalElasticAgent(spec, logs_specs=DefaultLogsSpecs(), start_method=start_method)
107
+ results = agent.run()
108
+
109
+ if results.is_failed():
110
+ print("trainer failed")
111
+ else:
112
+ print(f"rank 0 return value: {results.return_values[0]}")
113
+ # prints -> rank 0 return value: do train
114
+
115
+ Example launching binary
116
+
117
+ ::
118
+
119
+ def main():
120
+ spec = WorkerSpec(
121
+ role="trainer",
122
+ local_world_size=nproc_per_process,
123
+ entrypoint="/usr/local/bin/trainer",
124
+ args=("--trainer-args", "foobar"),
125
+ ...<OTHER_PARAMS...>)
126
+ agent = LocalElasticAgent(spec, logs_specs=DefaultLogsSpecs())
127
+ results = agent.run()
128
+
129
+ if not results.is_failed():
130
+ print("binary launches do not have return values")
131
+
132
+ """
133
+
134
+ def __init__(
135
+ self,
136
+ spec: WorkerSpec,
137
+ logs_specs: LogsSpecs,
138
+ start_method="spawn",
139
+ exit_barrier_timeout: float = 300,
140
+ log_line_prefix_template: Optional[str] = None,
141
+ ):
142
+ super().__init__(spec, exit_barrier_timeout)
143
+ self._start_method = start_method
144
+ self._pcontext: Optional[PContext] = None
145
+ self._rdzv_handler = spec.rdzv_handler
146
+ self._log_line_prefix_template = log_line_prefix_template
147
+ self._worker_watchdog: Optional[timer.FileTimerServer] = None
148
+ self._logs_specs = logs_specs
149
+
150
+
151
+ def _setup_local_watchdog(self, envs: Dict[int, Dict[str, str]]) -> None:
152
+ enable_watchdog_env_name = TORCHELASTIC_ENABLE_FILE_TIMER
153
+ watchdog_enabled = os.getenv(enable_watchdog_env_name)
154
+ watchdog_file_env_name = TORCHELASTIC_TIMER_FILE
155
+ watchdog_file_path = os.getenv(watchdog_file_env_name)
156
+ if watchdog_enabled is not None and str(watchdog_enabled) == "1":
157
+ if watchdog_file_path is None:
158
+ watchdog_file_path = "/tmp/watchdog_timer_" + str(uuid.uuid4())
159
+ log.info("Starting a FileTimerServer with %s ...", watchdog_file_path)
160
+ self._worker_watchdog = timer.FileTimerServer(
161
+ file_path=watchdog_file_path,
162
+ max_interval=0.1,
163
+ daemon=True,
164
+ log_event=self._log_watchdog_event)
165
+ self._worker_watchdog.start()
166
+ log.info("FileTimerServer started")
167
+ else:
168
+ log.info("Environment variable '%s' not set to 1. Not starting FileTimerServer.", enable_watchdog_env_name)
169
+ # Propagate the watchdog file env to worker processes
170
+ if watchdog_file_path is not None:
171
+ for worker_env in envs.values():
172
+ worker_env[watchdog_file_env_name] = watchdog_file_path
173
+
174
+
175
+ def _get_fq_hostname(self) -> str:
176
+ return socket.getfqdn(socket.gethostname())
177
+
178
+ def _log_watchdog_event(
179
+ self,
180
+ name: str,
181
+ request: Optional[timer.FileTimerRequest],
182
+ ) -> None:
183
+ wg = self._worker_group
184
+ spec = wg.spec
185
+ md = {
186
+ "watchdog_event": name
187
+ }
188
+ if request is not None:
189
+ md["worker_pid"] = str(request.worker_pid)
190
+ md["scope_id"] = request.scope_id
191
+ md["expiration_time"] = str(request.expiration_time)
192
+ md["signal"] = str(request.signal)
193
+ md_str = json.dumps(md)
194
+ state = "RUNNING"
195
+ metadata: Dict[str, EventMetadataValue] = {
196
+ "run_id": spec.rdzv_handler.get_run_id(),
197
+ "global_rank": None,
198
+ "group_rank": wg.group_rank,
199
+ "worker_id": None,
200
+ "role": spec.role,
201
+ "hostname": self._get_fq_hostname(),
202
+ "state": state,
203
+ "total_run_time": self._total_execution_time,
204
+ "rdzv_backend": spec.rdzv_handler.get_backend(),
205
+ "raw_error": None,
206
+ "metadata": md_str,
207
+ "agent_restarts": spec.max_restarts - self._remaining_restarts,
208
+ }
209
+ # Note: The 'metadata' field of the Event is converted to a TorchelasticStatusLogEntry later.
210
+ # The 'name' field of the Event is NOT used in the TorchelasticStatusLogEntry.
211
+ event = events.Event(
212
+ name=name, source=events.EventSource.AGENT, metadata=metadata
213
+ )
214
+ events.record(event)
215
+
216
+ # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
217
+ # `torch.distributed.elastic.metrics.prof`.
218
+ @prof
219
+ def _stop_workers(self, worker_group: WorkerGroup) -> None:
220
+ self._shutdown()
221
+
222
+ # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
223
+ # `torch.distributed.elastic.metrics.prof`.
224
+ @prof
225
+ def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
226
+ spec = worker_group.spec
227
+ store = worker_group.store
228
+ assert store is not None
229
+ master_addr, master_port = super()._get_master_addr_port(store)
230
+ restart_count = spec.max_restarts - self._remaining_restarts
231
+
232
+ use_agent_store = spec.rdzv_handler.get_backend() == "static"
233
+
234
+ args: Dict[int, Tuple] = {}
235
+ envs: Dict[int, Dict[str, str]] = {}
236
+ log_line_prefixes: Optional[Dict[int, str]] = {} if self._log_line_prefix_template else None
237
+ for worker in worker_group.workers:
238
+ local_rank = worker.local_rank
239
+ worker_env = {
240
+ "LOCAL_RANK": str(local_rank),
241
+ "RANK": str(worker.global_rank),
242
+ "GROUP_RANK": str(worker_group.group_rank),
243
+ "ROLE_RANK": str(worker.role_rank),
244
+ "ROLE_NAME": spec.role,
245
+ "LOCAL_WORLD_SIZE": str(spec.local_world_size),
246
+ "WORLD_SIZE": str(worker.world_size),
247
+ "GROUP_WORLD_SIZE": str(worker_group.group_world_size),
248
+ "ROLE_WORLD_SIZE": str(worker.role_world_size),
249
+ "MASTER_ADDR": master_addr,
250
+ "MASTER_PORT": str(master_port),
251
+ "TORCHELASTIC_RESTART_COUNT": str(restart_count),
252
+ "TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts),
253
+ "TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(),
254
+ "TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store),
255
+ "TORCH_NCCL_ASYNC_ERROR_HANDLING": os.getenv(
256
+ "TORCH_NCCL_ASYNC_ERROR_HANDLING", str(1)
257
+ ),
258
+ }
259
+ if "OMP_NUM_THREADS" in os.environ:
260
+ worker_env["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"]
261
+
262
+
263
+ if self._log_line_prefix_template:
264
+ log_line_prefix = Template(self._log_line_prefix_template).safe_substitute(
265
+ role_name=spec.role,
266
+ rank=worker.global_rank,
267
+ local_rank=local_rank,)
268
+ log_line_prefixes[local_rank] = log_line_prefix
269
+
270
+ envs[local_rank] = worker_env
271
+ worker_args = list(spec.args)
272
+ worker_args = macros.substitute(worker_args, str(local_rank))
273
+ args[local_rank] = tuple(worker_args)
274
+
275
+ self._setup_local_watchdog(envs=envs)
276
+
277
+ assert spec.entrypoint is not None
278
+ assert self._logs_specs is not None
279
+ self._pcontext = start_processes(
280
+ name=spec.role,
281
+ entrypoint=spec.entrypoint,
282
+ args=args,
283
+ envs=envs,
284
+ logs_specs=self._logs_specs,
285
+ log_line_prefixes=log_line_prefixes,
286
+ start_method=self._start_method,
287
+ )
288
+
289
+ return self._pcontext.pids()
290
+
291
+ def _shutdown(self, death_sig: signal.Signals = signal.SIGTERM) -> None:
292
+ if self._worker_watchdog is not None:
293
+ self._worker_watchdog.stop()
294
+ self._worker_watchdog = None
295
+ if self._pcontext:
296
+ self._pcontext.close(death_sig)
297
+ if self._rdzv_handler:
298
+ self._rdzv_handler.shutdown()
299
+
300
+ # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
301
+ # `torch.distributed.elastic.metrics.prof`.
302
+ @prof
303
+ def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:
304
+ role = worker_group.spec.role
305
+ worker_pids = {w.id for w in worker_group.workers}
306
+ assert self._pcontext is not None
307
+ pc_pids = set(self._pcontext.pids().values())
308
+ if worker_pids != pc_pids:
309
+ log.error(
310
+ "[%s] worker pids do not match process_context pids."
311
+ " Expected: %s, actual: %s",
312
+ role, worker_pids, pc_pids
313
+ )
314
+ return RunResult(state=WorkerState.UNKNOWN)
315
+
316
+ result = self._pcontext.wait(0)
317
+ if result:
318
+ if result.is_failed():
319
+ # map local rank failure to global rank
320
+ worker_failures = {}
321
+ for local_rank, failure in result.failures.items():
322
+ worker = worker_group.workers[local_rank]
323
+ worker_failures[worker.global_rank] = failure
324
+ return RunResult(
325
+ state=WorkerState.FAILED,
326
+ failures=worker_failures,
327
+ )
328
+ else:
329
+ # copy ret_val_queue into a map keyed by global rank
330
+ workers_ret_vals = {}
331
+ for local_rank, ret_val in result.return_values.items():
332
+ worker = worker_group.workers[local_rank]
333
+ workers_ret_vals[worker.global_rank] = ret_val
334
+ return RunResult(
335
+ state=WorkerState.SUCCEEDED,
336
+ return_values=workers_ret_vals,
337
+ )
338
+ else:
339
+ return RunResult(state=WorkerState.HEALTHY)
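The watchdog and log-prefix behavior described in the class docstring is driven by environment variables and constructor arguments; a minimal sketch of wiring them up (``spec`` and ``logs_specs`` are placeholders for objects built as in the launching examples above)::

    import os

    from torch.distributed.elastic.agent.server.local_elastic_agent import (
        LocalElasticAgent,
        TORCHELASTIC_ENABLE_FILE_TIMER,
    )

    # Ask the agent to start a FileTimerServer; when TORCHELASTIC_TIMER_FILE is
    # left unset, a unique pipe path is generated and propagated to the workers.
    os.environ[TORCHELASTIC_ENABLE_FILE_TIMER] = "1"

    # `spec` (WorkerSpec) and `logs_specs` (LogsSpecs) are placeholders assumed
    # to have been built elsewhere, e.g. as in the class docstring examples.
    agent = LocalElasticAgent(
        spec,
        logs_specs=logs_specs,
        start_method="spawn",
        log_line_prefix_template="[${role_name}-${rank}]: ",  # uses the documented macros
    )
    result = agent.run()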
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.13 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/api.cpython-310.pyc ADDED
Binary file (3.82 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/handlers.cpython-310.pyc ADDED
Binary file (596 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/api.py ADDED
@@ -0,0 +1,112 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ import json
10
+ from dataclasses import asdict, dataclass, field
11
+ from enum import Enum
12
+ from typing import Dict, Union, Optional
13
+
14
+ __all__ = ['EventSource', 'Event', 'NodeState', 'RdzvEvent']
15
+
16
+ EventMetadataValue = Union[str, int, float, bool, None]
17
+
18
+
19
+ class EventSource(str, Enum):
20
+ """Known identifiers of the event producers."""
21
+
22
+ AGENT = "AGENT"
23
+ WORKER = "WORKER"
24
+
25
+
26
+ @dataclass
27
+ class Event:
28
+ """
29
+ This class represents a generic event that occurs during torchelastic job execution.
30
+
31
+ The event can be any kind of meaningful action.
32
+
33
+ Args:
34
+ name: event name.
35
+ source: the event producer, e.g. agent or worker
36
+ timestamp: timestamp in milliseconds when event occurred.
37
+ metadata: additional data that is associated with the event.
38
+ """
39
+
40
+ name: str
41
+ source: EventSource
42
+ timestamp: int = 0
43
+ metadata: Dict[str, EventMetadataValue] = field(default_factory=dict)
44
+
45
+ def __str__(self):
46
+ return self.serialize()
47
+
48
+ @staticmethod
49
+ def deserialize(data: Union[str, "Event"]) -> "Event":
50
+ if isinstance(data, Event):
51
+ return data
52
+ if isinstance(data, str):
53
+ data_dict = json.loads(data)
54
+ data_dict["source"] = EventSource[data_dict["source"]] # type: ignore[possibly-undefined]
55
+ return Event(**data_dict)
56
+
57
+ def serialize(self) -> str:
58
+ return json.dumps(asdict(self))
59
+
60
+
61
+ class NodeState(str, Enum):
62
+ """The states that a node can be in rendezvous."""
63
+
64
+ INIT = "INIT"
65
+ RUNNING = "RUNNING"
66
+ SUCCEEDED = "SUCCEEDED"
67
+ FAILED = "FAILED"
68
+
69
+
70
+ @dataclass
71
+ class RdzvEvent:
72
+ """
73
+ Dataclass to represent any rendezvous event.
74
+
75
+ Args:
76
+ name: Event name. (E.g. Current action being performed)
77
+ run_id: The run id of the rendezvous
78
+ message: The message describing the event
79
+ hostname: Hostname of the node
80
+ pid: The process id of the node
81
+ node_state: The state of the node (INIT, RUNNING, SUCCEEDED, FAILED)
82
+ master_endpoint: The master endpoint for the rendezvous store, if known
83
+ rank: The rank of the node, if known
84
+ local_id: The local_id of the node, if defined in dynamic_rendezvous.py
85
+ error_trace: Error stack trace, if this is an error event.
86
+ """
87
+
88
+ name: str
89
+ run_id: str
90
+ message: str
91
+ hostname: str
92
+ pid: int
93
+ node_state: NodeState
94
+ master_endpoint: str = ""
95
+ rank: Optional[int] = None
96
+ local_id: Optional[int] = None
97
+ error_trace: str = ""
98
+
99
+ def __str__(self):
100
+ return self.serialize()
101
+
102
+ @staticmethod
103
+ def deserialize(data: Union[str, "RdzvEvent"]) -> "RdzvEvent":
104
+ if isinstance(data, RdzvEvent):
105
+ return data
106
+ if isinstance(data, str):
107
+ data_dict = json.loads(data)
108
+ data_dict["node_state"] = NodeState[data_dict["node_state"]] # type: ignore[possibly-undefined]
109
+ return RdzvEvent(**data_dict)
110
+
111
+ def serialize(self) -> str:
112
+ return json.dumps(asdict(self))
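Both event classes are plain dataclasses with JSON (de)serialization, so a round trip is straightforward; a small sketch (the event name and metadata below are illustrative, not values emitted by torchelastic itself)::

    from torch.distributed.elastic.events.api import Event, EventSource

    ev = Event(
        name="custom.watchdog_event",  # illustrative name
        source=EventSource.AGENT,
        metadata={"worker_pid": 1234, "scope_id": "trainer/0"},
    )
    payload = ev.serialize()               # JSON string, same as str(ev)
    restored = Event.deserialize(payload)  # EventSource is re-hydrated by member name
    assert restored == ev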
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py ADDED
@@ -0,0 +1,235 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ """
10
+ Library that launches and manages ``n`` copies of worker subprocesses, specified either as a function or as a binary.
11
+
12
+ For functions, it uses ``torch.multiprocessing`` (and therefore python
13
+ ``multiprocessing``) to spawn/fork worker processes. For binaries it uses python
14
+ ``subprocess.Popen`` to create worker processes.
15
+
16
+
17
+ Usage 1: Launching two trainers as a function
18
+
19
+ ::
20
+
21
+ from torch.distributed.elastic.multiprocessing import DefaultLogsSpecs, Std, start_processes
22
+
23
+ def trainer(a, b, c):
24
+ pass # train
25
+
26
+
27
+ # runs two trainers
28
+ # LOCAL_RANK=0 trainer(1,2,3)
29
+ # LOCAL_RANK=1 trainer(4,5,6)
30
+ ctx = start_processes(
31
+ name="trainer",
32
+ entrypoint=trainer,
33
+ args={0: (1,2,3), 1: (4,5,6)},
34
+ envs={0: {"LOCAL_RANK": 0}, 1: {"LOCAL_RANK": 1}},
35
+ log_dir="/tmp/foobar",
36
+ redirects=Std.ALL, # write all worker stdout/stderr to a log file
37
+ tee={0: Std.ERR}, # tee only local rank 0's stderr to console
38
+ )
39
+
40
+ # waits for all copies of trainer to finish
41
+ ctx.wait()
42
+
43
+ Usage 2: Launching 2 echo workers as a binary
44
+
45
+ ::
46
+
47
+ # same as invoking
48
+ # echo hello
49
+ # echo world > stdout.log
50
+ ctx = start_processes(
51
+ name="echo",
52
+ entrypoint="echo",
53
+ args={0: ("hello",), 1: ("world",)},
54
+ logs_specs=DefaultLogsSpecs(
55
+ log_dir="/tmp/foobar",
+ redirects={1: Std.OUT},
+ ),
56
+ )
57
+
58
+ Just like ``torch.multiprocessing``, the return value of the function
59
+ :func:`start_processes` is a process context (:class:`api.PContext`). If a function
60
+ was launched, a :class:`api.MultiprocessContext` is returned and if a binary
61
+ was launched a :class:`api.SubprocessContext` is returned. Both are specific
62
+ implementations of the parent :class:`api.PContext` class.
63
+ """
64
+
65
+ import os
66
+ from typing import Callable, Dict, Optional, Tuple, Union, Set
67
+
68
+ from torch.distributed.elastic.multiprocessing.api import ( # noqa: F401
69
+ _validate_full_rank,
70
+ DefaultLogsSpecs,
71
+ LogsDest,
72
+ LogsSpecs,
73
+ MultiprocessContext,
74
+ PContext,
75
+ ProcessFailure,
76
+ RunProcsResult,
77
+ SignalException,
78
+ Std,
79
+ SubprocessContext,
80
+ to_map,
81
+ )
82
+ from torch.distributed.elastic.utils.logging import get_logger
83
+
84
+ __all__ = [
85
+ "start_processes",
86
+ "MultiprocessContext",
87
+ "PContext",
88
+ "ProcessFailure",
89
+ "RunProcsResult",
90
+ "SignalException",
91
+ "Std",
92
+ "LogsDest",
93
+ "LogsSpecs",
94
+ "DefaultLogsSpecs",
95
+ "SubprocessContext",
96
+ "to_map",
97
+ ]
98
+
99
+ log = get_logger(__name__)
100
+
101
+
102
+ def start_processes(
103
+ name: str,
104
+ entrypoint: Union[Callable, str],
105
+ args: Dict[int, Tuple],
106
+ envs: Dict[int, Dict[str, str]],
107
+ logs_specs: LogsSpecs,
108
+ log_line_prefixes: Optional[Dict[int, str]] = None,
109
+ start_method: str = "spawn",
110
+ ) -> PContext:
111
+ """
112
+ Start ``n`` copies of ``entrypoint`` processes with the provided options.
113
+
114
+ ``entrypoint`` is either a ``Callable`` (function) or a ``str`` (binary).
115
+ The number of copies is determined by the number of entries for ``args`` and
116
+ ``envs`` arguments, which need to have the same key set.
117
+
118
+ ``args`` and ``envs`` parameters are the arguments and environment variables
119
+ to pass down to the entrypoint mapped by the replica index (local rank).
120
+ All local ranks must be accounted for.
121
+ That is, the keyset should be ``{0,1,...,(nprocs-1)}``.
122
+
123
+ .. note:: When the ``entrypoint`` is a binary (``str``), ``args`` can only be strings.
124
+ If any other type is given, then it is cast to a string representation
125
+ (e.g. ``str(arg1)``). Furthermore, a binary failure will only write
126
+ an ``error.json`` error file if the main function is annotated with
127
+ ``torch.distributed.elastic.multiprocessing.errors.record``. For function launches,
128
+ this is done by default and there is no need to manually annotate
129
+ with the ``@record`` annotation.
130
+
131
+ ``redirects`` and ``tee`` (configured via ``logs_specs``) are bitmasks specifying which std stream(s) to redirect
132
+ to a log file in the ``log_dir``. Valid mask values are defined in ``Std``.
133
+ To redirect/tee only certain local ranks, pass ``redirects`` as a map with the key as
134
+ the local rank to specify the redirect behavior for.
135
+ Any missing local ranks will default to ``Std.NONE``.
136
+
137
+ ``tee`` acts like the unix "tee" command in that it redirects + prints to console.
138
+ To avoid worker stdout/stderr from printing to console, use the ``redirects`` parameter.
139
+
140
+ For each process, the ``log_dir`` will contain:
141
+
142
+ #. ``{local_rank}/error.json``: if the process failed, a file with the error info
143
+ #. ``{local_rank}/stdout.json``: if ``redirect & STDOUT == STDOUT``
144
+ #. ``{local_rank}/stderr.json``: if ``redirect & STDERR == STDERR``
145
+
146
+ .. note:: It is expected that the ``log_dir`` exists, is empty, and is a directory.
147
+
148
+ Example:
149
+ ::
150
+
151
+ log_dir = "/tmp/test"
152
+
153
+ # ok; two copies of foo: foo("bar0"), foo("bar1")
154
+ start_processes(
155
+ name="trainer",
156
+ entrypoint=foo,
157
+ args={0:("bar0",), 1:("bar1",)},
158
+ envs={0:{}, 1:{}},
159
+ logs_specs=DefaultLogsSpecs(log_dir=log_dir)
160
+ )
161
+
162
+ # invalid; envs missing for local rank 1
163
+ start_processes(
164
+ name="trainer",
165
+ entrypoint=foo,
166
+ args={0:("bar0",), 1:("bar1",)},
167
+ envs={0:{}},
168
+ logs_specs=DefaultLogsSpecs(log_dir=log_dir)
169
+ )
170
+
171
+ # ok; two copies of /usr/bin/touch: touch file1, touch file2
172
+ start_processes(
173
+ name="trainer",
174
+ entrypoint="/usr/bin/touch",
175
+ args={0:("file1",), 1:("file2",)},
176
+ envs={0:{}, 1:{}},
177
+ logs_specs=DefaultLogsSpecs(log_dir=log_dir)
178
+ )
179
+
180
+ # caution; arguments are cast to strings, runs:
181
+ # echo "1" "2" "3" and echo "[1, 2, 3]"
182
+ start_processes(
183
+ name="trainer",
184
+ entrypoint="/usr/bin/echo",
185
+ args={0:(1,2,3), 1:([1,2,3],)},
186
+ envs={0:{}, 1:{}},
187
+ logs_specs=DefaultLogsSpecs(log_dir=log_dir)
188
+ )
189
+
190
+ Args:
191
+ name: a human readable short name that describes what the processes are
192
+ (used as header when tee'ing stdout/stderr outputs)
193
+ entrypoint: either a ``Callable`` (function) or ``cmd`` (binary)
194
+ args: arguments to each replica
195
+ envs: env vars to each replica
196
+ logs_specs: defines the log directory and the redirect/tee behavior per local rank
197
+ log_line_prefixes: optional per-local-rank prefix prepended to tee'd log lines
198
+ start_method: multiprocessing start method (spawn, fork, forkserver)
199
+ ignored for binaries
202
+
203
+ """
204
+
205
+ nprocs = len(args)
206
+ _validate_full_rank(args, nprocs, "args")
207
+ _validate_full_rank(envs, nprocs, "envs")
208
+
209
+ context: PContext
210
+ if isinstance(entrypoint, str):
211
+ context = SubprocessContext(
212
+ name=name,
213
+ entrypoint=entrypoint,
214
+ args=args,
215
+ envs=envs,
216
+ logs_specs=logs_specs,
217
+ log_line_prefixes=log_line_prefixes,
218
+ )
219
+ else:
220
+ context = MultiprocessContext(
221
+ name=name,
222
+ entrypoint=entrypoint,
223
+ args=args,
224
+ envs=envs,
225
+ log_line_prefixes=log_line_prefixes,
226
+ start_method=start_method,
227
+ logs_specs=logs_specs,
228
+ )
229
+
230
+ try:
231
+ context.start()
232
+ return context
233
+ except Exception:
234
+ context.close()
235
+ raise
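Since this ``start_processes`` takes a ``LogsSpecs`` rather than the older ``log_dir``/``redirects``/``tee`` keyword arguments, here is a hedged end-to-end sketch of the function-entrypoint path (the log directory and values are illustrative)::

    from torch.distributed.elastic.multiprocessing import (
        DefaultLogsSpecs,
        Std,
        start_processes,
    )

    def trainer(a, b, c):
        return a + b + c  # placeholder "training" function

    if __name__ == "__main__":  # required for the default "spawn" start method
        ctx = start_processes(
            name="trainer",
            entrypoint=trainer,
            args={0: (1, 2, 3), 1: (4, 5, 6)},
            envs={0: {"LOCAL_RANK": "0"}, 1: {"LOCAL_RANK": "1"}},
            logs_specs=DefaultLogsSpecs(
                log_dir="/tmp/foobar",   # illustrative log directory
                redirects=Std.ALL,       # write every worker's stdout/stderr to files
                tee={0: Std.ERR},        # additionally tail rank 0's stderr to console
            ),
        )
        result = ctx.wait()              # blocks until all workers finish
        if result and not result.is_failed():
            print(result.return_values)  # {0: 6, 1: 15}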
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py ADDED
@@ -0,0 +1,873 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ import abc
10
+ import logging
11
+ import os
12
+ import re
13
+ import shutil
14
+ import signal
15
+ import subprocess
16
+ import sys
17
+ import tempfile
18
+ import time
19
+ from contextlib import nullcontext
20
+ from dataclasses import dataclass, field
21
+ from enum import IntFlag
22
+ from multiprocessing import synchronize
23
+ from types import FrameType
24
+ from typing import Any, Callable, Dict, Optional, Set, Tuple, Union
25
+ from abc import ABC, abstractmethod
26
+
27
+ import torch.multiprocessing as mp
28
+ from torch.distributed.elastic.multiprocessing.errors import ProcessFailure, record
29
+ from torch.distributed.elastic.multiprocessing.redirects import (
30
+ redirect_stderr,
31
+ redirect_stdout,
32
+ )
33
+
34
+ from torch.distributed.elastic.multiprocessing.subprocess_handler import SubprocessHandler, get_subprocess_handler
35
+ from torch.distributed.elastic.multiprocessing.tail_log import TailLog
36
+
37
+ IS_WINDOWS = sys.platform == "win32"
38
+ IS_MACOS = sys.platform == "darwin"
39
+
40
+
41
+ log = logging.getLogger(__name__)
42
+
43
+ __all__ = [
44
+ "DefaultLogsSpecs",
45
+ "SignalException",
46
+ "Std",
47
+ "to_map",
48
+ "RunProcsResult",
49
+ "PContext",
50
+ "get_std_cm",
51
+ "MultiprocessContext",
52
+ "SubprocessContext",
53
+ ]
54
+
55
+ class SignalException(Exception):
56
+ """
57
+ Exception is raised inside the torchelastic agent process by the termination handler
58
+ if the death signal got received by the process.
59
+ """
60
+
61
+ def __init__(self, msg: str, sigval: signal.Signals) -> None:
62
+ super().__init__(msg)
63
+ self.sigval = sigval
64
+
65
+
66
+ def _terminate_process_handler(signum: int, frame: Optional[FrameType]) -> None:
67
+ """Termination handler that raises exceptions on the main process.
68
+
69
+ When the process receives a death signal (SIGTERM, SIGINT), this termination handler will
70
+ be invoked. It raises the ``SignalException`` exception that should be processed by the
71
+ user code. Python does not terminate the process after the termination handler finishes,
72
+ so the exception should not be silently ignored, otherwise the process will never
73
+ be terminated.
74
+ """
75
+ sigval = signal.Signals(signum)
76
+ raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval)
77
+
78
+
79
+ def _get_kill_signal() -> signal.Signals:
80
+ """Get the kill signal. SIGKILL for unix, CTRL_C_EVENT for windows."""
81
+ if IS_WINDOWS:
82
+ return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821
83
+ else:
84
+ return signal.SIGKILL
85
+
86
+
87
+ def _get_default_signal() -> signal.Signals:
88
+ """Get the default termination signal. SIGTERM for unix, CTRL_C_EVENT for windows."""
89
+ if IS_WINDOWS:
90
+ return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821
91
+ else:
92
+ return signal.SIGTERM
93
+
94
+
95
+ def _validate_full_rank(d: Dict[int, Any], nprocs: int, what: str):
96
+ actual_keys = set(d.keys())
97
+ expected_keys = set(range(nprocs))
98
+
99
+ if actual_keys != expected_keys:
100
+ raise RuntimeError(
101
+ f"{what}, local rank mapping mismatch,"
102
+ f" expected: {expected_keys}, actual: {actual_keys}"
103
+ )
104
+
105
+
106
+ _MAPPING_REGEX = r"^(\d:[0123],)*(\d:[0123])$"
107
+ _VALUE_REGEX = r"^[0123]$"
108
+
109
+
110
+ class Std(IntFlag):
111
+ NONE = 0
112
+ OUT = 1
113
+ ERR = 2
114
+ ALL = OUT | ERR
115
+
116
+ @classmethod
117
+ def from_str(cls, vm: str) -> Union["Std", Dict[int, "Std"]]:
118
+ """
119
+ Example:
120
+ ::
121
+
122
+ from_str("0") -> Std.NONE
123
+ from_str("1") -> Std.OUT
124
+ from_str("0:3,1:0,2:1,3:2") -> {0: Std.ALL, 1: Std.NONE, 2: Std.OUT, 3: Std.ERR}
125
+
126
+ Any other input raises an exception
127
+ """
128
+
129
+ def to_std(v: str) -> Std: # type: ignore[return]
130
+ s = Std(int(v))
131
+ if s in Std:
132
+ return s
133
+ # return None -> should NEVER reach here since we regex check input
134
+
135
+ if re.match(_VALUE_REGEX, vm): # vm is a number (e.g. 0)
136
+ return to_std(vm)
137
+ elif re.match(_MAPPING_REGEX, vm): # vm is a mapping (e.g. 0:1,1:2)
138
+ d: Dict[int, Std] = {}
139
+ for m in vm.split(","):
140
+ i, v = m.split(":")
141
+ d[int(i)] = to_std(v)
142
+ return d
143
+ else:
144
+ raise ValueError(
145
+ f"{vm} does not match: <{_VALUE_REGEX}> or <{_MAPPING_REGEX}>"
146
+ )
147
+
148
+
149
+ def to_map(
150
+ val_or_map: Union[Std, Dict[int, Std]], local_world_size: int
151
+ ) -> Dict[int, Std]:
152
+ """
153
+ Certain APIs take redirect settings either as a single value (e.g. apply to all
154
+ local ranks) or as an explicit user-provided mapping. This method is a convenience
155
+ method that converts a value or mapping into a mapping.
156
+
157
+ Example:
158
+ ::
159
+
160
+ to_map(Std.OUT, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT}
161
+ to_map({1: Std.OUT}, local_world_size=2) # returns: {0: Std.NONE, 1: Std.OUT}
162
+ to_map({0: Std.OUT, 1: Std.OUT}, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT}
163
+ """
164
+ if isinstance(val_or_map, Std):
165
+ return dict.fromkeys(range(local_world_size), val_or_map)
166
+ else:
167
+ map = {}
168
+ for i in range(local_world_size):
169
+ map[i] = val_or_map.get(i, Std.NONE)
170
+ return map
171
+
172
+
173
+ @dataclass
174
+ class LogsDest:
175
+ """
176
+ For each log type, holds mapping of local rank ids to file paths.
177
+ """
178
+ stdouts: Dict[int, str] = field(default_factory=dict)
179
+ stderrs: Dict[int, str] = field(default_factory=dict)
180
+ tee_stdouts: Dict[int, str] = field(default_factory=dict)
181
+ tee_stderrs: Dict[int, str] = field(default_factory=dict)
182
+ error_files: Dict[int, str] = field(default_factory=dict)
183
+
184
+
185
+ class LogsSpecs(ABC):
186
+ """
187
+ Defines logs processing and redirection for each worker process.
188
+
189
+ Args:
190
+ log_dir:
191
+ Base directory where logs will be written.
192
+ redirects:
193
+ Streams to redirect to files. Pass a single ``Std``
194
+ enum to redirect for all workers, or a mapping keyed
195
+ by local_rank to selectively redirect.
196
+ tee:
197
+ Streams to duplicate to stdout/stderr.
198
+ Pass a single ``Std`` enum to duplicate streams for all workers,
199
+ or a mapping keyed by local_rank to selectively duplicate.
200
+ """
201
+
202
+ def __init__(
203
+ self,
204
+ log_dir: Optional[str] = None,
205
+ redirects: Union[Std, Dict[int, Std]] = Std.NONE,
206
+ tee: Union[Std, Dict[int, Std]] = Std.NONE,
207
+ local_ranks_filter: Optional[Set[int]] = None,
208
+ ) -> None:
209
+ self._root_log_dir = log_dir
210
+ self._redirects = redirects
211
+ self._tee = tee
212
+ self._local_ranks_filter = local_ranks_filter
213
+
214
+ @abstractmethod
215
+ def reify(self, envs: Dict[int, Dict[str, str]],) -> LogsDest:
216
+ """
217
+ Given the environment variables, builds destination of log files for each of the local ranks.
218
+
219
+ Envs parameter contains env variables dict for each of the local ranks, where entries are defined in:
220
+ :func:`~torchelastic.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent._start_workers`.
221
+ """
222
+ pass
223
+
224
+ @property
225
+ @abstractmethod
226
+ def root_log_dir(self) -> str:
227
+ pass
228
+
229
+ class DefaultLogsSpecs(LogsSpecs):
230
+ """
231
+ Default LogsSpecs implementation:
232
+
233
+ - `log_dir` will be created if it doesn't exist
234
+ - Generates nested folders for each attempt and rank.
235
+ """
236
+ def __init__(
237
+ self,
238
+ log_dir: Optional[str] = None,
239
+ redirects: Union[Std, Dict[int, Std]] = Std.NONE,
240
+ tee: Union[Std, Dict[int, Std]] = Std.NONE,
241
+ local_ranks_filter: Optional[Set[int]] = None,
242
+ ) -> None:
243
+ if log_dir != os.devnull:
244
+ if not log_dir:
245
+ log_dir = tempfile.mkdtemp(prefix="torchelastic_")
246
+ elif not os.path.exists(log_dir):
247
+ os.makedirs(log_dir)
248
+ else:
249
+ if os.path.isfile(log_dir):
250
+ raise NotADirectoryError(f"log_dir: {log_dir} is a file")
251
+ super().__init__(log_dir, redirects, tee, local_ranks_filter)
252
+ # initialized only once
253
+ self._run_log_dir = None
254
+
255
+ @property
256
+ def root_log_dir(self) -> str:
257
+ return str(self._root_log_dir)
258
+
259
+ def _make_log_dir(self, log_dir: Optional[str], rdzv_run_id: str):
260
+ base_log_dir = log_dir or tempfile.mkdtemp(prefix="torchelastic_")
261
+ os.makedirs(base_log_dir, exist_ok=True)
262
+ dir = tempfile.mkdtemp(prefix=f"{rdzv_run_id}_", dir=base_log_dir)
263
+ log.info("log directory set to: %s", dir)
264
+ return dir
265
+
266
+ def reify(self, envs: Dict[int, Dict[str, str]],) -> LogsDest:
267
+ """
268
+ Uses following scheme to build log destination paths:
269
+
270
+ - `<log_dir>/<rdzv_run_id>/attempt_<attempt>/<rank>/stdout.log`
271
+ - `<log_dir>/<rdzv_run_id>/attempt_<attempt>/<rank>/stderr.log`
272
+ - `<log_dir>/<rdzv_run_id>/attempt_<attempt>/<rank>/error.json`
273
+ """
274
+ nprocs = len(envs)
275
+ global_env = {} # used only to query properties that are not dependent on a rank
276
+ if nprocs > 0:
277
+ global_env = envs[0]
278
+ else:
279
+ log.warning("Empty envs map provided when defining logging destinations.")
280
+ # Keys are always defined, but values can be missing in unit tests
281
+ run_id = global_env.get("TORCHELASTIC_RUN_ID", "test_run_id")
282
+ restart_count = global_env.get("TORCHELASTIC_RESTART_COUNT", "0")
283
+
284
+ attempt_log_dir: str = ""
285
+ if self._root_log_dir != os.devnull:
286
+ if not self._run_log_dir:
287
+ self._run_log_dir = self._make_log_dir(self._root_log_dir, run_id)
288
+
289
+ attempt_log_dir = os.path.join(self._run_log_dir, f"attempt_{restart_count}") # type: ignore[call-overload]
290
+ shutil.rmtree(attempt_log_dir, ignore_errors=True)
291
+ os.makedirs(attempt_log_dir)
292
+
293
+ if self._root_log_dir == os.devnull:
294
+ attempt_log_dir = os.devnull
295
+
296
+ # create subdirs for each local rank in the logs_dir
297
+ # logs_dir
298
+ # |- 0
299
+ # |- error.json
300
+ # |- stdout.log
301
+ # |- stderr.log
302
+ # |- ...
303
+ # |- (nprocs-1)
304
+ redirs = to_map(self._redirects, nprocs)
305
+ ts = to_map(self._tee, nprocs)
306
+
307
+ # to tee stdout/stderr we first redirect into a file
308
+ # then tail -f stdout.log/stderr.log so add tee settings to redirects
309
+ for local_rank, tee_std in ts.items():
310
+ redirect_std = redirs[local_rank]
311
+ redirs[local_rank] = redirect_std | tee_std
312
+
313
+ SYS_STREAM = "" # special case to indicate to output to console
314
+ stdouts = dict.fromkeys(range(nprocs), SYS_STREAM)
315
+ stderrs = dict.fromkeys(range(nprocs), SYS_STREAM)
316
+ tee_stdouts: Dict[int, str] = {}
317
+ tee_stderrs: Dict[int, str] = {}
318
+ error_files = {}
319
+
320
+ for local_rank in range(nprocs):
321
+
322
+ if attempt_log_dir == os.devnull:
323
+ tee_stdouts[local_rank] = os.devnull
324
+ tee_stderrs[local_rank] = os.devnull
325
+ error_files[local_rank] = os.devnull
326
+ envs[local_rank]["TORCHELASTIC_ERROR_FILE"] = ""
327
+ else:
328
+ clogdir = os.path.join(attempt_log_dir, str(local_rank))
329
+ os.mkdir(clogdir)
330
+
331
+ rd = redirs[local_rank]
332
+ if (rd & Std.OUT) == Std.OUT:
333
+ stdouts[local_rank] = os.path.join(clogdir, "stdout.log")
334
+ if (rd & Std.ERR) == Std.ERR:
335
+ stderrs[local_rank] = os.path.join(clogdir, "stderr.log")
336
+
337
+ t = ts[local_rank]
338
+ if t & Std.OUT == Std.OUT:
339
+ tee_stdouts[local_rank] = stdouts[local_rank]
340
+ if t & Std.ERR == Std.ERR:
341
+ tee_stderrs[local_rank] = stderrs[local_rank]
342
+
343
+ if self._local_ranks_filter and local_rank not in self._local_ranks_filter:
344
+ # If stream is tee'd, only write to file, but don't tail
345
+ if local_rank in tee_stdouts:
346
+ tee_stdouts.pop(local_rank, None)
347
+ if local_rank in tee_stderrs:
348
+ tee_stderrs.pop(local_rank, None)
349
+
350
+ # If stream is not redirected, don't print
351
+ if stdouts[local_rank] == SYS_STREAM:
352
+ stdouts[local_rank] = os.devnull
353
+ if stderrs[local_rank] == SYS_STREAM:
354
+ stderrs[local_rank] = os.devnull
355
+
356
+ error_file = os.path.join(clogdir, "error.json")
357
+ error_files[local_rank] = error_file
358
+ log.info("Setting worker%s reply file to: %s", local_rank, error_file)
359
+ envs[local_rank]["TORCHELASTIC_ERROR_FILE"] = error_file
360
+
361
+ return LogsDest(stdouts, stderrs, tee_stdouts, tee_stderrs, error_files)
362
+
363
+ def __repr__(self) -> str:
364
+ return (
365
+ f"DefaultLogsSpecs(root_log_dir={self._root_log_dir}, redirects={self._redirects}, "
366
+ f"tee={self._tee}, local_ranks_filter={self._local_ranks_filter})"
367
+ )
368
+
369
+ def __eq__(self, other: object) -> bool:
370
+ if not isinstance(other, DefaultLogsSpecs):
371
+ return False
372
+
373
+ return (
374
+ self._root_log_dir == other._root_log_dir
375
+ and self._redirects == other._redirects
376
+ and self._tee == other._tee
377
+ and self._local_ranks_filter == other._local_ranks_filter
378
+ )
379
+
380
+
381
+ @dataclass
382
+ class RunProcsResult:
383
+ """
384
+ Results of a completed run of processes started with ``start_processes()``. Returned by ``PContext``.
385
+
386
+ Note the following:
387
+
388
+ 1. All fields are mapped by local rank
389
+ 2. ``return_values`` - only populated for functions (not the binaries).
390
+ 3. ``stdouts`` - path to stdout.log (empty string if no redirect)
391
+ 4. ``stderrs`` - path to stderr.log (empty string if no redirect)
392
+
393
+ """
394
+
395
+ return_values: Dict[int, Any] = field(default_factory=dict)
396
+ failures: Dict[int, ProcessFailure] = field(default_factory=dict)
397
+ stdouts: Dict[int, str] = field(default_factory=dict)
398
+ stderrs: Dict[int, str] = field(default_factory=dict)
399
+
400
+ def is_failed(self) -> bool:
401
+ return len(self.failures) > 0
402
+
403
+
404
+ class PContext(abc.ABC):
405
+ """
406
+ The base class that standardizes operations over a set of processes that are launched via different mechanisms.
407
+
408
+ The name ``PContext`` is intentional to disambiguate with ``torch.multiprocessing.ProcessContext``.
409
+
410
+ .. warning:: stdouts and stderrs should ALWAYS be a superset of
411
+ tee_stdouts and tee_stderrs (respectively); this is because
412
+ tee is implemented as a redirect + tail -f <stdout/stderr.log>
413
+ """
414
+
415
+ def __init__(
416
+ self,
417
+ name: str,
418
+ entrypoint: Union[Callable, str],
419
+ args: Dict[int, Tuple],
420
+ envs: Dict[int, Dict[str, str]],
421
+ logs_specs: LogsSpecs,
422
+ log_line_prefixes: Optional[Dict[int, str]] = None,
423
+
424
+ ):
425
+ self.name = name
426
+ # validate that all mappings have the same number of keys and
427
+ # all local ranks are accounted for
428
+ nprocs = len(args)
429
+
430
+ # TODO log_line_prefixes can be expanded too
431
+ logs_dest = logs_specs.reify(envs)
432
+
433
+ _validate_full_rank(logs_dest.stdouts, nprocs, "stdouts")
434
+ _validate_full_rank(logs_dest.stderrs, nprocs, "stderrs")
435
+
436
+ self.entrypoint = entrypoint
437
+ self.args = args
438
+ self.envs = envs
439
+ self.stdouts = logs_dest.stdouts
440
+ self.stderrs = logs_dest.stderrs
441
+ self.error_files = logs_dest.error_files
442
+ self.nprocs = nprocs
443
+
444
+ self._stdout_tail = TailLog(name, logs_dest.tee_stdouts, sys.stdout, log_line_prefixes)
445
+ self._stderr_tail = TailLog(name, logs_dest.tee_stderrs, sys.stderr, log_line_prefixes)
446
+
447
+ def start(self) -> None:
448
+ """Start processes using parameters defined in the constructor."""
449
+ signal.signal(signal.SIGTERM, _terminate_process_handler)
450
+ signal.signal(signal.SIGINT, _terminate_process_handler)
451
+ if not IS_WINDOWS:
452
+ signal.signal(signal.SIGHUP, _terminate_process_handler)
453
+ signal.signal(signal.SIGQUIT, _terminate_process_handler)
454
+ self._start()
455
+ self._stdout_tail.start()
456
+ self._stderr_tail.start()
457
+
458
+ @abc.abstractmethod
459
+ def _start(self) -> None:
460
+ """Start processes using strategy defined in a particular context."""
461
+ raise NotImplementedError()
462
+
463
+ @abc.abstractmethod
464
+ def _poll(self) -> Optional[RunProcsResult]:
465
+ """
466
+ Poll the run status of the processes running under this context.
467
+ This method follows an "all-or-nothing" policy and returns
468
+ a ``RunProcsResult`` object if either all processes complete
469
+ successfully or any process fails. Returns ``None`` if
470
+ all processes are still running.
471
+ """
472
+ raise NotImplementedError()
473
+
474
+ def wait(self, timeout: float = -1, period: float = 1) -> Optional[RunProcsResult]:
475
+ """
476
+ Wait for the specified ``timeout`` seconds, polling every ``period`` seconds
477
+ for the processes to be done. Returns ``None`` if the processes are still running
478
+ on timeout expiry. Negative timeout values are interpreted as "wait-forever".
479
+ A timeout value of zero simply queries the status of the processes (e.g. equivalent
480
+ to a poll).
481
+
482
+ .. note:: The multiprocessing library registers SIGTERM and SIGINT signal handlers that raise
483
+ ``SignalException`` when the signals are received. It is up to the consumer of the code
484
+ to properly handle the exception. It is important not to swallow the exception, otherwise
485
+ the process would not terminate. A typical workflow can look like:
486
+
487
+ .. code-block:: python
488
+ pc = start_processes(...)
489
+ try:
490
+ pc.wait(1)
491
+ .. do some other work
492
+ except SignalException as e:
493
+ pc.shutdown(e.sigval, timeout=30)
494
+
495
+ If SIGTERM or SIGINT occurs, the code above will try to shut down the child processes by propagating
496
+ the received signal. If the child processes do not terminate within the timeout, the process will send
497
+ SIGKILL.
498
+ """
499
+ if timeout == 0:
500
+ return self._poll()
501
+
502
+ if timeout < 0:
503
+ timeout = sys.maxsize
504
+
505
+ expiry = time.time() + timeout
506
+ while time.time() < expiry:
507
+ pr = self._poll()
508
+ if pr:
509
+ return pr
510
+ time.sleep(period)
511
+
512
+ return None
513
+
514
+ @abc.abstractmethod
515
+ def pids(self) -> Dict[int, int]:
516
+ """Return pids of processes mapped by their respective local_ranks."""
517
+ raise NotImplementedError()
518
+
519
+ @abc.abstractmethod
520
+ def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
521
+ r"""
522
+ Terminates all processes managed by this context and cleans up any
523
+ meta resources (e.g. redirect, error_file files).
524
+ """
525
+ raise NotImplementedError()
526
+
527
+ def close(
528
+ self, death_sig: Optional[signal.Signals] = None, timeout: int = 30
529
+ ) -> None:
530
+ r"""
531
+ Terminates all processes managed by this context and cleans up any
532
+ meta resources (e.g. redirect, error_file files).
533
+
534
+ Args:
535
+ death_sig: Death signal to terminate processes.
536
+ timeout: Time to wait for processes to finish, if process is
537
+ still alive after this time, it will be terminated via SIGKILL.
538
+ """
539
+ if not death_sig:
540
+ death_sig = _get_default_signal()
541
+ self._close(death_sig=death_sig, timeout=timeout)
542
+ if self._stdout_tail:
543
+ self._stdout_tail.stop()
544
+ if self._stderr_tail:
545
+ self._stderr_tail.stop()
546
+
547
+
548
+ def get_std_cm(std_rd: str, redirect_fn):
549
+ if IS_WINDOWS or IS_MACOS or not std_rd:
550
+ return nullcontext()
551
+ else:
552
+ return redirect_fn(std_rd)
553
+
554
+
555
+ def _wrap(
556
+ local_rank: int,
557
+ fn: Callable,
558
+ args: Dict[int, Tuple],
559
+ envs: Dict[int, Dict[str, str]],
560
+ stdout_redirects: Dict[int, str], # redirect file for stdout (to console if None)
561
+ stderr_redirects: Dict[int, str], # redirect file for stderr (to console if None)
562
+ ret_vals: Dict[int, mp.SimpleQueue],
563
+ queue_finished_reading_event: synchronize.Event,
564
+ ) -> None:
565
+ # get the per-rank params up front so we fail fast if no mapping is found
566
+ args_ = args[local_rank]
567
+ env_ = envs[local_rank]
568
+ ret_val_ = ret_vals[local_rank]
569
+
570
+ stdout_rd = stdout_redirects[local_rank]
571
+ stderr_rd = stderr_redirects[local_rank]
572
+
573
+ stdout_cm = get_std_cm(stdout_rd, redirect_stdout)
574
+ stderr_cm = get_std_cm(stderr_rd, redirect_stderr)
575
+
576
+ for k, v in env_.items():
577
+ os.environ[k] = v
578
+
579
+ with stdout_cm, stderr_cm:
580
+ ret = record(fn)(*args_)
581
+ ret_val_.put(ret)
582
+ queue_finished_reading_event.wait()
583
+
584
+
585
+ class MultiprocessContext(PContext):
586
+ """``PContext`` holding worker processes invoked as a function."""
587
+
588
+ def __init__(
589
+ self,
590
+ name: str,
591
+ entrypoint: Callable,
592
+ args: Dict[int, Tuple],
593
+ envs: Dict[int, Dict[str, str]],
594
+ start_method: str,
595
+ logs_specs: LogsSpecs,
596
+ log_line_prefixes: Optional[Dict[int, str]] = None,
597
+ ):
598
+ super().__init__(
599
+ name,
600
+ entrypoint,
601
+ args,
602
+ envs,
603
+ logs_specs,
604
+ log_line_prefixes,
605
+ )
606
+
607
+ self.start_method = start_method
608
+ # each ret_val queue will always contain a single element.
609
+ self._ret_vals = {
610
+ local_rank: mp.get_context(self.start_method).SimpleQueue()
611
+ for local_rank in range(self.nprocs)
612
+ }
613
+
614
+ # see comments in ``join()`` for what this is
615
+ self._return_values: Dict[int, Any] = {}
616
+ self._pc: Optional[mp.ProcessContext] = None
617
+ # Note: the set() method should ONLY be invoked for the use case when all processes finished
618
+ # successfully. If any process died on event.wait(), calling the set() method will deadlock.
619
+ self._worker_finished_event = mp.get_context(self.start_method).Event()
620
+
621
+ def _start(self):
622
+ if self._pc:
623
+ raise ValueError(
624
+ "The process context already initialized."
625
+ " Most likely the start method got called twice."
626
+ )
627
+ self._pc = mp.start_processes(
628
+ fn=_wrap,
629
+ args=(
630
+ self.entrypoint,
631
+ self.args,
632
+ self.envs,
633
+ self.stdouts,
634
+ self.stderrs,
635
+ self._ret_vals,
636
+ self._worker_finished_event,
637
+ ),
638
+ nprocs=self.nprocs,
639
+ join=False,
640
+ daemon=False,
641
+ start_method=self.start_method,
642
+ )
643
+
644
+ def _is_done(self) -> bool:
645
+ return len(self._return_values) == self.nprocs
646
+
647
+ def _poll(self) -> Optional[RunProcsResult]:
648
+ assert self._pc is not None # assertion for mypy type checker
649
+
650
+ try:
651
+ # torch.mp.ProcessContext throws an exception if some/all of
652
+ # the worker processes failed
653
+ # timeout < 0 checks worker status and returns immediately
654
+ # Join will never return success since we use synchronize.Event to wait
655
+ # for all processes to finish.
656
+ self._pc.join(-1)
657
+
658
+ # IMPORTANT: we use multiprocessing.Queue to carry worker return values
659
+ # back to the parent, the worker process will wait before terminating
660
+ # until all the buffered items are fed by the feeder thread to the underlying
661
+ # pipe. Hence to prevent deadlocks on large return values,
662
+ # we opportunistically try queue.get on each join call
663
+ # See: https://docs.python.org/2/library/multiprocessing.html#all-platforms
664
+ for local_rank in range(0, self.nprocs):
665
+ return_queue = self._ret_vals[local_rank]
666
+ if not return_queue.empty():
667
+ # save the return values temporarily into a member var
668
+ self._return_values[local_rank] = return_queue.get()
669
+
670
+ if self._is_done():
671
+ # we should ALWAYS have ALL the return values when all the processes are done
672
+ self._worker_finished_event.set()
673
+ # Wait until all processes are finished. At this point the workers have finished
674
+ # executing the user function
675
+ self._pc.join()
676
+ _validate_full_rank(
677
+ self._return_values, self.nprocs, "return_value queue"
678
+ )
679
+ self.close()
680
+ return RunProcsResult(
681
+ return_values=self._return_values,
682
+ stdouts=self.stdouts,
683
+ stderrs=self.stderrs,
684
+ )
685
+ else:
686
+ return None
687
+ except (mp.ProcessRaisedException, mp.ProcessExitedException) as e:
688
+ failed_local_rank = e.error_index
689
+
690
+ # entrypoint for MultiprocessContext will always be a Callable
691
+ fn_name = self.entrypoint.__qualname__ # type: ignore[union-attr]
692
+ failed_proc = self._pc.processes[failed_local_rank]
693
+ error_filepath = self.error_files[failed_local_rank]
694
+
695
+ log.exception(
696
+ "failed (exitcode: %s)"
697
+ " local_rank: %s (pid: %s)"
698
+ " of fn: %s (start_method: %s)",
699
+ failed_proc.exitcode,
700
+ failed_local_rank, e.pid,
701
+ fn_name, self.start_method,
702
+ )
703
+
704
+ self.close()
705
+ return RunProcsResult(
706
+ failures={
707
+ failed_local_rank: ProcessFailure(
708
+ local_rank=failed_local_rank,
709
+ pid=e.pid,
710
+ exitcode=failed_proc.exitcode,
711
+ error_file=error_filepath,
712
+ )
713
+ },
714
+ stdouts=self.stdouts,
715
+ stderrs=self.stderrs,
716
+ )
717
+
718
+ def pids(self) -> Dict[int, int]:
719
+ assert self._pc is not None # assertion for mypy type checking
720
+ return dict(enumerate(self._pc.pids()))
721
+
722
+ def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
723
+ if not self._pc:
724
+ return
725
+ for proc in self._pc.processes:
726
+ if proc.is_alive():
727
+ log.warning("Closing process %s via signal %s", proc.pid, death_sig.name)
728
+ try:
729
+ os.kill(proc.pid, death_sig)
730
+ except ProcessLookupError:
731
+ # If the process already exited for some reason,
732
+ # `ProcessLookupError` will be raised; it is safe to ignore it.
733
+ pass
734
+ end = time.monotonic() + timeout
735
+ for proc in self._pc.processes:
736
+ time_to_wait = end - time.monotonic()
737
+ if time_to_wait <= 0:
738
+ break
739
+ proc.join(time_to_wait)
740
+ for proc in self._pc.processes:
741
+ if proc.is_alive():
742
+ log.warning(
743
+ "Unable to shutdown process %s via %s, forcefully exiting via %s",
744
+ proc.pid, death_sig, _get_kill_signal()
745
+ )
746
+ try:
747
+ os.kill(proc.pid, _get_kill_signal())
748
+ except ProcessLookupError:
749
+ # If the process already exited for some reason,
750
+ # `ProcessLookupError` will be raised; it is safe to ignore it.
751
+ pass
752
+ proc.join()
753
+
754
+ class SubprocessContext(PContext):
755
+ """``PContext`` holding worker processes invoked as a binary."""
756
+
757
+ def __init__(
758
+ self,
759
+ name: str,
760
+ entrypoint: str,
761
+ args: Dict[int, Tuple],
762
+ envs: Dict[int, Dict[str, str]],
763
+ logs_specs: LogsSpecs,
764
+ log_line_prefixes: Optional[Dict[int, str]] = None,
765
+
766
+ ):
767
+ super().__init__(
768
+ name,
769
+ entrypoint,
770
+ args,
771
+ envs,
772
+ logs_specs,
773
+ log_line_prefixes,
774
+ )
775
+
776
+ # state vector; local ranks still in _running_local_ranks have not finished yet
777
+ self._running_local_ranks: Set[int] = set(range(self.nprocs))
778
+ self._failures: Dict[int, ProcessFailure] = {}
779
+ self.subprocess_handlers: Dict[int, SubprocessHandler] = {}
780
+
781
+ def _start(self):
782
+ if self.subprocess_handlers:
783
+ raise ValueError(
784
+ "The subprocess handlers already initialized. Most likely the start method got called twice."
785
+ )
786
+ self.subprocess_handlers = {
787
+ local_rank: get_subprocess_handler(
788
+ entrypoint=self.entrypoint, # type: ignore[arg-type] # entrypoint is always a str
789
+ args=self.args[local_rank],
790
+ env=self.envs[local_rank],
791
+ stdout=self.stdouts[local_rank],
792
+ stderr=self.stderrs[local_rank],
793
+ local_rank_id=local_rank,
794
+ )
795
+ for local_rank in range(self.nprocs)
796
+ }
797
+
798
+ def _poll(self) -> Optional[RunProcsResult]:
799
+ done_local_ranks = set()
800
+ for local_rank in self._running_local_ranks:
801
+ handler = self.subprocess_handlers[local_rank]
802
+ exitcode = handler.proc.poll()
803
+ if exitcode is not None:
804
+ done_local_ranks.add(local_rank)
805
+ if exitcode != 0: # failed or signaled
806
+ self._failures[local_rank] = ProcessFailure(
807
+ local_rank=local_rank,
808
+ pid=handler.proc.pid,
809
+ exitcode=exitcode,
810
+ error_file=self.error_files[local_rank],
811
+ )
812
+ # else: --> succeeded; nothing to do
813
+
814
+ self._running_local_ranks.difference_update(done_local_ranks)
815
+
816
+ # if ALL procs are finished or ANY have failed
817
+ if not self._running_local_ranks or self._failures:
818
+ self.close() # terminate all running procs
819
+ result = RunProcsResult(
820
+ failures=self._failures,
821
+ stdouts=self.stdouts,
822
+ stderrs=self.stderrs,
823
+ )
824
+ if result.is_failed():
825
+ first_failure = min(result.failures.values(), key=lambda f: f.timestamp)
826
+ log.error(
827
+ "failed (exitcode: %s)"
828
+ " local_rank: %s (pid: %s)"
829
+ " of binary: %s",
830
+ first_failure.exitcode, first_failure.local_rank, first_failure.pid, self.entrypoint
831
+ )
832
+ else:
833
+ # Populate return_values with dummy (None) values. This provides consistency with MultiprocessContext
834
+ result.return_values = dict.fromkeys(range(self.nprocs))
835
+
836
+ return result
837
+ else: # there are no failures and procs still running
838
+ return None
839
+
840
+ def pids(self) -> Dict[int, int]:
841
+ return {
842
+ local_rank: sh.proc.pid
843
+ for local_rank, sh in self.subprocess_handlers.items()
844
+ }
845
+
846
+ def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
847
+ if not self.subprocess_handlers:
848
+ return
849
+ for handler in self.subprocess_handlers.values():
850
+ if handler.proc.poll() is None:
851
+ log.warning(
852
+ "Sending process %s closing signal %s", handler.proc.pid, death_sig.name
853
+ )
854
+ handler.close(death_sig=death_sig)
855
+ end = time.monotonic() + timeout
856
+ for handler in self.subprocess_handlers.values():
857
+ time_to_wait = end - time.monotonic()
858
+ if time_to_wait <= 0:
859
+ break
860
+ try:
861
+ handler.proc.wait(time_to_wait)
862
+ except subprocess.TimeoutExpired:
863
+ # Ignore the timeout expired exception, since
864
+ # the child process will be forcefully terminated via SIGKILL
865
+ pass
866
+ for handler in self.subprocess_handlers.values():
867
+ if handler.proc.poll() is None:
868
+ log.warning(
869
+ "Unable to shutdown process %s via %s, forcefully exiting via %s",
870
+ handler.proc.pid, death_sig, _get_kill_signal()
871
+ )
872
+ handler.close(death_sig=_get_kill_signal())
873
+ handler.proc.wait()
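The ``wait``/``SignalException`` contract documented on ``PContext.wait`` above translates into a fairly standard driver loop; a sketch, assuming ``ctx`` is a ``PContext`` returned by ``start_processes``::

    from torch.distributed.elastic.multiprocessing import SignalException

    try:
        result = None
        while result is None:
            # poll once per second; a RunProcsResult is returned once all
            # workers succeeded or any worker failed
            result = ctx.wait(timeout=1, period=1)
        if result.is_failed():
            print(result.failures)  # {local_rank: ProcessFailure, ...}
    except SignalException as e:
        # SIGTERM/SIGINT was turned into an exception by the handlers that
        # PContext.start() registered; forward the signal to the workers.
        ctx.close(e.sigval, timeout=30)
        raise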
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py ADDED
@@ -0,0 +1,153 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ import logging
10
+ import os
11
+ import time
12
+ from concurrent.futures._base import Future
13
+ from concurrent.futures.thread import ThreadPoolExecutor
14
+ from threading import Event
15
+ from typing import Dict, List, Optional, TextIO
16
+
17
+ __all__ = ["tail_logfile", "TailLog"]
18
+
19
+ log = logging.getLogger(__name__)
20
+
21
+
22
+ def tail_logfile(
23
+ header: str, file: str, dst: TextIO, finished: Event, interval_sec: float
24
+ ):
25
+
26
+ while not os.path.exists(file):
27
+ if finished.is_set():
28
+ return
29
+ time.sleep(interval_sec)
30
+
31
+ with open(file, errors="replace") as fp:
32
+ while True:
33
+ line = fp.readline()
34
+
35
+ if line:
36
+ dst.write(f"{header}{line}")
37
+ else: # reached EOF
38
+ if finished.is_set():
39
+ # log line producer is finished
40
+ break
41
+ else:
42
+ # log line producer is still going
43
+ # wait for a bit before looping again
44
+ time.sleep(interval_sec)
45
+
46
+
47
+ class TailLog:
48
+ """
49
+ Tail the given log files.
50
+
51
+ The log files do not have to exist when the ``start()`` method is called. The tailer will gracefully wait until
52
+ the log files are created by the producer and will tail the contents of the
53
+ log files until the ``stop()`` method is called.
54
+
55
+ .. warning:: ``TailLog`` will wait indefinitely for the log file to be created!
56
+
57
+ Each log file's line will be prefixed with a header of the form: ``[{name}{idx}]:``,
58
+ where the ``name`` is user-provided and ``idx`` is the index of the log file
59
+ in the ``log_files`` mapping. ``log_line_prefixes`` can be used to override the
60
+ header for each log file.
61
+
62
+ Usage:
63
+
64
+ ::
65
+
66
+ log_files = {0: "/tmp/0_stdout.log", 1: "/tmp/1_stdout.log"}
67
+ tailer = TailLog("trainer", log_files, sys.stdout).start()
68
+ # actually run the trainers to produce 0_stdout.log and 1_stdout.log
69
+ run_trainers()
70
+ tailer.stop()
71
+
72
+ # once run_trainers() start writing the ##_stdout.log files
73
+ # the tailer will print to sys.stdout:
74
+ # >>> [trainer0]:log_line1
75
+ # >>> [trainer1]:log_line1
76
+ # >>> [trainer0]:log_line2
77
+ # >>> [trainer0]:log_line3
78
+ # >>> [trainer1]:log_line2
79
+
80
+ .. note:: Due to buffering, log lines across files may not necessarily
81
+ be printed out in order. You should configure your application's
82
+ logger to include a timestamp in each log line.
83
+
84
+ """
85
+
86
+ def __init__(
87
+ self,
88
+ name: str,
89
+ log_files: Dict[int, str],
90
+ dst: TextIO,
91
+ log_line_prefixes: Optional[Dict[int, str]] = None,
92
+ interval_sec: float = 0.1,
93
+ ):
94
+ n = len(log_files)
95
+ self._threadpool = None
96
+ if n > 0:
97
+ self._threadpool = ThreadPoolExecutor(
98
+ max_workers=n,
99
+ thread_name_prefix=f"{self.__class__.__qualname__}_{name}",
100
+ )
101
+
102
+ self._name = name
103
+ self._dst = dst
104
+ self._log_files = log_files
105
+ self._log_line_prefixes = log_line_prefixes
106
+ self._finished_events: Dict[int, Event] = {
107
+ local_rank: Event() for local_rank in log_files.keys()
108
+ }
109
+ self._futs: List[Future] = []
110
+ self._interval_sec = interval_sec
111
+ self._stopped = False
112
+
113
+ def start(self) -> "TailLog":
114
+ if not self._threadpool:
115
+ return self
116
+
117
+ for local_rank, file in self._log_files.items():
118
+ header = f"[{self._name}{local_rank}]:"
119
+ if self._log_line_prefixes and local_rank in self._log_line_prefixes:
120
+ header = self._log_line_prefixes[local_rank]
121
+ self._futs.append(
122
+ self._threadpool.submit(
123
+ tail_logfile,
124
+ header=header,
125
+ file=file,
126
+ dst=self._dst,
127
+ finished=self._finished_events[local_rank],
128
+ interval_sec=self._interval_sec,
129
+ )
130
+ )
131
+ return self
132
+
133
+ def stop(self) -> None:
134
+ for finished in self._finished_events.values():
135
+ finished.set()
136
+
137
+ for local_rank, f in enumerate(self._futs):
138
+ try:
139
+ f.result()
140
+ except Exception as e:
141
+ log.error(
142
+ "error in log tailer for %s%s. %s: %s",
143
+ self._name, local_rank,
144
+ e.__class__.__qualname__, e,
145
+ )
146
+
147
+ if self._threadpool:
148
+ self._threadpool.shutdown(wait=True)
149
+
150
+ self._stopped = True
151
+
152
+ def stopped(self) -> bool:
153
+ return self._stopped
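The class docstring shows the default ``[{name}{idx}]:`` headers; a short sketch of overriding them per rank via ``log_line_prefixes`` (file paths and prefixes here are illustrative)::

    import sys

    from torch.distributed.elastic.multiprocessing.tail_log import TailLog

    log_files = {0: "/tmp/0_stdout.log", 1: "/tmp/1_stdout.log"}
    prefixes = {0: "[rank0|trainer]: ", 1: "[rank1|trainer]: "}

    tailer = TailLog("trainer", log_files, sys.stdout, log_line_prefixes=prefixes).start()
    # ... run whatever produces the log files ...
    tailer.stop()  # joins the tail threads and shuts the thread pool down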