applied-ai-018 committed on
Commit
256625a
·
verified ·
1 Parent(s): 127be38

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py +94 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/contract.py +194 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py +2 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_api.py +52 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_collectives.py +217 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py +151 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_init.py +144 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py +438 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param_group.py +506 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_state.py +246 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/fully_shard.py +246 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py +133 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/__init__.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__init__.py +44 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/file_based_local_timer.py +333 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/local_timer.py +125 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/__init__.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/batchnorm.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/checkpoint.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/copy.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/dependency.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/microbatch.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/phony.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipe.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipeline.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/stream.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/utils.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/worker.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__init__.py +164 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/blockpartition.py +95 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/profile.py +116 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/batchnorm.py +159 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/copy.py +108 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipe.py +490 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__init__.py +11 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/__init__.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/layout.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/namespace.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (379 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc ADDED
Binary file (3.37 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc ADDED
Binary file (5.22 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc ADDED
Binary file (3.85 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc ADDED
Binary file (4.14 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py ADDED
@@ -0,0 +1,94 @@
+ from contextlib import contextmanager, nullcontext
+ from typing import Any, Tuple
+
+ import torch
+ import torch.nn as nn
+ from torch.utils.checkpoint import (
+     _checkpoint_without_reentrant_generator,
+     _DEFAULT_DETERMINISM_MODE,
+ )
+
+ from .contract import contract
+
+
+ @contextmanager
+ def _no_hook(module: nn.Module):
+     r"""
+     Disable hooks installed by checkpoint to avoid unintentional recursion
+     during backward recomputation.
+     """
+     orig_enable_hook = checkpoint.state(module).enable_hook
+     checkpoint.state(module).enable_hook = False
+     try:
+         yield
+     finally:
+         checkpoint.state(module).enable_hook = orig_enable_hook
+
+
+ @contract()
+ def checkpoint(module: nn.Module) -> nn.Module:
+     r"""
+     This is a composable activation checkpointing API. Unlike functional
+     activation checkpointing APIs, this one does not require changing model
+     source code. Unlike ``nn.Module`` wrapper activation checkpointing APIs,
+     this one does not modify model structure or fully-qualified names either.
+     Under the hood, it registers activation checkpointing logic as pre- and
+     post-forward hooks. Hence, this API can be easily applied to any model or
+     sub-modules in the model.
+
+     Args:
+         module (nn.Module): the target model or sub-module to apply activation
+             checkpointing.
+
+     Example::
+         >>> # xdoctest: +SKIP
+         >>> import torch.nn as nn
+         >>>
+         >>> class MyModel(nn.Module):
+         >>>     def __init__(self):
+         >>>         super().__init__()
+         >>>         self.l1 = nn.Linear(10, 10)
+         >>>         self.l2 = nn.Linear(10, 10)
+         >>>
+         >>>     def forward(self, x):
+         >>>         return self.l2(self.l1(x))
+         >>>
+         >>> model = MyModel()
+         >>> checkpoint(model.l1)  # apply activation checkpointing only to l1
+         >>> model(torch.zeros(2, 10)).sum().backward()
+
+     """
+     torch._C._log_api_usage_once("torch.distributed.checkpoint")
+
+     def forward_pre_hook(module: nn.Module, inputs: Tuple[Any, ...]) -> None:
+         if checkpoint.state(module).enable_hook:
+
+             def context_fns():
+                 return nullcontext(), _no_hook(module)
+
+             checkpoint.state(
+                 module
+             )._ac_generator = _checkpoint_without_reentrant_generator(
+                 module, True, context_fns, _DEFAULT_DETERMINISM_MODE, False, *inputs
+             )
+             next(checkpoint.state(module)._ac_generator)
+
+     def forward_hook(module: nn.Module, inputs: Tuple[Any, ...], output: Any) -> Any:
+         if checkpoint.state(module).enable_hook:
+             try:
+                 next(checkpoint.state(module)._ac_generator)
+             except StopIteration:
+                 pass
+             else:
+                 raise RuntimeError(
+                     "Expected non-reentrant activation checkpoint generator to be exhausted, but it was not!"
+                 )
+
+         # Ensure that we no longer hold on to the generator. always_call=True helps ensure we
+         # clear this even in the case of exception in fwd pass.
+         checkpoint.state(module)._ac_generator = None
+
+     checkpoint.state(module).enable_hook = True
+     module.register_forward_pre_hook(forward_pre_hook)
+     module.register_forward_hook(forward_hook, prepend=True, always_call=True)
+     return module
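The docstring above already sketches the intended usage; for completeness, here is a minimal, self-contained sketch (not part of the commit) that applies the composable checkpoint API to one submodule and runs a forward/backward pass. It assumes the module path shown in this diff, torch.distributed._composable.checkpoint_activation, is importable.

# Illustrative sketch only: exercises the composable checkpoint API added above.
import torch
import torch.nn as nn

from torch.distributed._composable.checkpoint_activation import checkpoint


class MyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(10, 10)
        self.l2 = nn.Linear(10, 10)

    def forward(self, x):
        return self.l2(self.l1(x))


model = MyModel()
checkpoint(model.l1)  # only l1's activations are recomputed during backward
model(torch.zeros(2, 10)).sum().backward()
assert model.l1.weight.grad is not None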
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/contract.py ADDED
@@ -0,0 +1,194 @@
1
+ import uuid
2
+ from collections import OrderedDict
3
+ from functools import wraps
4
+ from typing import Callable, Dict, List, Optional, Type
5
+
6
+ import torch.nn as nn
7
+ from torch.distributed._composable_state import _State
8
+
9
+
10
+ def generate_state_key(string="__composable_api_state_key"):
11
+ return f"{string}_{str(uuid.uuid4())}"
12
+
13
+
14
+ STATE_KEY = generate_state_key()
15
+ REGISTRY_KEY = generate_state_key()
16
+
17
+
18
+ # TODO: we can add additional info to RegistryItem to share across APIs. E.g.,
19
+ # we can add args and kwargs here, and then we can detect whether fully_shard
20
+ # is combined with reentrant activation checkpointing and error out with a clear
21
+ # message.
22
+ class RegistryItem:
23
+ pass
24
+
25
+
26
+ def contract(state_cls: Type[_State] = _State):
27
+ r"""
28
+ Decorate a function as a composable distributed API, where the first
29
+ argument of the function must be an :class:`nn.Module` instance. The
30
+ decorator verifies that the wrapped function does not modify parameter,
31
+ buffer or sub-module fully-qualified names (FQN).
32
+
33
+ When a function ``func`` is decorated by ``@contract()``, a
34
+ ``.state(module: nn.Module)`` method will be installed to the decorated
35
+ function. Then you can retrieve and modify the state on a module by calling
36
+ ``func.state(module)``.
37
+
38
+ Example::
39
+ >>> # xdoctest: +SKIP
40
+ >>> import torch.nn as nn
41
+ >>>
42
+ >>> class MyModel(nn.Module):
43
+ >>> def __init__(self):
44
+ >>> super().__init__()
45
+ >>> self.l1 = nn.Linear(10, 10)
46
+ >>> self.l2 = nn.Linear(10, 10)
47
+ >>>
48
+ >>> def forward(self, x):
49
+ >>> return self.l2(self.l1(x))
50
+ >>>
51
+ >>> @contract()
52
+ >>> def my_feature(module: nn.Module) -> nn.Module:
53
+ >>> my_feature.state(module).some_state = "any value"
54
+ >>> return module
55
+ >>>
56
+ >>> model = MyModel()
57
+ >>> my_feature(model.l1)
58
+ >>> assert my_feature.state(model.l1).some_state == "any value"
59
+ >>> my_feature(model.l2)
60
+ >>> model(torch.randn(2, 10)).sum().backward()
61
+ """
62
+
63
+ # wraps will make functions decorated with contract() pickleable - needed for integration with torch.package
64
+ @wraps(state_cls)
65
+ def inner(func):
66
+ @wraps(func)
67
+ def wrapper(module: nn.Module, *args, **kwargs) -> Optional[nn.Module]:
68
+ # get existing global states
69
+ default_all_state: Dict[Callable, _State] = OrderedDict()
70
+ all_state: Dict[Callable, _State] = module.__dict__.setdefault( # type: ignore[call-overload]
71
+ STATE_KEY, default_all_state
72
+ )
73
+ assert isinstance(
74
+ all_state, dict
75
+ ), "Distributed composable API states corrupted"
76
+
77
+ # get global registry
78
+ default_registry: Dict[str, RegistryItem] = OrderedDict()
79
+ registry: Dict[str, RegistryItem] = module.__dict__.setdefault( # type: ignore[call-overload]
80
+ REGISTRY_KEY, default_registry
81
+ )
82
+
83
+ assert isinstance(
84
+ registry, dict
85
+ ), "Distributed composable API registry corrupted"
86
+
87
+ # make sure the API func has not been applied to the input module yet.
88
+ assert func not in all_state and func.__name__ not in registry, (
89
+ "Each distinct composable distributed API can only be applied to a "
90
+ f"module once. {func.__name__} has already been applied to the "
91
+ f"following module.\n{module}"
92
+ )
93
+
94
+ # install states specific to the wrapped ``func``
95
+ all_state.setdefault(func, state_cls())
96
+ # register ``func`` in the global registry by name
97
+ registry.setdefault(func.__name__, RegistryItem())
98
+
99
+ orig_named_params = OrderedDict(module.named_parameters())
100
+ orig_named_buffers = OrderedDict(
101
+ module.named_buffers(remove_duplicate=False)
102
+ )
103
+ orig_named_modules = OrderedDict(
104
+ module.named_modules(remove_duplicate=False)
105
+ )
106
+
107
+ updated = func(module, *args, **kwargs)
108
+
109
+ if updated is None:
110
+ updated = module
111
+
112
+ new_named_params = OrderedDict(updated.named_parameters())
113
+ new_named_buffers = OrderedDict(
114
+ updated.named_buffers(remove_duplicate=False)
115
+ )
116
+ new_named_modules = OrderedDict(
117
+ updated.named_modules(remove_duplicate=False)
118
+ )
119
+
120
+ assert isinstance(updated, nn.Module), (
121
+ "Output of composable distributed APIs must be either None or "
122
+ f"nn.Module, but got {type(updated)}"
123
+ )
124
+
125
+ def check_fqn(orig_fqns: List[str], new_fqns: List[str], check_key: str):
126
+ if orig_fqns == new_fqns:
127
+ return
128
+
129
+ orig_fqn_set, new_fqn_set = set(orig_fqns), set(new_fqns)
130
+ orig_only = orig_fqn_set - new_fqn_set
131
+ new_only = new_fqn_set - orig_fqn_set
132
+ if len(orig_only) or len(new_only):
133
+ raise RuntimeError(
134
+ f"{check_key}"
135
+ "Composable distributed API implementations cannot modify "
136
+ "FQNs.\n"
137
+ f"Only in original FQNs: {orig_only},\n"
138
+ f"Only in new FQNs: {new_only}"
139
+ )
140
+ else:
141
+ raise RuntimeError(
142
+ f"{check_key}"
143
+ "Composable distributed API implementations cannot modify "
144
+ "the order of FQNs.\n"
145
+ f"Original FQNs: {orig_only}\n"
146
+ f"New FQNs: {new_only}"
147
+ )
148
+
149
+ check_fqn(
150
+ list(orig_named_params.keys()),
151
+ list(new_named_params.keys()),
152
+ "Check parameters, ",
153
+ )
154
+ check_fqn(
155
+ list(orig_named_buffers.keys()),
156
+ list(new_named_buffers.keys()),
157
+ "Check buffer, ",
158
+ )
159
+ check_fqn(
160
+ list(orig_named_modules.keys()),
161
+ list(new_named_modules.keys()),
162
+ "Check modules, ",
163
+ )
164
+
165
+ # TODO: a stricter verification should also reject changing module
166
+ # types and monkey-patching forward() method implementations.
167
+
168
+ # TODO: verify that installed distributed paradigms are compatible with
169
+ # each other.
170
+
171
+ return updated
172
+
173
+ def get_state(module: nn.Module) -> Optional[_State]:
174
+ return module.__dict__.setdefault( # type: ignore[call-overload]
175
+ STATE_KEY,
176
+ {}, # TODO(@yhcharles): this is a temporary fix, need a better way
177
+ ).get(
178
+ func
179
+ ) # type: ignore[call-overload]
180
+
181
+ wrapper.state = get_state # type: ignore[attr-defined]
182
+
183
+ return wrapper
184
+
185
+ return inner
186
+
187
+
188
+ def _get_registry(module: nn.Module) -> Optional[Dict[str, RegistryItem]]:
189
+ r"""
190
+ Get an ``OrderedDict`` of composable APIs that have been applied to the
191
+ ``module``, indexed by the API name. If no API has been applied, then this
192
+ returns ``None``.
193
+ """
194
+ return getattr(module, REGISTRY_KEY, None)
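As a quick illustration of what ``contract`` provides (a hedged sketch, not part of the commit, based on the docstring above): decorating a function installs a per-module ``.state()`` accessor and records the API by name in the module's registry, which ``_get_registry`` exposes.

# Illustrative sketch only: a toy composable API built with @contract.
import torch
import torch.nn as nn

from torch.distributed._composable.contract import _get_registry, contract


@contract()
def my_feature(module: nn.Module) -> nn.Module:
    my_feature.state(module).some_state = "any value"
    return module


lin = nn.Linear(10, 10)
my_feature(lin)
assert my_feature.state(lin).some_state == "any value"
assert "my_feature" in _get_registry(lin)  # registered by function name
lin(torch.randn(2, 10)).sum().backward()  # FQNs and behavior are unchanged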
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from ._fsdp_api import MixedPrecisionPolicy
+ from .fully_shard import FSDP, fully_shard
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc ADDED
Binary file (16.2 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_api.py ADDED
@@ -0,0 +1,52 @@
+ from dataclasses import dataclass
+ from typing import Optional
+
+ import torch
+
+
+ @dataclass(frozen=True)
+ class MixedPrecisionPolicy:
+     """
+     This configures FSDP's mixed precision. Unlike autocast, this applies mixed
+     precision at the module level, not op level, which means low-precision
+     activations are saved for backward and high-to-low-precision casts are
+     incurred only at module boundaries.
+
+     FSDP works well with module-level mixed precision since it keeps the
+     high-precision sharded parameters in memory anyway. In other words, FSDP
+     does not require any extra memory to keep a high-precision copy of the
+     parameters for the optimizer step.
+
+     Attributes:
+         param_dtype (Optional[torch.dtype]): This specifies the dtype for
+             the unsharded parameter and hence the dtype for forward/backward
+             computation and the parameter all-gather. If this is ``None``, then
+             the unsharded parameter uses the original dtype. The optimizer step
+             uses the sharded parameter in the original dtype. (Default:
+             ``None``)
+         reduce_dtype (Optional[torch.dtype]): This specifies the dtype for
+             gradient reduction (i.e. reduce-scatter or all-reduce). If this is
+             ``None`` but ``param_dtype`` is not ``None``, then the reduction
+             uses the compute dtype. This can be used to run gradient reduction
+             in full precision while using low precision for compute. (Default:
+             ``None``)
+         output_dtype (Optional[torch.dtype]): This specifies the dtype for
+             casting floating-point forward outputs. This can be used to
+             help implement cases where different modules have different mixed
+             precision policies. (Default: ``None``)
+         cast_forward_inputs (bool): This specifies whether FSDP should cast the
+             forward's floating-point input tensors to ``param_dtype`` or not.
+     """
+
+     param_dtype: Optional[torch.dtype] = None
+     reduce_dtype: Optional[torch.dtype] = None
+     output_dtype: Optional[torch.dtype] = None
+     cast_forward_inputs: bool = True
+
+     def __post_init__(self):
+         # Clamp `reduce_dtype` to `None` if no casting is required: since
+         # gradients are computed in `param_dtype`, if `reduce_dtype` matches,
+         # then we do not need extra casting
+         if self.param_dtype == self.reduce_dtype:
+             # Bypass the frozen dataclass checks
+             object.__setattr__(self, "reduce_dtype", None)
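For reference, a short sketch (not part of the commit) of constructing the two most common policies described above: bf16 compute with fp32 gradient reduction, and pure bf16. It assumes the re-export from this package's __init__ shown earlier in the diff; the policy would typically be passed to the FSDP entry point (fully_shard) as its mixed precision argument.

# Illustrative sketch only: constructing MixedPrecisionPolicy instances.
import torch

from torch.distributed._composable.fsdp import MixedPrecisionPolicy

# bf16 all-gather/compute, fp32 reduce-scatter
bf16_fp32_reduce = MixedPrecisionPolicy(
    param_dtype=torch.bfloat16,
    reduce_dtype=torch.float32,
)
assert bf16_fp32_reduce.reduce_dtype is torch.float32

# Pure bf16: __post_init__ clamps reduce_dtype to None since no extra cast is needed
pure_bf16 = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.bfloat16)
assert pure_bf16.reduce_dtype is None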
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_collectives.py ADDED
@@ -0,0 +1,217 @@
1
+ from typing import List, NamedTuple, Optional, Tuple
2
+
3
+ import torch
4
+ import torch.distributed as dist
5
+ from torch.distributed.distributed_c10d import ReduceOp
6
+ from ._fsdp_common import (
7
+ _get_dim0_padded_size,
8
+ _raise_assert_with_print,
9
+ _to_dtype_if_needed,
10
+ )
11
+ from ._fsdp_param import FSDPParam
12
+
13
+
14
+ class AllGatherResult(NamedTuple):
15
+ all_gather_output: torch.Tensor
16
+ all_gather_event: Optional[torch.cuda.Event]
17
+ all_gather_work: Optional[dist.distributed_c10d.Work]
18
+ all_gather_input_numels: List[int]
19
+
20
+
21
+ @torch.no_grad()
22
+ def foreach_all_gather(
23
+ fsdp_params: List[FSDPParam],
24
+ group: dist.ProcessGroup,
25
+ async_op: bool,
26
+ all_gather_copy_in_stream: torch.cuda.Stream,
27
+ all_gather_stream: torch.cuda.Stream,
28
+ device: torch.device,
29
+ ) -> Optional[AllGatherResult]:
30
+ world_size, rank = group.size(), group.rank()
31
+ # - Copy in
32
+ with torch.cuda.stream(all_gather_copy_in_stream):
33
+ param_all_gather_inputs = [
34
+ fsdp_param.all_gather_input for fsdp_param in fsdp_params
35
+ ]
36
+ dtype = param_all_gather_inputs[0].dtype
37
+ if not all(t.dtype == dtype for t in param_all_gather_inputs):
38
+ raise NotImplementedError(
39
+ f"Mixed dtype not supported yet: {[t.dtype for t in param_all_gather_inputs]}"
40
+ )
41
+ inp_split_sizes = [inp.numel() for inp in param_all_gather_inputs]
42
+ all_gather_input_numel = sum(inp_split_sizes)
43
+ all_gather_output = torch.empty(
44
+ (all_gather_input_numel * world_size,), dtype=dtype, device=device
45
+ )
46
+ all_gather_input = all_gather_output.narrow(
47
+ 0, all_gather_input_numel * rank, all_gather_input_numel
48
+ )
49
+ foreach_copy_dsts = torch.split(all_gather_input, inp_split_sizes)
50
+ torch._foreach_copy_(foreach_copy_dsts, param_all_gather_inputs)
51
+ del param_all_gather_inputs
52
+ all_gather_stream.wait_stream(all_gather_copy_in_stream)
53
+ with torch.cuda.stream(all_gather_stream):
54
+ # - All-gather
55
+ all_gather_work = dist.all_gather_into_tensor(
56
+ output_tensor=all_gather_output,
57
+ input_tensor=all_gather_input,
58
+ group=group,
59
+ async_op=async_op,
60
+ )
61
+ all_gather_event = all_gather_stream.record_event()
62
+ return AllGatherResult(
63
+ all_gather_output, all_gather_event, all_gather_work, inp_split_sizes
64
+ )
65
+
66
+
67
+ @torch.no_grad()
68
+ def foreach_all_gather_copy_out(
69
+ all_gather_result: AllGatherResult,
70
+ fsdp_params: List[FSDPParam],
71
+ group: dist.ProcessGroup,
72
+ ) -> None:
73
+ (
74
+ all_gather_output,
75
+ all_gather_event,
76
+ all_gather_work,
77
+ all_gather_input_numels,
78
+ ) = all_gather_result
79
+ if all_gather_event is not None: # sync op
80
+ torch.cuda.current_stream().wait_event(all_gather_event)
81
+ if all_gather_work is not None: # async op
82
+ all_gather_work.wait()
83
+ world_size = group.size()
84
+ dtype, device = all_gather_output.dtype, all_gather_output.device
85
+ for all_gather_input_numel, fsdp_param in zip(all_gather_input_numels, fsdp_params):
86
+ fsdp_param.init_all_gather_output(
87
+ all_gather_input_numel, world_size, dtype, device
88
+ ) # no-op after 1st call
89
+ fsdp_param.alloc_all_gather_output()
90
+ all_gather_output = all_gather_output.view(world_size, -1)
91
+ out = [
92
+ fsdp_param.all_gather_output.view(world_size, -1) for fsdp_param in fsdp_params
93
+ ]
94
+ torch.split_with_sizes_copy(
95
+ all_gather_output, all_gather_input_numels, dim=1, out=out
96
+ )
97
+
98
+
99
+ @torch.no_grad()
100
+ def foreach_reduce_scatter(
101
+ fsdp_params: List[FSDPParam],
102
+ unsharded_grads: List[torch.Tensor],
103
+ group: dist.ProcessGroup,
104
+ reduce_scatter_stream: torch.cuda.Stream,
105
+ orig_dtype: torch.dtype,
106
+ reduce_dtype: Optional[torch.dtype],
107
+ device: torch.device,
108
+ divide_factors: Optional[Tuple[float, float]],
109
+ ) -> torch.cuda.Event:
110
+ """
111
+ ``unsharded_grads`` owns the references to the gradients computed by
112
+ autograd, so clearing the list frees the gradients.
113
+ """
114
+ grad_dtypes = {grad.dtype for grad in unsharded_grads}
115
+ if len(grad_dtypes) != 1:
116
+ # Check this at runtime since it could be a real runtime error if e.g.
117
+ # fp8 weights do not produce the correct higher precision gradients
118
+ _raise_assert_with_print(
119
+ f"FSDP reduce-scatter expects uniform gradient dtype but got {grad_dtypes}"
120
+ )
121
+ grad_dtype = unsharded_grads[0].dtype
122
+ reduce_dtype = reduce_dtype or grad_dtype
123
+ world_size = group.size()
124
+ padded_unsharded_sizes = tuple(
125
+ _get_dim0_padded_size(grad.size(), world_size) for grad in unsharded_grads
126
+ )
127
+ reduce_scatter_input_numel = sum(s.numel() for s in padded_unsharded_sizes)
128
+ reduce_scatter_output_numel = reduce_scatter_input_numel // world_size
129
+ current_stream = torch.cuda.current_stream()
130
+ reduce_scatter_stream.wait_stream(current_stream)
131
+ with torch.cuda.stream(reduce_scatter_stream):
132
+ reduce_scatter_input = torch.empty(
133
+ (reduce_scatter_input_numel,), dtype=reduce_dtype, device=device
134
+ )
135
+ foreach_reduce_scatter_copy_in(
136
+ unsharded_grads, reduce_scatter_input, world_size
137
+ )
138
+ # Only after the copy-in finishes can we free the gradients, which were
139
+ # computed in the default stream
140
+ current_stream.wait_stream(reduce_scatter_stream)
141
+ unsharded_grads.clear()
142
+ reduce_scatter_output = reduce_scatter_input.new_empty(
143
+ (reduce_scatter_output_numel,)
144
+ )
145
+ _reduce_scatter(
146
+ reduce_scatter_output, reduce_scatter_input, group, divide_factors
147
+ )
148
+ reduce_scatter_output = _to_dtype_if_needed(reduce_scatter_output, orig_dtype)
149
+ # - View out and accumulate
150
+ flat_grad_offset = 0 # [0, reduce_scatter_output_numel - 1]
151
+ for padded_unsharded_size, fsdp_param in zip(
152
+ padded_unsharded_sizes, fsdp_params
153
+ ):
154
+ new_sharded_grad = torch.as_strided(
155
+ reduce_scatter_output,
156
+ size=fsdp_param.sharded_size,
157
+ stride=fsdp_param.contiguous_sharded_stride,
158
+ storage_offset=flat_grad_offset,
159
+ )
160
+ to_accumulate_grad = fsdp_param.sharded_param.grad is not None
161
+ new_sharded_dtensor_grad = fsdp_param.to_sharded_dtensor(new_sharded_grad)
162
+ if to_accumulate_grad:
163
+ fsdp_param.sharded_param.grad += new_sharded_dtensor_grad
164
+ else:
165
+ fsdp_param.sharded_param.grad = new_sharded_dtensor_grad
166
+ padded_sharded_numel = padded_unsharded_size.numel() // world_size
167
+ flat_grad_offset += padded_sharded_numel
168
+ reduce_scatter_view_out_event = reduce_scatter_stream.record_event()
169
+ # The RS output is allocated in the RS stream and used in the default
170
+ # stream (for optimizer). To ensure its memory is not reused for later
171
+ # RSs, we do not need extra synchronization since the sharded parameters
172
+ # hold refs through the end of backward.
173
+ return reduce_scatter_view_out_event
174
+
175
+
176
+ def foreach_reduce_scatter_copy_in(
177
+ unsharded_grads: List[torch.Tensor],
178
+ reduce_scatter_input: torch.Tensor,
179
+ world_size: int,
180
+ ) -> None:
181
+ grad_views: List[torch.Tensor] = []
182
+ grads_to_copy: List[torch.Tensor] = []
183
+ padded_grad_slices: List[torch.Tensor] = []
184
+ for grad in unsharded_grads:
185
+ grad_size = grad.size()
186
+ dim0_padded_size = _get_dim0_padded_size(grad_size, world_size)
187
+ if dim0_padded_size != grad_size:
188
+ padded_grad = grad.new_empty(dim0_padded_size)
189
+ padded_grad_slices.append(padded_grad[: grad.size(0)])
190
+ grads_to_copy.append(grad)
191
+ grad = padded_grad
192
+ grad_views.append(grad.view(world_size, -1))
193
+ if padded_grad_slices:
194
+ torch._foreach_copy_(padded_grad_slices, grads_to_copy)
195
+ torch.cat(grad_views, dim=-1, out=reduce_scatter_input.view(world_size, -1))
196
+
197
+
198
+ def _reduce_scatter(
199
+ output: torch.Tensor,
200
+ input: torch.Tensor,
201
+ group: dist.ProcessGroup,
202
+ divide_factors: Optional[Tuple[float, float]],
203
+ ) -> None:
204
+ if divide_factors:
205
+ predivide_factor, postdivide_factor = divide_factors
206
+ _div_if_needed(input, predivide_factor)
207
+ dist.reduce_scatter_tensor(output, input, group=group)
208
+ _div_if_needed(output, postdivide_factor)
209
+ else:
210
+ # Using NCCL's reduce-scatter to do the division by world size saves
211
+ # extra memory read/write from a separate division kernel
212
+ dist.reduce_scatter_tensor(output, input, op=ReduceOp.AVG, group=group)
213
+
214
+
215
+ def _div_if_needed(tensor: torch.Tensor, div_factor: float) -> None:
216
+ if div_factor > 1:
217
+ tensor.div_(div_factor)
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py ADDED
@@ -0,0 +1,151 @@
1
+ import math
2
+ import traceback
3
+
4
+ from dataclasses import dataclass
5
+ from enum import auto, Enum
6
+ from typing import Any, cast, List, Optional, Tuple
7
+
8
+ import torch
9
+ import torch.distributed as dist
10
+ import torch.nn as nn
11
+ from torch.distributed._composable.contract import _get_registry
12
+ from torch.distributed._tensor import DeviceMesh, DTensor, Placement
13
+
14
+
15
+ @dataclass
16
+ class DataParallelMeshInfo:
17
+ mesh: DeviceMesh
18
+ shard_mesh_dim: Optional[int] = None
19
+ replicate_mesh_dim: Optional[int] = None
20
+
21
+ def __post_init__(self):
22
+ if self.shard_mesh_dim is None and self.replicate_mesh_dim is None:
23
+ raise AssertionError(
24
+ "At least one of shard_mesh_dim and replicate_mesh_dim must not be None"
25
+ )
26
+
27
+
28
+ @dataclass
29
+ class FSDPMeshInfo(DataParallelMeshInfo):
30
+ def __post_init__(self):
31
+ super().__post_init__()
32
+ if self.shard_mesh_dim is None:
33
+ raise AssertionError("Expects non-None shard_mesh_dim")
34
+ self.shard_mesh_size: int = self.mesh.size(self.shard_mesh_dim)
35
+ self.shard_process_group = cast(
36
+ dist.ProcessGroup, self.mesh.get_group(self.shard_mesh_dim)
37
+ )
38
+ self.shard_mesh_rank: int = self.shard_process_group.rank()
39
+
40
+
41
+ @dataclass
42
+ class DDPMeshInfo(DataParallelMeshInfo):
43
+ def __post_init__(self):
44
+ super().__post_init__()
45
+ if self.replicate_mesh_dim is None:
46
+ raise AssertionError("Expects non-None replicate_mesh_dim")
47
+ self.replicate_mesh_size: int = self.mesh.size(self.replicate_mesh_dim)
48
+ self.replicate_process_group = cast(
49
+ dist.ProcessGroup, self.mesh.get_group(self.replicate_mesh_dim)
50
+ )
51
+ self.replicate_mesh_rank: int = self.replicate_process_group.rank()
52
+
53
+
54
+ @dataclass
55
+ class HSDPMeshInfo(FSDPMeshInfo, DDPMeshInfo):
56
+ def __post_init__(self):
57
+ # Calls `FSDPMeshInfo` -> `DDPMeshInfo` -> `DataParallelMeshInfo`
58
+ super().__post_init__()
59
+
60
+
61
+ class TrainingState(Enum):
62
+ """Describes the training state of one FSDP state / parameter group."""
63
+
64
+ # Transition to forward starting pre-forward until post-forward
65
+ FORWARD = auto()
66
+ # Transition to pre-backward when unsharding in backward
67
+ PRE_BACKWARD = auto()
68
+ # Transition to post-backward when resharding and reducing gradients
69
+ POST_BACKWARD = auto()
70
+ # Idle before/after forward or before pre-backward/after post-backward
71
+ IDLE = auto()
72
+
73
+
74
+ def _raise_assert_with_print(*args: Any, **kwargs: Any):
75
+ print(f"[Rank {dist.get_rank()}] ", end="")
76
+ print(*args, **kwargs)
77
+ traceback.print_stack()
78
+ raise AssertionError(*args, **kwargs)
79
+
80
+
81
+ def _is_composable_with_fsdp(module: nn.Module) -> bool:
82
+ registry = _get_registry(module)
83
+ if registry is None:
84
+ return True
85
+ # Registry keys by function name
86
+ return "replicate" not in registry
87
+
88
+
89
+ def _get_dim0_padded_size(tensor_size: torch.Size, dim0_factor: int) -> torch.Size:
90
+ padded_dim0 = math.ceil(tensor_size[0] / dim0_factor) * dim0_factor
91
+ return cast(torch.Size, torch.Size([padded_dim0]) + tensor_size[1:])
92
+
93
+
94
+ def _chunk_with_empty(
95
+ tensor: torch.Tensor, num_chunks: int, dim: int
96
+ ) -> List[torch.Tensor]:
97
+ chunks = list(torch.chunk(tensor, num_chunks, dim=dim))
98
+ while len(chunks) < num_chunks:
99
+ chunks.append(chunks[0].new_empty(0))
100
+ return chunks
101
+
102
+
103
+ def _get_dim0_chunked_size(
104
+ chunk: torch.Tensor, unchunked_size: torch.Size
105
+ ) -> torch.Size:
106
+ if chunk.numel() > 0:
107
+ return chunk.size()
108
+ # For 0 numel, we need to preserve trailing dims for DTensor APIs
109
+ return cast(torch.Size, torch.Size([0]) + unchunked_size[1:])
110
+
111
+
112
+ def _from_local_no_grad(
113
+ local_tensor: torch.Tensor,
114
+ device_mesh: DeviceMesh,
115
+ placements: Tuple[Placement, ...],
116
+ global_size: torch.Size,
117
+ global_stride: Tuple[int, ...],
118
+ ) -> DTensor:
119
+ """
120
+ This method is similar to ``DTensor.from_local()`` except it avoids some
121
+ CPU overhead by avoiding default args and not being differentiable.
122
+ """
123
+ return DTensor(
124
+ # Use the local tensor directly instead of constructing a new tensor
125
+ # variable, e.g. with `view_as()`, since this is not differentiable
126
+ local_tensor,
127
+ device_mesh,
128
+ placements,
129
+ shape=global_size,
130
+ dtype=local_tensor.dtype,
131
+ requires_grad=local_tensor.requires_grad,
132
+ stride=global_stride,
133
+ )
134
+
135
+
136
+ def _to_dtype_if_needed(
137
+ tensor: torch.Tensor, dtype: Optional[torch.dtype]
138
+ ) -> torch.Tensor:
139
+ if dtype is not None and tensor.dtype != dtype:
140
+ return tensor.to(dtype)
141
+ return tensor
142
+
143
+
144
+ def _cast_fp_tensor(dtype: torch.dtype, x: torch.Tensor) -> torch.Tensor:
145
+ if (
146
+ not isinstance(x, torch.Tensor)
147
+ or not torch.is_floating_point(x)
148
+ or x.dtype == dtype
149
+ ):
150
+ return x
151
+ return x.to(dtype)
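To make the dim-0 sharding arithmetic above concrete, here is a small single-process sketch (not part of the commit) using the private helpers from this file: dim 0 is rounded up to a multiple of the shard world size, and ranks beyond the last non-empty chunk receive empty chunks.

# Illustrative sketch only: padding and chunking a (5, 3) parameter across 4 shards.
import torch

from torch.distributed._composable.fsdp._fsdp_common import (
    _chunk_with_empty,
    _get_dim0_padded_size,
)

world_size = 4
param = torch.arange(15.0).view(5, 3)

padded_size = _get_dim0_padded_size(param.size(), world_size)
assert tuple(padded_size) == (8, 3)  # ceil(5 / 4) * 4 = 8 rows

chunks = _chunk_with_empty(param, world_size, dim=0)
assert [c.size(0) for c in chunks] == [2, 2, 1, 0]  # last rank gets an empty chunk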
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_init.py ADDED
@@ -0,0 +1,144 @@
1
+ import itertools
2
+ from typing import List, Optional, Set, Tuple, Union
3
+
4
+ import torch
5
+ import torch.distributed as dist
6
+ import torch.nn as nn
7
+
8
+ from torch.distributed._tensor import DeviceMesh, DTensor, init_device_mesh
9
+ from torch.distributed.device_mesh import _get_device_handle
10
+ from ._fsdp_common import _is_composable_with_fsdp, FSDPMeshInfo, HSDPMeshInfo
11
+ from ._fsdp_state import _get_module_fsdp_state
12
+
13
+
14
+ def _get_post_forward_mesh_info(
15
+ reshard_after_forward: Union[bool, int], mesh_info: FSDPMeshInfo
16
+ ) -> Optional[FSDPMeshInfo]:
17
+ shard_mesh_size = mesh_info.shard_mesh_size
18
+ if not isinstance(reshard_after_forward, (bool, int)):
19
+ raise ValueError(
20
+ "reshard_after_forward should be a bool or an int representing the "
21
+ f"group size to reshard to, not {reshard_after_forward}"
22
+ )
23
+ # NOTE: `isinstance(False, int)` returns `True`.
24
+ if not isinstance(reshard_after_forward, bool) and isinstance(
25
+ reshard_after_forward, int
26
+ ):
27
+ if (
28
+ reshard_after_forward < 1
29
+ or reshard_after_forward > shard_mesh_size
30
+ or shard_mesh_size % reshard_after_forward != 0
31
+ ):
32
+ raise ValueError(
33
+ "If passing reshard_after_forward as an int, it should be a "
34
+ f"factor of {shard_mesh_size}, not {reshard_after_forward}"
35
+ )
36
+ elif reshard_after_forward == 1:
37
+ reshard_after_forward = False
38
+ elif reshard_after_forward == shard_mesh_size:
39
+ reshard_after_forward = True
40
+ post_forward_mesh_info = None
41
+ if reshard_after_forward is True:
42
+ post_forward_mesh_info = mesh_info
43
+ elif reshard_after_forward is not False: # int case
44
+ # For HSDP, we can flatten the two replicate dims into the 0th dim
45
+ post_forward_mesh_tensor = mesh_info.mesh.mesh.view(-1, reshard_after_forward)
46
+ post_forward_mesh = DeviceMesh(
47
+ mesh_info.mesh.device_type, post_forward_mesh_tensor
48
+ )
49
+ post_forward_mesh_info = HSDPMeshInfo(
50
+ post_forward_mesh, shard_mesh_dim=1, replicate_mesh_dim=0
51
+ )
52
+ return post_forward_mesh_info
53
+
54
+
55
+ def _init_default_fully_shard_mesh() -> DeviceMesh:
56
+ """Default to global CUDA mesh if possible else global CPU mesh."""
57
+ if not dist.distributed_c10d.is_initialized():
58
+ dist.distributed_c10d.init_process_group()
59
+ default_pg = dist.distributed_c10d._get_default_group()
60
+ device_type = "cuda" if torch.cuda.is_available() else "cpu"
61
+ mesh = init_device_mesh(device_type, mesh_shape=(default_pg.size(),))
62
+ return mesh
63
+
64
+
65
+ def _get_device_from_mesh(mesh: DeviceMesh) -> torch.device:
66
+ if mesh.device_type == "cpu":
67
+ return torch.device("cpu")
68
+ device_handle = _get_device_handle(mesh.device_type)
69
+ return torch.device(mesh.device_type, device_handle.current_device())
70
+
71
+
72
+ def _get_managed_modules(root_module: nn.Module) -> List[nn.Module]:
73
+ modules: List[nn.Module] = []
74
+ # Track visited modules to avoid visiting shared modules multiple times
75
+ visited_modules: Set[nn.Module] = set()
76
+
77
+ def dfs(module: nn.Module) -> None:
78
+ """
79
+ Runs a DFS to collect managed modules, not recursing into modules with
80
+ a non-composable API or ``fully_shard`` already applied.
81
+ """
82
+ if not _is_composable_with_fsdp(module):
83
+ return
84
+ elif module is not root_module and _get_module_fsdp_state(module) is not None:
85
+ return # nested `fully_shard` module
86
+ visited_modules.add(module)
87
+ for submodule in module.children():
88
+ if submodule not in visited_modules:
89
+ dfs(submodule)
90
+ modules.append(module)
91
+
92
+ dfs(root_module)
93
+ return modules
94
+
95
+
96
+ def _get_managed_states(
97
+ modules: List[nn.Module],
98
+ ) -> Tuple[List[nn.Parameter], List[torch.Tensor]]:
99
+ params: List[nn.Parameter] = []
100
+ buffers: List[torch.Tensor] = []
101
+ # Track visited parameters/buffers to avoid visiting shared parameters and
102
+ # buffers multiple times
103
+ visited_params: Set[nn.Parameter] = set()
104
+ visited_buffers: Set[torch.Tensor] = set()
105
+ for module in modules:
106
+ for param in module.parameters(recurse=False):
107
+ if param not in visited_params:
108
+ params.append(param)
109
+ visited_params.add(param)
110
+ for buffer in module.buffers(recurse=False):
111
+ if buffer not in visited_buffers:
112
+ buffers.append(buffer)
113
+ visited_buffers.add(buffer)
114
+ return params, buffers
115
+
116
+
117
+ def _move_states_to_device(
118
+ params: List[nn.Parameter],
119
+ buffers: List[torch.Tensor],
120
+ device: torch.device,
121
+ mesh_info: FSDPMeshInfo,
122
+ ) -> None:
123
+ """
124
+ We have FSDP move states to device for simpler and faster initialization
125
+ since FSDP almost always uses CUDA for training. We move parameters/buffers
126
+ rather than modules to support ignoring parameters/buffers in
127
+ the future.
128
+ """
129
+ # TODO: De-duplicate with `_apply` after `swap_tensors` path lands:
130
+ # https://github.com/pytorch/pytorch/issues/115792
131
+ for tensor in itertools.chain(params, buffers):
132
+ if tensor.device == device or tensor.device.type == "meta":
133
+ # Keep meta-device tensors on meta device for deferred init
134
+ continue
135
+ if isinstance(tensor, DTensor):
136
+ if (dtensor_mesh_type := tensor._spec.mesh.device_type) != device.type:
137
+ raise ValueError(
138
+ "Requires DTensor to have mesh of the same type as the FSDP mesh "
139
+ f"but got {dtensor_mesh_type} for DTensor and {device.type} for FSDP"
140
+ )
141
+ raise AssertionError(
142
+ f"Expects DTensor to be moved to {dtensor_mesh_type} but got {tensor.device}"
143
+ )
144
+ tensor.data = tensor.to(device)
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py ADDED
@@ -0,0 +1,438 @@
1
+ from dataclasses import dataclass, field
2
+ from enum import auto, Enum
3
+ from typing import cast, List, Optional, Tuple
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from torch._prims_common import make_contiguous_strides_for
9
+ from torch.distributed._functional_collectives import AsyncCollectiveTensor
10
+ from torch.distributed._tensor import DTensor, Placement, Replicate, Shard
11
+ from torch.distributed._tensor.device_mesh import _mesh_resources
12
+ from torch.distributed._tensor.placement_types import DTensorSpec
13
+ from ._fsdp_api import MixedPrecisionPolicy
14
+ from ._fsdp_common import (
15
+ _chunk_with_empty,
16
+ _from_local_no_grad,
17
+ _get_dim0_chunked_size,
18
+ _raise_assert_with_print,
19
+ _to_dtype_if_needed,
20
+ FSDPMeshInfo,
21
+ HSDPMeshInfo,
22
+ )
23
+
24
+ """
25
+ [Note: FSDP tensors]
26
+ FSDP considers the following tensors:
27
+ - Original parameter: parameter passed to :class:`FSDPParam`, i.e. the one
28
+ on the module when applying FSDP
29
+ - Sharded parameter: sharding the original parameter on dim-0 as a DTensor
30
+ over the main mesh
31
+ - All-gather input: the ``torch.Tensor`` passed to all-gather, derived from the
32
+ sharded parameter
33
+ - All-gather output: the ``torch.Tensor`` resulting from all-gathering the
34
+ all-gather input
35
+ - Unsharded parameter: parameter used for forward/backward computation, derived
36
+ from the all-gather output; autograd leaf
37
+
38
+ We define these tensors to describe the general framework that can accommodate
39
+ extensions, where:
40
+ - all-gather-input = pre-all-gather-transform(sharded-parameter)
41
+ - unsharded-parameter = post-all-gather-transform(all-gather-output)
42
+
43
+ For the default ``torch.Tensor`` case, the sharded parameter and all-gather
44
+ input share the same underlying tensor data, meaning that they can be thought
45
+ of as the same tensors. The same applies for the all-gather output and
46
+ unsharded parameter. For non-``torch.Tensor`` extensions, these equivalences
47
+ may no longer hold due to the pre/post-all-gather transforms.
48
+
49
+ [Note: FSDP and autograd]
50
+ FSDP dynamically frees and allocates the unsharded parameter. Since autograd
51
+ can pack a reference to it or a view to save for backward, we use storage
52
+ resizing to implement the freeing/allocation since that preserves the aliasing.
53
+ This implies that we construct the unsharded parameter object once and write to
54
+ it in-place thereafter. For the default ``torch.Tensor`` original parameter
55
+ case, the all-gather output and unsharded parameter share the same
56
+ data, so we use storage resizing on the all-gather output.
57
+ """
58
+
59
+
60
+ class ShardedState(Enum):
61
+ """
62
+ - ``SHARDED``: The sharded parameter is registered to the module. It is the
63
+ only contributor to parameter memory.
64
+ - ``SHARDED_POST_FORWARD``: The unsharded parameter is resharded to a
65
+ smaller world size. Since this data should not be used for computation,
66
+ we do not register it to the module. Users should reshard the module
67
+ before any in-place modifications. Both it and the sharded parameter
68
+ contribute to parameter memory.
69
+ - ``UNSHARDED``: The unsharded parameter is registered to the module. Both
70
+ it and the sharded parameter contribute to parameter memory.
71
+ """
72
+
73
+ SHARDED = auto()
74
+ SHARDED_POST_FORWARD = auto()
75
+ UNSHARDED = auto()
76
+
77
+
78
+ @dataclass
79
+ class ParamModuleInfo:
80
+ """
81
+ For a parameter, this stores the module and the parameter name to be able
82
+ to do a parameter swap via ``setattr(module, param_name, ...)`` or to get
83
+ the parameter via ``getattr(module, param_name)``. We additionally save
84
+ shared modules and shared parameter names to update them accordingly.
85
+ """
86
+
87
+ # Parameter names are unprefixed, e.g. "weight", not "lin.weight"
88
+ module: nn.Module
89
+ param_name: str
90
+ shared_modules: List[nn.Module] = field(default_factory=list)
91
+ shared_param_names: List[str] = field(default_factory=list)
92
+
93
+
94
+ class FSDPParam:
95
+ """
96
+ This class manages a parameter with FSDP or FSDP variants applied,
97
+ implementing dim-0 per-parameter sharding.
98
+ """
99
+
100
+ orig_dtype: torch.dtype
101
+ param_dtype: Optional[torch.dtype]
102
+ reduce_dtype: Optional[torch.dtype]
103
+ _orig_size: torch.Size # ND
104
+ _contiguous_orig_stride: Tuple[int, ...]
105
+ sharded_size: torch.Size # ND
106
+ contiguous_sharded_stride: Tuple[int, ...]
107
+ padded_sharded_param_size: torch.Size # ND
108
+ sharded_post_forward_size: torch.Size # ND
109
+ contiguous_sharded_post_forward_stride: Tuple[int, ...]
110
+ _sharded_param_data: torch.Tensor # 1D
111
+ sharded_param: nn.Parameter # ND
112
+ _sharded_post_forward_param_data: Optional[torch.Tensor] # 1D
113
+ _sharded_post_forward_param: Optional[nn.Parameter] # ND
114
+ _unsharded_param: nn.Parameter # ND
115
+ _global_placements: Tuple[Placement, ...]
116
+ _global_size: torch.Size
117
+ _global_stride: Tuple[int, ...]
118
+ # DTensor attributes (only defined for DTensor `param`):
119
+ _tp_spec: DTensorSpec
120
+
121
+ def __init__(
122
+ self,
123
+ param: nn.Parameter,
124
+ module_info: ParamModuleInfo,
125
+ mesh_info: FSDPMeshInfo,
126
+ post_forward_mesh_info: Optional[FSDPMeshInfo],
127
+ device: torch.device,
128
+ mp_policy: MixedPrecisionPolicy,
129
+ ):
130
+ self._module_info: ParamModuleInfo = module_info
131
+ self.mesh_info = mesh_info
132
+ self.post_forward_mesh_info = post_forward_mesh_info
133
+ self.device = device
134
+ self._init_sharded_param(param, device)
135
+ if self.post_forward_mesh_info:
136
+ self._init_sharded_post_forward_param_metadata(param)
137
+ self.all_gather_output = torch.empty(0)
138
+ self._param_fqn: Optional[str] = None # prefixed from root module
139
+
140
+ @torch.no_grad()
141
+ def _init_sharded_param(self, param: nn.Parameter, device: torch.device):
142
+ if param.device != device and param.device.type != "meta":
143
+ raise AssertionError(
144
+ f"Expects the parameter to already be moved to device {device} but got {param.device}"
145
+ )
146
+ # TODO: Replace the sharded DTensor parameter construction logic with
147
+ # `distribute_tensor` after https://github.com/pytorch/pytorch/issues/116101
148
+ # TODO: Simplify the following sharded parameter padding logic after
149
+ # https://github.com/pytorch/pytorch/issues/113045
150
+ self.is_dtensor = isinstance(param, DTensor)
151
+ if self.is_dtensor:
152
+ self._tp_spec = cast(DTensor, param)._spec
153
+ if (
154
+ self.mesh_info.shard_mesh_dim != 0
155
+ or self.mesh_info.replicate_mesh_dim is not None
156
+ ):
157
+ raise NotImplementedError("Using TP with HSDP is not supported")
158
+ dp_mesh, tp_mesh = (self.mesh_info.mesh, self._tp_spec.mesh)
159
+ dp_global_mesh = _mesh_resources.get_parent_mesh(dp_mesh)
160
+ tp_global_mesh = _mesh_resources.get_parent_mesh(tp_mesh)
161
+ if dp_global_mesh != tp_global_mesh or (
162
+ dp_global_mesh is None or tp_global_mesh is None
163
+ ):
164
+ raise AssertionError(
165
+ "FSDP requires the DP and TP mesh to have the same parent mesh but got: \n"
166
+ f"DP's global mesh: {dp_global_mesh}\nTP's global mesh: {tp_global_mesh}"
167
+ )
168
+ self._global_mesh = dp_global_mesh
169
+ if len(self._tp_spec.placements) != 1:
170
+ raise NotImplementedError(
171
+ f"FSDP only supports 1D TP, not {self._tp_spec.placements}"
172
+ )
173
+ global_placements: List[Placement] = [Replicate(), Replicate()]
174
+ global_dp_mesh_dim = _mesh_resources.get_parent_mesh_dim(dp_mesh)
175
+ global_tp_mesh_dim = _mesh_resources.get_parent_mesh_dim(tp_mesh)
176
+ assert global_dp_mesh_dim is not None # mypy
177
+ assert global_tp_mesh_dim is not None # mypy
178
+ # TODO: Hard code FSDP + TP; need to support HSDP + TP
179
+ global_placements[global_dp_mesh_dim] = Shard(0)
180
+ global_placements[global_tp_mesh_dim] = self._tp_spec.placements[0]
181
+ self._global_placements = tuple(global_placements)
182
+ self._global_size = param.size()
183
+ self._global_stride = param.stride()
184
+ param_data = cast(DTensor, param)._local_tensor
185
+ else:
186
+ self._global_mesh = self.mesh_info.mesh
187
+ self._global_placements = (Shard(0),)
188
+ self._global_size = param.size()
189
+ self._global_stride = param.stride()
190
+ param_data = param
191
+ self._orig_size = param_data.size()
192
+ self._contiguous_orig_stride = make_contiguous_strides_for(self._orig_size)
193
+ shard_rank = self.mesh_info.shard_mesh_rank
194
+ shard_world_size = self.mesh_info.shard_mesh_size
195
+ chunks = _chunk_with_empty(param_data, shard_world_size, dim=0)
196
+ sharded_param = chunks[shard_rank]
197
+ self.sharded_size = _get_dim0_chunked_size(sharded_param, param_data.size())
198
+ self.contiguous_sharded_stride = make_contiguous_strides_for(self.sharded_size)
199
+ padded_sharded_size = chunks[0].size() # 0th always padded
200
+ padded_sharded_param = param_data.new_zeros(padded_sharded_size)
201
+ self.padded_sharded_param_size = padded_sharded_param.size()
202
+ if sharded_param.numel() > 0:
203
+ padded_sharded_param[: sharded_param.size(0)].copy_(sharded_param)
204
+ self._sharded_param_data = padded_sharded_param.view(-1)
205
+ self.sharded_param = nn.Parameter(
206
+ self.to_sharded_dtensor(padded_sharded_param[: sharded_param.size(0)])
207
+ )
208
+ self.sharded_param.requires_grad_(param.requires_grad)
209
+ # Let `param_data` be freed normally when its ref count reaches 0 when
210
+ # the `fully_shard` call returns to allow provided parameters to alias
211
+ self._setattr_on_modules(self.sharded_param)
212
+ self.sharded_state = ShardedState.SHARDED
213
+
214
+ def _init_sharded_post_forward_param_metadata(self, param: torch.Tensor) -> None:
215
+ mesh_info = self.post_forward_mesh_info
216
+ assert mesh_info is not None # mypy
217
+ param_data = param._local_tensor if isinstance(param, DTensor) else param
218
+ chunks = _chunk_with_empty(param_data, mesh_info.shard_mesh_size, dim=0)
219
+ self.sharded_post_forward_size = _get_dim0_chunked_size(
220
+ chunks[mesh_info.shard_mesh_rank], param_data.size()
221
+ )
222
+ self.contiguous_sharded_post_forward_stride = make_contiguous_strides_for(
223
+ self.sharded_post_forward_size
224
+ )
225
+
226
+ def init_dtype_attrs(self, mp_policy: MixedPrecisionPolicy):
227
+ param_dtype, reduce_dtype = (mp_policy.param_dtype, mp_policy.reduce_dtype)
228
+ self.orig_dtype = self.sharded_param.dtype
229
+ # Clamp `param_dtype` to `None` if no casting is required
230
+ if param_dtype == self.orig_dtype:
231
+ param_dtype = None
232
+ self.param_dtype = param_dtype
233
+ self.reduce_dtype = reduce_dtype
234
+ # None indicates that the mixed precision is not enabled
235
+
236
+ def init_all_gather_output(
237
+ self,
238
+ all_gather_input_numel: int,
239
+ world_size: int,
240
+ dtype: torch.dtype,
241
+ device: torch.device,
242
+ ):
243
+ if self.all_gather_output.numel() > 0:
244
+ return # already initialized
245
+ all_gather_output_size = torch.Size([all_gather_input_numel * world_size])
246
+ self.all_gather_output = torch.empty(
247
+ all_gather_output_size, dtype=dtype, device=device
248
+ )
249
+
250
+ def init_unsharded_param(self):
251
+ if hasattr(self, "_unsharded_param"):
252
+ return # already initialized
253
+ # For the default path (no post-all-gather), the all-gather output
254
+ # gives the unsharded parameter data directly
255
+ unsharded_param = torch.as_strided(
256
+ self.all_gather_output,
257
+ self._orig_size,
258
+ self._contiguous_orig_stride,
259
+ storage_offset=0,
260
+ )
261
+ if self.is_dtensor:
262
+ unsharded_param = _from_local_no_grad(
263
+ unsharded_param,
264
+ self._tp_spec.mesh,
265
+ self._tp_spec.placements,
266
+ self._global_size,
267
+ self._global_stride,
268
+ )
269
+ self._unsharded_param = nn.Parameter(unsharded_param)
270
+ self._unsharded_param.requires_grad_(self.sharded_param.requires_grad)
271
+
272
+ def to_sharded(self) -> None:
273
+ self._setattr_on_modules(self.sharded_param)
274
+ self.free_all_gather_output()
275
+ self.sharded_state = ShardedState.SHARDED
276
+
277
+ def to_sharded_post_forward(self) -> None:
278
+ if self.is_dtensor:
279
+ raise NotImplementedError(
280
+ "Resharding to smaller mesh with TP is not supported yet"
281
+ )
282
+ self._assert_in_states(ShardedState.UNSHARDED)
283
+ assert self.post_forward_mesh_info is not None # mypy
284
+ shard_world_size = self.post_forward_mesh_info.shard_mesh_size
285
+ if (numel := self.all_gather_output.numel()) % shard_world_size != 0:
286
+ _raise_assert_with_print(
287
+ f"All-gather output size ({numel}) must be divisible by the shard "
288
+ f"world size ({shard_world_size})"
289
+ )
290
+ shard_rank = self.post_forward_mesh_info.shard_mesh_rank
291
+ sharded_numel = numel // shard_world_size
292
+ self._sharded_post_forward_param_data = (
293
+ self.all_gather_output.narrow(0, sharded_numel * shard_rank, sharded_numel)
294
+ ).clone() # clone to be able to free all-gather output
295
+ sharded_post_forward_tensor = torch.as_strided(
296
+ self._sharded_post_forward_param_data,
297
+ size=self.sharded_post_forward_size,
298
+ stride=self.contiguous_sharded_post_forward_stride,
299
+ storage_offset=0,
300
+ )
301
+ self._sharded_post_forward_param = nn.Parameter(
302
+ self.to_sharded_post_forward_dtensor(sharded_post_forward_tensor)
303
+ )
304
+ self._setattr_on_modules(self._sharded_post_forward_param)
305
+ self.free_all_gather_output()
306
+ self.sharded_state = ShardedState.SHARDED_POST_FORWARD
307
+
308
+ def to_unsharded(self) -> None:
309
+ # Assume that the data has been allocated and all-gathered
310
+ set_requires_grad_if_needed(self.sharded_param, self._unsharded_param)
311
+ self._setattr_on_modules(self._unsharded_param)
312
+ if self.sharded_state == ShardedState.SHARDED_POST_FORWARD:
313
+ # The data is allocated in the default stream via the post-forward
314
+ # reshard and must be kept alive for the next all-gather copy-in.
315
+ # Since we call this method after the copy-out, the data's lifetime
316
+ # is ensured without further synchronization.
317
+ self._sharded_post_forward_param = None
318
+ self._sharded_post_forward_param_data = None # free
319
+ self.sharded_state = ShardedState.UNSHARDED
320
+
321
+ def _setattr_on_modules(self, param: nn.Parameter) -> None:
322
+ unsafe_setattr_param(
323
+ self._module_info.module, self._module_info.param_name, param
324
+ )
325
+ for shared_module, shared_param_name in zip(
326
+ self._module_info.shared_modules, self._module_info.shared_param_names
327
+ ):
328
+ unsafe_setattr_param(shared_module, shared_param_name, param)
329
+
330
+ def to_sharded_dtensor(self, tensor: torch.Tensor) -> DTensor:
331
+ """
332
+ Converts a local tensor representing either the sharded parameter or
333
+ sharded gradient to DTensor.
334
+ """
335
+ if tensor.shape != self.sharded_size:
336
+ _raise_assert_with_print(
337
+ f"Expects size {self.sharded_size} but got {tensor.shape}"
338
+ )
339
+ return _from_local_no_grad(
340
+ tensor,
341
+ self._global_mesh,
342
+ self._global_placements,
343
+ self._global_size,
344
+ self._global_stride,
345
+ )
346
+
347
+ def to_sharded_post_forward_dtensor(self, tensor: torch.Tensor) -> DTensor:
348
+ if tensor.shape != self.sharded_post_forward_size:
349
+ _raise_assert_with_print(
350
+ f"Expects size {self.sharded_post_forward_size} but got {tensor.shape}"
351
+ )
352
+ assert isinstance(self.post_forward_mesh_info, HSDPMeshInfo)
353
+ # TODO: Prefer this DTensor to be read-only and generalize the
354
+ # placement once we support TP.
355
+ return _from_local_no_grad(
356
+ tensor,
357
+ self.post_forward_mesh_info.mesh,
358
+ (Replicate(), Shard(0)),
359
+ self._global_size,
360
+ self._global_stride,
361
+ )
362
+
363
+ def alloc_all_gather_output(self) -> None:
364
+ unsafe_alloc_storage(self.all_gather_output)
365
+
366
+ def free_all_gather_output(self) -> None:
367
+ unsafe_free_storage(self.all_gather_output)
368
+
369
+ @property
370
+ def all_gather_input(self) -> torch.Tensor: # 1D
371
+ self._assert_in_states(ShardedState.SHARDED, ShardedState.SHARDED_POST_FORWARD)
372
+ if self.sharded_state == ShardedState.SHARDED:
373
+ return _to_dtype_if_needed(self._sharded_param_data, self.param_dtype)
374
+ elif self.sharded_state == ShardedState.SHARDED_POST_FORWARD:
375
+ return _to_dtype_if_needed(
376
+ cast(torch.Tensor, self._sharded_post_forward_param_data),
377
+ self.param_dtype,
378
+ )
379
+ return torch.empty(0) # mypy
380
+
381
+ @property
382
+ def unsharded_param(self) -> nn.Parameter: # ND
383
+ self._assert_in_states(ShardedState.UNSHARDED)
384
+ return self._unsharded_param
385
+
386
+ @property
387
+ def unsharded_grad_data(self) -> torch.Tensor:
388
+ grad = self.unsharded_param.grad
389
+ assert grad is not None, "Expects unsharded_param.grad to not be None"
390
+ return self._get_grad_inner_tensor(grad)
391
+
392
+ def _get_grad_inner_tensor(self, grad: torch.Tensor) -> torch.Tensor:
393
+ if self.is_dtensor:
394
+ if isinstance(grad, AsyncCollectiveTensor):
395
+ grad = grad.wait()
396
+ grad = cast(DTensor, grad)._local_tensor
397
+ return grad
398
+
399
+ def _assert_in_states(self, *states: ShardedState) -> None:
400
+ if self.sharded_state not in states:
401
+ _raise_assert_with_print(
402
+ f"Expects to be in one of {states}, not {self.sharded_state}"
403
+ )
404
+
405
+
406
+ # NOTE: Unsafe here refers to not checking whether the storage is already
407
+ # allocated or freed, respectively. We should be safe to use them since we
408
+ # explicitly manage the state transition.
409
+ def unsafe_alloc_storage(tensor: torch.Tensor) -> None:
410
+ # Skip the already-allocated check and assume that `tensor` is the base
411
+ # tensor to save CPU overhead
412
+ tensor.untyped_storage().resize_(tensor.numel() * tensor.itemsize)
413
+
414
+
415
+ def unsafe_free_storage(tensor: torch.Tensor) -> None:
416
+ # Skip the already-freed check to save CPU overhead
417
+ tensor.untyped_storage().resize_(0)
418
+
419
+
420
+ # NOTE: These bypass `nn.Module.__setattr__` checks, which incur non-trivial
421
+ # CPU overhead, if the module did not override it. For FSDP, we know we do not
422
+ # need those checks when transitioning between sharded/unsharded parameters.
423
+ def unsafe_setattr_param(
424
+ module: nn.Module, param_name: str, param: nn.Parameter
425
+ ) -> None:
426
+ if getattr(module.__setattr__, "__func__", None) is nn.Module.__setattr__:
427
+ module._parameters[param_name] = param
428
+ else: # slow path
429
+ setattr(module, param_name, param)
430
+
431
+
432
+ def set_requires_grad_if_needed(
433
+ src_tensor: torch.Tensor, dst_tensor: torch.Tensor
434
+ ) -> None:
435
+ # Only call `requires_grad_` if needed to avoid the Python <> C++ context
436
+ # switch overhead
437
+ if src_tensor.requires_grad != dst_tensor.requires_grad:
438
+ dst_tensor.requires_grad_(src_tensor.requires_grad)
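The ``unsafe_alloc_storage``/``unsafe_free_storage`` helpers above rely on resizing a tensor's untyped storage in place. Below is a minimal, hedged sketch of that trick; it is not part of the upstream file, and it only illustrates the behavior assumed by recent PyTorch builds.

import torch

buf = torch.empty(1024)
view = buf.view(32, 32)  # stays bound to the same storage object

buf.untyped_storage().resize_(0)  # free the backing memory, keep tensor metadata
assert buf.untyped_storage().size() == 0

buf.untyped_storage().resize_(buf.numel() * buf.itemsize)  # re-allocate (uninitialized)
view.fill_(1.0)  # the pre-existing view is usable again once the storage is refilled

Because the storage object itself is preserved across the free/alloc cycle, views taken before the free become valid again after re-allocation, which is what lets the code above keep the unsharded parameter as a view into the all-gather output while still freeing that buffer between uses.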
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param_group.py ADDED
@@ -0,0 +1,506 @@
1
+ import contextlib
2
+
3
+ from typing import Any, cast, Dict, List, NamedTuple, Optional, Set, Tuple
4
+
5
+ import torch
6
+ import torch.distributed as dist
7
+ import torch.nn as nn
8
+
9
+ from torch.autograd.graph import Node
10
+ from torch.distributed.fsdp._common_utils import _named_parameters_with_duplicates
11
+ from torch.utils._pytree import tree_flatten, tree_unflatten
12
+ from torch.utils.hooks import RemovableHandle
13
+ from ._fsdp_api import MixedPrecisionPolicy
14
+ from ._fsdp_collectives import (
15
+ AllGatherResult,
16
+ foreach_all_gather,
17
+ foreach_all_gather_copy_out,
18
+ foreach_reduce_scatter,
19
+ )
20
+ from ._fsdp_common import FSDPMeshInfo, HSDPMeshInfo, TrainingState
21
+ from ._fsdp_param import FSDPParam, ParamModuleInfo, ShardedState
22
+
23
+ _ModuleToHandleDict = Dict[nn.Module, RemovableHandle] # for state dict
24
+
25
+
26
+ """
27
+ [Note: Overlapping all-gather copy-in and all-gather]
28
+ For implicit forward prefetching, we want to overlap the next copy-in with the
29
+ current all-gather. We do so using a separate copy-in stream. However, since
30
+ we have the all-gather input as a view into the output, we must make sure to
31
+ copy into different memory from the current all-gather's output. Thus, we keep
32
+ a reference to the current all-gather's output and have the next FSDP parameter
33
+ group free it after its copy-in. Finally, we have the last FSDP state flush the
34
+ reference to avoid holding onto memory after forward.
35
+ """
36
+
37
+
38
+ class FSDPCommContext:
39
+ """This has the communication state shared across FSDP states/parameter groups."""
40
+
41
+ def init(self):
42
+ # Setting the all-gather/reduce-scatter streams to be higher priority
43
+ # can help avoid some issues where their copies in/out are delayed and
44
+ # block computation
45
+ high_priority = -1
46
+ # All-gather state and copy-in stream allow overlapping the next
47
+ # copy-in with the current all-gather in forward; copy-in overlaps with
48
+ # reduce-scatter in backward without the separate copy-in stream
49
+ self.all_gather_copy_in_stream = torch.cuda.Stream(priority=high_priority)
50
+ self.all_gather_state: Optional[AllGatherState] = None
51
+ # All-gather stream allows overlapping next all-gather with current
52
+ # forward compute
53
+ self.all_gather_stream = torch.cuda.Stream(priority=high_priority)
54
+ # Reduce-scatter stream gives separate execution "thread" for post-
55
+ # backward logic like pre/post-gradient division and reduce-scatter
56
+ self.reduce_scatter_stream = torch.cuda.Stream(priority=high_priority)
57
+ # Post-forward order for explicit backward prefetching
58
+ self.post_forward_order: List[FSDPParamGroup] = [] # will cause ref cycles
59
+
60
+ def get_all_gather_streams(
61
+ self, training_state: TrainingState
62
+ ) -> Tuple[torch.cuda.Stream, torch.cuda.Stream]:
63
+ if training_state in (TrainingState.FORWARD, TrainingState.PRE_BACKWARD):
64
+ # Use separate streams for implicit prefetching
65
+ return self.all_gather_copy_in_stream, self.all_gather_stream
66
+ current_stream = torch.cuda.current_stream()
67
+ return current_stream, current_stream
68
+
69
+
70
+ # See [Note: Overlapping all-gather copy-in and all-gather]
71
+ class AllGatherState(NamedTuple):
72
+ all_gather_result: AllGatherResult
73
+ event: torch.cuda.Event # all-gather copy-out
74
+
75
+
76
+ class FSDPParamGroup:
77
+ """This class represents a parameter group to communicate together."""
78
+
79
+ _orig_dtype: torch.dtype
80
+ _reduce_dtype: Optional[torch.dtype]
81
+
82
+ def __init__(
83
+ self,
84
+ params: List[nn.Parameter],
85
+ module: nn.Module,
86
+ mesh_info: FSDPMeshInfo,
87
+ post_forward_mesh_info: Optional[FSDPMeshInfo],
88
+ device: torch.device,
89
+ mp_policy: MixedPrecisionPolicy,
90
+ ):
91
+ self.module = module # permit ref cycle because 1:1 lifetime
92
+ param_module_infos = _get_param_module_infos(params, module)
93
+ self.fsdp_params = [
94
+ FSDPParam(
95
+ param, module_info, mesh_info, post_forward_mesh_info, device, mp_policy
96
+ )
97
+ for param, module_info in zip(params, param_module_infos)
98
+ ]
99
+ self.mesh_info = mesh_info
100
+ self.post_forward_mesh_info = post_forward_mesh_info
101
+ self.device = device
102
+ self.mp_policy = mp_policy
103
+ self._training_state = TrainingState.IDLE
104
+ # Group's sharded state always matches its parameters' sharded states
105
+ self._sharded_state = ShardedState.SHARDED
106
+ self._module_fqn: Optional[str] = None # prefixed from root module
107
+
108
+ # - Hook state
109
+ self._module_to_pre_save_state_dict_hook_handle: _ModuleToHandleDict = {}
110
+ self._module_to_pre_load_state_dict_hook_handle: _ModuleToHandleDict = {}
111
+
112
+ # - Communication and communication/computation overlap
113
+ self.comm_ctx = FSDPCommContext()
114
+ # Group's indices in the shared post-forward order
115
+ self._post_forward_indices: List[int] = []
116
+ # Used to avoid mistargeted backward prefetches when the module is used
117
+ # in forward but not in backward: for each forward, we record a tuple
118
+ # of the output's grad fns and later query the autograd engine whether
119
+ # any grad fn will execute in the current backward to know to prefetch.
120
+ self.all_forward_output_grad_fns: Set[Tuple[Node, ...]] = set()
121
+ # Whether to reduce-scatter or all-reduce gradients, respectively
122
+ # (can be set to false to save communication during gradient
123
+ # accumulation); all-reducing without reduce-scatter is disallowed
124
+ self.reduce_scatter_grads: bool = True
125
+ self.all_reduce_grads: bool = True
126
+
127
+ # - CUDA events for stream synchronization
128
+ # Holds the all-gather output buffer, sync objects, and metadata
129
+ self._all_gather_result: Optional[AllGatherResult] = None
130
+ # Holds the reduce-scatter view-out CUDA event that marks the end of
131
+ # the group's post-backward (e.g. reduce-scatter and div), which should
132
+ # be waited on at the end of backward
133
+ self._reduce_scatter_view_out_event: Optional[torch.cuda.Event] = None
134
+ # Holds the reshard-after-forward CUDA event when resharding to a
135
+ # different world size, which should be waited on in the next unshard
136
+ self._reshard_after_forward_event: Optional[torch.cuda.Event] = None
137
+
138
+ # Initialization #
139
+ def _init_mp_dtypes(self) -> None:
140
+ for fsdp_param in self.fsdp_params:
141
+ fsdp_param.init_dtype_attrs(self.mp_policy)
142
+ orig_dtypes = {fsdp_param.orig_dtype for fsdp_param in self.fsdp_params}
143
+ if len(orig_dtypes) != 1:
144
+ # This can be relaxed if we copy-out for the reduce-scatter
145
+ raise AssertionError(
146
+ f"FSDP expects uniform original parameter dtype but got {orig_dtypes}"
147
+ )
148
+ self._orig_dtype = next(iter(orig_dtypes))
149
+ reduce_dtypes = {fsdp_param.reduce_dtype for fsdp_param in self.fsdp_params}
150
+ if len(reduce_dtypes) != 1:
151
+ # This can be relaxed if we issue one reduce-scatter per reduce
152
+ # dtype (but we would need a way for users to specify multiple
153
+ # reduce dtypes)
154
+ raise AssertionError(
155
+ f"FSDP expects uniform reduce dtype but got {reduce_dtypes}"
156
+ )
157
+ self._reduce_dtype = next(iter(reduce_dtypes))
158
+
159
+ def _init_grad_divide_factors(self):
160
+ data_parallel_world_size = 1
161
+ data_parallel_world_size *= self.mesh_info.shard_mesh_size
162
+ if isinstance(self.mesh_info, HSDPMeshInfo):
163
+ data_parallel_world_size *= self.mesh_info.replicate_mesh_size
164
+ if self._reduce_dtype == torch.float32:
165
+ # Use NCCL's AVG op to divide after reduction since it is more
166
+ # performant and fp32 has sufficient precision
167
+ self._grad_divide_factors: Optional[Tuple[float, float]] = None
168
+ return
169
+ # For N data parallel workers, each worker computes g_i, and they
170
+ # collectively reduce (g_1 + ... + g_N) / N. To avoid overflow and
171
+ # underflow, we divide by ~sqrt(N) before and after the reduction.
172
+ factor: int = 1
173
+ while (
174
+ data_parallel_world_size % factor == 0
175
+ and data_parallel_world_size / factor > factor
176
+ ):
177
+ factor *= 2
178
+ factor = float(factor)
179
+ self._grad_divide_factors = (factor, data_parallel_world_size / factor)
180
+
181
+ def lazy_init(self):
182
+ param_names_on_meta = [
183
+ fsdp_param._param_fqn
184
+ for fsdp_param in self.fsdp_params
185
+ if fsdp_param.sharded_param.device.type == "meta"
186
+ ]
187
+ if param_names_on_meta:
188
+ raise RuntimeError(
189
+ "FSDP parameters should be materialized from meta device before training, "
190
+ f"but the following were still on meta device: {param_names_on_meta}\n"
191
+ "For example, call module.to_empty(device) to materialize to device and "
192
+ "call module.reset_parameters() on each module to initialize values."
193
+ )
194
+ # Initialize mixed precision attributes lazily in case the user changes
195
+ # the parameter dtypes after construction time but before forward
196
+ self._init_mp_dtypes()
197
+ self._init_grad_divide_factors()
198
+ self._register_state_dict_hooks()
199
+
200
+ # Runtime #
201
+ def unshard(self, async_op: bool = False):
202
+ if self._all_gather_result is not None: # already called, pending wait
203
+ return
204
+ if self.is_unsharded:
205
+ return # no-op
206
+ if self._reshard_after_forward_event is not None:
207
+ # Resharded parameter data is allocated in the default stream and
208
+ # used in the all-gather streams
209
+ self._wait_all_gather_streams_on_event(self._reshard_after_forward_event)
210
+ self._reshard_after_forward_event = None
211
+ self._all_gather_result = foreach_all_gather(
212
+ self.fsdp_params,
213
+ self._all_gather_process_group,
214
+ async_op,
215
+ *self.comm_ctx.get_all_gather_streams(self._training_state),
216
+ self.device,
217
+ )
218
+
219
+ def wait_for_unshard(self):
220
+ """
221
+ 1. In forward with implicit prefetching, to overlap the current copy-out
222
+ with the next all-gather, we save a reference to the current all-gather
223
+ result to free after the next copy-out.
224
+ 2. Otherwise (explicit prefetching or in backward), we free the
225
+ all-gather result immediately after the current copy-out since we can
226
+ already overlap the current copy-out with the previous reduce-scatter.
227
+ """
228
+ if not self._all_gather_result:
229
+ return # no preceding unshard
230
+ if self._training_state == TrainingState.FORWARD: # implicit prefetch
231
+ if prev_all_gather_state := self.comm_ctx.all_gather_state:
232
+ self._wait_all_gather_streams_on_event(prev_all_gather_state.event)
233
+ self.comm_ctx.all_gather_state = None # free the all-gather result
234
+ foreach_all_gather_copy_out(
235
+ self._all_gather_result, self.fsdp_params, self._all_gather_process_group
236
+ )
237
+ for fsdp_param in self.fsdp_params:
238
+ fsdp_param.init_unsharded_param() # no-op after 1st call
239
+ self._to_unsharded()
240
+ all_gather_copy_out_event = torch.cuda.Event()
241
+ all_gather_copy_out_event.record()
242
+ if self._training_state == TrainingState.FORWARD:
243
+ self.comm_ctx.all_gather_state = AllGatherState(
244
+ self._all_gather_result, all_gather_copy_out_event
245
+ )
246
+ else:
247
+ self._wait_all_gather_streams_on_event(all_gather_copy_out_event)
248
+ self._all_gather_result = None # free unless saved in `all_gather_state`
249
+
250
+ def _wait_all_gather_streams_on_event(self, event: torch.cuda.Event):
251
+ self.comm_ctx.all_gather_copy_in_stream.wait_event(event)
252
+ self.comm_ctx.all_gather_stream.wait_event(event)
253
+
254
+ def reshard(self):
255
+ if self._training_state == TrainingState.FORWARD:
256
+ if not self._reshard_after_forward:
257
+ return
258
+ if self._use_post_forward_mesh:
259
+ self._to_sharded_post_forward()
260
+ self._reshard_after_forward_event = torch.cuda.Event()
261
+ self._reshard_after_forward_event.record()
262
+ return
263
+ self._to_sharded()
264
+
265
+ def pre_forward(
266
+ self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
267
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
268
+ with torch.profiler.record_function("FSDP::pre_forward"):
269
+ self._training_state = TrainingState.FORWARD
270
+ self.unshard()
271
+ self.wait_for_unshard()
272
+ args, kwargs = self._register_post_backward_hook(args, kwargs)
273
+ return args, kwargs
274
+
275
+ def post_forward(self, module: nn.Module, input: Any, output: Any):
276
+ with torch.profiler.record_function("FSDP::post_forward"):
277
+ self.reshard()
278
+ self._record_post_forward()
279
+ self._training_state = TrainingState.IDLE
280
+ return output
281
+
282
+ def _record_post_forward(self) -> None:
283
+ # Since a group has one pre-backward unshard for each forward call
284
+ # before the backward, we record each usage (with multiplicity)
285
+ post_forward_index = len(self.comm_ctx.post_forward_order)
286
+ self.comm_ctx.post_forward_order.append(self)
287
+ self._post_forward_indices.append(post_forward_index)
288
+
289
+ def pre_backward(self, forward_grad_fns: Tuple[Any, ...], *unused: Any):
290
+ with torch.profiler.record_function("FSDP::pre_backward"):
291
+ self._training_state = TrainingState.PRE_BACKWARD
292
+ self.unshard() # no-op if prefetched
293
+ self.wait_for_unshard()
294
+ # Can be already removed if running multiple `backward`s
295
+ self.all_forward_output_grad_fns.discard(forward_grad_fns)
296
+ self._prefetch_unshard()
297
+
298
+ def post_backward(self, *unused: Any):
299
+ self._training_state = TrainingState.POST_BACKWARD
300
+ with torch.profiler.record_function("FSDP::post_backward_reshard"):
301
+ if not self.reduce_scatter_grads:
302
+ self.reshard()
303
+ return
304
+ # Save the autograd-computed gradients before resharding to only
305
+ # access the unsharded parameters when their data is present
306
+ fsdp_params_with_grad: List[FSDPParam] = []
307
+ unsharded_grads: List[torch.Tensor] = []
308
+ for fsdp_param in self.fsdp_params:
309
+ if fsdp_param.unsharded_param.grad is not None:
310
+ fsdp_params_with_grad.append(fsdp_param)
311
+ unsharded_grads.append(fsdp_param.unsharded_grad_data)
312
+ fsdp_param.unsharded_param.grad = None
313
+ self.reshard()
314
+ if len(fsdp_params_with_grad) == 0:
315
+ return
316
+ with torch.profiler.record_function("FSDP::post_backward_reduce"):
317
+ self._reduce_scatter_view_out_event = foreach_reduce_scatter(
318
+ fsdp_params_with_grad,
319
+ unsharded_grads,
320
+ self._reduce_scatter_process_group,
321
+ self.comm_ctx.reduce_scatter_stream,
322
+ self._orig_dtype,
323
+ self._reduce_dtype,
324
+ self.device,
325
+ self._grad_divide_factors,
326
+ )
327
+
328
+ def finalize_backward(self):
329
+ if self._reduce_scatter_view_out_event is not None:
330
+ torch.cuda.current_stream().wait_event(self._reduce_scatter_view_out_event)
331
+ self._reduce_scatter_view_out_event = None
332
+ self._training_state = TrainingState.IDLE
333
+ self._post_forward_indices.clear()
334
+ self.all_forward_output_grad_fns.clear()
335
+
336
+ def _prefetch_unshard(self):
337
+ if self._training_state == TrainingState.PRE_BACKWARD:
338
+ if not self._post_forward_indices:
339
+ # Can be cleared if running multiple `backward`s
340
+ return
341
+ curr_index = self._post_forward_indices.pop()
342
+ if (target_index := curr_index - 1) < 0:
343
+ return
344
+ target_fsdp_param_group = self.comm_ctx.post_forward_order[target_index]
345
+ if any(
346
+ torch._C._will_engine_execute_node(grad_fn) # type: ignore[attr-defined]
347
+ for grad_fns in target_fsdp_param_group.all_forward_output_grad_fns
348
+ for grad_fn in grad_fns
349
+ ):
350
+ with torch.profiler.record_function(
351
+ "FSDP::backward_prefetch"
352
+ ), target_fsdp_param_group.use_training_state(
353
+ TrainingState.PRE_BACKWARD
354
+ ):
355
+ target_fsdp_param_group.unshard()
356
+
357
+ # Utilities #
358
+ def _to_sharded(self):
359
+ if not self.is_sharded:
360
+ for fsdp_param in self.fsdp_params:
361
+ fsdp_param.to_sharded()
362
+ self._sharded_state = ShardedState.SHARDED
363
+
364
+ def _to_sharded_post_forward(self):
365
+ if not self.is_sharded_post_forward:
366
+ for fsdp_param in self.fsdp_params:
367
+ fsdp_param.to_sharded_post_forward()
368
+ self._sharded_state = ShardedState.SHARDED_POST_FORWARD
369
+
370
+ def _to_unsharded(self):
371
+ if not self.is_unsharded:
372
+ for fsdp_param in self.fsdp_params:
373
+ fsdp_param.to_unsharded()
374
+ self._sharded_state = ShardedState.UNSHARDED
375
+
376
+ @property
377
+ def is_sharded(self) -> bool:
378
+ return self._sharded_state == ShardedState.SHARDED
379
+
380
+ @property
381
+ def is_sharded_post_forward(self) -> bool:
382
+ return self._sharded_state == ShardedState.SHARDED_POST_FORWARD
383
+
384
+ @property
385
+ def is_unsharded(self) -> bool:
386
+ return self._sharded_state == ShardedState.UNSHARDED
387
+
388
+ @contextlib.contextmanager
389
+ def use_training_state(self, training_state: TrainingState):
390
+ old_training_state = self._training_state
391
+ self._training_state = training_state
392
+ try:
393
+ yield
394
+ finally:
395
+ self._training_state = old_training_state
396
+
397
+ # Hook Registration #
398
+ def _register_post_backward_hook(
399
+ self, args: Tuple[Any, ...], kwargs: Dict[str, Any]
400
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
401
+ if not torch.is_grad_enabled():
402
+ return args, kwargs
403
+ args_list, args_spec = tree_flatten(args)
404
+ kwargs_list, kwargs_spec = tree_flatten(kwargs)
405
+ args_kwargs_list = list(args_list) + list(kwargs_list)
406
+ inp_tensor_indices: List[int] = []
407
+ inp_tensors: List[torch.Tensor] = []
408
+ for i, obj in enumerate(args_kwargs_list):
409
+ if torch.is_tensor(obj) and obj.requires_grad:
410
+ inp_tensor_indices.append(i)
411
+ inp_tensors.append(obj)
412
+ if len(inp_tensors) == 0:
413
+ return args, kwargs # no tensors that require gradients
414
+ inp_tensors = RegisterPostBackwardFunction.apply(self, *inp_tensors)
415
+ for inp_tensor_idx, inp_tensor in zip(inp_tensor_indices, inp_tensors):
416
+ args_kwargs_list[inp_tensor_idx] = inp_tensor
417
+ args_list = args_kwargs_list[: len(args_list)]
418
+ kwargs_list = args_kwargs_list[len(args_list) :]
419
+ args = tree_unflatten(args_list, args_spec)
420
+ kwargs = tree_unflatten(kwargs_list, kwargs_spec)
421
+ return args, kwargs
422
+
423
+ def _register_state_dict_hooks(self) -> None:
424
+ assert len(self._module_to_pre_save_state_dict_hook_handle) == 0
425
+ assert len(self._module_to_pre_load_state_dict_hook_handle) == 0
426
+ modules_with_fsdp_params: Set[nn.Module] = {
427
+ fsdp_param._module_info.module for fsdp_param in self.fsdp_params
428
+ }
429
+
430
+ def to_sharded_hook(*args: Any, **kwargs: Any) -> None:
431
+ self._to_sharded()
432
+
433
+ for module in modules_with_fsdp_params:
434
+ self._module_to_pre_save_state_dict_hook_handle[
435
+ module
436
+ ] = module.register_state_dict_pre_hook(to_sharded_hook)
437
+ self._module_to_pre_load_state_dict_hook_handle[
438
+ module
439
+ ] = module._register_load_state_dict_pre_hook(to_sharded_hook)
440
+
441
+ # Properties #
442
+ @property
443
+ def _reshard_after_forward(self) -> bool:
444
+ return self.post_forward_mesh_info is not None
445
+
446
+ @property
447
+ def _use_post_forward_mesh(self) -> bool:
448
+ return (
449
+ self._reshard_after_forward
450
+ and self.mesh_info != self.post_forward_mesh_info
451
+ )
452
+
453
+ @property
454
+ def _all_gather_process_group(self) -> dist.ProcessGroup:
455
+ mesh_info = (
456
+ cast(FSDPMeshInfo, self.post_forward_mesh_info)
457
+ if self.is_sharded_post_forward
458
+ else self.mesh_info
459
+ )
460
+ assert isinstance(mesh_info, FSDPMeshInfo)
461
+ return mesh_info.shard_process_group
462
+
463
+ @property
464
+ def _reduce_scatter_process_group(self) -> dist.ProcessGroup:
465
+ mesh_info = self.mesh_info
466
+ assert isinstance(mesh_info, FSDPMeshInfo)
467
+ return mesh_info.shard_process_group
468
+
469
+
470
+ def _get_param_module_infos(
471
+ params: List[nn.Parameter], module: nn.Module
472
+ ) -> List[ParamModuleInfo]:
473
+ """
474
+ Shared parameter: lin1.weight = lin2.weight
475
+ Shared module: mlp.lin1 = mlp.lin2
476
+ We do not remove duplicates when traversing both modules and parameters to
477
+ find shared modules' parameters and shared parameters within a module.
478
+ """
479
+ params_set = set(params)
480
+ param_to_module_info: Dict[nn.Parameter, ParamModuleInfo] = {}
481
+ for _, submodule in module.named_modules(remove_duplicate=False):
482
+ for param_name, param in _named_parameters_with_duplicates(
483
+ submodule, recurse=False
484
+ ):
485
+ if param in params_set:
486
+ if param not in param_to_module_info:
487
+ param_to_module_info[param] = ParamModuleInfo(submodule, param_name)
488
+ else:
489
+ param_to_module_info[param].shared_modules.append(submodule)
490
+ param_to_module_info[param].shared_param_names.append(param_name)
491
+ if len(param_to_module_info) != len(params):
492
+ raise AssertionError(f"Some parameters are not in the module tree of {module}")
493
+ return [param_to_module_info[param] for param in params]
494
+
495
+
496
+ class RegisterPostBackwardFunction(torch.autograd.Function):
497
+ @staticmethod
498
+ def forward(ctx, param_group: FSDPParamGroup, *inputs: torch.Tensor):
499
+ # All tensors in `inputs` should require gradient
500
+ ctx.param_group = param_group
501
+ return inputs
502
+
503
+ @staticmethod
504
+ def backward(ctx, *grads: torch.Tensor):
505
+ ctx.param_group.post_backward()
506
+ return (None,) + grads
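``RegisterPostBackwardFunction`` above is an identity ``autograd.Function`` whose ``backward`` fires the group's post-backward once gradients flow back through the forward inputs. The following is a self-contained sketch of that pattern with a plain callback standing in for ``post_backward``; the names are illustrative, not upstream API.

import torch


class _RunAfterBackward(torch.autograd.Function):
    @staticmethod
    def forward(ctx, callback, *inputs):
        ctx.callback = callback
        return inputs  # identity on the tensor inputs

    @staticmethod
    def backward(ctx, *grads):
        ctx.callback()  # e.g. reshard parameters and reduce-scatter gradients
        return (None,) + grads  # no gradient for the callback argument


x = torch.randn(4, requires_grad=True)
(y,) = _RunAfterBackward.apply(lambda: print("post-backward ran"), x)
y.sum().backward()

Hooking ``backward`` of an identity function on the forward inputs ties the callback to the inputs' gradient computation, which is why the group above skips registration when no input tensor requires gradients.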
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_state.py ADDED
@@ -0,0 +1,246 @@
1
+ import functools
2
+
3
+ from typing import Any, Dict, List, Optional, Tuple
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from torch.autograd import Variable
8
+ from torch.autograd.graph import Node, register_multi_grad_hook
9
+ from torch.distributed._composable_state import (
10
+ _get_module_state,
11
+ _insert_module_state,
12
+ _State,
13
+ )
14
+ from torch.distributed.utils import _to_kwargs
15
+ from torch.utils._pytree import tree_flatten, tree_map
16
+ from torch.utils.hooks import RemovableHandle
17
+ from ._fsdp_api import MixedPrecisionPolicy
18
+ from ._fsdp_common import _cast_fp_tensor, TrainingState
19
+ from ._fsdp_param import FSDPParam
20
+ from ._fsdp_param_group import FSDPCommContext, FSDPParamGroup
21
+
22
+
23
+ class FSDPStateContext:
24
+ """This has state shared across FSDP states."""
25
+
26
+ def __init__(self):
27
+ # All FSDP states in the root state's module tree
28
+ self.all_states: List[FSDPState] = []
29
+ # Iteration's forward root runs the once-per-forward logic; this root
30
+ # may not be the overall root set by lazy initialization in cases where
31
+ # only a submodule runs forward (e.g. encoder-only for eval)
32
+ self.iter_forward_root: Optional[FSDPState] = None
33
+ # Final callback should only be queued once per backward
34
+ self.post_backward_final_callback_queued: bool = False
35
+ # Whether to finalize backward in this backward's final callback
36
+ self.is_last_backward: bool = True
37
+
38
+
39
+ class FSDPState(_State):
40
+ def __init__(self):
41
+ super().__init__()
42
+ self._fsdp_param_group: Optional[FSDPParamGroup] = None
43
+ self._is_root: Optional[bool] = None # root set during lazy init
44
+ self._state_ctx = FSDPStateContext()
45
+ self._comm_ctx = FSDPCommContext()
46
+ self._training_state: TrainingState = TrainingState.IDLE
47
+ self._pre_backward_hook_handles: List[RemovableHandle] = []
48
+
49
+ # Define a separate init since `__init__` is called in the contract
50
+ def init(
51
+ self, module: nn.Module, device: torch.device, mp_policy: MixedPrecisionPolicy
52
+ ) -> None:
53
+ _insert_module_state(module, self)
54
+ self._module = module
55
+ self._device = device
56
+ self._mp_policy = mp_policy
57
+ self._pre_forward_hook_handle = module.register_forward_pre_hook(
58
+ self._pre_forward, prepend=True, with_kwargs=True
59
+ )
60
+ self._post_forward_hook_handle = module.register_forward_hook(
61
+ self._post_forward, prepend=False
62
+ )
63
+
64
+ def _root_pre_forward(
65
+ self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
66
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
67
+ self._lazy_init()
68
+ if self._state_ctx.iter_forward_root is not None:
69
+ return args, kwargs
70
+ self._state_ctx.iter_forward_root = self
71
+ with torch.profiler.record_function("FSDP::root_pre_forward"):
72
+ # Wait for optimizer before implicitly prefetched all-gathers
73
+ current_stream = torch.cuda.current_stream()
74
+ self._comm_ctx.all_gather_copy_in_stream.wait_stream(current_stream)
75
+ self._comm_ctx.all_gather_stream.wait_stream(current_stream)
76
+ if self._device.type == "cuda":
77
+ with torch.profiler.record_function("FSDP::inputs_to_device"):
78
+ args_tuple, kwargs_tuple = _to_kwargs(
79
+ args, kwargs, self._device, False
80
+ ) # same as DDP
81
+ args, kwargs = args_tuple[0], kwargs_tuple[0]
82
+ return args, kwargs
83
+
84
+ def _lazy_init(self) -> None:
85
+ """
86
+ Lazy initialization represents when all modules' parallelisms have
87
+ finalized (e.g. FSDP has been applied to all desired modules). This
88
+ means that we can determine which state is the root, and we do so by
89
+ the 1st state to run forward.
90
+ """
91
+ if self._is_root is not None:
92
+ return # no-op: already initialized
93
+ self._is_root = True
94
+ root_module = self._module
95
+ for module_name, module in root_module.named_modules():
96
+ if (state := _get_module_fsdp_state(module)) is None:
97
+ continue
98
+ if module is not root_module:
99
+ if state._is_root is not None:
100
+ raise RuntimeError(
101
+ "FSDP state has already been lazily initialized for "
102
+ f"{module_name}\nFSDP requires running forward through "
103
+ "the root module first"
104
+ )
105
+ state._is_root = False
106
+ self._state_ctx.all_states.append(state)
107
+ if self._fsdp_param_group:
108
+ # For the root, do not reshard after forward since for training,
109
+ # the parameters would be freed and all-gathered immediately
110
+ self._fsdp_param_group.post_forward_mesh_info = None
111
+ self._init_fqns()
112
+ self._init_shared_state()
113
+ # Run parameter group lazy inits after initializing FQNs for improved
114
+ # error messages
115
+ for state in self._state_ctx.all_states:
116
+ if state._fsdp_param_group:
117
+ state._fsdp_param_group.lazy_init()
118
+
119
+ def _init_shared_state(self) -> None:
120
+ self._comm_ctx.init()
121
+ for state in self._state_ctx.all_states:
122
+ state._state_ctx = self._state_ctx
123
+ state._comm_ctx = self._comm_ctx
124
+ if fsdp_param_group := state._fsdp_param_group:
125
+ fsdp_param_group.comm_ctx = self._comm_ctx
126
+
127
+ def _init_fqns(self) -> None:
128
+ """Sets module and parameter FQN attributes for debugging."""
129
+ assert self._is_root
130
+ root_module = self._module
131
+ param_to_fsdp_param: Dict[nn.Parameter, FSDPParam] = {}
132
+ module_to_fsdp_param_group: Dict[nn.Module, FSDPParamGroup] = {}
133
+ for state in self._state_ctx.all_states:
134
+ if fsdp_param_group := state._fsdp_param_group:
135
+ for fsdp_param in fsdp_param_group.fsdp_params:
136
+ param_to_fsdp_param[fsdp_param.sharded_param] = fsdp_param
137
+ module_to_fsdp_param_group[fsdp_param_group.module] = fsdp_param_group
138
+ for param_name, param in root_module.named_parameters():
139
+ if param in param_to_fsdp_param:
140
+ param_to_fsdp_param[param]._param_fqn = param_name
141
+ for module_name, module in root_module.named_modules():
142
+ if module in module_to_fsdp_param_group:
143
+ module_to_fsdp_param_group[module]._module_fqn = module_name
144
+
145
+ def _pre_forward(
146
+ self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
147
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
148
+ # When composing with module-hook-based activation checkpointing, the
149
+ # pre-backward hook is responsible for the unshard
150
+ if self._training_state == TrainingState.PRE_BACKWARD:
151
+ return args, kwargs
152
+ self._training_state = TrainingState.FORWARD
153
+ args, kwargs = self._root_pre_forward(module, args, kwargs)
154
+ if self._mp_policy.cast_forward_inputs and self._mp_policy.param_dtype:
155
+ with torch.profiler.record_function("FSDP::cast_forward_inputs"):
156
+ cast_fn = functools.partial(
157
+ _cast_fp_tensor, self._mp_policy.param_dtype
158
+ )
159
+ args, kwargs = tree_map(cast_fn, args), tree_map(cast_fn, kwargs)
160
+ if self._fsdp_param_group:
161
+ args, kwargs = self._fsdp_param_group.pre_forward(module, args, kwargs)
162
+ return args, kwargs
163
+
164
+ def _post_forward(self, module: nn.Module, input: Any, output: Any) -> Any:
165
+ # When composing with module-hook-based activation checkpointing, the
166
+ # post-backward hook is responsible for the reshard
167
+ if self._training_state == TrainingState.PRE_BACKWARD:
168
+ return output
169
+ if self._fsdp_param_group:
170
+ output = self._fsdp_param_group.post_forward(module, input, output)
171
+ output = self._register_pre_backward_hook(output)
172
+ self._training_state = TrainingState.IDLE
173
+ if self._state_ctx.iter_forward_root is self:
174
+ if all_gather_state := self._comm_ctx.all_gather_state:
175
+ # Free the last all-gather result if needed; refer to
176
+ # [Note: Overlapping all-gather copy-in and all-gather]
177
+ self._comm_ctx.all_gather_copy_in_stream.wait_event(
178
+ all_gather_state.event
179
+ )
180
+ self._comm_ctx.all_gather_stream.wait_event(all_gather_state.event)
181
+ self._comm_ctx.all_gather_state = None # free the all-gather result
182
+ self._state_ctx.iter_forward_root = None
183
+ if self._mp_policy.output_dtype is not None:
184
+ with torch.profiler.record_function("FSDP::cast_forward_outputs"):
185
+ output = tree_map(
186
+ functools.partial(_cast_fp_tensor, self._mp_policy.output_dtype),
187
+ output,
188
+ )
189
+ return output
190
+
191
+ def _pre_backward(self, forward_grad_fns: Tuple[Node, ...], *unused: Any) -> None:
192
+ self._training_state = TrainingState.PRE_BACKWARD
193
+ self._register_root_post_backward_final_callback()
194
+ if self._fsdp_param_group:
195
+ self._fsdp_param_group.pre_backward(forward_grad_fns, *unused)
196
+
197
+ def _root_post_backward_final_callback(self) -> None:
198
+ with torch.profiler.record_function("FSDP::root_post_backward_callback"):
199
+ for state in self._state_ctx.all_states:
200
+ if state._fsdp_param_group and state._fsdp_param_group.is_unsharded:
201
+ # Run post-backward in case forward inputs did not require
202
+ # gradient so the autograd backward did not run
203
+ state._fsdp_param_group.post_backward()
204
+ if self._state_ctx.is_last_backward:
205
+ state._finalize_backward()
206
+ if self._state_ctx.is_last_backward:
207
+ self._comm_ctx.post_forward_order.clear()
208
+ self._state_ctx.post_backward_final_callback_queued = False
209
+
210
+ def _finalize_backward(self) -> None:
211
+ self._training_state = TrainingState.IDLE
212
+ for handle in self._pre_backward_hook_handles:
213
+ handle.remove()
214
+ self._pre_backward_hook_handles.clear()
215
+ if self._fsdp_param_group:
216
+ self._fsdp_param_group.finalize_backward()
217
+
218
+ def _register_pre_backward_hook(self, output: Any) -> Any:
219
+ if not torch.is_grad_enabled():
220
+ return output
221
+
222
+ flat_outputs, _ = tree_flatten(output)
223
+ tensors = tuple(t for t in flat_outputs if t.requires_grad)
224
+ if tensors:
225
+ grad_fns = tuple(t.grad_fn for t in tensors if t.grad_fn is not None)
226
+ pre_backward = functools.partial(self._pre_backward, grad_fns)
227
+ handle = register_multi_grad_hook(tensors, pre_backward, mode="any")
228
+ self._pre_backward_hook_handles.append(handle)
229
+ if self._fsdp_param_group:
230
+ self._fsdp_param_group.all_forward_output_grad_fns.add(grad_fns)
231
+ return output
232
+
233
+ def _register_root_post_backward_final_callback(self):
234
+ if self._state_ctx.post_backward_final_callback_queued:
235
+ return
236
+ self._state_ctx.post_backward_final_callback_queued = True
237
+ Variable._execution_engine.queue_callback(
238
+ self._root_post_backward_final_callback
239
+ )
240
+
241
+
242
+ def _get_module_fsdp_state(module: nn.Module) -> Optional[FSDPState]:
243
+ state = _get_module_state(module)
244
+ if isinstance(state, FSDPState):
245
+ return state
246
+ return None
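The pre-backward and finalization logic in this file hinges on two autograd facilities used above: ``register_multi_grad_hook(..., mode="any")`` to run just before a module's gradients are needed, and ``Variable._execution_engine.queue_callback`` to run once at the end of backward. Here is a minimal standalone sketch of how the two compose; it is illustrative only, and the callback is queued from inside the hook, as ``_pre_backward`` does.

import torch
from torch.autograd import Variable
from torch.autograd.graph import register_multi_grad_hook

lin = torch.nn.Linear(8, 8)
x = torch.randn(2, 8, requires_grad=True)
out = lin(x)


def pre_backward(*unused) -> None:
    print("any-mode hook: unshard/prefetch would happen here")
    # Queue a once-per-backward callback, mirroring
    # _register_root_post_backward_final_callback above.
    Variable._execution_engine.queue_callback(
        lambda: print("final callback: finalize_backward would happen here")
    )


handle = register_multi_grad_hook((out,), pre_backward, mode="any")
out.sum().backward()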
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/fully_shard.py ADDED
@@ -0,0 +1,246 @@
1
+ from typing import Any, cast, Optional, Union
2
+
3
+ import typing_extensions
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from torch.distributed._composable import contract
9
+ from torch.distributed._tensor import DeviceMesh, DTensor
10
+
11
+ from ._fsdp_api import MixedPrecisionPolicy
12
+ from ._fsdp_common import FSDPMeshInfo, HSDPMeshInfo
13
+ from ._fsdp_init import (
14
+ _get_device_from_mesh,
15
+ _get_managed_modules,
16
+ _get_managed_states,
17
+ _get_post_forward_mesh_info,
18
+ _init_default_fully_shard_mesh,
19
+ _move_states_to_device,
20
+ )
21
+ from ._fsdp_param_group import FSDPParamGroup
22
+ from ._fsdp_state import _get_module_fsdp_state, FSDPState
23
+
24
+
25
+ # The decorator adds a state object to `module` that can be accessed via
26
+ # `fully_shard.state(module)`. The state object and module are 1:1.
27
+ @contract(state_cls=FSDPState)
28
+ def fully_shard(
29
+ module: nn.Module,
30
+ *,
31
+ mesh: Optional[DeviceMesh] = None,
32
+ reshard_after_forward: Union[bool, int] = True,
33
+ mp_policy: MixedPrecisionPolicy = MixedPrecisionPolicy(),
34
+ ):
35
+ """
36
+ Shard module parameters across data parallel workers.
37
+
38
+ This function applies fully sharded data parallelism (FSDP) or a variant to
39
+ ``module``, a technique for memory savings at the cost of communication.
40
+ Parameters are sharded across ``mesh``, and in turn, so are their gradients
41
+ and optimizer states.
42
+
43
+ The sharded parameters are all-gathered to construct the unsharded
44
+ parameters for forward or backward computation. The unsharded parameters
45
+ are freed after computation to save memory. The gradients are reduced
46
+ across the mesh and divided by the mesh size for data parallelism. The
47
+ optimizer step runs on the sharded parameters.
48
+
49
+ Each call to ``fully_shard`` constructs one communication group that
50
+ includes the parameters in ``module.parameters()`` except those already
51
+ assigned to a group from a nested call. Each group's parameters and its
52
+ gradients are communicated together in one collective, respectively.
53
+ Constructing multiple groups across the model (e.g. "layer by layer")
54
+ allows for peak memory savings and communication/computation overlap.
55
+
56
+ Implementation-wise, the sharded parameters are represented as
57
+ :class:`DTensor` s, sharded on dim-0, and the unsharded parameters are
58
+ represented as :class:`Tensor` s. A module forward pre-hook all-gathers the
59
+ parameters, and a module forward hook frees them. Similar backward hooks
60
+ gather parameters and later free parameters/reduce gradients.
61
+
62
+ Args:
63
+ mesh (Optional[DeviceMesh]): This data parallel mesh defines the
64
+ sharding and device. If 1D, then parameters are fully sharded
65
+ across the 1D mesh (FSDP). If 2D, then parameters are sharded
66
+ across the 0th dim and replicated across the 1st dim (HSDP). The
67
+ mesh's device type gives the device type used for communication;
68
+ if a CUDA or CUDA-like device type, then we use the current device.
69
+ reshard_after_forward (Union[bool, int]): This controls the parameter
70
+ behavior after forward and can trade off memory and communication:
71
+ - If ``True``, then this reshards parameters after forward and
72
+ all-gathers in backward.
73
+ - If ``False``, then this keeps the unsharded parameters in memory
74
+ after forward and avoids the all-gather in backward.
75
+ - If an ``int``, then this represents the world size to reshard to
76
+ after forward. It should be a non-trivial divisor of the ``mesh``
77
+ shard dim size (i.e. excluding 1 and the dim size itself). A choice
78
+ may be the intra-node size (e.g. ``torch.cuda.device_count()``).
79
+ This allows the all-gather in backward to be over a smaller world
80
+ size at the cost of higher memory usage than setting to ``True``.
81
+ - The root FSDP state has its value specially set to ``False`` as a
82
+ heuristic since its parameters would typically be immediately
83
+ all-gathered for backward.
84
+ - After forward, the parameters registered to the module depend on
+ this: the registered parameters are the sharded parameters if
+ ``True``; unsharded parameters if ``False``; and the parameters
87
+ resharded to the smaller mesh otherwise. To modify the parameters
88
+ between forward and backward, the registered parameters must be the
89
+ sharded parameters. For ``False`` or an ``int``, this can be done
90
+ by manually resharding via :meth:`reshard`.
91
+ mp_policy (MixedPrecisionPolicy): This controls the mixed precision
92
+ policy, which offers parameter/reduction mixed precision for this
93
+ module. See :class:`MixedPrecisionPolicy` for details.
94
+ """
95
+ if isinstance(module, (nn.ModuleList, nn.ModuleDict)):
96
+ raise ValueError(
97
+ f"fully_shard does not support containers that do not implement forward: {module}"
98
+ )
99
+ mesh = mesh or _init_default_fully_shard_mesh()
100
+ if mesh.ndim not in (1, 2):
101
+ raise ValueError(f"fully_shard expects a 1D or 2D DeviceMesh but got {mesh}")
102
+ elif mesh.ndim == 1:
103
+ mesh_info = FSDPMeshInfo(mesh, shard_mesh_dim=0)
104
+ else:
105
+ mesh_info = HSDPMeshInfo(mesh, shard_mesh_dim=1, replicate_mesh_dim=0)
106
+ device = _get_device_from_mesh(mesh)
107
+ post_forward_mesh_info = _get_post_forward_mesh_info(
108
+ reshard_after_forward, mesh_info
109
+ )
110
+
111
+ state = fully_shard.state(module)
112
+ state.init(module, device, mp_policy)
113
+
114
+ managed_modules = _get_managed_modules(module)
115
+ params, buffers = _get_managed_states(managed_modules)
116
+ _move_states_to_device(params, buffers, device, mesh_info)
117
+ if params:
118
+ state._fsdp_param_group = FSDPParamGroup(
119
+ params, module, mesh_info, post_forward_mesh_info, device, mp_policy
120
+ )
121
+
122
+ # for dynamo
123
+ for module in managed_modules:
124
+ module._is_fsdp_managed_module = True # type: ignore[assignment]
125
+ module._fsdp_use_orig_params = True # type: ignore[assignment]
126
+
127
+ # Place FSDP leftmost for highest priority in the method resolution order
128
+ cls = module.__class__
129
+ dct = {"__deepcopy__": unimplemented_deepcopy}
130
+ new_cls = type(f"FSDP{cls.__name__}", (FSDP, cls), dct)
131
+ module.__class__ = new_cls
132
+ return module
133
+
134
+
135
+ def unimplemented_deepcopy(*args: Any, **kwargs: Any) -> typing_extensions.Never:
136
+ raise AssertionError(
137
+ "FSDP does not support deepcopy. Please use state dict for serialization."
138
+ )
139
+
140
+
141
+ class FSDP:
142
+ def __new__(cls, *args, **kwargs):
143
+ """
144
+ Override ``__new__`` to remove the FSDP class and directly construct
145
+ the original class for cases like indexing into a container module.
146
+ """
147
+ # Use index 2 since 0 is the dynamically constructed `FSDP<...>` class
148
+ # and index 1 is the `FSDP` class itself
149
+ orig_cls = cls.__mro__[2]
150
+ self = orig_cls.__new__(orig_cls, *args, **kwargs)
151
+ self.__init__(*args, **kwargs)
152
+ return self
153
+
154
+ def reshard(self) -> None:
155
+ """
156
+ Reshards the module's parameters, registering the sharded parameters
157
+ to the module and freeing the unsharded parameters if needed. This
158
+ method is *not* recursive.
159
+ """
160
+ state = self._get_fsdp_state()
161
+ if fsdp_param_group := state._fsdp_param_group:
162
+ fsdp_param_group.reshard()
163
+
164
+ def set_is_last_backward(self, is_last_backward: bool) -> None:
165
+ """
166
+ Sets whether the next backward is the last one, meaning that FSDP
167
+ should wait for gradient reduction to finish and clear internal data
168
+ structures used for explicit prefetching.
169
+ """
170
+ state = self._get_fsdp_state()
171
+ state._state_ctx.is_last_backward = is_last_backward
172
+
173
+ def set_requires_gradient_sync(
174
+ self, requires_gradient_sync: bool, recurse: bool = True
175
+ ) -> None:
176
+ """
177
+ Sets if the module should sync gradients. This can be used to implement
178
+ gradient accumulation without communication. For HSDP, this controls
179
+ both reduce-scatter and all-reduce together.
180
+
181
+ Args:
182
+ requires_gradient_sync (bool): Whether to reduce gradients for the
183
+ module's parameters.
184
+ recurse (bool): Whether to set for all submodules or just the
185
+ passed-in module.
186
+ """
187
+ for module in cast(nn.Module, self).modules():
188
+ if isinstance(module, FSDP):
189
+ state = module._get_fsdp_state()
190
+ if fsdp_param_group := state._fsdp_param_group:
191
+ fsdp_param_group.reduce_scatter_grads = requires_gradient_sync
192
+ fsdp_param_group.all_reduce_grads = requires_gradient_sync
193
+
194
+ def set_requires_all_reduce(self, requires_all_reduce: bool, recurse: bool = True):
195
+ """
196
+ Sets if the module should all-reduce gradients. This can be used to
197
+ implement gradient accumulation with only reduce-scatter but not
198
+ all-reduce for HSDP.
199
+ """
200
+ for module in cast(nn.Module, self).modules():
201
+ if isinstance(module, FSDP):
202
+ state = module._get_fsdp_state()
203
+ if fsdp_param_group := state._fsdp_param_group:
204
+ fsdp_param_group.all_reduce_grads = requires_all_reduce
205
+
206
+ def _get_fsdp_state(self) -> FSDPState:
207
+ if (state := _get_module_fsdp_state(cast(nn.Module, self))) is None:
208
+ raise AssertionError(f"No FSDP state found on {self}")
209
+ return state
210
+
211
+ def _apply(self, *args: Any, **kwargs: Any) -> Any:
212
+ # Reshard to ensure that sharded parameters are registered
213
+ self.reshard()
214
+ ret = super()._apply(*args, **kwargs) # type: ignore[misc]
215
+ state = self._get_fsdp_state()
216
+ if not (fsdp_param_group := state._fsdp_param_group):
217
+ return ret
218
+ # TODO: Remove this padding logic once DTensor pads the local tensor:
219
+ # https://github.com/pytorch/pytorch/issues/113045
220
+ with torch.no_grad():
221
+ for fsdp_param in fsdp_param_group.fsdp_params:
222
+ module_info = fsdp_param._module_info
223
+ new_param = getattr(module_info.module, module_info.param_name)
224
+ if new_param is not fsdp_param.sharded_param:
225
+ if torch.__future__.get_swap_module_params_on_conversion():
226
+ raise AssertionError(
227
+ "Expects swap_tensors to preserve object but got "
228
+ f"{new_param} instead of {fsdp_param.sharded_param}"
229
+ )
230
+ else:
231
+ raise AssertionError(
232
+ "Please set torch.__future__.set_swap_module_params_on_conversion(True) "
233
+ "to use _apply methods with FSDP"
234
+ )
235
+ local_tensor = new_param._local_tensor
236
+ padded_sharded_size = fsdp_param.padded_sharded_param_size
237
+ if local_tensor.size() != padded_sharded_size:
238
+ padded_local_tensor = local_tensor.new_zeros(padded_sharded_size)
239
+ padded_local_tensor[: local_tensor.size(0)].copy_(local_tensor)
240
+ local_tensor = padded_local_tensor
241
+ fsdp_param._sharded_param_data = local_tensor.view(-1)
242
+ assert isinstance(fsdp_param.sharded_param, DTensor) # mypy
243
+ fsdp_param.sharded_param._local_tensor = local_tensor[
244
+ : fsdp_param.sharded_size[0]
245
+ ]
246
+ return ret
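Below is a rough usage sketch of the ``fully_shard`` API defined above, applied layer by layer as its docstring suggests. It is not upstream code: it assumes a CUDA build with ``torch.distributed`` already initialized (e.g. under ``torchrun``), and the model, dtypes, and optimizer are illustrative choices.

import torch
import torch.nn as nn
from torch.distributed._composable.fsdp._fsdp_api import MixedPrecisionPolicy
from torch.distributed._composable.fsdp.fully_shard import fully_shard

# Assumes dist.init_process_group("nccl") has already run on each rank.
model = nn.Sequential(*[nn.Linear(1024, 1024) for _ in range(4)]).cuda()
mp = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32)

for layer in model:
    fully_shard(layer, mp_policy=mp)  # one communication group per layer
fully_shard(model, mp_policy=mp)  # root call picks up any remaining parameters

optim = torch.optim.AdamW(model.parameters(), lr=1e-3)
loss = model(torch.randn(8, 1024, device="cuda")).sum()
loss.backward()
optim.step()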
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py ADDED
@@ -0,0 +1,133 @@
1
+ import warnings
2
+ from typing import Callable, Iterable, Optional, Union
3
+
4
+ import torch
5
+ import torch.distributed as dist
6
+ import torch.nn as nn
7
+ from torch.distributed._composable.contract import contract
8
+ from torch.distributed._composable_state import _get_module_state, _insert_module_state
9
+ from torch.distributed.fsdp._common_utils import _FSDPState
10
+ from torch.distributed.fsdp._dynamo_utils import _annotate_modules_for_dynamo
11
+
12
+ from torch.distributed.fsdp._init_utils import (
13
+ _init_buffer_state,
14
+ _init_core_state,
15
+ _init_device_handle,
16
+ _init_ignored_module_states,
17
+ _init_param_handle_from_module,
18
+ _init_prefetching_state,
19
+ _init_process_group_state,
20
+ _init_runtime_state,
21
+ _init_state_dict_state,
22
+ HYBRID_SHARDING_STRATEGIES,
23
+ )
24
+ from torch.distributed.fsdp._runtime_utils import (
25
+ _register_post_forward_hook,
26
+ _register_pre_forward_hook,
27
+ _register_root_pre_forward_hook,
28
+ )
29
+ from torch.distributed.fsdp._state_dict_utils import _register_all_state_dict_hooks
30
+ from torch.distributed.fsdp._wrap_utils import _auto_wrap
31
+ from torch.distributed.fsdp.api import (
32
+ BackwardPrefetch,
33
+ CPUOffload,
34
+ MixedPrecision,
35
+ ShardingStrategy,
36
+ )
37
+ from torch.distributed.fsdp.wrap import _Policy
38
+
39
+
40
+ @contract(state_cls=_FSDPState)
41
+ def fully_shard(
42
+ module: nn.Module,
43
+ *,
44
+ process_group: Optional[dist.ProcessGroup] = None,
45
+ policy: Optional[_Policy] = None,
46
+ strategy: Optional[ShardingStrategy] = None,
47
+ mixed_precision: Optional[MixedPrecision] = None,
48
+ cpu_offload: Optional[CPUOffload] = None,
49
+ ignored_modules: Optional[Iterable[torch.nn.Module]] = None,
50
+ device_id: Optional[Union[int, torch.device]] = None,
51
+ param_init_fn: Optional[Callable[[nn.Module], None]] = None,
52
+ sync_module_states: bool = False,
53
+ forward_prefetch: bool = False,
54
+ ignored_states: Union[
55
+ Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]
56
+ ] = None,
57
+ ) -> nn.Module:
58
+ """
59
+ Applies ``FullyShardedDataParallel`` (FSDP) semantics to ``module``.
60
+ """
61
+ warnings.warn(
+ "``torch.distributed._composable.fully_shard`` is being deprecated. "
+ "You can continue to use the wrapper-based FSDP. "
+ "See usage in: https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/fully_sharded_data_parallel.py. "
+ "``torch.distributed._composable.fully_shard`` will be removed after PyTorch 2.5."
66
+ )
67
+
68
+ torch._C._log_api_usage_once("torch.distributed.fully_shard")
69
+ # Enforce the new auto wrap policy
70
+ if policy is not None and not isinstance(policy, _Policy):
71
+ raise ValueError(f"Expects a `_Policy` but got {policy}")
72
+ state = fully_shard.state(module)
73
+ state = _init_ignored_module_states(state, module, ignored_modules, ignored_states)
74
+ state = _init_device_handle(state, module, state._ignored_params, device_id)
75
+ _annotate_modules_for_dynamo(module, state._ignored_modules, True)
76
+ state = _init_process_group_state(state, process_group, strategy, policy)
77
+ if policy is not None:
78
+ root_kwargs = {
79
+ "process_group": process_group,
80
+ "strategy": strategy,
81
+ "mixed_precision": mixed_precision,
82
+ "cpu_offload": cpu_offload,
83
+ "ignored_modules": ignored_modules,
84
+ "device_id": device_id,
85
+ "param_init_fn": param_init_fn,
86
+ "sync_module_states": sync_module_states,
87
+ "forward_prefetch": forward_prefetch,
88
+ "ignored_states": ignored_states,
89
+ }
90
+ if strategy in HYBRID_SHARDING_STRATEGIES:
91
+ root_kwargs["process_group"] = (state.process_group, state._inter_node_pg)
92
+ _auto_wrap(
93
+ module,
94
+ policy,
95
+ state._ignored_modules,
96
+ state._ignored_params,
97
+ root_kwargs,
98
+ fully_shard,
99
+ )
100
+ state = _init_core_state(
101
+ state,
102
+ strategy or ShardingStrategy.FULL_SHARD,
103
+ mixed_precision,
104
+ cpu_offload,
105
+ limit_all_gathers=True,
106
+ use_orig_params=True,
107
+ backward_prefetch_limit=1,
108
+ forward_prefetch_limit=1,
109
+ )
110
+ state = _init_runtime_state(state)
111
+ state = _init_prefetching_state(
112
+ state, BackwardPrefetch.BACKWARD_PRE, forward_prefetch=forward_prefetch
113
+ )
114
+ state = _init_buffer_state(state, module)
115
+ state = _init_param_handle_from_module(
116
+ state, module, device_id, param_init_fn, sync_module_states
117
+ )
118
+ state = _init_state_dict_state(state)
119
+ _register_all_state_dict_hooks(state)
120
+ _register_pre_forward_hook(state, module)
121
+ _register_post_forward_hook(state, module)
122
+ _register_root_pre_forward_hook(state, module) # prepend last
123
+ # Always insert the state for the passed-in module even if it has no
124
+ # managed parameters, in which case it has no handles and does not appear
125
+ # in `_fully_sharded_module_to_handles`
126
+ _insert_module_state(module, state)
127
+ for submodule in module.modules():
128
+ if (
129
+ submodule in state._fully_sharded_module_to_handle
130
+ and _get_module_state(submodule) is None
131
+ ):
132
+ _insert_module_state(submodule, state)
133
+ return module
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (7 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc ADDED
Binary file (25 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc ADDED
Binary file (2.88 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc ADDED
Binary file (4.42 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (499 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc ADDED
Binary file (723 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc ADDED
Binary file (2.36 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__init__.py ADDED
@@ -0,0 +1,44 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ Expiration timers are set up on the same process as the agent and
9
+ used from your script to deal with stuck workers. When you go into
10
+ a code block that has the potential to get stuck, you can acquire
11
+ an expiration timer, which instructs the timer server to kill the
12
+ process if it does not release the timer by the self-imposed expiration
13
+ deadline.
14
+
15
+ Usage::
16
+
17
+ import torchelastic.timer as timer
18
+ import torchelastic.agent.server as agent
19
+
20
+ def main():
21
+ start_method = "spawn"
22
+ message_queue = mp.get_context(start_method).Queue()
23
+ server = timer.LocalTimerServer(message_queue, max_interval=0.01)
24
+ server.start() # non-blocking
25
+
26
+ spec = WorkerSpec(
27
+ fn=trainer_func,
28
+ args=(message_queue,),
29
+ ...<OTHER_PARAMS...>)
30
+ agent = agent.LocalElasticAgent(spec, start_method)
31
+ agent.run()
32
+
33
+ def trainer_func(message_queue):
34
+ timer.configure(timer.LocalTimerClient(message_queue))
35
+ with timer.expires(after=60): # 60 second expiry
36
+ # do some work
37
+
38
+ In the example above, if ``trainer_func`` takes more than 60 seconds to
39
+ complete, then the worker process is killed and the agent retries the worker group.
40
+ """
41
+
42
+ from .api import TimerClient, TimerRequest, TimerServer, configure, expires # noqa: F401
43
+ from .local_timer import LocalTimerClient, LocalTimerServer # noqa: F401
44
+ from .file_based_local_timer import FileTimerClient, FileTimerServer, FileTimerRequest # noqa: F401
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/file_based_local_timer.py ADDED
@@ -0,0 +1,333 @@
1
+ # Copyright (c) Meta Platforms, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import io
8
+ import json
9
+ import logging
10
+ import os
11
+ import select
12
+ import signal
13
+ import sys
14
+ import threading
15
+ import time
16
+ from typing import Callable, Dict, List, Optional, Set, Tuple
17
+
18
+ from torch.distributed.elastic.timer.api import TimerClient, TimerRequest
19
+
20
+ __all__ = ["FileTimerClient", "FileTimerRequest", "FileTimerServer"]
21
+
22
+ log = logging.getLogger(__name__)
23
+
24
+ class FileTimerRequest(TimerRequest):
25
+ """
26
+ Data object representing a countdown timer acquisition and release
27
+ that is used between the ``FileTimerClient`` and ``FileTimerServer``.
28
+ A negative ``expiration_time`` should be interpreted as a "release"
29
+ request.
30
+ ``signal`` is the signal to reap the worker process from the server
31
+ process.
32
+ """
33
+
34
+ __slots__ = ["version", "worker_pid", "scope_id", "expiration_time", "signal"]
35
+
36
+ def __init__(self, worker_pid: int, scope_id: str, expiration_time: float, signal: int = 0) -> None:
37
+ self.version = 1
38
+ self.worker_pid = worker_pid
39
+ self.scope_id = scope_id
40
+ self.expiration_time = expiration_time
41
+ self.signal = signal
42
+
43
+ def __eq__(self, other) -> bool:
44
+ if isinstance(other, FileTimerRequest):
45
+ return (
46
+ self.version == other.version
47
+ and self.worker_pid == other.worker_pid
48
+ and self.scope_id == other.scope_id
49
+ and self.expiration_time == other.expiration_time
50
+ and self.signal == other.signal
51
+ )
52
+ return False
53
+
54
+ def to_json(self) -> str:
55
+ return json.dumps(
56
+ {
57
+ "version": self.version,
58
+ "pid": self.worker_pid,
59
+ "scope_id": self.scope_id,
60
+ "expiration_time": self.expiration_time,
61
+ "signal": self.signal
62
+ },
63
+ )
64
+
65
+
66
+ class FileTimerClient(TimerClient):
67
+ """
68
+ Client side of ``FileTimerServer``. This client is meant to be used
69
+ on the same host that the ``FileTimerServer`` is running on and uses
70
+ pid to uniquely identify a worker.
71
+ This client uses a named pipe to send timer requests to the
72
+ ``FileTimerServer``. This client is a producer while the
73
+ ``FileTimerServer`` is a consumer. Multiple clients can work with
74
+ the same ``FileTimerServer``.
75
+
76
+ Args:
77
+
78
+ file_path: str, the path of a FIFO special file. ``FileTimerServer``
79
+ must have created it by calling os.mkfifo().
80
+
81
+ signal: signal, the signal to use to kill the process. Using a
82
+ negative or zero signal will not kill the process.
83
+ """
84
+ def __init__(self, file_path: str, signal=(signal.SIGKILL if sys.platform != "win32" else
85
+ signal.CTRL_C_EVENT)) -> None: # type: ignore[attr-defined]
86
+ super().__init__()
87
+ self._file_path = file_path
88
+ self.signal = signal
89
+
90
+ def _open_non_blocking(self) -> Optional[io.TextIOWrapper]:
91
+ try:
92
+ fd = os.open(self._file_path, os.O_WRONLY | os.O_NONBLOCK)
93
+ return os.fdopen(fd, "wt")
94
+ except Exception:
95
+ return None
96
+
97
+ def _send_request(self, request: FileTimerRequest) -> None:
98
+ # The server may have crashed or may not have started yet.
99
+ # In such a case, calling open() in blocking mode would block the client.
100
+ # To avoid that, open the file in non-blocking mode; an OSError will
101
+ # be raised if the server is not there.
102
+ file = self._open_non_blocking()
103
+ if file is None:
104
+ raise BrokenPipeError("Could not send the FileTimerRequest because FileTimerServer is not available.")
105
+ with file:
106
+ json_request = request.to_json()
107
+ # Writes of no more than select.PIPE_BUF bytes are guaranteed to be atomic.
108
+ if len(json_request) > select.PIPE_BUF:
109
+ raise RuntimeError(
110
+ f"FileTimerRequest larger than {select.PIPE_BUF} bytes "
111
+ f"is not supported: {json_request}"
112
+ )
113
+ file.write(json_request + "\n")
114
+
115
+ def acquire(self, scope_id: str, expiration_time: float) -> None:
116
+ self._send_request(
117
+ request=FileTimerRequest(
118
+ worker_pid=os.getpid(),
119
+ scope_id=scope_id,
120
+ expiration_time=expiration_time,
121
+ signal=self.signal
122
+ ),
123
+ )
124
+
125
+ def release(self, scope_id: str) -> None:
126
+ self._send_request(
127
+ request=FileTimerRequest(
128
+ worker_pid=os.getpid(),
129
+ scope_id=scope_id,
130
+ expiration_time=-1,
131
+ signal=0
132
+ ),
133
+ )
134
+
135
+
136
+ class FileTimerServer:
137
+ """
138
+ Server that works with ``FileTimerClient``. Clients are expected to be
139
+ running on the same host as the process that is running this server.
140
+ Each host in the job is expected to start its own timer server locally
141
+ and each server instance manages timers for local workers (running on
142
+ processes on the same host).
143
+
144
+ Args:
145
+
146
+ file_path: str, the path of a FIFO special file to be created.
147
+
148
+ max_interval: float, max interval in seconds for each watchdog loop.
149
+
150
+ daemon: bool, running the watchdog thread in daemon mode or not.
151
+ A daemon thread will not block a process to stop.
152
+ log_event: Callable[[Dict[str, str]], None], an optional callback for
153
+ logging the events in JSON format.
154
+ """
155
+
156
+ def __init__(
157
+ self,
158
+ file_path: str,
159
+ max_interval: float = 10,
160
+ daemon: bool = True,
161
+ log_event: Optional[Callable[[str, Optional[FileTimerRequest]], None]] = None
162
+ ) -> None:
163
+ self._file_path = file_path
164
+ self._max_interval = max_interval
165
+ self._daemon = daemon
166
+ self._timers: Dict[Tuple[int, str], FileTimerRequest] = {}
167
+ self._stop_signaled = False
168
+ self._watchdog_thread: Optional[threading.Thread] = None
169
+ if os.path.exists(self._file_path):
170
+ os.remove(self._file_path)
171
+ os.mkfifo(self._file_path)
172
+ # For test only. Count the number of requests received.
173
+ self._request_count = 0
174
+ # For test only. Process all requests and stop the server.
175
+ self._run_once = False
176
+ self._log_event = log_event if log_event is not None else lambda name, request: None
177
+
178
+
179
+ def start(self) -> None:
180
+ log.info(
181
+ "Starting %s..."
182
+ " max_interval=%s,"
183
+ " daemon=%s",
184
+ type(self).__name__, self._max_interval, self._daemon
185
+ )
186
+ self._watchdog_thread = threading.Thread(target=self._watchdog_loop, daemon=self._daemon)
187
+ log.info("Starting watchdog thread...")
188
+ self._watchdog_thread.start()
189
+ self._log_event("watchdog started", None)
190
+
191
+ def stop(self) -> None:
192
+ log.info("Stopping %s", type(self).__name__)
193
+ self._stop_signaled = True
194
+ if self._watchdog_thread:
195
+ log.info("Stopping watchdog thread...")
196
+ self._watchdog_thread.join(self._max_interval)
197
+ self._watchdog_thread = None
198
+ else:
199
+ log.info("No watchdog thread running, doing nothing")
200
+ if os.path.exists(self._file_path):
201
+ os.remove(self._file_path)
202
+ self._log_event("watchdog stopped", None)
203
+
204
+ def run_once(self) -> None:
205
+ self._run_once = True
206
+ if self._watchdog_thread:
207
+ log.info("Stopping watchdog thread...")
208
+ self._watchdog_thread.join()
209
+ self._watchdog_thread = None
210
+ else:
211
+ log.info("No watchdog thread running, doing nothing")
212
+ if os.path.exists(self._file_path):
213
+ os.remove(self._file_path)
214
+
215
+ def _watchdog_loop(self) -> None:
216
+ # Opening the pipe in blocking mode blocks the server thread.
217
+ # This is fine for the following reasons:
218
+ # 1. The no-client case usually does not happen.
219
+ # 2. We are running the watchdog loop in a separate daemon
220
+ # thread, which will not block the process from stopping.
221
+ with open(self._file_path) as fd:
222
+ while not self._stop_signaled:
223
+ try:
224
+ run_once = self._run_once
225
+ self._run_watchdog(fd)
226
+ if run_once:
227
+ break
228
+ except Exception:
229
+ log.exception("Error running watchdog")
230
+
231
+ def _run_watchdog(self, fd: io.TextIOWrapper) -> None:
232
+ timer_requests = self._get_requests(fd, self._max_interval)
233
+ self.register_timers(timer_requests)
234
+ now = time.time()
235
+ reaped_worker_pids = set()
236
+ for worker_pid, expired_timers in self.get_expired_timers(now).items():
237
+ log.info("Reaping worker_pid=[%s]. Expired timers: %s", worker_pid, self._get_scopes(expired_timers))
238
+ reaped_worker_pids.add(worker_pid)
239
+ # In case we have multiple expired timers, we find the first timer
240
+ # with a valid signal (>0) in the expiration time order.
241
+ expired_timers.sort(key=lambda timer: timer.expiration_time)
242
+ signal = 0
243
+ expired_timer = None
244
+ for timer in expired_timers:
245
+ self._log_event("timer expired", timer)
246
+ if timer.signal > 0:
247
+ signal = timer.signal
248
+ expired_timer = timer
249
+ break
250
+ if signal <= 0:
251
+ log.info("No signal specified with worker=[%s]. Do not reap it.", worker_pid)
252
+ continue
253
+ if self._reap_worker(worker_pid, signal):
254
+ log.info("Successfully reaped worker=[%s] with signal=%s", worker_pid, signal)
255
+ self._log_event("kill worker process", expired_timer)
256
+ else:
257
+ log.error("Error reaping worker=[%s]. Will retry on next watchdog.", worker_pid)
258
+ self.clear_timers(reaped_worker_pids)
259
+
260
+ def _get_scopes(self, timer_requests: List[FileTimerRequest]) -> List[str]:
261
+ return [r.scope_id for r in timer_requests]
262
+
263
+ def _get_requests(self, fd: io.TextIOWrapper, max_interval: float) -> List[FileTimerRequest]:
264
+ start = time.time()
265
+ requests = []
266
+ while not self._stop_signaled or self._run_once:
267
+ # For a named pipe, readline() blocks while at least one writer has the pipe open.
268
+ # It returns only when flush() is called on the writer side.
269
+ # Note that flush() is automatically called inside close().
270
+ # After the last writer closes, readline() no longer blocks.
271
+ # It will return an empty string when it is at end-of-file.
272
+ # Since the client side always opens the pipe, writes a message and closes
273
+ # the pipe immediately, the readline() call below does not block for long.
274
+ json_request = fd.readline()
275
+ if len(json_request) == 0:
276
+ if self._run_once:
277
+ break
278
+ time.sleep(min(max_interval, 1))
279
+ else:
280
+ request = json.loads(json_request)
281
+ pid = request["pid"]
282
+ scope_id = request["scope_id"]
283
+ expiration_time = request["expiration_time"]
284
+ signal = request["signal"]
285
+ requests.append(
286
+ FileTimerRequest(
287
+ worker_pid=pid, scope_id=scope_id, expiration_time=expiration_time, signal=signal
288
+ )
289
+ )
290
+ now = time.time()
291
+ if now - start > max_interval:
292
+ break
293
+ return requests
294
+
295
+ def register_timers(self, timer_requests: List[FileTimerRequest]) -> None:
296
+ for request in timer_requests:
297
+ pid = request.worker_pid
298
+ scope_id = request.scope_id
299
+ expiration_time = request.expiration_time
300
+ self._request_count += 1
301
+
302
+ key = (pid, scope_id)
303
+ # negative expiration is a proxy for a release call
304
+ if expiration_time < 0:
305
+ if key in self._timers:
306
+ del self._timers[key]
307
+ else:
308
+ self._timers[key] = request
309
+
310
+ def clear_timers(self, worker_pids: Set[int]) -> None:
311
+ for (pid, scope_id) in list(self._timers.keys()):
312
+ if pid in worker_pids:
313
+ del self._timers[(pid, scope_id)]
314
+
315
+ def get_expired_timers(self, deadline: float) -> Dict[int, List[FileTimerRequest]]:
316
+ # pid -> [timer_requests...]
317
+ expired_timers: Dict[int, List[FileTimerRequest]] = {}
318
+ for request in self._timers.values():
319
+ if request.expiration_time <= deadline:
320
+ expired_scopes = expired_timers.setdefault(request.worker_pid, [])
321
+ expired_scopes.append(request)
322
+ return expired_timers
323
+
324
+ def _reap_worker(self, worker_pid: int, signal: int) -> bool:
325
+ try:
326
+ os.kill(worker_pid, signal)
327
+ return True
328
+ except ProcessLookupError:
329
+ log.info("Process with pid=%s does not exist. Skipping", worker_pid)
330
+ return True
331
+ except Exception:
332
+ log.exception("Error terminating pid=%s", worker_pid)
333
+ return False
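
A minimal sketch of wiring the two halves together, assuming a POSIX host (the server calls ``os.mkfifo``) and using a hypothetical FIFO path; the server owns the pipe, and the client, typically created inside a worker process, sends acquire/release requests through it::

    import multiprocessing as mp
    import time

    from torch.distributed.elastic.timer import FileTimerClient, FileTimerServer

    PIPE_PATH = "/tmp/elastic_watchdog_pipe"   # hypothetical FIFO path

    def worker(pipe_path: str) -> None:
        client = FileTimerClient(pipe_path)
        # The deadline is an absolute timestamp: the server reaps this pid if
        # release() is not called before time.time() passes expiration_time.
        client.acquire("train_step", expiration_time=time.time() + 5)
        time.sleep(1)                          # bounded work
        client.release("train_step")

    if __name__ == "__main__":
        server = FileTimerServer(PIPE_PATH, max_interval=0.5)
        server.start()                         # watchdog runs in a daemon thread
        p = mp.Process(target=worker, args=(PIPE_PATH,))
        p.start()
        p.join()
        server.stop()
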
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/local_timer.py ADDED
@@ -0,0 +1,125 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+ import logging
7
+ import multiprocessing as mp
8
+ import os
9
+ import signal
10
+ import time
11
+ from queue import Empty
12
+ from typing import Any, Dict, List, Set, Tuple
13
+
14
+ from .api import RequestQueue, TimerClient, TimerRequest, TimerServer
15
+
16
+ __all__ = ['LocalTimerClient', 'MultiprocessingRequestQueue', 'LocalTimerServer']
17
+
18
+ log = logging.getLogger(__name__)
19
+
20
+ class LocalTimerClient(TimerClient):
21
+ """
22
+ Client side of ``LocalTimerServer``. This client is meant to be used
23
+ on the same host that the ``LocalTimerServer`` is running on and uses
24
+ pid to uniquely identify a worker. This is particularly useful in situations
25
+ where one spawns a subprocess (trainer) per GPU on a host with multiple
26
+ GPU devices.
27
+ """
28
+
29
+ def __init__(self, mp_queue):
30
+ super().__init__()
31
+ self._mp_queue = mp_queue
32
+
33
+ def acquire(self, scope_id, expiration_time):
34
+ pid = os.getpid()
35
+ acquire_request = TimerRequest(pid, scope_id, expiration_time)
36
+ self._mp_queue.put(acquire_request)
37
+
38
+ def release(self, scope_id):
39
+ pid = os.getpid()
40
+ release_request = TimerRequest(pid, scope_id, -1)
41
+ self._mp_queue.put(release_request)
42
+
43
+
44
+ class MultiprocessingRequestQueue(RequestQueue):
45
+ """
46
+ A ``RequestQueue`` backed by python ``multiprocessing.Queue``
47
+ """
48
+
49
+ def __init__(self, mp_queue: mp.Queue):
50
+ super().__init__()
51
+ self._mp_queue = mp_queue
52
+
53
+ def size(self) -> int:
54
+ return self._mp_queue.qsize()
55
+
56
+ def get(self, size, timeout: float) -> List[TimerRequest]:
57
+ requests = []
58
+ wait = timeout
59
+ for _ in range(0, size):
60
+ start = time.time()
61
+
62
+ try:
63
+ r = self._mp_queue.get(block=True, timeout=wait)
64
+ except Empty:
65
+ break
66
+
67
+ requests.append(r)
68
+ wait = wait - (time.time() - start)
69
+ if wait <= 0:
70
+ break
71
+
72
+ return requests
73
+
74
+
75
+ class LocalTimerServer(TimerServer):
76
+ """
77
+ Server that works with ``LocalTimerClient``. Clients are expected to be
78
+ subprocesses to the parent process that is running this server. Each host
79
+ in the job is expected to start its own timer server locally and each
80
+ server instance manages timers for local workers (running on processes
81
+ on the same host).
82
+ """
83
+
84
+ def __init__(
85
+ self, mp_queue: mp.Queue, max_interval: float = 60, daemon: bool = True
86
+ ):
87
+ super().__init__(MultiprocessingRequestQueue(mp_queue), max_interval, daemon)
88
+ self._timers: Dict[Tuple[Any, str], TimerRequest] = {}
89
+
90
+ def register_timers(self, timer_requests: List[TimerRequest]) -> None:
91
+ for request in timer_requests:
92
+ pid = request.worker_id
93
+ scope_id = request.scope_id
94
+ expiration_time = request.expiration_time
95
+
96
+ # negative expiration is a proxy for a release call
97
+ if expiration_time < 0:
98
+ self._timers.pop((pid, scope_id), None)
99
+ else:
100
+ self._timers[(pid, scope_id)] = request
101
+
102
+ def clear_timers(self, worker_ids: Set[int]) -> None:
103
+ for (pid, scope_id) in list(self._timers.keys()):
104
+ if pid in worker_ids:
105
+ self._timers.pop((pid, scope_id))
106
+
107
+ def get_expired_timers(self, deadline: float) -> Dict[Any, List[TimerRequest]]:
108
+ # pid -> [timer_requests...]
109
+ expired_timers: Dict[Any, List[TimerRequest]] = {}
110
+ for request in self._timers.values():
111
+ if request.expiration_time <= deadline:
112
+ expired_scopes = expired_timers.setdefault(request.worker_id, [])
113
+ expired_scopes.append(request)
114
+ return expired_timers
115
+
116
+ def _reap_worker(self, worker_id: int) -> bool:
117
+ try:
118
+ os.kill(worker_id, signal.SIGKILL)
119
+ return True
120
+ except ProcessLookupError:
121
+ log.info("Process with pid=%s does not exist. Skipping", worker_id)
122
+ return True
123
+ except Exception:
124
+ log.exception("Error terminating pid=%s", worker_id)
125
+ return False
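
The local variant can also be exercised directly, without starting the watchdog thread; a small sketch (``TimerRequest`` comes from the ``api`` module re-exported by the package ``__init__`` and, as in ``LocalTimerClient`` above, takes ``worker_id``, ``scope_id`` and ``expiration_time`` positionally)::

    import multiprocessing as mp
    import time

    from torch.distributed.elastic.timer import LocalTimerServer
    from torch.distributed.elastic.timer.api import TimerRequest

    server = LocalTimerServer(mp.Queue(), max_interval=0.1)

    # A timer whose deadline is already in the past counts as expired.
    server.register_timers([TimerRequest(1234, "train_step", time.time() - 1)])
    expired = server.get_expired_timers(time.time())
    print(list(expired.keys()))    # [1234] -- pids with at least one expired scope
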
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (462 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/batchnorm.cpython-310.pyc ADDED
Binary file (4.25 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/checkpoint.cpython-310.pyc ADDED
Binary file (10.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/copy.cpython-310.pyc ADDED
Binary file (3.16 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/dependency.cpython-310.pyc ADDED
Binary file (2.07 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/microbatch.cpython-310.pyc ADDED
Binary file (7.77 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/phony.cpython-310.pyc ADDED
Binary file (1.61 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipe.cpython-310.pyc ADDED
Binary file (16.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipeline.cpython-310.pyc ADDED
Binary file (6.46 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/stream.cpython-310.pyc ADDED
Binary file (3.36 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.38 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/worker.cpython-310.pyc ADDED
Binary file (4.15 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__init__.py ADDED
@@ -0,0 +1,164 @@
1
+ # Copyright 2019 Kakao Brain
2
+ #
3
+ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+ """A helper to roughly balance a sequential module.
8
+
9
+ Usage::
10
+
11
+ import torch
12
+ from torch.distributed.pipeline.sync import Pipe
13
+ from torch.distributed.pipeline.sync.balance import balance_by_time
14
+
15
+ sample = torch.empty(128, 3, 224, 224)
16
+ balance = balance_by_time(torch.cuda.device_count(), model, sample)
17
+
18
+ pipe = Pipe(model, balance, chunks=8)
19
+
20
+ """
21
+ from typing import Any, List, Union, Sequence
22
+
23
+ import torch
24
+ from torch import Tensor
25
+ import torch.nn as nn
26
+
27
+ from . import blockpartition
28
+ from .profile import profile_sizes, profile_times
29
+
30
+ __all__ = ["balance_by_time", "balance_by_size"]
31
+
32
+
33
+ Device = Union[torch.device, int, str]
34
+
35
+ Tensors = Sequence[Tensor]
36
+ TensorOrTensors = Union[Tensor, Tensors]
37
+
38
+
39
+ def balance_cost(cost: List[int], partitions: int) -> List[int]:
40
+ partitioned = blockpartition.solve(cost, partitions)
41
+ return [len(p) for p in partitioned]
42
+
43
+
44
+ def balance_by_time(
45
+ partitions: int,
46
+ module: nn.Sequential,
47
+ sample: Union[List[Any], Tensor],
48
+ *,
49
+ timeout: float = 1.0,
50
+ device: Device = torch.device("cuda"),
51
+ ) -> List[int]:
52
+ """Naive automatic balancing by elapsed time per layer.
53
+ ::
54
+
55
+ sample = torch.empty(128, 3, 224, 224)
56
+ balance = balance_by_time(torch.cuda.device_count(), model, sample)
57
+ pipe = Pipe(model, balance, chunks=8)
58
+
59
+ Args:
60
+ partitions (int):
61
+ intended number of partitions
62
+ module (torch.nn.Sequential):
63
+ sequential module to be partitioned
64
+ sample (torch.Tensor):
65
+ example input with arbitrary batch size
66
+
67
+ Keyword Args:
68
+ timeout (float):
69
+ profiling iterates again if the timeout (in second) is not exceeded
70
+ (default: ``1.0``)
71
+ device ('cpu' or 'cuda' device):
72
+ CPU or CUDA device where each layer is profiled (default: the
73
+ current CUDA device)
74
+
75
+ Returns:
76
+ A list of number of layers in each partition. Use it for the `balance`
77
+ parameter of :class:`~torchpipe.Pipe`.
78
+
79
+ .. note::
80
+ `module` and `sample` must be placed on the same device.
81
+
82
+ """
83
+ times = profile_times(module, sample, timeout, torch.device(device))
84
+ return balance_cost(times, partitions)
85
+
86
+
87
+ def balance_by_size(
88
+ partitions: int,
89
+ module: nn.Sequential,
90
+ input: Union[List[Any], Tensor],
91
+ *,
92
+ chunks: int = 1,
93
+ param_scale: float = 2.0,
94
+ device: Device = torch.device("cuda"),
95
+ ) -> List[int]:
96
+ """Naive automatic balancing by CUDA memory usage per layer.
97
+
98
+ During training, required memory for parameters depends on which optimizer
99
+ is used. Optimizers may use buffers for each parameter to track
100
+ optimization statistics internally, such as momentum buffer in SGD.
101
+
102
+ To get more reliable size based balance, you should specify `param_scale`
103
+ with regard to your optimizer. The default `param_scale` is 2 instead of 1
104
+ due to gradient accumulation which is necessary for every optimizer.
105
+
106
+ Follow this guide to choose correct `param_scale` for typical optimizers:
107
+
108
+ ========= ============= =========================================
109
+ Optimizer `param_scale` Internal State
110
+ ========= ============= =========================================
111
+ SGD 2--3 (momentum_buffer)
112
+ Adam 4--5 exp_avg, exp_avg_sq, (max_exp_avg_sq)
113
+ Adadelta 4 square_avg, acc_delta
114
+ Adagrad 3 sum
115
+ RMSprop 3--5 square_avg, (momentum_buffer), (grad_avg)
116
+ ========= ============= =========================================
117
+
118
+ Here's a simple example with the Adam optimizer::
119
+
120
+ balance = balance_by_size(
121
+ torch.cuda.device_count(),
122
+ model,
123
+
124
+ # Same size with mini-batch to train
125
+ torch.empty(1024, 3, 224, 224),
126
+
127
+ # Number of micro-batches to train with Pipe
128
+ chunks=8,
129
+
130
+ # 4 for Adam
131
+ param_scale=4.0,
132
+ )
133
+
134
+ pipe = Pipe(model, balance, chunks=8)
135
+ adam = Adam(pipe.parameters())
136
+
137
+ Args:
138
+ partitions (int):
139
+ intended number of partitions
140
+ module (torch.nn.Sequential):
141
+ sequential module to be partitioned
142
+ input (torch.Tensor):
143
+ example mini-batch with the same size to train
144
+
145
+ Keyword Args:
146
+ chunks (int):
147
+ number of micro-batches will be used to train (default: ``1``)
148
+ param_scale (float):
149
+ how many copies of parameters would be allocated for training. It
150
+ depends on optimizer. See the above guide. (default: ``2.0``)
151
+ device ('cuda' device):
152
+ CUDA device where each layer is profiled (default: the current CUDA
153
+ device)
154
+
155
+ Returns:
156
+ A list of number of layers in each partition. Use it for the `balance`
157
+ parameter of :class:`~torchpipe.Pipe`.
158
+
159
+ .. note::
160
+ `module` and `input` must be placed on the same CUDA device.
161
+
162
+ """
163
+ sizes = profile_sizes(module, input, chunks, param_scale, torch.device(device))
164
+ return balance_cost(sizes, partitions)
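
A runnable sketch of the time-based balancer on CPU (no GPU required); the import path follows the ``_balance`` package this file lives in, and the printed split is only illustrative because the profile is timing-dependent::

    import torch
    import torch.nn as nn

    from torch.distributed.pipeline.sync._balance import balance_by_time

    model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64), nn.ReLU())
    sample = torch.empty(32, 64)

    # Profile each layer for up to 0.5s on CPU, then split the four layers into
    # two partitions with roughly equal elapsed time.
    balance = balance_by_time(2, model, sample, timeout=0.5, device="cpu")
    print(balance)    # e.g. [2, 2] -- number of layers per partition
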
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/blockpartition.py ADDED
@@ -0,0 +1,95 @@
1
+ # Copyright 2019 Kakao Brain
2
+ #
3
+ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+ """Implements "Block Partitions of Sequences" by Imre Bárány et al.
8
+
9
+ Paper: https://arxiv.org/pdf/1308.2452.pdf
10
+
11
+ """
12
+ from typing import Iterator, List, Tuple
13
+
14
+ __all__ = ["solve"]
15
+
16
+
17
+ def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]:
18
+ """Splits a sequence into several partitions to minimize variance for each
19
+ partition.
20
+
21
+ The result might not be optimal. However, it can be done only in O(kn³),
22
+ where k is the number of partitions and n is the length of the sequence.
23
+
24
+ """
25
+ if partitions < 1:
26
+ raise ValueError(f"partitions must be a positive integer ({partitions} < 1)")
27
+
28
+ n = len(sequence)
29
+ if n < partitions:
30
+ raise ValueError(f"sequence is shorter than intended partitions ({n} < {partitions})")
31
+
32
+ # Normalize the sequence in [0, 1].
33
+ minimum = min(sequence)
34
+ maximum = max(sequence) - minimum
35
+
36
+ normal_sequence: List[float]
37
+ if maximum == 0:
38
+ normal_sequence = [0 for _ in sequence]
39
+ else:
40
+ normal_sequence = [(x - minimum) / maximum for x in sequence]
41
+
42
+ splits = [n // partitions * (x + 1) for x in range(partitions - 1)] + [n]
43
+
44
+ def block_size(i: int) -> float:
45
+ start = splits[i - 1] if i > 0 else 0
46
+ stop = splits[i]
47
+ return sum(normal_sequence[start:stop])
48
+
49
+ def leaderboard() -> Iterator[Tuple[float, int]]:
50
+ return ((block_size(i), i) for i in range(partitions))
51
+
52
+ while True:
53
+ """
54
+ (1) Fix p ∈ [k] with M(P) = bp. So Bp is a maximal block of P.
55
+ """
56
+ # max_size: M(P)
57
+ max_size, p = max(leaderboard())
58
+
59
+ while True:
60
+ """
61
+ (2) If M(P) ≤ m(P) + 1, then stop.
62
+ """
63
+ # min_size: m(P)
64
+ min_size, q = min(leaderboard())
65
+
66
+ if max_size <= min_size + 1:
67
+ return [sequence[i:j] for i, j in zip([0] + splits[:-1], splits)]
68
+
69
+ """
70
+ (3) If M(P) > m(P) + 1, then let m(P) = bq for the q ∈ [k] which is
71
+ closest to p (ties broken arbitrarily). Thus Bq is a minimal block
72
+ of P. Let Bh be the block next to Bq between Bp and Bq. (Note that
73
+ Bh is a non-empty block: if it were, then m(P) = 0 and we should
74
+ have chosen Bh instead of Bq.)
75
+ """
76
+ if p < q:
77
+ """
78
+ So either p < q and then h = q−1 and we define P ∗ by moving
79
+ the last element from Bh = Bq−1 to Bq,
80
+ """
81
+ h = q - 1
82
+ splits[h] -= 1
83
+ else:
84
+ """
85
+ or q < p, and then h = q + 1 and P ∗ is obtained by moving the
86
+ first element of Bh = Bq+1 to Bq.
87
+ """
88
+ h = q + 1
89
+ splits[q] += 1
90
+
91
+ """
92
+ Set P = P ∗ . If p = h, then go to (1), else go to (2).
93
+ """
94
+ if p == h:
95
+ break
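
The partitioner can be tried in isolation; a short sketch with six increasing costs, following the splitting logic above::

    from torch.distributed.pipeline.sync._balance import blockpartition

    # Split six per-layer costs into two contiguous blocks with similar sums.
    print(blockpartition.solve([1, 2, 3, 4, 5, 6], partitions=2))
    # -> [[1, 2, 3, 4], [5, 6]]  (block sums 10 and 11)
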
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/profile.py ADDED
@@ -0,0 +1,116 @@
1
+ # Copyright 2019 Kakao Brain
2
+ #
3
+ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+ """Per-layer profilers."""
8
+ import copy
9
+ import time
10
+ from typing import Any, Generator, List, Union, Sequence
11
+
12
+ import torch
13
+ from torch import Tensor
14
+ import torch.nn as nn
15
+
16
+ from ..microbatch import Batch
17
+
18
+ __all__: List[str] = []
19
+
20
+
21
+ Device = Union[torch.device, int, str]
22
+
23
+ Tensors = Sequence[Tensor]
24
+ TensorOrTensors = Union[Tensor, Tensors]
25
+
26
+
27
+ def layerwise_sandbox(module: nn.Sequential, device: torch.device,) -> Generator[nn.Module, None, None]:
28
+ """Copies layers for ease to profile. It doesn't modify the given
29
+ module.
30
+ """
31
+ for layer in module:
32
+ layer_copy = copy.deepcopy(layer)
33
+ layer_copy.to(device)
34
+ layer_copy.train()
35
+ yield layer_copy
36
+
37
+
38
+ def detach(batch: Batch) -> None:
39
+ """Detaches from autograd graph."""
40
+ for i, x in enumerate(batch):
41
+ batch[i] = x.detach().requires_grad_(x.requires_grad)
42
+
43
+
44
+ def profile_times(module: nn.Sequential, sample: Union[List[Any], Tensor], timeout: float, device: torch.device,) -> List[int]:
45
+ """Profiles elapsed times per layer."""
46
+ if any(p.grad is not None for p in module.parameters()):
47
+ raise ValueError("some parameter already has gradient")
48
+
49
+ _batch = Batch(sample)
50
+ for i, x in enumerate(_batch):
51
+ _batch[i] = x.detach().to(device).requires_grad_(x.requires_grad)
52
+
53
+ time_bufs: List[List[float]] = [[] for _ in module]
54
+ begun_at = time.time()
55
+
56
+ while time.time() - begun_at < timeout:
57
+ batch = _batch
58
+
59
+ for i, layer in enumerate(layerwise_sandbox(module, device)):
60
+ detach(batch)
61
+
62
+ if device.type == "cuda":
63
+ torch.cuda.synchronize(device)
64
+ tick = time.time()
65
+
66
+ # Forward
67
+ batch = batch.call(layer)
68
+
69
+ # Backward
70
+ backward_tensors = tuple(y for y in batch if y.requires_grad)
71
+ if backward_tensors:
72
+ torch.autograd.backward(backward_tensors, backward_tensors)
73
+
74
+ if device.type == "cuda":
75
+ torch.cuda.synchronize(device)
76
+ tock = time.time()
77
+
78
+ time_bufs[i].append(tock - tick)
79
+
80
+ us = 1_000_000
81
+ return [sum(int(t * us) for t in buf) for buf in time_bufs]
82
+
83
+
84
+ def profile_sizes(
85
+ module: nn.Sequential, input: Union[List[Any], Tensor], chunks: int, param_scale: float, device: torch.device,
86
+ ) -> List[int]:
87
+ """Profiles CUDA memory usage per layer."""
88
+ if device.type != "cuda":
89
+ raise ValueError("size profiler supports only CUDA device")
90
+
91
+ batch = Batch(input)
92
+ sizes: List[int] = []
93
+
94
+ latent_scale = batch[0].size(0) / chunks
95
+ for i, x in enumerate(batch):
96
+ batch[i] = x[:1].detach().to(device).requires_grad_(x.requires_grad)
97
+
98
+ for layer in layerwise_sandbox(module, device):
99
+ detach(batch)
100
+
101
+ # Detect memory usage at forward.
102
+ torch._C._cuda_clearCublasWorkspaces()
103
+ memory_before = torch.cuda.memory_allocated(device)
104
+ batch = batch.call(layer)
105
+ torch._C._cuda_clearCublasWorkspaces()
106
+ memory_after = torch.cuda.memory_allocated(device)
107
+ latent_size = memory_after - memory_before
108
+
109
+ # Analyze size of parameters.
110
+ param_size = sum(p._typed_storage()._nbytes() for p in layer.parameters())
111
+
112
+ # Combine size of parameters and activations with normalized scales.
113
+ size = latent_size * latent_scale + param_size * param_scale
114
+ sizes.append(int(size))
115
+
116
+ return sizes
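
Both balancers build on these profilers; a minimal CPU sketch of the time profiler (the printed numbers are machine-dependent)::

    import torch
    import torch.nn as nn

    from torch.distributed.pipeline.sync._balance.profile import profile_times

    model = nn.Sequential(nn.Linear(128, 128), nn.ReLU(), nn.Linear(128, 32))
    sample = torch.empty(16, 128)

    times = profile_times(model, sample, timeout=0.2, device=torch.device("cpu"))
    print(times)    # accumulated per-layer forward+backward time in microseconds
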
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/batchnorm.py ADDED
@@ -0,0 +1,159 @@
1
+ # Copyright 2019 Kakao Brain
2
+ #
3
+ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+ """Tracks the running statistics per mini-batch instead of micro-batch."""
8
+ from typing import TypeVar, cast
9
+
10
+ import torch
11
+ from torch import Tensor, nn
12
+ from torch.nn.functional import batch_norm
13
+ from torch.nn.modules.batchnorm import _BatchNorm
14
+
15
+ from .checkpoint import is_recomputing
16
+
17
+ __all__ = ["DeferredBatchNorm"]
18
+
19
+
20
+ TModule = TypeVar("TModule", bound=nn.Module)
21
+
22
+
23
+ class DeferredBatchNorm(_BatchNorm):
24
+ """A BatchNorm layer tracks multiple micro-batches to update running statistics per mini-batch."""
25
+
26
+ sum: Tensor
27
+ sum_squares: Tensor
28
+ running_mean: Tensor
29
+ running_var: Tensor
30
+ num_batches_tracked: Tensor
31
+
32
+ def __init__(
33
+ self,
34
+ num_features: int,
35
+ eps: float = 1e-5,
36
+ momentum: float = 0.1,
37
+ affine: bool = True,
38
+ chunks: int = 1,
39
+ ) -> None:
40
+ super().__init__(num_features, eps, momentum, affine, track_running_stats=True)
41
+
42
+ self.register_buffer("sum", torch.zeros_like(self.running_mean))
43
+ self.register_buffer("sum_squares", torch.zeros_like(self.running_var))
44
+
45
+ self.counter = 0
46
+ self.tracked = 0
47
+ self.chunks = chunks
48
+
49
+ def _check_input_dim(self, input: Tensor) -> None:
50
+ # It's the typical _check_input_dim() implementation in PyTorch.
51
+ if input.dim() <= 2:
52
+ raise ValueError("expected at least 3D input (got %dD input)" % input.dim())
53
+
54
+ def _track(self, input: Tensor) -> bool:
55
+ """Tracks statistics of a micro-batch."""
56
+ # Dimensions except channel. For example, (0, 2, 3) is for BatchNorm2d.
57
+ dim = [0]
58
+ dim.extend(range(2, input.dim()))
59
+
60
+ with torch.no_grad():
61
+ self.sum += input.sum(dim)
62
+ self.sum_squares += (input ** 2).sum(dim)
63
+
64
+ size = input.size().numel() // input.size(1)
65
+ self.counter += size
66
+ self.tracked += 1
67
+
68
+ return self.tracked == self.chunks
69
+
70
+ def _commit(self) -> None:
71
+ """Update the running statistics of a mini-batch."""
72
+ exponential_average_factor = 0.0
73
+ self.num_batches_tracked += 1
74
+ if self.momentum is None: # use cumulative moving average
75
+ exponential_average_factor = 1.0 / float(self.num_batches_tracked)
76
+ else: # use exponential moving average
77
+ exponential_average_factor = self.momentum
78
+
79
+ mean = self.sum / self.counter
80
+ var = self.sum_squares / self.counter - mean ** 2
81
+
82
+ # Calculate the exponential moving average here.
83
+ m = exponential_average_factor
84
+
85
+ self.running_mean *= 1 - m
86
+ self.running_mean += mean * m
87
+
88
+ self.running_var *= 1 - m
89
+ self.running_var += var * m
90
+
91
+ self.sum.zero_()
92
+ self.sum_squares.zero_()
93
+ self.counter = 0
94
+ self.tracked = 0
95
+
96
+ def forward(self, input: Tensor) -> Tensor:
97
+ if not self.training:
98
+ # Don't train parameters in evaluation mode.
99
+ return batch_norm(
100
+ input,
101
+ running_mean=self.running_mean,
102
+ running_var=self.running_var,
103
+ weight=self.weight,
104
+ bias=self.bias,
105
+ training=False,
106
+ momentum=0.0,
107
+ eps=self.eps,
108
+ )
109
+
110
+ if not is_recomputing():
111
+ # Track a micro-batch in training mode
112
+ # but not under a recomputation.
113
+ tracked_enough = self._track(input)
114
+
115
+ # Update the running statistics for a mini-batch
116
+ # if it has tracked enough micro-batches.
117
+ if tracked_enough:
118
+ self._commit()
119
+
120
+ # Normalize a micro-batch and train the parameters.
121
+ return batch_norm(
122
+ input,
123
+ running_mean=None,
124
+ running_var=None,
125
+ weight=self.weight,
126
+ bias=self.bias,
127
+ training=True,
128
+ momentum=0.0,
129
+ eps=self.eps,
130
+ )
131
+
132
+ @classmethod
133
+ def convert_deferred_batch_norm(cls, module: TModule, chunks: int = 1) -> TModule:
134
+ """Converts a :class:`nn.BatchNorm` or underlying :class:`nn.BatchNorm`s into :class:`DeferredBatchNorm`::
135
+
136
+ from torchvision.models.resnet import resnet101
137
+ from torchpipe.batchnorm import DeferredBatchNorm
138
+ model = resnet101()
139
+ model = DeferredBatchNorm.convert_deferred_batch_norm(model)
140
+
141
+ """
142
+ if isinstance(module, DeferredBatchNorm) and module.chunks is chunks:
143
+ return cast(TModule, module)
144
+
145
+ module_output: nn.Module = module
146
+
147
+ if isinstance(module, _BatchNorm) and module.track_running_stats:
148
+ module_output = DeferredBatchNorm(module.num_features, module.eps, module.momentum, module.affine, chunks)
149
+ if module.affine:
150
+ module_output.register_parameter("weight", module.weight)
151
+ module_output.register_parameter("bias", module.bias)
152
+ module_output.register_buffer("running_mean", module.running_mean)
153
+ module_output.register_buffer("running_var", module.running_var)
154
+ module_output.register_buffer("num_batches_tracked", module.num_batches_tracked)
155
+
156
+ for name, child in module.named_children():
157
+ module_output.add_module(name, cls.convert_deferred_batch_norm(child, chunks))
158
+
159
+ return cast(TModule, module_output)
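
A short sketch of converting a model and driving the deferred statistics outside of ``Pipe``: statistics are accumulated across ``chunks`` micro-batches and the running stats are committed once per mini-batch::

    import torch
    import torch.nn as nn

    from torch.distributed.pipeline.sync.batchnorm import DeferredBatchNorm

    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
    net = DeferredBatchNorm.convert_deferred_batch_norm(net, chunks=4)

    net.train()
    for _ in range(4):                     # four micro-batches = one mini-batch
        net(torch.randn(2, 3, 16, 16))
    print(net[1].num_batches_tracked)      # tensor(1): committed exactly once
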
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/copy.py ADDED
@@ -0,0 +1,108 @@
1
+ # Copyright 2019 Kakao Brain
2
+ #
3
+ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+ """Autograd functions for stream-aware CUDA copy.
8
+
9
+ It is used to overlap copy and computation on the same GPU.
10
+ """
11
+ from collections import deque
12
+ from typing import Deque, List, Optional, Tuple, Sequence
13
+
14
+ import torch
15
+ from torch import Tensor
16
+
17
+ from .stream import AbstractStream, current_stream, get_device, record_stream, use_stream, wait_stream
18
+
19
+ __all__: List[str] = ["Context", "Copy", "Wait"]
20
+
21
+
22
+ Tensors = Sequence[Tensor]
23
+
24
+
25
+ # Common interface between :class:`Copy` and :class:`Wait`.
26
+ class Context:
27
+ prev_stream: AbstractStream
28
+ next_stream: AbstractStream
29
+
30
+
31
+ class Copy(torch.autograd.Function):
32
+ """Copies tensors on specific streams."""
33
+
34
+ @staticmethod
35
+ # type: ignore[override]
36
+ def forward(ctx: Context, prev_stream: AbstractStream, next_stream: AbstractStream, *input,) -> Tensors:
37
+ ctx.prev_stream = prev_stream
38
+ ctx.next_stream = next_stream
39
+
40
+ output = []
41
+ output_stream = current_stream(get_device(next_stream))
42
+
43
+ with use_stream(prev_stream), use_stream(next_stream):
44
+ for x in input:
45
+ if torch.is_tensor(x):
46
+ y = x.to(get_device(next_stream), non_blocking=True)
47
+ output.append(y)
48
+
49
+ # 'prev_stream' is not where 'x' has been allocated.
50
+ record_stream(x, prev_stream)
51
+ # 'y' has been allocated on 'next_stream'.
52
+ # It might be used on the current stream captured as 'output_stream'.
53
+ record_stream(y, output_stream)
54
+ else:
55
+ output.append(x)
56
+
57
+ return tuple(output)
58
+
59
+ @staticmethod
60
+ def backward(ctx: Context, *grad_output: Tensor,) -> Tuple[Optional[Tensor], ...]:
61
+ prev_stream = ctx.prev_stream
62
+ next_stream = ctx.next_stream
63
+
64
+ grad_input: Deque[Tensor] = deque(maxlen=len(grad_output))
65
+ input_stream = current_stream(get_device(prev_stream))
66
+
67
+ with use_stream(prev_stream), use_stream(next_stream):
68
+ for x in reversed(grad_output):
69
+ y = x.to(get_device(prev_stream), non_blocking=True)
70
+ grad_input.appendleft(y)
71
+
72
+ # 'next_stream' is not where 'x' has been allocated.
73
+ record_stream(x, next_stream)
74
+ # 'y' has been allocated on 'prev_stream'.
75
+ # It might be used on the current stream captured as 'input_stream'.
76
+ record_stream(y, input_stream)
77
+
78
+ grad_streams: Tuple[Optional[Tensor], ...] = (None, None)
79
+ return grad_streams + tuple(grad_input)
80
+
81
+
82
+ class Wait(torch.autograd.Function):
83
+ """Synchronizes a stream to another stream.
84
+
85
+ Place it just before you want to start an operation on the next stream,
86
+ provided that all operations on the previous stream are done.
87
+
88
+ """
89
+
90
+ @staticmethod
91
+ # type: ignore[override]
92
+ def forward(ctx: Context, prev_stream: AbstractStream, next_stream: AbstractStream, *input) -> Tensors:
93
+ ctx.prev_stream = prev_stream
94
+ ctx.next_stream = next_stream
95
+
96
+ wait_stream(next_stream, prev_stream)
97
+
98
+ return tuple(x.detach() if torch.is_tensor(x) else x for x in input)
99
+
100
+ @staticmethod
101
+ def backward(ctx: Context, *grad_input: Tensor,) -> Tuple[Optional[Tensor], ...]:
102
+ prev_stream = ctx.prev_stream
103
+ next_stream = ctx.next_stream
104
+
105
+ wait_stream(prev_stream, next_stream)
106
+
107
+ grad_streams: Tuple[Optional[Tensor], ...] = (None, None)
108
+ return grad_streams + grad_input
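
A sketch of the two autograd functions in isolation, assuming at least one CUDA device is available; ``current_stream`` and ``new_stream`` come from the neighbouring ``stream`` module that this file already imports from::

    import torch

    from torch.distributed.pipeline.sync.copy import Copy, Wait
    from torch.distributed.pipeline.sync.stream import current_stream, new_stream

    cpu = torch.device("cpu")
    cuda = torch.device("cuda", 0)

    copy_stream = new_stream(cuda)            # dedicated stream for the copy
    compute_stream = current_stream(cuda)     # default compute stream

    x = torch.randn(4, requires_grad=True)                    # lives on the CPU
    (y,) = Copy.apply(current_stream(cpu), copy_stream, x)    # y is now on cuda:0
    (z,) = Wait.apply(copy_stream, compute_stream, y)         # compute waits for copy
    z.sum().backward()                        # gradients flow back to x on the CPU
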
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipe.py ADDED
@@ -0,0 +1,490 @@
1
+ # Copyright 2019 Kakao Brain
2
+ #
3
+ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+ """The Pipe interface."""
8
+ from collections import OrderedDict
9
+ from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Union, Sequence, Tuple, cast
10
+
11
+ import torch
12
+ from torch import Tensor, nn
13
+ from torch.distributed.rpc import RRef
14
+ import torch.autograd
15
+ import torch.cuda
16
+
17
+ from . import microbatch
18
+ from .batchnorm import DeferredBatchNorm
19
+ from .pipeline import Pipeline
20
+ from .skip.layout import inspect_skip_layout
21
+ from .skip.skippable import verify_skippables
22
+ from .stream import AbstractStream, new_stream
23
+
24
+ __all__ = ["Pipe", "BalanceError", "PipeSequential", "WithDevice"]
25
+
26
+
27
+ Device = Union[torch.device, int, str]
28
+ Devices = Union[Iterable[Device], List[Device]]
29
+
30
+ Tensors = Sequence[Tensor]
31
+ TensorOrTensors = Union[Tensor, Tensors]
32
+
33
+ if TYPE_CHECKING:
34
+ # Typechecking: nn.Module is not a Generic
35
+ Module = nn.Module[TensorOrTensors] # type: ignore[type-arg]
36
+ NamedModules = OrderedDict[str, Module]
37
+ else:
38
+ Module = nn.Module
39
+ NamedModules = OrderedDict
40
+
41
+
42
+ def _recommend_auto_balance(message: str) -> str:
43
+ """Expands a message with recommendation to :mod:`torchpipe.balance`."""
44
+ return f"""{message}
45
+
46
+ If your model is still under development, its optimal balance would change
47
+ frequently. In this case, we highly recommend 'torch.distributed.pipeline.sync.balance' for
48
+ naive automatic balancing:
49
+
50
+ from torch.distributed.pipeline.sync import Pipe
51
+ from torch.distributed.pipeline.sync.balance import balance_by_time
52
+
53
+ partitions = torch.cuda.device_count()
54
+ sample = torch.empty(...)
55
+ balance = balance_by_time(partitions, model, sample)
56
+
57
+ model = Pipe(model, balance, ...)
58
+ """
59
+
60
+
61
+ def _verify_module(module: nn.Sequential) -> None:
62
+ if not isinstance(module, nn.Sequential):
63
+ raise TypeError("module must be nn.Sequential to be partitioned")
64
+
65
+ named_children = list(module.named_children())
66
+ if len(named_children) != len(module):
67
+ raise ValueError("module with duplicate children is not supported")
68
+
69
+
70
+ def _verify_splitting(
71
+ module: nn.Sequential, partitions: List[nn.Sequential], devices: List[torch.device]
72
+ ) -> None:
73
+ num_parameters = len(list(module.parameters()))
74
+ num_child_parameters = sum(len(list(child.parameters())) for child in module.children())
75
+ if num_parameters == num_child_parameters:
76
+ return
77
+
78
+ for i in range(len(partitions)):
79
+ for j in range(i + 1, len(partitions)):
80
+ parti = partitions[i]
81
+ partj = partitions[j]
82
+ if devices[i] == devices[j]:
83
+ continue
84
+ for p in parti.parameters():
85
+ for q in partj.parameters():
86
+ if p is q:
87
+ raise ValueError("module with duplicate parameters on distinct devices is not supported")
88
+
89
+
90
+ class BalanceError(ValueError):
91
+ pass
92
+
93
+
94
+ def _retrieve_device(module: nn.Module) -> torch.device:
95
+ """Validates all parameters in the Module have the same device and returns
96
+ the appropriate device.
97
+
98
+ Args:
99
+ An ``nn.Module`` to process.
100
+
101
+ Returns:
102
+ ``torch.Device`` for the entire module.
103
+
104
+ Raises:
105
+ ValueError:
106
+ If devices for ``nn.Module`` parameters are not all same.
107
+ """
108
+
109
+ device = None
110
+ for parameter in module.parameters():
111
+ if device is None:
112
+ device = parameter.device
113
+ elif device != parameter.device:
114
+ raise ValueError(
115
+ f'nn.Module: {module}, should have all parameters on a single device,'
116
+ ' please use .to() to place the module on a single device')
117
+
118
+ return device if device is not None else torch.device("cpu")
119
+
120
+
121
+ class PipeSequential(nn.Sequential):
122
+ """
123
+ Pipe variant of ``nn.Sequential`` which supports multiple inputs.
124
+ """
125
+
126
+ def forward(self, *inputs):
127
+ for module in self:
128
+ if isinstance(inputs, Tuple): # type: ignore[arg-type]
129
+ inputs = module(*inputs)
130
+ else:
131
+ # Don't expand single variables (ex: lists/Tensor)
132
+ inputs = module(inputs)
133
+ return inputs
134
+
135
+
136
+ class WithDevice(nn.Module):
137
+ """
138
+ Wraps an ``nn.Module`` which is part of ``nn.Sequential`` passed into :class:`Pipe`
139
+ that overrides the device for that module. In cases where :class:`Pipe`
140
+ can't implicitly determine the device for the module and places it on CPU,
141
+ this wrapper can be used to override the implicit behavior and explicitly
142
+ specify which device a module should run on.
143
+
144
+ The provided module is also moved to the given device via ``.to(device)``
145
+ by :class:`Pipe`
146
+
147
+ Args:
148
+ module(:class:`torch.nn.Module`): The module to be wrapped.
149
+ device(:class:`torch.device`): The device to run the module on.
150
+
151
+ Example::
152
+ >>> # xdoctest: +SKIP("distributed")
153
+ >>> fc1 = nn.Linear(16, 8).cuda(0)
154
+ >>> fc2 = nn.Linear(8, 4).cuda(1)
155
+ >>> dropout = nn.Dropout()
156
+ >>>
157
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1)
158
+ >>> # Dropout does not have any parameters/buffers, but we want to
159
+ >>> # run it on cuda:1 to avoid any GPU to CPU transfers.
160
+ >>> model = nn.Sequential(fc1, fc2, WithDevice(dropout, 'cuda:1'))
161
+ >>> # xdoctest: +SKIP("Needs RPC framework init")
162
+ >>> model = Pipe(model, chunks=8)
163
+ """
164
+ def __init__(self, module: nn.Module, device: torch.device):
165
+ super().__init__()
166
+ self._module = module
167
+ self._device = torch.device(device)
168
+
169
+ def forward(self, *args, **kwargs):
170
+ return self._module(*args, **kwargs)
171
+
172
+ @property
173
+ def module(self):
174
+ return self._module
175
+
176
+ @property
177
+ def device(self):
178
+ return self._device
179
+
180
+
181
+ def _assemble_partition(modules: List[nn.Module]):
182
+ modules_list: List[nn.Module] = []
183
+ for module in modules:
184
+ if isinstance(module, nn.Sequential):
185
+ modules_list.extend(module.children())
186
+ else:
187
+ modules_list.append(module)
188
+ return PipeSequential(*modules_list)
189
+
190
+
191
+ def _split_module(modules: nn.Sequential) -> Tuple[List[nn.Sequential], List[torch.device]]:
192
+ partitions = []
193
+ devices = []
194
+
195
+ current_partition = []
196
+ current_device = None
197
+ for name, module in modules.named_children():
198
+ if isinstance(module, WithDevice):
199
+ # Process device override and move module to appropriate device.
200
+ device = module.device
201
+ module = module.module
202
+ module.to(device)
203
+ else:
204
+ device = _retrieve_device(module)
205
+ if current_device is not None and (current_device != device or device.type == 'cpu'):
206
+ partitions.append(_assemble_partition(current_partition))
207
+ devices.append(current_device)
208
+ current_partition = []
209
+ current_device = device
210
+ current_partition.append(module)
211
+
212
+ if current_device is not None:
213
+ partitions.append(_assemble_partition(current_partition))
214
+ devices.append(current_device)
215
+
216
+ partitions = cast(List[nn.Sequential], nn.ModuleList(partitions))
217
+
218
+ return partitions, devices
219
+
220
+
221
+ MOVING_DENIED = TypeError("denied to move parameters and buffers, because Pipe should manage device placement")
222
+
223
+
224
+ class Pipe(Module):
225
+ """Wraps an arbitrary :class:`nn.Sequential <torch.nn.Sequential>` module
226
+ to train on using synchronous pipeline parallelism. If the module requires
227
+ lots of memory and doesn't fit on a single GPU, pipeline parallelism is a
228
+ useful technique to employ for training.
229
+
230
+ The implementation is based on the torchgpipe_ paper.
231
+
232
+ .. _torchgpipe: https://arxiv.org/abs/2004.09910
233
+
234
+ Pipe combines pipeline parallelism with checkpointing to reduce peak
235
+ memory required to train while minimizing device under-utilization.
236
+
237
+ You should place all the modules on the appropriate devices and wrap them
238
+ into an :class:`nn.Sequential <torch.nn.Sequential>` module defining the
239
+ desired order of execution. If a module does not contain any
240
+ parameters/buffers, it is assumed this module should be executed on CPU
241
+ and appropriate input tensors to the module are moved to CPU before
242
+ execution. This behavior can be overridden by the :class:`WithDevice`
243
+ wrapper which can be used to explicitly specify which device a module
244
+ should run on.
245
+
246
+ Args:
247
+ module (:class:`nn.Sequential <torch.nn.Sequential>`):
248
+ sequential module to be parallelized using pipelining. Each module
249
+ in the sequence has to have all of its parameters on a single
250
+ device. Each module in the sequence has to either be an nn.Module
251
+ or :class:`nn.Sequential <torch.nn.Sequential>` (to combine multiple
252
+ sequential modules on a single device)
253
+ chunks (int):
254
+ number of micro-batches (default: ``1``)
255
+ checkpoint (str):
256
+ when to enable checkpointing, one of ``'always'``,
257
+ ``'except_last'``, or ``'never'`` (default: ``'except_last'``).
258
+ ``'never'`` disables checkpointing completely, ``'except_last'``
259
+ enables checkpointing for all micro-batches except the last one
260
+ and ``'always'`` enables checkpointing for all micro-batches.
261
+ deferred_batch_norm (bool):
262
+ whether to use deferred ``BatchNorm`` moving statistics (default:
263
+ :data:`False`). If set to :data:`True`, we track statistics across
264
+ multiple micro-batches to update the running statistics per
265
+ mini-batch.
266
+
267
+ Raises:
268
+ TypeError:
269
+ the module is not a :class:`nn.Sequential <torch.nn.Sequential>`.
270
+ ValueError:
271
+ invalid arguments
272
+
273
+ Example::
274
+ Pipeline of two FC layers across GPUs 0 and 1.
275
+
276
+ >>> # Need to initialize RPC framework first.
277
+ >>> # xdoctest: +SKIP
278
+ >>> os.environ['MASTER_ADDR'] = 'localhost'
279
+ >>> os.environ['MASTER_PORT'] = '29500'
280
+ >>> torch.distributed.rpc.init_rpc('worker', rank=0, world_size=1)
281
+ >>>
282
+ >>> # Build pipe.
283
+ >>> fc1 = nn.Linear(16, 8).cuda(0)
284
+ >>> fc2 = nn.Linear(8, 4).cuda(1)
285
+ >>> model = nn.Sequential(fc1, fc2)
286
+ >>> model = Pipe(model, chunks=8)
287
+ >>> input = torch.rand(16, 16).cuda(0)
288
+ >>> output_rref = model(input)
289
+
290
+ .. note::
291
+ You can wrap a :class:`Pipe` model with
292
+ :class:`torch.nn.parallel.DistributedDataParallel` only when the
293
+ checkpoint parameter of :class:`Pipe` is ``'never'``.
294
+
295
+ .. note::
296
+ :class:`Pipe` only supports intra-node pipelining currently, but
297
+ will be expanded to support inter-node pipelining in the future.
298
+ The forward function returns an :class:`~torch.distributed.rpc.RRef`
299
+ to allow for inter-node pipelining in the future, where the output
300
+ might be on a remote host. For intra-node pipelining you can use
301
+ :meth:`~torch.distributed.rpc.RRef.local_value` to retrieve the
302
+ output locally.
303
+
304
+ .. warning::
305
+ :class:`Pipe` is experimental and subject to change.
306
+ """
307
+
308
+ def __init__(
309
+ self,
310
+ module: nn.Sequential,
311
+ chunks: int = 1,
312
+ checkpoint: str = "except_last",
313
+ deferred_batch_norm: bool = False,
314
+ ) -> None:
315
+ super().__init__()
316
+
317
+ # Check if RPC framework is initialized.
318
+ if not torch.distributed.rpc._is_current_rpc_agent_set():
319
+ raise RuntimeError(
320
+ 'Please initialize RPC framework for Pipe using '
321
+ 'torch.distributed.rpc.init_rpc')
322
+
323
+ chunks = int(chunks)
324
+ checkpoint = str(checkpoint)
325
+
326
+ if chunks <= 0:
327
+ raise ValueError("number of chunks must be a positive integer")
328
+ if checkpoint not in ["always", "except_last", "never"]:
329
+ raise ValueError("checkpoint is not one of 'always', 'except_last', or 'never'")
330
+
331
+ _verify_module(module)
332
+
333
+ # Verify if the underlying skippable modules satisfy integrity. The
334
+ # integrity can be verified before forward() because it is static.
335
+ verify_skippables(module)
336
+
337
+ self.chunks = chunks
338
+ self.checkpoint = checkpoint
339
+
340
+ if deferred_batch_norm:
341
+ module = DeferredBatchNorm.convert_deferred_batch_norm(module, chunks)
342
+
343
+ self.partitions, self.devices = _split_module(module)
344
+ _verify_splitting(module, self.partitions, self.devices)
345
+
346
+ self._copy_streams: List[List[AbstractStream]] = []
347
+ self._skip_layout = inspect_skip_layout(self.partitions)
348
+
349
+ # Separate CUDA streams for copy.
350
+ copy_streams = self._ensure_copy_streams()
351
+
352
+ # The micro-batch index where the checkpointing stops.
353
+ checkpoint_stop = {"always": self.chunks, "except_last": self.chunks - 1, "never": 0}[self.checkpoint]
354
+
355
+ self.pipeline = Pipeline(self.partitions, self.devices, copy_streams, self._skip_layout, checkpoint_stop)
356
+
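# Editorial note (not part of the original file): a worked example of the
# checkpoint_stop mapping above, assuming chunks=8. Micro-batches with index
# strictly less than checkpoint_stop are checkpointed:
#
#   checkpoint='always'      -> checkpoint_stop = 8  (micro-batches 0..7 checkpointed)
#   checkpoint='except_last' -> checkpoint_stop = 7  (micro-batches 0..6 checkpointed)
#   checkpoint='never'       -> checkpoint_stop = 0  (no checkpointing)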
357
+ def __len__(self) -> int:
358
+ """Returns the total number of layers in the underlying sequential module."""
359
+ return sum(len(p) for p in self.partitions)
360
+
361
+ def __getitem__(self, index: int) -> nn.Module:
362
+ """Gets a layer in the underlying sequential module."""
363
+ partitions = self.partitions
364
+ if index < 0:
365
+ partitions = partitions[::-1]
366
+
367
+ for partition in partitions:
368
+ try:
369
+ return partition[index]
370
+ except IndexError:
371
+ pass
372
+
373
+ shift = len(partition)
374
+
375
+ if index < 0:
376
+ index += shift
377
+ else:
378
+ index -= shift
379
+
380
+ raise IndexError
381
+
382
+ def __iter__(self) -> Iterator[nn.Module]:
383
+ """Iterates over children of the underlying sequential module."""
384
+ for partition in self.partitions:
385
+ yield from partition
386
+
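# Editorial sketch (not part of the original file): how __len__, __getitem__
# and __iter__ behave across partitions, assuming the two-layer example from
# the class docstring (fc1 on cuda:0, fc2 on cuda:1):
#
#   len(model)           # 2: total number of layers across all partitions
#   model[0]             # fc1: positive indices walk partitions left to right
#   model[-1]            # fc2: negative indices walk partitions right to left
#   list(model)          # [fc1, fc2]: iterates layers in execution order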
387
+ # Pipe should manage the device of each partition.
388
+ # Deny cuda(), cpu(), and to() with device, by TypeError.
389
+ def cuda(self, device: Optional[Device] = None) -> "Pipe":
390
+ raise MOVING_DENIED
391
+
392
+ def cpu(self) -> "Pipe":
393
+ raise MOVING_DENIED
394
+
395
+ def to(self, *args: Any, **kwargs: Any) -> "Pipe":
396
+ # Deny these usages:
397
+ #
398
+ # - to(device[, dtype, non_blocking])
399
+ # - to(tensor[, non_blocking])
400
+ #
401
+ # But allow this:
402
+ #
403
+ # - to(dtype[, non_blocking])
404
+ #
405
+ if "device" in kwargs or "tensor" in kwargs:
406
+ raise MOVING_DENIED
407
+
408
+ if args:
409
+ if isinstance(args[0], (torch.device, int, str)):
410
+ raise MOVING_DENIED
411
+ if torch.is_tensor(args[0]):
412
+ raise MOVING_DENIED
413
+
414
+ return super().to(*args, **kwargs)
415
+
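# Editorial sketch (not part of the original file): what the overrides above
# allow and deny, assuming `pipe` is an initialized Pipe instance:
#
#   pipe.to(torch.float16)           # allowed: dtype-only conversion
#   pipe.to(torch.device('cuda:0'))  # raises TypeError (MOVING_DENIED)
#   pipe.cuda()                      # raises TypeError (MOVING_DENIED)
#   pipe.cpu()                       # raises TypeError (MOVING_DENIED)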
416
+ def _ensure_copy_streams(self) -> List[List[AbstractStream]]:
417
+ """Ensures that :class:`Pipe` caches CUDA streams for copy.
418
+
419
+ It is worth caching CUDA streams even though PyTorch already manages a
420
+ pool of pre-allocated CUDA streams, because doing so may reduce GPU memory
421
+ fragmentation when the number of micro-batches is small.
422
+
423
+ """
424
+ if not self._copy_streams:
425
+ for device in self.devices:
426
+ self._copy_streams.append([new_stream(device) for _ in range(self.chunks)])
427
+
428
+ return self._copy_streams
429
+
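# Editorial note (not part of the original file): the cached layout is one
# list of streams per device, each of length `chunks`, i.e. it is indexed as
# self._copy_streams[device_index][micro_batch_index].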
430
+ def forward(self, *inputs) -> RRef:
431
+ """
432
+ Processes a single input mini-batch through the pipe and returns an
433
+ :class:`~torch.distributed.rpc.RRef` pointing to the output.
434
+ :class:`Pipe` is a fairly transparent module wrapper. It doesn't
435
+ modify the input and output signature of the underlying module, but
436
+ there is one type restriction: the input and output must contain at
437
+ least one tensor. This restriction also applies at partition boundaries.
438
+
439
+ The sequence of inputs is fed into the first stage of the pipeline as
440
+ ``*inputs``. As a result, the positional args for this function should
441
+ match the positional args for the first stage of the pipeline. The same
442
+ condition applies to the output of one stage of the pipeline, which is
443
+ the input to the next stage.
444
+
445
+ The input tensor is split into multiple micro-batches based on the
446
+ ``chunks`` parameter used to initialize :class:`Pipe`. The batch size
447
+ is assumed to be the first dimension of the tensor and if the batch
448
+ size is less than ``chunks``, the number of micro-batches is equal to
449
+ the batch size.
450
+
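As a worked example of this splitting rule: with ``chunks=8``, an input of
shape ``(16, 16)`` is split along the first dimension into 8 micro-batches of
shape ``(2, 16)``, while an input with batch size 3 yields only 3
micro-batches. A rough sketch of the semantics using ``torch.chunk`` (the
actual splitting is handled internally by the microbatch utilities):

    >>> # xdoctest: +SKIP
    >>> x = torch.rand(16, 16)
    >>> [t.shape for t in torch.chunk(x, 8, dim=0)]  # 8 chunks of shape (2, 16)
    >>> y = torch.rand(3, 16)
    >>> [t.shape for t in torch.chunk(y, 8, dim=0)]  # only 3 chunks of shape (1, 16)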
451
+ Only tensors are split into multiple micro-batches; non-Tensor inputs
452
+ are simply replicated as-is in each micro-batch. Non-Tensor outputs
453
+ from the last stage of the pipeline are aggregated as a ``List`` and
454
+ returned to the user. For example, if you have 2 micro-batches
455
+ each returning the integer 5, the user would receive the consolidated
456
+ output of ``[5, 5]``.
457
+
458
+ All the input tensors need to be on the same device as the first
459
+ partition of the pipeline.
460
+
461
+ If a tensor is wrapped with the :class:`NoChunk` wrapper, the tensor
462
+ is not split across micro-batches and is replicated as-is similar to
463
+ non-tensors.
464
+
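For example, a tensor that every micro-batch needs to see in full can be
passed through unchanged. A sketch, assuming ``NoChunk`` is importable from
``torch.distributed.pipeline.sync`` and that the first stage of the pipeline
accepts the extra tensor as a second positional argument:

    >>> # xdoctest: +SKIP
    >>> from torch.distributed.pipeline.sync import NoChunk
    >>> input = torch.rand(16, 16).cuda(0)
    >>> weights = torch.rand(16).cuda(0)          # replicated, never split
    >>> output_rref = model(input, NoChunk(weights))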
465
+ Args:
466
+ inputs: input mini-batch
467
+
468
+ Returns:
469
+ :class:`~torch.distributed.rpc.RRef` to the output of the mini-batch
470
+
471
+ Raises:
472
+ TypeError: input doesn't contain at least one tensor
473
+
474
+ """
475
+ first_partition_device = self.devices[0] if len(self.devices) != 0 else torch.device("cpu")
476
+ microbatch.check(first_partition_device, *inputs)
477
+
478
+ if not self.devices:
479
+ # An empty sequential module is not illegal; just wrap the inputs in an RRef.
480
+ return RRef(*inputs)
481
+
482
+ # Divide a mini-batch into micro-batches.
483
+ batches = microbatch.scatter(*inputs, chunks=self.chunks)
484
+
485
+ # Run pipeline parallelism.
486
+ self.pipeline.run(batches)
487
+
488
+ # Merge the micro-batches into one mini-batch.
489
+ output = microbatch.gather(batches)
490
+ return RRef(output)
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__init__.py ADDED
@@ -0,0 +1,11 @@
1
+ # Copyright 2019 Kakao Brain
2
+ #
3
+ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+ """Supports efficiency with skip connections."""
8
+ from .namespace import Namespace
9
+ from .skippable import pop, skippable, stash, verify_skippables
10
+
11
+ __all__ = ["skippable", "stash", "pop", "verify_skippables", "Namespace"]
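As a rough illustration of the API exported here (a sketch based on the
documented ``@skippable`` pattern; the class and tensor names are made up),
a skip connection stashes a tensor in one pipeline stage and pops it in a
later one, so it does not have to flow through every intermediate partition:

    from torch import nn
    from torch.distributed.pipeline.sync.skip import skippable, stash, pop

    @skippable(stash=['skip'])
    class Stash(nn.Module):
        def forward(self, input):
            yield stash('skip', input)   # save the tensor under the name 'skip'
            return input

    @skippable(pop=['skip'])
    class Pop(nn.Module):
        def forward(self, input):
            skip = yield pop('skip')     # retrieve the stashed tensor
            return input + skip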
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (447 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/layout.cpython-310.pyc ADDED
Binary file (2.94 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/namespace.cpython-310.pyc ADDED
Binary file (1.59 kB). View file