Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
- ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
- ckpts/universal/global_step120/zero/19.attention.dense.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/19.attention.dense.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/19.attention.dense.weight/fp32.pt +3 -0
- ckpts/universal/global_step120/zero/4.input_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/4.input_layernorm.weight/exp_avg_sq.pt +3 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_composable_state.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_functional_collectives.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_functional_collectives_impl.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_state_dict_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/argparse_util.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/c10d_logger.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/collective_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/constants.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/device_mesh.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/distributed_c10d.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/launch.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/logging_handlers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/remote_device.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/rendezvous.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/run.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/__pycache__/utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_composable/__init__.py +4 -0
- venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py +94 -0
- venv/lib/python3.10/site-packages/torch/distributed/_composable/contract.py +194 -0
- venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_api.py +52 -0
- venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py +151 -0
- venv/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py +133 -0
- venv/lib/python3.10/site-packages/torch/distributed/_composable/replicate.py +154 -0
- venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/comm_mode.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/op_coverage.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/visualize_sharding.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__init__.py +10 -0
- venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/basic_strategy.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/common_rules.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/conv_ops.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/embedding_ops.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/experimental_ops.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:23df29fc283a4778362ca11878b726b106636e57f13cafdd3b7e8d152f7cd803
size 33555612
ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3536c32890a60ae164b6183fe9a28b27f077c525d28082130c1170ce3a102b66
size 33555612
ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:85850ddefde2ae71ef563fbe93a20541501ab97aa8bddcba877f75f88b477b49
size 33555627
ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9481d4fb12dce0ead4f14f9da839d0eee10d5a75575757b7d83941aff4daa47
size 33555627
ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:68c64b8c5e27d75610444fb8d6c0c947755967dc23edb76508400582707ccc28
size 33555533
ckpts/universal/global_step120/zero/19.attention.dense.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:98dd5fa13a50c5a826026c48b1dc8b5d5bcf612a6e3c73820746a66b9c37e117
size 16778396
ckpts/universal/global_step120/zero/19.attention.dense.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:925d12e782d3b4c723c33574c3e050f75f71219966d298f9e666fc6ff9a74092
size 16778411
ckpts/universal/global_step120/zero/19.attention.dense.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b1ad2fd75bd755ea7368b4b3c7ffd4939641bf426bd66fee5f584f0c1bc40969
size 16778317
ckpts/universal/global_step120/zero/4.input_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f6deb500a5774778a3f48fb763ff31bdabad47a2645e52c8ee5fd9b8f8393f8
size 9372
ckpts/universal/global_step120/zero/4.input_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e3f70fb651b3ec524d6c5654873f3b4ec8b5db182812412661ab7f6449776850
size 9387
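Each checkpoint file above is stored as a three-line Git LFS pointer (`version`, `oid`, `size`) rather than the tensor payload itself. A minimal sketch for reading that metadata before deciding whether to fetch the real object — the helper below is illustrative, not part of this commit:

from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into {'version', 'oid', 'size'}."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    fields["size"] = int(fields["size"])  # size of the real object, in bytes
    return fields

# e.g. (hypothetical local checkout):
# meta = read_lfs_pointer("ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/exp_avg.pt")
# print(meta["oid"], meta["size"])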
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.7 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_composable_state.cpython-310.pyc
ADDED
Binary file (1.33 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_functional_collectives.cpython-310.pyc
ADDED
Binary file (28.3 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_functional_collectives_impl.cpython-310.pyc
ADDED
Binary file (10.4 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_state_dict_utils.cpython-310.pyc
ADDED
Binary file (10.3 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/argparse_util.cpython-310.pyc
ADDED
Binary file (3.94 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/c10d_logger.cpython-310.pyc
ADDED
Binary file (3.1 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/collective_utils.cpython-310.pyc
ADDED
Binary file (5.6 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/constants.cpython-310.pyc
ADDED
Binary file (558 Bytes)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/device_mesh.cpython-310.pyc
ADDED
Binary file (18.4 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/distributed_c10d.cpython-310.pyc
ADDED
Binary file (133 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/launch.cpython-310.pyc
ADDED
Binary file (7.12 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/logging_handlers.cpython-310.pyc
ADDED
Binary file (403 Bytes)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/remote_device.cpython-310.pyc
ADDED
Binary file (3.82 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/rendezvous.cpython-310.pyc
ADDED
Binary file (8.48 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/run.cpython-310.pyc
ADDED
Binary file (28.7 kB)
venv/lib/python3.10/site-packages/torch/distributed/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (11.3 kB)
venv/lib/python3.10/site-packages/torch/distributed/_composable/__init__.py
ADDED
@@ -0,0 +1,4 @@
from .checkpoint_activation import checkpoint
from .contract import _get_registry, contract
from .fully_shard import fully_shard
from .replicate import replicate
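As this `__init__` shows, the package re-exports its composable entry points, so the APIs added in the files below can be imported directly (assuming the PyTorch build vendored in this venv):

from torch.distributed._composable import checkpoint, fully_shard, replicate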
venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (374 Bytes)
venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc
ADDED
Binary file (3.37 kB)
venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc
ADDED
Binary file (5.22 kB)
venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc
ADDED
Binary file (3.84 kB)
venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc
ADDED
Binary file (4.14 kB)
venv/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py
ADDED
@@ -0,0 +1,94 @@
from contextlib import contextmanager, nullcontext
from typing import Any, Tuple

import torch
import torch.nn as nn
from torch.utils.checkpoint import (
    _checkpoint_without_reentrant_generator,
    _DEFAULT_DETERMINISM_MODE,
)

from .contract import contract


@contextmanager
def _no_hook(module: nn.Module):
    r"""
    Disable hooks installed by checkpoint to avoid unintentional recursion
    during backward recomputation.
    """
    orig_enable_hook = checkpoint.state(module).enable_hook
    checkpoint.state(module).enable_hook = False
    try:
        yield
    finally:
        checkpoint.state(module).enable_hook = orig_enable_hook


@contract()
def checkpoint(module: nn.Module) -> nn.Module:
    r"""
    This is a composable activation checkpointing API. Unlike functional
    activation checkpointing APIs, this one does not require changing model
    source code. Unlike ``nn.Module`` wrapper activation checkpointing APIs,
    this one does not modify model structure or fully-qualified names either.
    Under the hood, it registers activation checkpointing logic as pre- and
    post-forward hooks. Hence, this API can be easily applied to any model or
    sub-modules in the model.

    Args:
        module (nn.Module): the target model or sub-module to apply activation
            checkpointing.

    Example::
        >>> # xdoctest: +SKIP
        >>> import torch.nn as nn
        >>>
        >>> class MyModel(nn.Module):
        >>>     def __init__(self):
        >>>         super().__init__()
        >>>         self.l1 = nn.Linear(10, 10)
        >>>         self.l2 = nn.Linear(10, 10)
        >>>
        >>>     def forward(self, x):
        >>>         return self.l2(self.l1(x))
        >>>
        >>> model = MyModel()
        >>> checkpoint(model.l1)  # apply activation checkpointing only to l1
        >>> model(torch.zeros(2, 10)).sum().backward()

    """
    torch._C._log_api_usage_once("torch.distributed.checkpoint")

    def forward_pre_hook(module: nn.Module, inputs: Tuple[Any, ...]) -> None:
        if checkpoint.state(module).enable_hook:

            def context_fns():
                return nullcontext(), _no_hook(module)

            checkpoint.state(
                module
            )._ac_generator = _checkpoint_without_reentrant_generator(
                module, True, context_fns, _DEFAULT_DETERMINISM_MODE, False, *inputs
            )
            next(checkpoint.state(module)._ac_generator)

    def forward_hook(module: nn.Module, inputs: Tuple[Any, ...], output: Any) -> Any:
        if checkpoint.state(module).enable_hook:
            try:
                next(checkpoint.state(module)._ac_generator)
            except StopIteration:
                pass
            else:
                raise RuntimeError(
                    "Expected non-reentrant activation checkpoint generator to be exhausted, but it was not!"
                )

        # Ensure that we no longer hold on to the generator. always_call=True helps ensure we
        # clear this even in the case of exception in fwd pass.
        checkpoint.state(module)._ac_generator = None

    checkpoint.state(module).enable_hook = True
    module.register_forward_pre_hook(forward_pre_hook)
    module.register_forward_hook(forward_hook, prepend=True, always_call=True)
    return module
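The docstring above already carries a basic example; since the hook-based design is meant to compose with the other APIs in this package, a hedged sketch of checkpointing just one stage of a larger model (module layout is illustrative) looks like:

import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint

model = nn.Sequential(nn.Linear(10, 10), nn.ReLU(), nn.Linear(10, 10))
checkpoint(model[0])  # only the first Linear recomputes its activations
model(torch.zeros(2, 10)).sum().backward()  # recomputation runs during backward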
venv/lib/python3.10/site-packages/torch/distributed/_composable/contract.py
ADDED
@@ -0,0 +1,194 @@
import uuid
from collections import OrderedDict
from functools import wraps
from typing import Callable, Dict, List, Optional, Type

import torch.nn as nn
from torch.distributed._composable_state import _State


def generate_state_key(string="__composable_api_state_key"):
    return f"{string}_{str(uuid.uuid4())}"


STATE_KEY = generate_state_key()
REGISTRY_KEY = generate_state_key()


# TODO: we can add additional info to RegistryItem to share across APIs. E.g.,
# we can add args and kwargs here, and then we can detect whether fully_shard
# is combined with reentrant activation checkpointing and error out with a clear
# message.
class RegistryItem:
    pass


def contract(state_cls: Type[_State] = _State):
    r"""
    Decorate a function as a composable distributed API, where the first
    argument of the function must be an :class:`nn.Module` instance. The
    decorator verifies that the wrapped function does not modify parameter,
    buffer or sub-module fully-qualified names (FQN).

    When a function ``func`` is decorated by ``@contract()``, a
    ``.state(module: nn.Module)`` method will be installed to the decorated
    function. Then you can retrieve and modify the state on a module by calling
    ``func.state(module)``.

    Example::
        >>> # xdoctest: +SKIP
        >>> import torch.nn as nn
        >>>
        >>> class MyModel(nn.Module):
        >>>     def __init__(self):
        >>>         super().__init__()
        >>>         self.l1 = nn.Linear(10, 10)
        >>>         self.l2 = nn.Linear(10, 10)
        >>>
        >>>     def forward(self, x):
        >>>         return self.l2(self.l1(x))
        >>>
        >>> @contract()
        >>> def my_feature(module: nn.Module) -> nn.Module:
        >>>     my_feature.state(module).some_state = "any value"
        >>>     return module
        >>>
        >>> model = MyModel()
        >>> my_feature(model.l1)
        >>> assert my_feature.state(model.l1).some_state == "any value"
        >>> my_feature(model.l2)
        >>> model(torch.randn(2, 10)).sum().backward()
    """

    # wraps will make functions decorated with contract() pickleable - needed for integration with torch.package
    @wraps(state_cls)
    def inner(func):
        @wraps(func)
        def wrapper(module: nn.Module, *args, **kwargs) -> Optional[nn.Module]:
            # get existing global states
            default_all_state: Dict[Callable, _State] = OrderedDict()
            all_state: Dict[Callable, _State] = module.__dict__.setdefault(  # type: ignore[call-overload]
                STATE_KEY, default_all_state
            )
            assert isinstance(
                all_state, dict
            ), "Distributed composable API states corrupted"

            # get global registry
            default_registry: Dict[str, RegistryItem] = OrderedDict()
            registry: Dict[str, RegistryItem] = module.__dict__.setdefault(  # type: ignore[call-overload]
                REGISTRY_KEY, default_registry
            )

            assert isinstance(
                registry, dict
            ), "Distributed composable API registry corrupted"

            # make sure the API func has not been applied to the input module yet.
            assert func not in all_state and func.__name__ not in registry, (
                "Each distinct composable distributed API can only be applied to a "
                f"module once. {func.__name__} has already been applied to the "
                f"following module.\n{module}"
            )

            # install states specific to the wrapped ``func``
            all_state.setdefault(func, state_cls())
            # register ``func`` in the global registry by name
            registry.setdefault(func.__name__, RegistryItem())

            orig_named_params = OrderedDict(module.named_parameters())
            orig_named_buffers = OrderedDict(
                module.named_buffers(remove_duplicate=False)
            )
            orig_named_modules = OrderedDict(
                module.named_modules(remove_duplicate=False)
            )

            updated = func(module, *args, **kwargs)

            if updated is None:
                updated = module

            new_named_params = OrderedDict(updated.named_parameters())
            new_named_buffers = OrderedDict(
                updated.named_buffers(remove_duplicate=False)
            )
            new_named_modules = OrderedDict(
                updated.named_modules(remove_duplicate=False)
            )

            assert isinstance(updated, nn.Module), (
                "Output of composable distributed APIs must be either None or "
                f"nn.Module, but got {type(updated)}"
            )

            def check_fqn(orig_fqns: List[str], new_fqns: List[str], check_key: str):
                if orig_fqns == new_fqns:
                    return

                orig_fqn_set, new_fqn_set = set(orig_fqns), set(new_fqns)
                orig_only = orig_fqn_set - new_fqn_set
                new_only = new_fqn_set - orig_fqn_set
                if len(orig_only) or len(new_only):
                    raise RuntimeError(
                        f"{check_key}"
                        "Composable distributed API implementations cannot modify "
                        "FQNs.\n"
                        f"Only in original FQNs: {orig_only},\n"
                        f"Only in new FQNs: {new_only}"
                    )
                else:
                    raise RuntimeError(
                        f"{check_key}"
                        "Composable distributed API implementations cannot modify "
                        "the order of FQNs.\n"
                        f"Original FQNs: {orig_only}\n"
                        f"New FQNs: {new_only}"
                    )

            check_fqn(
                list(orig_named_params.keys()),
                list(new_named_params.keys()),
                "Check parameters, ",
            )
            check_fqn(
                list(orig_named_buffers.keys()),
                list(new_named_buffers.keys()),
                "Check buffer, ",
            )
            check_fqn(
                list(orig_named_modules.keys()),
                list(new_named_modules.keys()),
                "Check modules, ",
            )

            # TODO: a stricter verification should also reject changing module
            # types and monkey-patching forward() method implementations.

            # TODO: verify that installed distributed paradigms are compatible with
            # each other.

            return updated

        def get_state(module: nn.Module) -> Optional[_State]:
            return module.__dict__.setdefault(  # type: ignore[call-overload]
                STATE_KEY,
                {},  # TODO(@yhcharles): this is a temporary fix, need a better way
            ).get(
                func
            )  # type: ignore[call-overload]

        wrapper.state = get_state  # type: ignore[attr-defined]

        return wrapper

    return inner


def _get_registry(module: nn.Module) -> Optional[Dict[str, RegistryItem]]:
    r"""
    Get an ``OrderedDict`` of composable APIs that have been applied to the
    ``module``, indexed by the API name. If no API has been applied, then this
    returns ``None``.
    """
    return getattr(module, REGISTRY_KEY, None)
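`_get_registry` exposes which composable APIs have already been applied to a module. A small sketch of querying it, using only the `checkpoint` API above and the functions defined in this file:

import torch.nn as nn
from torch.distributed._composable import checkpoint
from torch.distributed._composable.contract import _get_registry

layer = nn.Linear(4, 4)
assert _get_registry(layer) is None  # no composable API applied yet
checkpoint(layer)
assert "checkpoint" in _get_registry(layer)  # registry is keyed by the wrapped function's name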
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_api.py
ADDED
@@ -0,0 +1,52 @@
from dataclasses import dataclass
from typing import Optional

import torch


@dataclass(frozen=True)
class MixedPrecisionPolicy:
    """
    This configures FSDP's mixed precision. Unlike autocast, this applies mixed
    precision at the module level, not op level, which means low-precision
    activations are saved for backward and high-to-low-precision casts are
    incurred only at module boundaries.

    FSDP works well with module-level mixed precision since it keeps the
    high-precision sharded parameters in memory anyway. In other words, FSDP
    does not require any extra memory to keep a high-precision copy of the
    parameters for the optimizer step.

    Attributes:
        param_dtype (Optional[torch.dtype]): This specifies the dtype for
            the unsharded parameter and hence the dtype for forward/backward
            computation and the parameter all-gather. If this is ``None``, then
            the unsharded parameter uses the original dtype. The optimizer step
            uses the sharded parameter in the original dtype. (Default:
            ``None``)
        reduce_dtype (Optional[torch.dtype]): This specifies the dtype for
            gradient reduction (i.e. reduce-scatter or all-reduce). If this is
            ``None`` but ``param_dtype`` is not ``None``, then the reduction
            uses the compute dtype. This can be used to run gradient reduction
            in full precision while using low precision for compute. (Default:
            ``None``)
        output_dtype (Optional[torch.dtype]): This specifies the dtype for
            casting floating-point forward outputs. This can be used to
            help implement cases where different modules have different mixed
            precision policies. (Default: ``None``)
        cast_forward_inputs (bool): This specifies whether FSDP should cast the
            forward's floating-point input tensors to ``param_dtype`` or not.
    """

    param_dtype: Optional[torch.dtype] = None
    reduce_dtype: Optional[torch.dtype] = None
    output_dtype: Optional[torch.dtype] = None
    cast_forward_inputs: bool = True

    def __post_init__(self):
        # Clamp `reduce_dtype` to `None` if no casting is required: since
        # gradients are computed in `param_dtype`, if `reduce_dtype` matches,
        # then we do not need extra casting
        if self.param_dtype == self.reduce_dtype:
            # Bypass the frozen dataclass checks
            object.__setattr__(self, "reduce_dtype", None)
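A brief sketch of constructing this policy (the dtype choices are illustrative); note how `__post_init__` clamps a redundant `reduce_dtype` back to `None`:

import torch
from torch.distributed._composable.fsdp._fsdp_api import MixedPrecisionPolicy

# bf16 compute with fp32 gradient reduction
mp = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32)
assert mp.reduce_dtype is torch.float32

# When reduce_dtype matches param_dtype, no extra cast is needed, so it is dropped
mp_same = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.bfloat16)
assert mp_same.reduce_dtype is None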
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py
ADDED
@@ -0,0 +1,151 @@
import math
import traceback

from dataclasses import dataclass
from enum import auto, Enum
from typing import Any, cast, List, Optional, Tuple

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable.contract import _get_registry
from torch.distributed._tensor import DeviceMesh, DTensor, Placement


@dataclass
class DataParallelMeshInfo:
    mesh: DeviceMesh
    shard_mesh_dim: Optional[int] = None
    replicate_mesh_dim: Optional[int] = None

    def __post_init__(self):
        if self.shard_mesh_dim is None and self.replicate_mesh_dim is None:
            raise AssertionError(
                "At least one of shard_mesh_dim and replicate_mesh_dim must not be None"
            )


@dataclass
class FSDPMeshInfo(DataParallelMeshInfo):
    def __post_init__(self):
        super().__post_init__()
        if self.shard_mesh_dim is None:
            raise AssertionError("Expects non-None shard_mesh_dim")
        self.shard_mesh_size: int = self.mesh.size(self.shard_mesh_dim)
        self.shard_process_group = cast(
            dist.ProcessGroup, self.mesh.get_group(self.shard_mesh_dim)
        )
        self.shard_mesh_rank: int = self.shard_process_group.rank()


@dataclass
class DDPMeshInfo(DataParallelMeshInfo):
    def __post_init__(self):
        super().__post_init__()
        if self.replicate_mesh_dim is None:
            raise AssertionError("Expects non-None replicate_mesh_dim")
        self.replicate_mesh_size: int = self.mesh.size(self.replicate_mesh_dim)
        self.replicate_process_group = cast(
            dist.ProcessGroup, self.mesh.get_group(self.replicate_mesh_dim)
        )
        self.replicate_mesh_rank: int = self.replicate_process_group.rank()


@dataclass
class HSDPMeshInfo(FSDPMeshInfo, DDPMeshInfo):
    def __post_init__(self):
        # Calls `FSDPMeshInfo` -> `DDPMeshInfo` -> `DataParallelMeshInfo`
        super().__post_init__()


class TrainingState(Enum):
    """Describes the training state of one FSDP state / parameter group."""

    # Transition to forward starting pre-forward until post-forward
    FORWARD = auto()
    # Transition to pre-backward when unsharding in backward
    PRE_BACKWARD = auto()
    # Transition to post-backward when resharding and reducing gradients
    POST_BACKWARD = auto()
    # Idle before/after forward or before pre-backward/after post-backward
    IDLE = auto()


def _raise_assert_with_print(*args: Any, **kwargs: Any):
    print(f"[Rank {dist.get_rank()}] ", end="")
    print(*args, **kwargs)
    traceback.print_stack()
    raise AssertionError(*args, **kwargs)


def _is_composable_with_fsdp(module: nn.Module) -> bool:
    registry = _get_registry(module)
    if registry is None:
        return True
    # Registry keys by function name
    return "replicate" not in registry


def _get_dim0_padded_size(tensor_size: torch.Size, dim0_factor: int) -> torch.Size:
    padded_dim0 = math.ceil(tensor_size[0] / dim0_factor) * dim0_factor
    return cast(torch.Size, torch.Size([padded_dim0]) + tensor_size[1:])


def _chunk_with_empty(
    tensor: torch.Tensor, num_chunks: int, dim: int
) -> List[torch.Tensor]:
    chunks = list(torch.chunk(tensor, num_chunks, dim=dim))
    while len(chunks) < num_chunks:
        chunks.append(chunks[0].new_empty(0))
    return chunks


def _get_dim0_chunked_size(
    chunk: torch.Tensor, unchunked_size: torch.Size
) -> torch.Size:
    if chunk.numel() > 0:
        return chunk.size()
    # For 0 numel, we need to preserve trailing dims for DTensor APIs
    return cast(torch.Size, torch.Size([0]) + unchunked_size[1:])


def _from_local_no_grad(
    local_tensor: torch.Tensor,
    device_mesh: DeviceMesh,
    placements: Tuple[Placement, ...],
    global_size: torch.Size,
    global_stride: Tuple[int, ...],
) -> DTensor:
    """
    This method is similar to ``DTensor.from_local()`` except it avoids some
    CPU overhead by avoiding default args and not being differentiable.
    """
    return DTensor(
        # Use the local tensor directly instead of constructing a new tensor
        # variable, e.g. with `view_as()`, since this is not differentiable
        local_tensor,
        device_mesh,
        placements,
        shape=global_size,
        dtype=local_tensor.dtype,
        requires_grad=local_tensor.requires_grad,
        stride=global_stride,
    )


def _to_dtype_if_needed(
    tensor: torch.Tensor, dtype: Optional[torch.dtype]
) -> torch.Tensor:
    if dtype is not None and tensor.dtype != dtype:
        return tensor.to(dtype)
    return tensor


def _cast_fp_tensor(dtype: torch.dtype, x: torch.Tensor) -> torch.Tensor:
    if (
        not isinstance(x, torch.Tensor)
        or not torch.is_floating_point(x)
        or x.dtype == dtype
    ):
        return x
    return x.to(dtype)
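The dim-0 padding and chunking helpers above are what keep FSDP shards evenly sized. A small worked sketch of what they compute, calling the private helpers exactly as defined in this file (values are illustrative):

import torch
from torch.distributed._composable.fsdp._fsdp_common import (
    _chunk_with_empty,
    _get_dim0_padded_size,
)

weight = torch.randn(10, 4)
# Pad dim-0 up to a multiple of the shard factor: ceil(10 / 4) * 4 == 12
assert _get_dim0_padded_size(weight.size(), 4) == torch.Size([12, 4])

# Chunking always returns `num_chunks` entries, appending empty tensors if needed
chunks = _chunk_with_empty(weight, num_chunks=4, dim=0)
assert [c.shape[0] for c in chunks] == [3, 3, 3, 1]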
venv/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py
ADDED
@@ -0,0 +1,133 @@
import warnings
from typing import Callable, Iterable, Optional, Union

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable.contract import contract
from torch.distributed._composable_state import _get_module_state, _insert_module_state
from torch.distributed.fsdp._common_utils import _FSDPState
from torch.distributed.fsdp._dynamo_utils import _annotate_modules_for_dynamo

from torch.distributed.fsdp._init_utils import (
    _init_buffer_state,
    _init_core_state,
    _init_device_handle,
    _init_ignored_module_states,
    _init_param_handle_from_module,
    _init_prefetching_state,
    _init_process_group_state,
    _init_runtime_state,
    _init_state_dict_state,
    HYBRID_SHARDING_STRATEGIES,
)
from torch.distributed.fsdp._runtime_utils import (
    _register_post_forward_hook,
    _register_pre_forward_hook,
    _register_root_pre_forward_hook,
)
from torch.distributed.fsdp._state_dict_utils import _register_all_state_dict_hooks
from torch.distributed.fsdp._wrap_utils import _auto_wrap
from torch.distributed.fsdp.api import (
    BackwardPrefetch,
    CPUOffload,
    MixedPrecision,
    ShardingStrategy,
)
from torch.distributed.fsdp.wrap import _Policy


@contract(state_cls=_FSDPState)
def fully_shard(
    module: nn.Module,
    *,
    process_group: Optional[dist.ProcessGroup] = None,
    policy: Optional[_Policy] = None,
    strategy: Optional[ShardingStrategy] = None,
    mixed_precision: Optional[MixedPrecision] = None,
    cpu_offload: Optional[CPUOffload] = None,
    ignored_modules: Optional[Iterable[torch.nn.Module]] = None,
    device_id: Optional[Union[int, torch.device]] = None,
    param_init_fn: Optional[Callable[[nn.Module], None]] = None,
    sync_module_states: bool = False,
    forward_prefetch: bool = False,
    ignored_states: Union[
        Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]
    ] = None,
) -> nn.Module:
    """
    Applies ``FullyShardedDataParallel`` (FSDP) semantics to ``module``.
    """
    warnings.warn(
        "``torch.distributed._composable.fully_shard`` is being deprecated. "
        "You can continue to use the wrapper based FSDP. "
        "See usage in: https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/fully_sharded_data_parallel.py. "
        "``torch.distributed._composable.fully_shard`` will be removed after PyTorch 2.5."
    )

    torch._C._log_api_usage_once("torch.distributed.fully_shard")
    # Enforce the new auto wrap policy
    if policy is not None and not isinstance(policy, _Policy):
        raise ValueError(f"Expects a `_Policy` but got {policy}")
    state = fully_shard.state(module)
    state = _init_ignored_module_states(state, module, ignored_modules, ignored_states)
    state = _init_device_handle(state, module, state._ignored_params, device_id)
    _annotate_modules_for_dynamo(module, state._ignored_modules, True)
    state = _init_process_group_state(state, process_group, strategy, policy)
    if policy is not None:
        root_kwargs = {
            "process_group": process_group,
            "strategy": strategy,
            "mixed_precision": mixed_precision,
            "cpu_offload": cpu_offload,
            "ignored_modules": ignored_modules,
            "device_id": device_id,
            "param_init_fn": param_init_fn,
            "sync_module_states": sync_module_states,
            "forward_prefetch": forward_prefetch,
            "ignored_states": ignored_states,
        }
        if strategy in HYBRID_SHARDING_STRATEGIES:
            root_kwargs["process_group"] = (state.process_group, state._inter_node_pg)
        _auto_wrap(
            module,
            policy,
            state._ignored_modules,
            state._ignored_params,
            root_kwargs,
            fully_shard,
        )
    state = _init_core_state(
        state,
        strategy or ShardingStrategy.FULL_SHARD,
        mixed_precision,
        cpu_offload,
        limit_all_gathers=True,
        use_orig_params=True,
        backward_prefetch_limit=1,
        forward_prefetch_limit=1,
    )
    state = _init_runtime_state(state)
    state = _init_prefetching_state(
        state, BackwardPrefetch.BACKWARD_PRE, forward_prefetch=forward_prefetch
    )
    state = _init_buffer_state(state, module)
    state = _init_param_handle_from_module(
        state, module, device_id, param_init_fn, sync_module_states
    )
    state = _init_state_dict_state(state)
    _register_all_state_dict_hooks(state)
    _register_pre_forward_hook(state, module)
    _register_post_forward_hook(state, module)
    _register_root_pre_forward_hook(state, module)  # prepend last
    # Always insert the state for the passed-in module even if it has no
    # managed parameters, in which case it has no handles and does not appear
    # in `_fully_sharded_module_to_handles`
    _insert_module_state(module, state)
    for submodule in module.modules():
        if (
            submodule in state._fully_sharded_module_to_handle
            and _get_module_state(submodule) is None
        ):
            _insert_module_state(submodule, state)
    return module
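`fully_shard` mirrors the wrapper-based FSDP constructor arguments and, per the warning above, is slated for removal after PyTorch 2.5. A hedged sketch of typical use under `torchrun`, with an illustrative model and wrap policy:

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable import fully_shard
from torch.distributed.fsdp.wrap import ModuleWrapPolicy

dist.init_process_group("nccl")  # assumes torchrun-provided env vars and one GPU per rank
torch.cuda.set_device(dist.get_rank() % torch.cuda.device_count())

model = nn.Transformer(d_model=64, nhead=4).cuda()
# Shard each encoder/decoder layer as its own unit, then the root module
fully_shard(
    model,
    policy=ModuleWrapPolicy({nn.TransformerEncoderLayer, nn.TransformerDecoderLayer}),
)

src, tgt = torch.randn(8, 2, 64).cuda(), torch.randn(8, 2, 64).cuda()
model(src, tgt).sum().backward()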
venv/lib/python3.10/site-packages/torch/distributed/_composable/replicate.py
ADDED
@@ -0,0 +1,154 @@
import weakref
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple

import torch
import torch.nn as nn
from torch.distributed._composable_state import _State
from torch.nn.parallel import DistributedDataParallel

from .contract import _get_registry, contract

_ROOT_MODULE_PREFIX = ""


class _ReplicateState(_State):
    def __init__(self) -> None:
        super().__init__()
        self.module: nn.Module = nn.ParameterList()
        self.has_initialized: bool = False
        self._param_list: nn.ParameterList = nn.ParameterList()
        # TODO(@fegin): this variable was originally created for testing, we
        # should remove this if possible.
        self._param_names: List[str] = []

    def _collect_params(
        self,
        module: nn.Module,
        ignored_modules: Set[nn.Module],
        ignored_params: Set[nn.Parameter],
        prefix: str = _ROOT_MODULE_PREFIX,
    ) -> None:
        # skip if managed by fully_sharded API
        if _is_fully_sharded(module):
            return

        # if a module is ignored, all descendants of the module are ignored.
        if module in ignored_modules:
            return

        recurse_prefix = (
            f"{prefix}." if prefix != _ROOT_MODULE_PREFIX else _ROOT_MODULE_PREFIX
        )

        for n, p in module.named_parameters(recurse=False):
            if p not in ignored_params:
                self._param_list.append(p)
                self._param_names.append(f"{recurse_prefix}{n}")

        for name, child_module in module.named_children():
            self._collect_params(
                child_module,
                ignored_modules,
                ignored_params,
                prefix=f"{recurse_prefix}{name}",
            )

    def init(
        self,
        module: nn.Module,
        ignored_modules: Set[nn.Module],
        **kwargs,
    ) -> None:
        if _is_fully_sharded(module):
            raise RuntimeError(
                "Cannot apply `replicate()` on a Module already managed by `fully_shard`"
            )

        if self.has_initialized:
            return

        self.has_initialized = True
        self.module = module
        ignored_params = {p for m in ignored_modules for p in m.parameters()}
        self._collect_params(module, ignored_modules, ignored_params)
        module.register_forward_pre_hook(self.forward_pre_hook, with_kwargs=True)
        module.register_forward_hook(self.forward_post_hook)  # type: ignore[arg-type]

        if "device_id" in kwargs:
            # replicate() supports a small usability enhancement where
            # user can pass in device_id as a Union[int, torch.device] even for
            # CPU devices so users don't have to change code for CPU/GPU runs.
            # We derive the right device_ids to feed into DDP to support this.
            if kwargs["device_id"] is not None:
                device_id = kwargs["device_id"]
                # Convert to device_ids that DDP expects.
                if isinstance(device_id, torch.device) and device_id.type == "cpu":
                    # CPU modules receive device_ids None
                    kwargs["device_ids"] = None
                else:
                    # GPU modules expect device_ids=[cuda_device]
                    kwargs["device_ids"] = [device_id]
            else:
                kwargs["device_ids"] = None
            kwargs.pop("device_id")

        self._ddp = DistributedDataParallel(self._param_list, **kwargs)
        # Weakref to the DDP instance is currently only used for testing.
        replicate.state(self.module)._ddp_weakref = weakref.ref(self._ddp)

    def forward_pre_hook(
        self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
    ) -> Any:
        return self._ddp._pre_forward(*args, **kwargs)

    def forward_post_hook(
        self,
        module: nn.Module,
        input: Tuple[torch.Tensor],
        output: torch.Tensor,
    ) -> torch.Tensor:
        return self._ddp._post_forward(output)


@contract(state_cls=_ReplicateState)
def replicate(
    module: nn.Module,
    ignored_modules: Optional[Iterable[torch.nn.Module]] = None,
    **kwargs,
) -> nn.Module:
    r"""Replicates a module

    Args:
        module (torch.nn.Module): module to replicate

    Example::
        >>> # xdoctest: +REQUIRES(module:torch._C._distributed_c10d)
        >>> module = nn.Linear(3, 3)
        >>> replicate(module)
    """
    torch._C._log_api_usage_once("torch.distributed.replicate")

    # TODO(fegin): using kwargs is not a good idea if we would like to make
    # replicate a formal API to replace DDP.
    if "device_id" in kwargs:
        if not isinstance(kwargs["device_id"], (int, torch.device)):
            raise RuntimeError(
                "Expected device_id to be int or torch.device, "
                f"but got {type(kwargs['device_id'])}"
            )

    if ignored_modules is None:
        ignored_modules = {}
    else:
        ignored_modules = set(ignored_modules)
    replicate.state(module).init(module, ignored_modules, **kwargs)

    return module


def _is_fully_sharded(module: nn.Module) -> bool:
    r"""Check if module is marked with fully_shard."""
    registry = _get_registry(module)
    if registry is None:
        return False
    return "fully_shard" in registry
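Because of the `device_id` translation in `_ReplicateState.init`, the same `replicate` call can serve CPU and GPU ranks. A hedged sketch with a CPU/gloo process group (the setup is illustrative):

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable import replicate

dist.init_process_group("gloo")  # assumes torchrun-provided env vars

model = nn.Linear(8, 8)
# On CPU, device_id=torch.device("cpu") becomes device_ids=None for the underlying DDP;
# on GPU ranks, pass the local CUDA device instead.
replicate(model, device_id=torch.device("cpu"))

model(torch.randn(4, 8)).sum().backward()  # gradients are all-reduced across ranks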
venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (762 Bytes)
venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/comm_mode.cpython-310.pyc
ADDED
Binary file (3.08 kB)
venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/op_coverage.cpython-310.pyc
ADDED
Binary file (2.79 kB)
venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/visualize_sharding.cpython-310.pyc
ADDED
Binary file (4.93 kB)
venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__init__.py
ADDED
@@ -0,0 +1,10 @@
# Copyright (c) Meta Platforms, Inc. and affiliates
from .embedding_ops import *  # noqa: F403
from .matrix_ops import *  # noqa: F403
from .math_ops import *  # noqa: F403
from .tensor_ops import *  # noqa: F403
from .pointwise_ops import *  # noqa: F403
from .random_ops import *  # noqa: F403
from .view_ops import *  # noqa: F403
from .conv_ops import *  # noqa: F403
from .experimental_ops import *  # noqa: F403
venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (406 Bytes)
venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/basic_strategy.cpython-310.pyc
ADDED
Binary file (3.75 kB)
venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/common_rules.cpython-310.pyc
ADDED
Binary file (6.18 kB)
venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/conv_ops.cpython-310.pyc
ADDED
Binary file (2.32 kB)
venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/embedding_ops.cpython-310.pyc
ADDED
Binary file (7.18 kB)
venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/experimental_ops.cpython-310.pyc
ADDED
Binary file (1.55 kB)