diff --git a/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..4a57b5f4a71fb70831f10eaa68f441670a227144 --- /dev/null +++ b/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23df29fc283a4778362ca11878b726b106636e57f13cafdd3b7e8d152f7cd803 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..f717f09590aff264f95a9cb388ffd9e64f019ce4 --- /dev/null +++ b/ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3536c32890a60ae164b6183fe9a28b27f077c525d28082130c1170ce3a102b66 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..accb49337815fbe5ce80aec0d8cc0d62b3e4cd82 --- /dev/null +++ b/ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85850ddefde2ae71ef563fbe93a20541501ab97aa8bddcba877f75f88b477b49 +size 33555627 diff --git a/ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..a53eaf4cd90d4c13774260b045e4287c09e90cef --- /dev/null +++ b/ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9481d4fb12dce0ead4f14f9da839d0eee10d5a75575757b7d83941aff4daa47 +size 33555627 diff --git a/ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..78b8598f5829c4f3d6d1237683ea5ff34c6bfe26 --- /dev/null +++ b/ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68c64b8c5e27d75610444fb8d6c0c947755967dc23edb76508400582707ccc28 +size 33555533 diff --git a/ckpts/universal/global_step120/zero/19.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/19.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..e508f67d8e3245602a5f8a5866e0404f9fb42ff3 --- /dev/null +++ b/ckpts/universal/global_step120/zero/19.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98dd5fa13a50c5a826026c48b1dc8b5d5bcf612a6e3c73820746a66b9c37e117 +size 16778396 diff --git a/ckpts/universal/global_step120/zero/19.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/19.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..e6ea2543511ca5f710130c465eec007977a8e403 --- /dev/null +++ b/ckpts/universal/global_step120/zero/19.attention.dense.weight/exp_avg_sq.pt @@ 
-0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:925d12e782d3b4c723c33574c3e050f75f71219966d298f9e666fc6ff9a74092 +size 16778411 diff --git a/ckpts/universal/global_step120/zero/19.attention.dense.weight/fp32.pt b/ckpts/universal/global_step120/zero/19.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..2dc8c0092953442962b8f97db9af92a4010ef0b5 --- /dev/null +++ b/ckpts/universal/global_step120/zero/19.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1ad2fd75bd755ea7368b4b3c7ffd4939641bf426bd66fee5f584f0c1bc40969 +size 16778317 diff --git a/ckpts/universal/global_step120/zero/4.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/4.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..edc178ad74a1bca3a2bbc71220c9aff426389f62 --- /dev/null +++ b/ckpts/universal/global_step120/zero/4.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f6deb500a5774778a3f48fb763ff31bdabad47a2645e52c8ee5fd9b8f8393f8 +size 9372 diff --git a/ckpts/universal/global_step120/zero/4.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/4.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..4228adc6e09fbd0b5739cb6e602f71331fd3109d --- /dev/null +++ b/ckpts/universal/global_step120/zero/4.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3f70fb651b3ec524d6c5654873f3b4ec8b5db182812412661ab7f6449776850 +size 9387 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fb2e41c6241e6a7e757eab650e51b93347f4c9c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_composable_state.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_composable_state.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c1a75ada7d7a97c3abb483db8b9aeb6525f6f86 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_composable_state.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_functional_collectives.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_functional_collectives.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f1dfe115c2314cf2bbe2f161765b99fa6710676 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_functional_collectives.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_functional_collectives_impl.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_functional_collectives_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1225923be6a9f1377126e165be530c7da06b51cd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_functional_collectives_impl.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_state_dict_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_state_dict_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..847041c3030160ae2093dcd850ccbd3f201bc718 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/_state_dict_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/argparse_util.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/argparse_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..541142435bb55c265b64e35e6539ad291df2a25a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/argparse_util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/c10d_logger.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/c10d_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b397b3844f5184a462326dceba6e3673b519ba73 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/c10d_logger.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/collective_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/collective_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5473bafeef407cc98e40b553d1d7b9c0c9ff6ca3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/collective_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42c4735e935e7108edd1e1b08efb59c66e4afe30 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/device_mesh.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/device_mesh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d9c7e0b6e3c68658755ebc19853551a744b95ab Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/device_mesh.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/distributed_c10d.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/distributed_c10d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f75273c414081dba982106c33f0a38a22989c2a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/distributed_c10d.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/launch.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/launch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4cbc9e9a4039b9d827c4929b5a0fa3a83832962 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/launch.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/logging_handlers.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/logging_handlers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab1dbcada3cdda2f7da417a4c02043853d0410e2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/logging_handlers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/remote_device.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/remote_device.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82361a6c7ef809362bbe352034e77d1fa2f0dfaf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/remote_device.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/rendezvous.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/rendezvous.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af2f1c6c7d6ad95162fd87cc3e79819fc271307d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/rendezvous.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/run.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/run.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a3a140a361048897a57a804f41bea54dc7e848e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/run.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebae3746164cfc1c45f54f017b14bee2ccd17212 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0f6f9ccfa27ee04f0138daa65df48470fe24d770 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/__init__.py @@ -0,0 +1,4 @@ +from .checkpoint_activation import checkpoint +from .contract import _get_registry, contract +from .fully_shard import fully_shard +from .replicate import replicate diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..042f886e11f84bd238dbde58dc553cf1d45f84ab Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1137ee6fa34b61459d414be970547c373332fbae Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10a6ee6ffc145c368d71c5c096292c90ddc5b285 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4576046dcf3adac17741ed529ffbbcb025f1eb58 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae42a895a5053b3de6e8f0fdde394ef4a63984ad Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py new file mode 100644 index 0000000000000000000000000000000000000000..8accef6afc3433c3371955eceb9f626e6fcd8558 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py @@ -0,0 +1,94 @@ +from contextlib import contextmanager, nullcontext +from typing import Any, Tuple + +import torch +import torch.nn as nn +from torch.utils.checkpoint import ( + _checkpoint_without_reentrant_generator, + _DEFAULT_DETERMINISM_MODE, +) + +from .contract import contract + + +@contextmanager +def _no_hook(module: nn.Module): + r""" + Disable hooks installed by checkpoint to avoid unintentional recursion + during backward recomputation. + """ + orig_enable_hook = checkpoint.state(module).enable_hook + checkpoint.state(module).enable_hook = False + try: + yield + finally: + checkpoint.state(module).enable_hook = orig_enable_hook + + +@contract() +def checkpoint(module: nn.Module) -> nn.Module: + r""" + This is a composable activation checkpointing API. Unlike functional + activation checkpointing APIs, this one does not require changing model + source code. Unlike ``nn.Module`` wrapper activation checkpointing APIs, + this one does not modify model structure or fully-qualified names either. + Under the hood, it registers activation checkpointing logic as pre- and + post-forward hooks. Hence, this API can be easily applied to any model or + sub-modules in the model. + + Args: + module (nn.Module): the target model or sub-module to apply activation + checkpointing. 
+ + Example:: + >>> # xdoctest: +SKIP + >>> import torch.nn as nn + >>> + >>> class MyModel(nn.Module): + >>> def __init__(self): + >>> super().__init__() + >>> self.l1 = nn.Linear(10, 10) + >>> self.l2 = nn.Linear(10, 10) + >>> + >>> def forward(self, x): + >>> return self.l2(self.l1(x)) + >>> + >>> model = MyModel() + >>> checkpoint(model.l1) # apply activation checkpointing only to l1 + >>> model(torch.zeros(2, 10)).sum().backward() + + """ + torch._C._log_api_usage_once("torch.distributed.checkpoint") + + def forward_pre_hook(module: nn.Module, inputs: Tuple[Any, ...]) -> None: + if checkpoint.state(module).enable_hook: + + def context_fns(): + return nullcontext(), _no_hook(module) + + checkpoint.state( + module + )._ac_generator = _checkpoint_without_reentrant_generator( + module, True, context_fns, _DEFAULT_DETERMINISM_MODE, False, *inputs + ) + next(checkpoint.state(module)._ac_generator) + + def forward_hook(module: nn.Module, inputs: Tuple[Any, ...], output: Any) -> Any: + if checkpoint.state(module).enable_hook: + try: + next(checkpoint.state(module)._ac_generator) + except StopIteration: + pass + else: + raise RuntimeError( + "Expected non-reentrant activation checkpoint generator to be exhausted, but it was not!" + ) + + # Ensure that we no longer hold on to the generator. always_call=True helps ensure we + # clear this even in the case of exception in fwd pass. + checkpoint.state(module)._ac_generator = None + + checkpoint.state(module).enable_hook = True + module.register_forward_pre_hook(forward_pre_hook) + module.register_forward_hook(forward_hook, prepend=True, always_call=True) + return module diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/contract.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/contract.py new file mode 100644 index 0000000000000000000000000000000000000000..2a6983023f76e26698a3c4e8ee477fae9e7eb508 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/contract.py @@ -0,0 +1,194 @@ +import uuid +from collections import OrderedDict +from functools import wraps +from typing import Callable, Dict, List, Optional, Type + +import torch.nn as nn +from torch.distributed._composable_state import _State + + +def generate_state_key(string="__composable_api_state_key"): + return f"{string}_{str(uuid.uuid4())}" + + +STATE_KEY = generate_state_key() +REGISTRY_KEY = generate_state_key() + + +# TODO: we can add additional info to RegistryItem to share across APIs. E.g., +# we can add args and kwargs here, and then we can detect whether fully_shard +# is combined with reentrant activation checkpointing and error out with a clear +# message. +class RegistryItem: + pass + + +def contract(state_cls: Type[_State] = _State): + r""" + Decorate a function as a composable distributed API, where the first + argument of the function must be an :class:`nn.Module` instance. The + decorator verifies that the wrapped function does not modify parameter, + buffer or sub-module fully-qualified names (FQN). + + When a function ``func`` is decorated by ``@contract()``, a + ``.state(module: nn.Module)`` method will be installed to the decorated + function. Then you can retrieve and modify the state on a module by calling + ``func.state(module)``. 
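As an illustrative aside (not part of the diff): the `checkpoint` hooks above drive torch's private `_checkpoint_without_reentrant_generator`, but the underlying control flow is just "start a generator in a pre-forward hook, exhaust it in a post-forward hook". A minimal standalone sketch of that pattern, with a plain generator instead of the torch internals so it runs on its own:

```python
# Sketch only: mirrors the pre-hook/post-hook generator handshake used by
# `checkpoint` above, without touching torch's private checkpoint machinery.
from typing import Any, Dict, Generator


def _bracketing_generator(tag: str) -> Generator[None, None, None]:
    print(f"[{tag}] setup before forward")    # runs when the pre-hook calls next()
    yield                                     # the forward pass runs while suspended here
    print(f"[{tag}] teardown after forward")  # runs when the post-hook resumes it


_state: Dict[str, Any] = {"gen": None}


def forward_pre_hook(tag: str) -> None:
    _state["gen"] = _bracketing_generator(tag)
    next(_state["gen"])  # advance to the yield, i.e. run the setup phase


def forward_hook() -> None:
    try:
        next(_state["gen"])  # resume; StopIteration means it finished cleanly
    except StopIteration:
        pass
    else:
        raise RuntimeError("generator should have been exhausted")
    finally:
        _state["gen"] = None  # drop the reference, as the real post-hook does


forward_pre_hook("layer1")
print("forward pass runs here")
forward_hook()
```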
+ + Example:: + >>> # xdoctest: +SKIP + >>> import torch.nn as nn + >>> + >>> class MyModel(nn.Module): + >>> def __init__(self): + >>> super().__init__() + >>> self.l1 = nn.Linear(10, 10) + >>> self.l2 = nn.Linear(10, 10) + >>> + >>> def forward(self, x): + >>> return self.l2(self.l1(x)) + >>> + >>> @contract() + >>> def my_feature(module: nn.Module) -> nn.Module: + >>> my_feature.state(module).some_state = "any value" + >>> return module + >>> + >>> model = MyModel() + >>> my_feature(model.l1) + >>> assert my_feature.state(model.l1).some_state == "any value" + >>> my_feature(model.l2) + >>> model(torch.randn(2, 10)).sum().backward() + """ + + # wraps will make functions decorated with contract() pickleable - needed for integration with torch.package + @wraps(state_cls) + def inner(func): + @wraps(func) + def wrapper(module: nn.Module, *args, **kwargs) -> Optional[nn.Module]: + # get existing global states + default_all_state: Dict[Callable, _State] = OrderedDict() + all_state: Dict[Callable, _State] = module.__dict__.setdefault( # type: ignore[call-overload] + STATE_KEY, default_all_state + ) + assert isinstance( + all_state, dict + ), "Distributed composable API states corrupted" + + # get global registry + default_registry: Dict[str, RegistryItem] = OrderedDict() + registry: Dict[str, RegistryItem] = module.__dict__.setdefault( # type: ignore[call-overload] + REGISTRY_KEY, default_registry + ) + + assert isinstance( + registry, dict + ), "Distributed composable API registry corrupted" + + # make sure the API func has not been applied to the input module yet. + assert func not in all_state and func.__name__ not in registry, ( + "Each distinct composable distributed API can only be applied to a " + f"module once. {func.__name__} has already been applied to the " + f"following module.\n{module}" + ) + + # install states specific to the wrapped ``func`` + all_state.setdefault(func, state_cls()) + # register ``func`` in the global registry by name + registry.setdefault(func.__name__, RegistryItem()) + + orig_named_params = OrderedDict(module.named_parameters()) + orig_named_buffers = OrderedDict( + module.named_buffers(remove_duplicate=False) + ) + orig_named_modules = OrderedDict( + module.named_modules(remove_duplicate=False) + ) + + updated = func(module, *args, **kwargs) + + if updated is None: + updated = module + + new_named_params = OrderedDict(updated.named_parameters()) + new_named_buffers = OrderedDict( + updated.named_buffers(remove_duplicate=False) + ) + new_named_modules = OrderedDict( + updated.named_modules(remove_duplicate=False) + ) + + assert isinstance(updated, nn.Module), ( + "Output of composable distributed APIs must be either None or " + f"nn.Module, but got {type(updated)}" + ) + + def check_fqn(orig_fqns: List[str], new_fqns: List[str], check_key: str): + if orig_fqns == new_fqns: + return + + orig_fqn_set, new_fqn_set = set(orig_fqns), set(new_fqns) + orig_only = orig_fqn_set - new_fqn_set + new_only = new_fqn_set - orig_fqn_set + if len(orig_only) or len(new_only): + raise RuntimeError( + f"{check_key}" + "Composable distributed API implementations cannot modify " + "FQNs.\n" + f"Only in original FQNs: {orig_only},\n" + f"Only in new FQNs: {new_only}" + ) + else: + raise RuntimeError( + f"{check_key}" + "Composable distributed API implementations cannot modify " + "the order of FQNs.\n" + f"Original FQNs: {orig_only}\n" + f"New FQNs: {new_only}" + ) + + check_fqn( + list(orig_named_params.keys()), + list(new_named_params.keys()), + "Check parameters, ", + ) + 
check_fqn( + list(orig_named_buffers.keys()), + list(new_named_buffers.keys()), + "Check buffer, ", + ) + check_fqn( + list(orig_named_modules.keys()), + list(new_named_modules.keys()), + "Check modules, ", + ) + + # TODO: a stricter verification should also reject changing module + # types and monkey-patching forward() method implementations. + + # TODO: verify that installed distributed paradigms are compatible with + # each other. + + return updated + + def get_state(module: nn.Module) -> Optional[_State]: + return module.__dict__.setdefault( # type: ignore[call-overload] + STATE_KEY, + {}, # TODO(@yhcharles): this is a temporary fix, need a better way + ).get( + func + ) # type: ignore[call-overload] + + wrapper.state = get_state # type: ignore[attr-defined] + + return wrapper + + return inner + + +def _get_registry(module: nn.Module) -> Optional[Dict[str, RegistryItem]]: + r""" + Get an ``OrderedDict`` of composable APIs that have been applied to the + ``module``, indexed by the API name. If no API has been applied, then this + returns ``None``. + """ + return getattr(module, REGISTRY_KEY, None) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_api.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_api.py new file mode 100644 index 0000000000000000000000000000000000000000..26444ccf1e087383be1a3c0783c3c0ea4d402640 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_api.py @@ -0,0 +1,52 @@ +from dataclasses import dataclass +from typing import Optional + +import torch + + +@dataclass(frozen=True) +class MixedPrecisionPolicy: + """ + This configures FSDP's mixed precision. Unlike autocast, this applies mixed + precision at the module level, not op level, which means low-precision + activations are saved for backward and high-to-low-precision casts are + incurred only at module boundaries. + + FSDP works well with module-level mixed precision since it keeps the + high-precision sharded parameters in memory anyway. In other words, FSDP + does not require any extra memory to keep a high-precision copy of the + parameters for the optimizer step. + + Attributes: + param_dtype (Optional[torch.dtype]): This specifies the dtype for + the unsharded parameter and hence the dtype for forward/backward + computation and the parameter all-gather. If this is ``None``, then + the unsharded parameter uses the original dtype. The optimizer step + uses the sharded parameter in the original dtype. (Default: + ``None``) + reduce_dtype (Optional[torch.dtype]): This specifies the dtype for + gradient reduction (i.e. reduce-scatter or all-reduce). If this is + ``None`` but ``param_dtype`` is not ``None``, then the reduction + uses the compute dtype. This can be used to run gradient reduction + in full precision while using low precision for compute. (Default: + ``None``) + output_dtype (Optional[torch.dtype]): This specifies the dtype for + casting floating-point forward outputs. This can be used to + help implement cases where different modules have different mixed + precision policies. (Default: ``None``) + cast_forward_inputs (bool): This specifies whether FSDP should cast the + forward's floating-point input tensors to ``param_dtype`` or not. 
+ """ + + param_dtype: Optional[torch.dtype] = None + reduce_dtype: Optional[torch.dtype] = None + output_dtype: Optional[torch.dtype] = None + cast_forward_inputs: bool = True + + def __post_init__(self): + # Clamp `reduce_dtype` to `None` if no casting is required: since + # gradients are computed in `param_dtype`, if `reduce_dtype` matches, + # then we do not need extra casting + if self.param_dtype == self.reduce_dtype: + # Bypass the frozen dataclass checks + object.__setattr__(self, "reduce_dtype", None) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py new file mode 100644 index 0000000000000000000000000000000000000000..94b0249177697d99ce4ee13355cdd0cdb4b1de27 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py @@ -0,0 +1,151 @@ +import math +import traceback + +from dataclasses import dataclass +from enum import auto, Enum +from typing import Any, cast, List, Optional, Tuple + +import torch +import torch.distributed as dist +import torch.nn as nn +from torch.distributed._composable.contract import _get_registry +from torch.distributed._tensor import DeviceMesh, DTensor, Placement + + +@dataclass +class DataParallelMeshInfo: + mesh: DeviceMesh + shard_mesh_dim: Optional[int] = None + replicate_mesh_dim: Optional[int] = None + + def __post_init__(self): + if self.shard_mesh_dim is None and self.replicate_mesh_dim is None: + raise AssertionError( + "At least one of shard_mesh_dim and replicate_mesh_dim must not be None" + ) + + +@dataclass +class FSDPMeshInfo(DataParallelMeshInfo): + def __post_init__(self): + super().__post_init__() + if self.shard_mesh_dim is None: + raise AssertionError("Expects non-None shard_mesh_dim") + self.shard_mesh_size: int = self.mesh.size(self.shard_mesh_dim) + self.shard_process_group = cast( + dist.ProcessGroup, self.mesh.get_group(self.shard_mesh_dim) + ) + self.shard_mesh_rank: int = self.shard_process_group.rank() + + +@dataclass +class DDPMeshInfo(DataParallelMeshInfo): + def __post_init__(self): + super().__post_init__() + if self.replicate_mesh_dim is None: + raise AssertionError("Expects non-None replicate_mesh_dim") + self.replicate_mesh_size: int = self.mesh.size(self.replicate_mesh_dim) + self.replicate_process_group = cast( + dist.ProcessGroup, self.mesh.get_group(self.replicate_mesh_dim) + ) + self.replicate_mesh_rank: int = self.replicate_process_group.rank() + + +@dataclass +class HSDPMeshInfo(FSDPMeshInfo, DDPMeshInfo): + def __post_init__(self): + # Calls `FSDPMeshInfo` -> `DDPMeshInfo` -> `DataParallelMeshInfo` + super().__post_init__() + + +class TrainingState(Enum): + """Describes the training state of one FSDP state / parameter group.""" + + # Transition to forward starting pre-forward until post-forward + FORWARD = auto() + # Transition to pre-backward when unsharding in backward + PRE_BACKWARD = auto() + # Transition to post-backward when resharding and reducing gradients + POST_BACKWARD = auto() + # Idle before/after forward or before pre-backward/after post-backward + IDLE = auto() + + +def _raise_assert_with_print(*args: Any, **kwargs: Any): + print(f"[Rank {dist.get_rank()}] ", end="") + print(*args, **kwargs) + traceback.print_stack() + raise AssertionError(*args, **kwargs) + + +def _is_composable_with_fsdp(module: nn.Module) -> bool: + registry = _get_registry(module) + if registry is None: + return True + # Registry keys by function name + 
return "replicate" not in registry + + +def _get_dim0_padded_size(tensor_size: torch.Size, dim0_factor: int) -> torch.Size: + padded_dim0 = math.ceil(tensor_size[0] / dim0_factor) * dim0_factor + return cast(torch.Size, torch.Size([padded_dim0]) + tensor_size[1:]) + + +def _chunk_with_empty( + tensor: torch.Tensor, num_chunks: int, dim: int +) -> List[torch.Tensor]: + chunks = list(torch.chunk(tensor, num_chunks, dim=dim)) + while len(chunks) < num_chunks: + chunks.append(chunks[0].new_empty(0)) + return chunks + + +def _get_dim0_chunked_size( + chunk: torch.Tensor, unchunked_size: torch.Size +) -> torch.Size: + if chunk.numel() > 0: + return chunk.size() + # For 0 numel, we need to preserve trailing dims for DTensor APIs + return cast(torch.Size, torch.Size([0]) + unchunked_size[1:]) + + +def _from_local_no_grad( + local_tensor: torch.Tensor, + device_mesh: DeviceMesh, + placements: Tuple[Placement, ...], + global_size: torch.Size, + global_stride: Tuple[int, ...], +) -> DTensor: + """ + This method is similar to ``DTensor.from_local()`` except it avoids some + CPU overhead by avoiding default args and not being differentiable. + """ + return DTensor( + # Use the local tensor directly instead of constructing a new tensor + # variable, e.g. with `view_as()`, since this is not differentiable + local_tensor, + device_mesh, + placements, + shape=global_size, + dtype=local_tensor.dtype, + requires_grad=local_tensor.requires_grad, + stride=global_stride, + ) + + +def _to_dtype_if_needed( + tensor: torch.Tensor, dtype: Optional[torch.dtype] +) -> torch.Tensor: + if dtype is not None and tensor.dtype != dtype: + return tensor.to(dtype) + return tensor + + +def _cast_fp_tensor(dtype: torch.dtype, x: torch.Tensor) -> torch.Tensor: + if ( + not isinstance(x, torch.Tensor) + or not torch.is_floating_point(x) + or x.dtype == dtype + ): + return x + return x.to(dtype) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py new file mode 100644 index 0000000000000000000000000000000000000000..37e3d1544cd176438b1173627b819f58a747e2a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py @@ -0,0 +1,133 @@ +import warnings +from typing import Callable, Iterable, Optional, Union + +import torch +import torch.distributed as dist +import torch.nn as nn +from torch.distributed._composable.contract import contract +from torch.distributed._composable_state import _get_module_state, _insert_module_state +from torch.distributed.fsdp._common_utils import _FSDPState +from torch.distributed.fsdp._dynamo_utils import _annotate_modules_for_dynamo + +from torch.distributed.fsdp._init_utils import ( + _init_buffer_state, + _init_core_state, + _init_device_handle, + _init_ignored_module_states, + _init_param_handle_from_module, + _init_prefetching_state, + _init_process_group_state, + _init_runtime_state, + _init_state_dict_state, + HYBRID_SHARDING_STRATEGIES, +) +from torch.distributed.fsdp._runtime_utils import ( + _register_post_forward_hook, + _register_pre_forward_hook, + _register_root_pre_forward_hook, +) +from torch.distributed.fsdp._state_dict_utils import _register_all_state_dict_hooks +from torch.distributed.fsdp._wrap_utils import _auto_wrap +from torch.distributed.fsdp.api import ( + BackwardPrefetch, + CPUOffload, + MixedPrecision, + ShardingStrategy, +) +from torch.distributed.fsdp.wrap import _Policy + + +@contract(state_cls=_FSDPState) +def 
fully_shard( + module: nn.Module, + *, + process_group: Optional[dist.ProcessGroup] = None, + policy: Optional[_Policy] = None, + strategy: Optional[ShardingStrategy] = None, + mixed_precision: Optional[MixedPrecision] = None, + cpu_offload: Optional[CPUOffload] = None, + ignored_modules: Optional[Iterable[torch.nn.Module]] = None, + device_id: Optional[Union[int, torch.device]] = None, + param_init_fn: Optional[Callable[[nn.Module], None]] = None, + sync_module_states: bool = False, + forward_prefetch: bool = False, + ignored_states: Union[ + Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]] + ] = None, +) -> nn.Module: + """ + Applies ``FullyShardedDataParallel` (FSDP) semantics to ``module``. + """ + warnings.warn( + "``torch.distributed._composable.fully_shard`` is being deprecated." + "You can contintue to use the wrapper based FSDP." + "See usage in: https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/fully_sharded_data_parallel.py." + "``torch.distributed._composable.fully_shard`` will be removed after PyTorch 2.5." + ) + + torch._C._log_api_usage_once("torch.distributed.fully_shard") + # Enforce the new auto wrap policy + if policy is not None and not isinstance(policy, _Policy): + raise ValueError(f"Expects a `_Policy` but got {policy}") + state = fully_shard.state(module) + state = _init_ignored_module_states(state, module, ignored_modules, ignored_states) + state = _init_device_handle(state, module, state._ignored_params, device_id) + _annotate_modules_for_dynamo(module, state._ignored_modules, True) + state = _init_process_group_state(state, process_group, strategy, policy) + if policy is not None: + root_kwargs = { + "process_group": process_group, + "strategy": strategy, + "mixed_precision": mixed_precision, + "cpu_offload": cpu_offload, + "ignored_modules": ignored_modules, + "device_id": device_id, + "param_init_fn": param_init_fn, + "sync_module_states": sync_module_states, + "forward_prefetch": forward_prefetch, + "ignored_states": ignored_states, + } + if strategy in HYBRID_SHARDING_STRATEGIES: + root_kwargs["process_group"] = (state.process_group, state._inter_node_pg) + _auto_wrap( + module, + policy, + state._ignored_modules, + state._ignored_params, + root_kwargs, + fully_shard, + ) + state = _init_core_state( + state, + strategy or ShardingStrategy.FULL_SHARD, + mixed_precision, + cpu_offload, + limit_all_gathers=True, + use_orig_params=True, + backward_prefetch_limit=1, + forward_prefetch_limit=1, + ) + state = _init_runtime_state(state) + state = _init_prefetching_state( + state, BackwardPrefetch.BACKWARD_PRE, forward_prefetch=forward_prefetch + ) + state = _init_buffer_state(state, module) + state = _init_param_handle_from_module( + state, module, device_id, param_init_fn, sync_module_states + ) + state = _init_state_dict_state(state) + _register_all_state_dict_hooks(state) + _register_pre_forward_hook(state, module) + _register_post_forward_hook(state, module) + _register_root_pre_forward_hook(state, module) # prepend last + # Always insert the state for the passed-in module even if it has no + # managed parameters, in which case it has no handles and does not appear + # in `_fully_sharded_module_to_handles` + _insert_module_state(module, state) + for submodule in module.modules(): + if ( + submodule in state._fully_sharded_module_to_handle + and _get_module_state(submodule) is None + ): + _insert_module_state(submodule, state) + return module diff --git 
a/venv/lib/python3.10/site-packages/torch/distributed/_composable/replicate.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/replicate.py new file mode 100644 index 0000000000000000000000000000000000000000..b3205f9aff0352a390c1acbe503165c5cf07d60b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/replicate.py @@ -0,0 +1,154 @@ +import weakref +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple + +import torch +import torch.nn as nn +from torch.distributed._composable_state import _State +from torch.nn.parallel import DistributedDataParallel + +from .contract import _get_registry, contract + +_ROOT_MODULE_PREFIX = "" + + +class _ReplicateState(_State): + def __init__(self) -> None: + super().__init__() + self.module: nn.Module = nn.ParameterList() + self.has_initialized: bool = False + self._param_list: nn.ParameterList = nn.ParameterList() + # TODO(@fegin): this variable is originally create for testing, we + # should remove this if possible. + self._param_names: List[str] = [] + + def _collect_params( + self, + module: nn.Module, + ignored_modules: Set[nn.Module], + ignored_params: Set[nn.Parameter], + prefix: str = _ROOT_MODULE_PREFIX, + ) -> None: + # skip if managed by fully_sharded API + if _is_fully_sharded(module): + return + + # if a module is ignored, all descendants of the module are ignored. + if module in ignored_modules: + return + + recurse_prefix = ( + f"{prefix}." if prefix != _ROOT_MODULE_PREFIX else _ROOT_MODULE_PREFIX + ) + + for n, p in module.named_parameters(recurse=False): + if p not in ignored_params: + self._param_list.append(p) + self._param_names.append(f"{recurse_prefix}{n}") + + for name, child_module in module.named_children(): + self._collect_params( + child_module, + ignored_modules, + ignored_params, + prefix=f"{recurse_prefix}{name}", + ) + + def init( + self, + module: nn.Module, + ignored_modules: Set[nn.Module], + **kwargs, + ) -> None: + if _is_fully_sharded(module): + raise RuntimeError( + "Cannot apply `replicate()` on a Module already managed by `fully_shard`" + ) + + if self.has_initialized: + return + + self.has_initialized = True + self.module = module + ignored_params = {p for m in ignored_modules for p in m.parameters()} + self._collect_params(module, ignored_modules, ignored_params) + module.register_forward_pre_hook(self.forward_pre_hook, with_kwargs=True) + module.register_forward_hook(self.forward_post_hook) # type: ignore[arg-type] + + if "device_id" in kwargs: + # replicate() supports a small usability enhancement where + # user can pass in device_id as a Union[int, torch.device] even for + # CPU devices so users don't have to change code for CPU/GPU runs. + # We derive the right device_ids to feed into DDP to support this. + if kwargs["device_id"] is not None: + device_id = kwargs["device_id"] + # Convert to device_ids that DDP expects. + if isinstance(device_id, torch.device) and device_id.type == "cpu": + # CPU modules receive device_ids None + kwargs["device_ids"] = None + else: + # GPU modules expect device_ids=[cuda_device] + kwargs["device_ids"] = [device_id] + else: + kwargs["device_ids"] = None + kwargs.pop("device_id") + + self._ddp = DistributedDataParallel(self._param_list, **kwargs) + # Weakref to the DDP instance is currently only used for testing. 
+ replicate.state(self.module)._ddp_weakref = weakref.ref(self._ddp) + + def forward_pre_hook( + self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any] + ) -> Any: + return self._ddp._pre_forward(*args, **kwargs) + + def forward_post_hook( + self, + module: nn.Module, + input: Tuple[torch.Tensor], + output: torch.Tensor, + ) -> torch.Tensor: + return self._ddp._post_forward(output) + + +@contract(state_cls=_ReplicateState) +def replicate( + module: nn.Module, + ignored_modules: Optional[Iterable[torch.nn.Module]] = None, + **kwargs, +) -> nn.Module: + r"""Replicates a module + + Args: + module (torch.nn.Module): module to replicate + + Example:: + >>> # xdoctest: +REQUIRES(module:torch._C._distributed_c10d) + >>> module = nn.Linear(3, 3) + >>> replicate(module) + """ + torch._C._log_api_usage_once("torch.distributed.replicate") + + # TODO(fegin): using kwargs is not a good idea if we would like to make + # replicate a formal API to replace DDP. + if "device_id" in kwargs: + if not isinstance(kwargs["device_id"], (int, torch.device)): + raise RuntimeError( + "Expected device_id to be int or torch.device, " + f"but got {type(kwargs['device_id'])}" + ) + + if ignored_modules is None: + ignored_modules = {} + else: + ignored_modules = set(ignored_modules) + replicate.state(module).init(module, ignored_modules, **kwargs) + + return module + + +def _is_fully_sharded(module: nn.Module) -> bool: + r"""Check if module is marked with fully_shard.""" + registry = _get_registry(module) + if registry is None: + return False + return "fully_shard" in registry diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b159208701e7e05fb95d45e89f36494f8dbe708 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/comm_mode.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/comm_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f266ec46018b3066eb70e76ea729a666f7a3207e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/comm_mode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/op_coverage.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/op_coverage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7b9491af9eb0280483be731a76f4a08eb7fcca4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/op_coverage.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/visualize_sharding.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/visualize_sharding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e5bfeb0769156fde109816207bc9c1e304638a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__pycache__/visualize_sharding.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d19fdfa50cb70432e1a3dcb95981282a01a01e7b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from .embedding_ops import * # noqa: F403 +from .matrix_ops import * # noqa: F403 +from .math_ops import * # noqa: F403 +from .tensor_ops import * # noqa: F403 +from .pointwise_ops import * # noqa: F403 +from .random_ops import * # noqa: F403 +from .view_ops import * # noqa: F403 +from .conv_ops import * # noqa: F403 +from .experimental_ops import * # noqa: F403 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..328f0c6cb5642a6614856179c6dab4e8620d134d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/basic_strategy.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/basic_strategy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e91d641b8735a39e00f772430a523836dd01bdcf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/basic_strategy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/common_rules.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/common_rules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5d0fb248f334ce5904381d26854dd33e541e187 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/common_rules.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/conv_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/conv_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe37e19c9d1f8459ea2ab8121667d2d5efd24db9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/conv_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/embedding_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/embedding_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d14b92bcabe7eebd81637f1022afec6e8a41d31e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/embedding_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/experimental_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/experimental_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d131c559a15acc0a484b8ae51d47ce3ae998f34 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/experimental_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/math_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/math_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc38d6c4d4d13da5aa14c30e4a6850df4fc01dd5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/math_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/matrix_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/matrix_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b28543eccef79f99440505dad682ea4b405d7a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/matrix_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/pointwise_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/pointwise_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a37e0a9d1852436bf68b00f79d907be9c57c10c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/pointwise_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/random_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/random_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83620d731c18e6c0d5462ffb4be63a5a7dc549de Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/random_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/tensor_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/tensor_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a27071de71881706ce30b32f47e0d225264d4ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/tensor_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ea725c445d9fb7310d660a955cb1cda5b96ef7b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/view_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/view_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fac56f900d2aa2b942e98cb4083d4ce5f5644b3e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/view_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/basic_strategy.py 
b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/basic_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..80055281236fa9a7c186d00d0363eb947a22755b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/basic_strategy.py @@ -0,0 +1,184 @@ +import itertools +from dataclasses import dataclass + +from typing import List, Tuple + +from torch.distributed._tensor.op_schema import OpStrategy, PlacementStrategy +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) + +from torch.distributed.device_mesh import DeviceMesh + + +@dataclass +class EinsumDims: + contracting_dims: List[str] + batch_dims: List[str] + lhs_out_only_dims: List[str] + rhs_out_only_dims: List[str] + + @classmethod + def parse_equation(cls, equation: str) -> Tuple[List[str], str]: + # parse einop equation and extract arg specs + """ + Parse the einsum equation str to input dim chars and output dim char + """ + inputs, outputs = equation.split("->") + input_dims, output_dims = inputs.split(","), outputs.split(",") + + # NOTE: only support at most two inputs, and single output + # extend to support more inputs if needed in future + assert len(input_dims) <= 2, "Only support at most two inputs" + assert len(output_dims) == 1, "Only support single output" + output_dim = output_dims[0] + return input_dims, output_dim + + @classmethod + def parse_dims(cls, input_dims: List[str], output_dim: str) -> "EinsumDims": + """ + Parse the dims and extract the contracting, batch, and free dimensions + for the left and right hand sides. + """ + dim_char_set = set() + for input_dim in input_dims: + for input_char in list(input_dim): + dim_char_set.add(input_char) + + # get a determinisitc order of all dim chars + all_dim_chars = sorted(dim_char_set) + + # parse input and output dimensions + lhs_out_only_dims, rhs_out_only_dims = [], [] + batch_dims, contracting_dims = [], [] + + for dim_char in all_dim_chars: + if dim_char not in output_dim: + contracting_dims.append(dim_char) + else: + is_batch_dim = True + for input_dim in input_dims: + is_batch_dim = is_batch_dim and dim_char in input_dim + + if is_batch_dim: + batch_dims.append(dim_char) + else: + assert ( + len(input_dims) == 2 + ), "free dimension only supported for two inputs!" + lhs, rhs = input_dims + if dim_char in lhs: + lhs_out_only_dims.append(dim_char) + elif dim_char in rhs: + rhs_out_only_dims.append(dim_char) + else: + raise RuntimeError("Invalid dimension character") + + return cls( + contracting_dims=contracting_dims, + batch_dims=batch_dims, + lhs_out_only_dims=lhs_out_only_dims, + rhs_out_only_dims=rhs_out_only_dims, + ) + + +def gen_einsum_strategies( + equation: str, + mesh: DeviceMesh, + *, + linearity: bool = False, +) -> OpStrategy: + """ + Generate a strategy list for the ops that follow einsum style notation. + """ + # parse einop equation and extract dims + input_dims, output_dim = EinsumDims.parse_equation(equation) + edims = EinsumDims.parse_dims(input_dims, output_dim) + + all_mesh_dim_strategies = [] + + # generate strategies for each mesh dim + for mesh_dim in range(mesh.ndim): + mesh_dim_strategies = [] + + # placement list stores placements of [output, input1, input2, ...] 
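A worked example (added for illustration, assuming the `torch.distributed._tensor.ops.basic_strategy` module introduced above is importable in the installed torch build) of how `EinsumDims.parse_equation` and `parse_dims` classify einsum dimensions; the expected values follow directly from the code above.

```python
# Worked example of the dim classification performed by EinsumDims above.
from torch.distributed._tensor.ops.basic_strategy import EinsumDims

# Plain matmul: mk,kn->mn
input_dims, output_dim = EinsumDims.parse_equation("mk,kn->mn")
dims = EinsumDims.parse_dims(input_dims, output_dim)
assert dims.contracting_dims == ["k"]   # reduced away, absent from the output
assert dims.batch_dims == []            # no dim appears in both inputs and the output
assert dims.lhs_out_only_dims == ["m"]  # free dim of the left operand
assert dims.rhs_out_only_dims == ["n"]  # free dim of the right operand

# Batched matmul: bmk,bkn->bmn adds a batch dim "b" shared by both inputs
input_dims, output_dim = EinsumDims.parse_equation("bmk,bkn->bmn")
dims = EinsumDims.parse_dims(input_dims, output_dim)
assert dims.batch_dims == ["b"]
assert dims.contracting_dims == ["k"]
```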
+ # first we always have replicate all for inputs and output + placement_list: List[Placement] = [Replicate()] * (len(input_dims) + 1) + mesh_dim_strategies.append(placement_list) + + if mesh.size(mesh_dim) <= 1: + # only replicate strategy for mesh dim with size 1 + # TODO: see if this is valid for the submesh case + continue + + # split batch dim + for batch_dim in edims.batch_dims: + output_batch_dim = output_dim.index(batch_dim) + placement_list = [Shard(output_batch_dim)] + for input_dim in input_dims: + input_batch_dim = input_dim.index(batch_dim) + placement_list.append(Shard(input_batch_dim)) + + mesh_dim_strategies.append(placement_list) + + # split contracting dim + for contracting_dim in edims.contracting_dims: + placement_list = [_Partial()] + for input_dim in input_dims: + input_contracting_dim = input_dim.index(contracting_dim) + placement_list.append(Shard(input_contracting_dim)) + + mesh_dim_strategies.append(placement_list) + + # split lhs free dim + for lhs_dim in edims.lhs_out_only_dims: + lhs_free_dim = output_dim.index(lhs_dim) + # this means split the lhs input and output + # i.e. S(0), R -> S(0) + lhs_placement_list: List[Placement] = [ + Shard(lhs_free_dim), + Shard(lhs_free_dim), + Replicate(), + ] + mesh_dim_strategies.append(lhs_placement_list) + + # split rhs free dim + for rhs_dim in edims.rhs_out_only_dims: + rhs_free_dim = output_dim.index(rhs_dim) + rhs_placement_list: List[Placement] = [ + Shard(rhs_free_dim), + Replicate(), + Shard(rhs_free_dim), + ] + mesh_dim_strategies.append(rhs_placement_list) + + # linearity strategy + if linearity: + linearity_placement_list: List[Placement] = [_Partial()] + for input_dim in input_dims: + linearity_placement_list.append(_Partial()) + mesh_dim_strategies.append(linearity_placement_list) + + all_mesh_dim_strategies.append(mesh_dim_strategies) + + # generate strategies for entire mesh + strategy_combs = itertools.product(*all_mesh_dim_strategies) + + # TODO: filter out invalid strategies, at this point we generate + # all possible strategies without considering the whether the tensor + # dim could be sharded or not, we would need to filter out invalid + # strategies base on the actual tensor shape + # (i.e. for Shard, tensor dim size must > mesh size) + all_strategies = [] + for strategy_comb in strategy_combs: + spec_list = [] + for specs in zip(*strategy_comb): + spec_list.append(DTensorSpec(mesh, tuple(specs))) + strat = PlacementStrategy(output_specs=spec_list[0], input_specs=spec_list[1:]) + all_strategies.append(strat) + + return OpStrategy(all_strategies) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/common_rules.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/common_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..7d581995bd816cd23afa17d65fb919dd8c601782 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/common_rules.py @@ -0,0 +1,289 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +from typing import cast, Dict, List, Optional, Tuple + +import torch +from torch.distributed._tensor._utils import compute_local_shape +from torch.distributed._tensor.op_schema import ( + _is_inplace_op, + _is_out_variant_op, + OpSchema, + OutputSharding, +) +from torch.distributed._tensor.ops.utils import prod +from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta + + +def _replace_char_in_str(string: str, new_char: str, idx: int) -> str: + return string[:idx] + new_char + string[idx + 1 :] + + +def _gen_reshard_suggestions( + op_schema: OpSchema, + input_dims: List[str], + input_specs: Tuple[DTensorSpec, ...], + dim_to_sharding: Dict[str, int], + pending_sum: List[int], +) -> OutputSharding: + suggested_arg_specs: List[DTensorSpec] = [] + for input_dim, input_spec in zip(input_dims, input_specs): + dim_map = [dim_to_sharding[dim] for dim in input_dim] + suggested_arg_specs.append( + DTensorSpec.from_dim_map( + mesh=input_spec.mesh, + dim_map=dim_map, + sums=pending_sum, + tensor_meta=input_spec.tensor_meta, + ) + ) + suggested_schema = OpSchema(op_schema.op, tuple(suggested_arg_specs), {}) + suggested_schema._inplace_rewrap_schema_suggestion(op_schema) + return OutputSharding( + None, + schema_suggestions=[suggested_schema], + failed_reason="Input placements op sharding propagation failed, need to reshard!", + ) + + +def einop_rule( + equation: str, + op_schema: OpSchema, + *, + linearity: bool = False, + enforce_sharding: Optional[Dict[str, int]] = None, +) -> OutputSharding: + """ + Propagate the sharding of inputs to output for ops whose data moves according to einsum notation. + + This is mostly borrowed from @zdevito's sharding simulator. Examples: + mk,kn->mn - einsum + ij,ij->ij - addition + ij,j->ij - broadcasted addition + ij->i - reduction + Other ops could use this propagation algorithm when applied, note + that einsum propagation only deal with list of specs (DTensor specs) + as it only works on list of tensors! + + linearity in einop_rule means that the calling op `f` follows this rule: + f(a + b) = f(a) + f(b) + + In this case we can propagate the partial sum, note that linearity in einop + only applies to partial sum, not other operations like min/max (which are + associative but not linear). + """ + # parse einop equation and extract arg specs + inputs, outputs = equation.split("->") + input_dims, output_dims = inputs.split(","), outputs.split(",") + input_specs = op_schema.args_spec + # NOTE: only support single output unless needed in future + output_dim = output_dims[0] + + dim_to_sharding: Dict[str, int] = {} + dim_to_size: Dict[str, int] = {} + # record pending sum, key is mesh dimension, value is pending sum + # counter across input specs + pending_sums_counter: Dict[int, int] = {} + seen_shardings: Dict[int, str] = {} + needs_reshard = False + + def merge_sharding(dim: str, a: int, b: int) -> int: + # merge the sharding of inputs if it's able to merge, i.e. we can merge + # replicate and shard to shard, but this will trigger an reshard operation + if a != b: + if a == -1 or b == -1: + # reshard the replicate to match the sharded one + nonlocal needs_reshard + needs_reshard = True + return a if a != -1 else b + else: + # TODO: further merge the sharding properly (i.e. 
reshard one input to replicate) + raise RuntimeError( + f"{equation}: dim {dim} sharded two different ways: {a} and {b}" + ) + else: + return a + + for input_dim, input_spec in zip(input_dims, input_specs): + # deal with partial sums + input_sums = input_spec.sums + for sum_dim in input_sums: + if sum_dim not in pending_sums_counter: + seen_shardings[sum_dim] = "+" + # update pending sum counter for pending sum mesh + # dimension with the occurrence from each input + pending_sums_counter[sum_dim] = pending_sums_counter.get(sum_dim, 0) + 1 + + for idx, (dim, mesh_dim) in enumerate(zip(input_dim, input_spec.dim_map)): + if enforce_sharding and dim in enforce_sharding: + if enforce_sharding[dim] != mesh_dim: + needs_reshard = True + dim_to_sharding[dim] = enforce_sharding[dim] + dim_to_size[dim] = input_spec.shape[idx] + elif dim not in dim_to_sharding: + dim_to_sharding[dim] = mesh_dim + dim_to_size[dim] = input_spec.shape[idx] + else: + dim_to_sharding[dim] = merge_sharding( + dim, dim_to_sharding[dim], mesh_dim + ) + assert dim_to_size[dim] == input_spec.shape[idx] + + # after merging sharding, we check if there're multiple + # sharding on the same mesh dim. + merged_sharding_for_dim = dim_to_sharding[dim] + if merged_sharding_for_dim != -1: + if ( + merged_sharding_for_dim in seen_shardings + and dim != seen_shardings[merged_sharding_for_dim] + ): + needs_reshard = True + seen_shardings[merged_sharding_for_dim] += dim + else: + seen_shardings[merged_sharding_for_dim] = dim + + if pending_sums_counter and not linearity: + # return reshard suggestion with no pending sum, because we already properly + # merge the sharding, this reshard suggestion is legit to use + return _gen_reshard_suggestions( + op_schema, input_dims, input_specs, dim_to_sharding, [] + ) + else: + # It's a op that support linearity, but not all input arguments are partial + # we fail the sharding propagation with suggestion to make all inputs be + # partial on the corresponding mesh dim (all inputs should be partial for + # the mesh dims in order to execute locally and delay the sum reduction) + for value in pending_sums_counter.values(): + if value != len(input_specs): + needs_reshard = True + + for mesh_dim, dims in seen_shardings.items(): + if len(dims) > 1: + # we found different input dims are being sharded on the same mesh dim + # in order to perform local op computation, we need to reshard inputs + # base on some simple heuristics, now we simply pick the one with least comm + # volume. (i.e. 
the input with least size) + # TODO: consider a more advanced heuristic to pick the best sharding + costs = [] + for d in dims: + cost = 0 + for input_dim, input_spec in zip(input_dims, input_specs): + if ( + d in input_dim + and input_spec.dim_map[input_dim.index(d)] == mesh_dim + ): + assert input_spec.tensor_meta is not None + global_shape = input_spec.tensor_meta.shape + local_shape = compute_local_shape( + global_shape, input_spec.mesh, input_spec.placements + ) + cost += prod(local_shape) * input_spec.mesh.size(mesh_dim) + costs.append(cost) + d_to_keep_sharding = dims[costs.index(max(costs))] + for d in dims: + # update dim_to_sharding to keep the sharding of the dim with + # highest comm and make the rest of the dims to replicate + if d != d_to_keep_sharding: + dim_to_sharding[d] = -1 + + pending_sums = list(pending_sums_counter.keys()) + if needs_reshard: + return _gen_reshard_suggestions( + op_schema, input_dims, input_specs, dim_to_sharding, pending_sums + ) + + # generate output pending sum if a dim is sharded, and it appears in input + # but not output + for dim, shard_on_mesh in dim_to_sharding.items(): + if dim not in output_dims[0] and shard_on_mesh != -1: + pending_sums.append(shard_on_mesh) + + # if no need to reshard, we directly generate the output sharding + output_dim_map = [] + output_shape = [] + for dim in output_dim: + if dim == "1": + # find output dim that is a singleton dimension, mark sharding and shape + output_dim_map.append(-1) + output_shape.append(1) + else: + output_dim_map.append(dim_to_sharding[dim]) + output_shape.append(dim_to_size[dim]) + + # XXX: since we still need to have intermediate shape calculation, we need + # to pass in the shape here. We should remove this once sharding decomp works + # for ops like addmm + assert input_specs[0].tensor_meta is not None + tensor_meta = TensorMeta( + torch.Size(output_shape), + input_specs[0].tensor_meta.stride, + input_specs[0].tensor_meta.dtype, + ) + return OutputSharding( + DTensorSpec.from_dim_map( + input_specs[0].mesh, + output_dim_map, + pending_sums, + tensor_meta=tensor_meta, + ) + ) + + +def pointwise_rule(op_schema: OpSchema, linearity: bool = False) -> OutputSharding: + """ + Propagate the sharding for pointwise operations. + + Examples: + ij,ij->ij - addition/mul + ij,j->ij - broadcasted addition + """ + alphabet = "abcdefghijklmnopqrstuvwxyz" + # find the max_dim first in case we need to broadcasting + input_specs = op_schema.args_spec + max_dim = max(input.ndim for input in input_specs) + dimchars = [] + singleton_counter: List[int] = [0] * max_dim + for input in input_specs: + start_dim = max_dim - input.ndim + p = alphabet[start_dim:max_dim] + # handle the "broadcasting to a common shape case" + # see https://pytorch.org/docs/stable/notes/broadcasting.html + # If any of the dimensions is singleton dimension (i.e. 1). + # we mark the dim char as a special "1" to distinguish with + # the non-singleton dimension, so that sharding propagation + # should just ignore the singleton dimension. 
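# For illustration, with two input specs of global shapes torch.Size([4, 1, 8])
# and torch.Size([8]): max_dim is 3, the first input maps to "a1c" (its middle
# dim is a singleton) and the second to "c" (its two missing leading dims are
# counted as singletons), so every input is singleton on the "b" position and
# the corresponding output char is also rewritten, producing the einop
# equation "a1c,c->a1c" that is handed to einop_rule below.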
+ if len(input_specs) > 1: + for i in range(max_dim): + if i < start_dim: + # treat the leading miss dim chars as singleton + singleton_counter[i] += 1 + elif input.shape[i - start_dim] == 1: + # mark singleton dim char as a special "1" in einop rule + singleton_counter[i] += 1 + p = _replace_char_in_str(p, "1", (i - start_dim)) + + dimchars.append(p) + out_dimchars = alphabet[:max_dim] + # check if we replace the all inputs dim char with singleton dimension, + # if we replace all inputs, we also need to replace the output dimension. + for output_dim_idx in range(len(out_dimchars)): + out_dimchar = out_dimchars[output_dim_idx] + if singleton_counter[output_dim_idx] == len(input_specs): + out_dimchars = _replace_char_in_str(out_dimchars, "1", output_dim_idx) + + fmt = f"{','.join(p for p in dimchars)}->{out_dimchars}" + + enforce_sharding: Dict[str, int] = {} + if _is_inplace_op(op_schema.op): + # inplace op should keep the input sharding it writes to + for out_dimchar, mesh_dim in zip(out_dimchars, input_specs[0].dim_map): + enforce_sharding[out_dimchar] = mesh_dim + elif _is_out_variant_op(op_schema.op): + out_spec = cast(DTensorSpec, op_schema.kwargs_schema["out"]) + for out_dimchar, mesh_dim in zip(out_dimchars, out_spec.dim_map): + enforce_sharding[out_dimchar] = mesh_dim + + return einop_rule( + fmt, + op_schema, + linearity=linearity, + enforce_sharding=enforce_sharding, + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/conv_ops.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/conv_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..b45ddb03ec7423ee32cec62bee35185fb765a0bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/conv_ops.py @@ -0,0 +1,108 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +# implement matrix related ops for distributed tensor +from typing import List + +import torch +from torch.distributed._tensor.op_schema import OpSchema, OutputSharding +from torch.distributed._tensor.ops.utils import register_prop_rule +from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta + +aten = torch.ops.aten + + +@register_prop_rule(aten.convolution.default) +def convolution_rules(op_schema: OpSchema) -> OutputSharding: + ( + input_spec, + weight_spec, + bias_spec, + stride, + padding, + dilation, + transposed, + output_padding, + groups, + ) = op_schema.args_schema + + assert isinstance(input_spec, DTensorSpec) + assert isinstance(weight_spec, DTensorSpec) + assert isinstance(bias_spec, DTensorSpec) + assert input_spec.tensor_meta is not None + assert weight_spec.tensor_meta is not None + in_shape = input_spec.tensor_meta.shape + weight_shape = weight_spec.tensor_meta.shape + assert isinstance(stride, List) + assert isinstance(padding, List) + assert isinstance(dilation, List) + assert isinstance(weight_shape, torch.Size) + N, C_in, H_in, W_in = in_shape[0], in_shape[1], in_shape[2], in_shape[3] + C_out = weight_shape[0] + H_out = (H_in + 2 * padding[0] - dilation[0] * (weight_shape[2] - 1) - 1) // stride[ + 0 + ] + 1 + W_out = (W_in + 2 * padding[1] - dilation[1] * (weight_shape[3] - 1) - 1) // stride[ + 1 + ] + 1 + output_shape = [N, C_out, H_out, W_out] + output_stride = (C_out * H_out * W_out, H_out * W_out, W_out, 1) + output_dim_map = input_spec.dim_map + pending_sums = input_spec.sums + + tensor_meta = TensorMeta( + torch.Size(output_shape), + output_stride, + input_spec.tensor_meta.dtype, + ) + return OutputSharding( + DTensorSpec.from_dim_map( + input_spec.mesh, + output_dim_map, + pending_sums, + tensor_meta=tensor_meta, + ) + ) + + +@register_prop_rule(aten.convolution_backward.default) +def convolution_backward_rules(op_schema: OpSchema) -> OutputSharding: + input_spec = op_schema.args_schema[0] + ( + grad_output_spec, + input_spec, + weight_spec, + bias_shape_opt, + stride, + padding, + dilation, + transposed, + output_padding, + groups, + output_mask, + ) = op_schema.args_schema + + assert isinstance(grad_output_spec, DTensorSpec) + assert isinstance(input_spec, DTensorSpec) + assert isinstance(weight_spec, DTensorSpec) + assert isinstance(bias_shape_opt, List) + assert input_spec.tensor_meta is not None + weight_tensor_meta = weight_spec.tensor_meta + bias_tensor_meta = TensorMeta( + torch.Size(bias_shape_opt), + (1,), + input_spec.tensor_meta.dtype, + ) + + grad_input_spec = input_spec + grad_weight_spec = DTensorSpec.from_dim_map( + input_spec.mesh, + [-1, -1, -1, -1], + [0], + tensor_meta=weight_tensor_meta, + ) + grad_bias_spec = DTensorSpec.from_dim_map( + input_spec.mesh, + [-1], + [0], + tensor_meta=bias_tensor_meta, + ) + return OutputSharding([grad_input_spec, grad_weight_spec, grad_bias_spec]) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/embedding_ops.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/embedding_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..bf6ad687387c842ea9536754ad5128ae5ae15aa6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/embedding_ops.py @@ -0,0 +1,313 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +# implement matrix related ops for distributed tensor +import itertools +from dataclasses import dataclass, field +from typing import cast, List, Optional + +import torch +import torch.distributed._functional_collectives as funcol +from torch.distributed._tensor.op_schema import ( + OpSchema, + OpStrategy, + PlacementStrategy, + StrategyType, +) +from torch.distributed._tensor.ops.utils import ( + generate_redistribute_costs, + is_tensor_shardable, + register_op_strategy, +) + +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) + +from torch.distributed.device_mesh import DeviceMesh + +aten = torch.ops.aten + + +@dataclass +class MaskBuffer: + data: Optional[torch.Tensor] = None + + def materialize_mask(self, mask): + if self.data is not None: + raise RuntimeError("MaskBuffer has already been materialized") + self.data = mask + + def release_mask(self): + # TODO: evaluate if we need to release the mask buffer or the buffer + # can just have the same lifetime as the _Partial placement + if self.data is None: + raise RuntimeError("MaskBuffer has not been materialized") + self.data = None + + def apply_mask(self, tensor): + if self.data is None: + raise RuntimeError("MaskBuffer has not been materialized") + + # NOTE: _MaskPartial is being used by the embedding op and the gather op. + # For gather, the mask has the same dimension as the output tensor, whereas + # the output of the embedding op has an additional dimension compare to the input, + # hence the output masking logic below having two different cases. + if tensor.ndim == self.data.ndim: + tensor[self.data] = 0.0 + else: + tensor[self.data, :] = 0.0 + + +@dataclass(frozen=True) +class _MaskPartial(_Partial): + """ + A partial mask placement devised for rowwise sharded embedding op, where we need + to mask and adjust the indices to the local embedding shard, embedding masking + is a special type of the Partial placement + + NOTE: the lifecycle of this MaskPartial placement follows the corresponding DTensor + lifecycle, i.e. the indices_mask would only be alive during the lifetime of the DTensor. 
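As an illustration, consider an (8, embedding_dim) weight rowwise-sharded
across 2 ranks, so rank 1 owns rows [4, 8). For input indices [1, 5, 7],
_partition_value on rank 1 records mask = [True, False, False] and rewrites
the local indices to [0, 1, 3]; the local lookup thus produces a meaningless
row for the first index, which apply_mask zeroes out before the partial
outputs are summed across ranks in _reduce_value.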
+ """ + + logical_dim_size: int = -1 + mask_buffer: MaskBuffer = field(default_factory=MaskBuffer) + + def _partition_value( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + # override parent logic to perform partial mask for embedding + num_chunks = mesh.size(mesh_dim) + # get local shard size and offset on the embedding_dim + local_shard_size, local_offset_on_dim = Shard._local_shard_size_on_dim( + self.logical_dim_size, + num_chunks, + mesh.get_local_rank(mesh_dim), + return_offset=True, + ) + # Build the input mask and save it for the current partial placement + # this is so that the output of embedding op can reuse the same partial + # placement saved mask to perform mask + reduction + mask = (tensor < local_offset_on_dim) | ( + tensor >= local_offset_on_dim + local_shard_size + ) + # mask the input tensor + masked_tensor = tensor.clone() - local_offset_on_dim + masked_tensor[mask] = 0 + # materialize the mask buffer to be used for reduction + self.mask_buffer.materialize_mask(mask) + return masked_tensor + + def _reduce_value( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + # by the time we ned reduction, we should have already saved the mask + assert self.mask_buffer.data is not None + + # apply the mask to the tensor that pending reduction + self.mask_buffer.apply_mask(tensor) + + # clear the mask buffer + self.mask_buffer.release_mask() + + # perform sum reduction + return funcol.all_reduce( + tensor, reduceOp=self.reduce_op.name, group=(mesh, mesh_dim) + ) + + def _reduce_shard_value( + self, + tensor: torch.Tensor, + mesh: DeviceMesh, + mesh_dim: int, + shard_spec: Placement, + ) -> torch.Tensor: + # by the time we ned reduction, we should have already saved the mask + assert self.mask_buffer.data is not None + + # apply the mask to the tensor that pending reduction + self.mask_buffer.apply_mask(tensor) + + # clear the mask buffer + self.mask_buffer.release_mask() + + # call reduce_shard_tensor of the shard_spec. + shard_spec = cast(Shard, shard_spec) + return shard_spec._reduce_shard_tensor(tensor, mesh, self.reduce_op, mesh_dim) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _MaskPartial): + return False + + # if either data is not None, we invalidate the sharding cache, as this indicates + # the current MaskPartial placement is still in use and should not be used for cache hit. + if self.mask_buffer.data is not None or other.mask_buffer.data is not None: + return False + + return ( + self.reduce_op == other.reduce_op + and self.logical_dim_size == other.logical_dim_size + ) + + def __hash__(self) -> int: + return 1 + hash( + (self.logical_dim_size, id(self.mask_buffer.data), self.reduce_op) + ) + + def __repr__(self) -> str: + """ + machine readable representation of the MaskPartial placement + """ + return f"_MaskPartial(logical_dim_size={self.logical_dim_size})" + + def __str__(self) -> str: + """ + human readable representation of the MaskPartial placement + """ + return "MaskP" + + +@register_op_strategy(aten.embedding.default) +def embedding_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + """ + This strategy handles embedding op. 
We have two possible embedding shardings: + rowwise and colwise + # TODO: implement rowwise sharding + """ + weight_strategy = cast(OpStrategy, op_schema.args_schema[0]) + indices_strategy = cast(OpStrategy, op_schema.args_schema[1]) + + weight_shape = weight_strategy.output_shape + indices_shape = indices_strategy.output_shape + output_emd_dim = len(indices_shape) + + all_mesh_dim_strategies = [] + + for mesh_dim in range(mesh.ndim): + single_mesh_dim_strategies = [] + + # placement list stores placements of [output, weight, input_indices] + # first we always have replicate all for inputs and output + all_replicate: List[Placement] = [Replicate()] * 3 + single_mesh_dim_strategies.append(all_replicate) + + # colwise sharding, output shard on last dim, weight shard on dim 1, input replicate + colwise_sharding = [Shard(output_emd_dim), Shard(1), Replicate()] + single_mesh_dim_strategies.append(colwise_sharding) + + # rowwise sharding, output is embedding partial, weight shard on dim 0, input accepts embedding partial + embedding_partial_placement = _MaskPartial(logical_dim_size=weight_shape[0]) + + # NOTE we want to reuse the same mask partial placement so that we can reuse the same mask that generates + # from the input indices and use it for output reduction + rowwise_sharding = [ + embedding_partial_placement, + Shard(0), + embedding_partial_placement, + ] + single_mesh_dim_strategies.append(rowwise_sharding) + + # batch dim sharding, weight replicated, input can shard on any dim, output follows input + for input_dim in range(len(indices_shape)): + batch_sharding = [Shard(input_dim), Replicate(), Shard(input_dim)] + single_mesh_dim_strategies.append(batch_sharding) + + all_mesh_dim_strategies.append(single_mesh_dim_strategies) + + strategy_combs = itertools.product(*all_mesh_dim_strategies) + + all_strategies = [] + for strategy_comb in strategy_combs: + spec_list = [] + for specs in zip(*strategy_comb): + spec_list.append(DTensorSpec(mesh, tuple(specs))) + + if is_tensor_shardable(weight_shape, spec_list[1]) and is_tensor_shardable( + indices_shape, spec_list[2] + ): + # only add to the strategy list when both weight and indices are shardable + weight_spec, indices_spec = spec_list[1:] + redistribute_cost = [ + generate_redistribute_costs(weight_strategy, weight_spec), + generate_redistribute_costs(indices_strategy, indices_spec), + ] + strat = PlacementStrategy( + output_specs=spec_list[0], + input_specs=spec_list[1:], + redistribute_cost=redistribute_cost, + ) + all_strategies.append(strat) + + return OpStrategy(all_strategies) + + +@register_op_strategy(aten.embedding_dense_backward.default) +def embedding_dense_backward_strategy( + mesh: DeviceMesh, op_schema: OpSchema +) -> StrategyType: + """ + This strategy handles embedding op. 
We have two possible embedding shardings: + rowwise and colwise + # TODO: implement rowwise sharding backward + """ + grad_out_strategy = cast(OpStrategy, op_schema.args_schema[0]) + indices_strategy = cast(OpStrategy, op_schema.args_schema[1]) + + grad_out_shape = grad_out_strategy.output_shape + indices_shape = indices_strategy.output_shape + grad_out_ndim = len(grad_out_shape) + + all_mesh_dim_strategies = [] + + for mesh_dim in range(mesh.ndim): + single_mesh_dim_strategies = [] + + # placement list stores placements of [output, weight, input_indices] + # first we always have replicate all for inputs and output + all_replicate: List[Placement] = [Replicate()] * 3 + single_mesh_dim_strategies.append(all_replicate) + + # colwise sharding backward, grad_out shard on last dim, input replicate, + # weight grad shard colwise + colwise_sharding = [Shard(1), Shard(grad_out_ndim - 1), Replicate()] + single_mesh_dim_strategies.append(colwise_sharding) + + # batch dim sharding, weight replicated, grad_out/input have same sharding + # that can shard on any dim, weight grad partial + for input_dim in range(len(indices_shape)): + batch_sharding = [_Partial(), Shard(input_dim), Shard(input_dim)] + single_mesh_dim_strategies.append(batch_sharding) + + # grad_out partial, input replicate, weight grad keep partial + partial_sharding = [_Partial(), _Partial(), Replicate()] + single_mesh_dim_strategies.append(partial_sharding) + + all_mesh_dim_strategies.append(single_mesh_dim_strategies) + + strategy_combs = itertools.product(*all_mesh_dim_strategies) + + all_strategies = [] + for strategy_comb in strategy_combs: + spec_list = [] + for specs in zip(*strategy_comb): + spec_list.append(DTensorSpec(mesh, tuple(specs))) + + if is_tensor_shardable(grad_out_shape, spec_list[1]) and is_tensor_shardable( + indices_shape, spec_list[2] + ): + # only add to the strategy list when both grad_out and indices are shardable + grad_out_spec, indices_spec = spec_list[1:] + redistribute_cost = [ + generate_redistribute_costs(grad_out_strategy, grad_out_spec), + generate_redistribute_costs(indices_strategy, indices_spec), + ] + strat = PlacementStrategy( + output_specs=spec_list[0], + input_specs=spec_list[1:], + redistribute_cost=redistribute_cost, + ) + all_strategies.append(strat) + + return OpStrategy(all_strategies) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/experimental_ops.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/experimental_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..4f64b6df1b5fb295f40ed9af1daa29086a5a18b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/experimental_ops.py @@ -0,0 +1,49 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +# implement matrix related ops for distributed tensor +from typing import List + +try: + import numpy as np +except ModuleNotFoundError: + np = None # type: ignore[assignment] + +import torch +from torch.distributed._tensor.op_schema import OpSchema, OutputSharding +from torch.distributed._tensor.ops.utils import register_prop_rule +from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta + +aten = torch.ops.aten + + +@register_prop_rule(aten.slice_backward.default) +def slice_backward_rules(op_schema: OpSchema) -> OutputSharding: + grad_output_spec, input_sizes, dim, start, end, step = op_schema.args_schema + assert isinstance(grad_output_spec, DTensorSpec) + assert isinstance(input_sizes, List) + assert grad_output_spec.tensor_meta is not None + grad_input_stride = list(np.cumprod(input_sizes[::-1])[:-1][::-1]) + grad_input_stride.append(1) + dim_map = grad_output_spec.dim_map + sums = grad_output_spec.sums + + grad_input_tensor_meta = TensorMeta( + torch.Size(input_sizes), + tuple(grad_input_stride), + grad_output_spec.tensor_meta.dtype, + ) + grad_input_spec = DTensorSpec.from_dim_map( + grad_output_spec.mesh, + dim_map, + sums, + tensor_meta=grad_input_tensor_meta, + ) + + return OutputSharding(grad_input_spec) + + +@register_prop_rule(aten.bernoulli.default) +@register_prop_rule(aten.bernoulli_.float) +def bernoulli_rules(op_schema: OpSchema) -> OutputSharding: + input_spec = op_schema.args_schema[0] + assert isinstance(input_spec, DTensorSpec) + return OutputSharding(input_spec) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/math_ops.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/math_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..64d5a09bf4028596920e7a65fd9ef3bdd31f59aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/math_ops.py @@ -0,0 +1,957 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from dataclasses import dataclass +from enum import Enum +from typing import cast, List, Optional, Sequence, Tuple, Union + +import torch + +import torch.distributed.distributed_c10d as c10d +from torch.distributed._tensor.op_schema import ( + OpSchema, + OpStrategy, + PlacementStrategy, + RuntimeSchemaInfo, + TupleStrategy, +) +from torch.distributed._tensor.ops.utils import ( + as_list, + generate_redistribute_costs, + is_tensor_evenly_shardable, + normalize_dim, + normalize_dims, + normalize_to_torch_size, + register_op_strategy, +) +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) +from torch.distributed.device_mesh import DeviceMesh + + +aten = torch.ops.aten + + +class Reduction(Enum): + NONE = 0 + MEAN = 1 + SUM = 2 + + +@dataclass(frozen=True) +class NormReduction: + norm_type: Union[int, float, str] + + +ReductionOpType = Union[NormReduction, c10d.ReduceOp.RedOpType] + + +@dataclass(frozen=True) +class _NormPartial(_Partial): + """ + This placement is used for partial vector norm. + + For p-norms (where p not inf or -inf), the p-norm over n elements computes + (sum_i x_i^p)^(1/p) + where the sum is from i=1 to n. The reduction op is the p-norm itself. 
+ For example, consider 2 ranks, a (4,) tensor sharded on dim-0, and 2-norm: + Rank 0: [t1, t2] | Rank 1: [t3, t4] + After computing 2-norm per gradient (partial placement): + Rank 0: [sqrt(t1^2 + t2^2)] | Rank 1: [sqrt(t3^2 + t4^2)] + Converting from partial to replicate wants to ultimately get: + Rank 0/1: [sqrt(t1^2 + t2^2 + t3^2 + t4^2)] + This can be achieved by computing 2-norm on each rank's result. This holds + similarly for inf and -inf norm. For 0-norm, the reduction op is sum. + """ + + norm_type: Union[int, float, str] = 2 + + def __post_init__(self): + """Set the appropriate reduce op based on the norm type.""" + # Use `object.__setattr__` to bypass frozen checks + if self.norm_type in (float("inf"), "inf"): + object.__setattr__(self, "reduce_op", c10d.ReduceOp.MAX) + elif self.norm_type in (float("-inf"), "-inf"): + object.__setattr__(self, "reduce_op", c10d.ReduceOp.MIN) + elif isinstance(self.norm_type, (int, float)): + object.__setattr__(self, "reduce_op", c10d.ReduceOp.SUM) + else: + raise NotImplementedError(f"Unsupported norm type: {self.norm_type}") + + def _partition_value( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + if self.reduce_op in (c10d.ReduceOp.MAX, c10d.ReduceOp.MIN): + return tensor + elif self.reduce_op == c10d.ReduceOp.SUM: + return tensor / mesh.size(mesh_dim=mesh_dim) + raise NotImplementedError(self.reduce_op) + + def _reduce_shard_value( + self, + tensor: torch.Tensor, + mesh: DeviceMesh, + mesh_dim: int, + shard_spec: Placement, + ) -> torch.Tensor: + assert isinstance(shard_spec, Shard), f"{shard_spec}" + tensor = self._pre_reduce_transform(tensor) + reduced_tensor = super()._reduce_shard_value(tensor, mesh, mesh_dim, shard_spec) + return self._post_reduce_transform(reduced_tensor) + + def _reduce_value( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + tensor = self._pre_reduce_transform(tensor) + reduced_tensor = super()._reduce_value(tensor, mesh, mesh_dim) + return self._post_reduce_transform(reduced_tensor) + + def _pre_reduce_transform(self, tensor: torch.Tensor) -> torch.Tensor: + if self.reduce_op == c10d.ReduceOp.SUM: + assert isinstance(self.norm_type, (int, float)), f"{self.norm_type}" + if self.norm_type != 0 and self.norm_type != 1: + return tensor**self.norm_type + return tensor + + def _post_reduce_transform(self, tensor: torch.Tensor) -> torch.Tensor: + if self.reduce_op == c10d.ReduceOp.SUM: + assert isinstance(self.norm_type, (int, float)), f"{self.norm_type}" + if self.norm_type != 0 and self.norm_type != 1: + return tensor ** (1.0 / self.norm_type) + return tensor + + +def _infer_reduction_dims(dims_arg: object, ndim: int) -> Optional[List[int]]: + if dims_arg is None: + return None + dims = cast(List[int], as_list(dims_arg)) + dims = cast(List[int], normalize_dims(dims, ndim)) + empty_dims = [[0], [-1], []] + if ndim == 0 and dims_arg in empty_dims: + return None + return dims + + +def _infer_reduce_dims_map( + reduction_dims: List[int], input_ndim: int, keep_dim=False +) -> List[int]: + reduction_dims_map = [] + new_dim_count = 0 + for input_dim in range(input_ndim): + if input_dim in reduction_dims and not keep_dim: + # if input dim in reduction dims, mark it as -1 + reduction_dims_map.append(-1) + else: + # otherwise mark it as the new dim + reduction_dims_map.append(new_dim_count) + new_dim_count += 1 + + return reduction_dims_map + + +def replicate_reduction_dims( + placements: Tuple[Placement, ...], reduction_dims: List[int] +) -> Tuple[Placement, 
...]: + # replicate the reduction dims if not reduction_linear + new_placements: List[Placement] = [] + + for p in placements: + if p.is_partial(): + new_placements.append(Replicate()) + elif isinstance(p, Shard) and p.dim in reduction_dims: + new_placements.append(Replicate()) + else: + new_placements.append(p) + + return tuple(new_placements) + + +def map_placements_after_reduction( + placements: Tuple[Placement, ...], + reduction_dims: List[int], + reduction_dims_map: List[int], + reduction_op: ReductionOpType, +) -> Tuple[Placement, ...]: + """ + Map each placement based on the output shape after reduction. + """ + new_placements: List[Placement] = [] + for placement in placements: + if isinstance(placement, (Replicate, _Partial)): + new_placements.append(placement) + else: + assert isinstance(placement, Shard) + shard_dim = placement.dim + new_shard_dim = reduction_dims_map[shard_dim] + if new_shard_dim == -1 or shard_dim in reduction_dims: + # if new_shard_dim collapsed or its in the reduction dims + # (i.e. for the case where keepdims=True), we generate partial + new_placements.append(get_placement_from_reduction_op(reduction_op)) + else: + new_placements.append(Shard(new_shard_dim)) + return tuple(new_placements) + + +def get_placement_from_reduction_op(reduction_op: ReductionOpType) -> Placement: + if isinstance(reduction_op, NormReduction): + return _NormPartial(norm_type=reduction_op.norm_type) + return _Partial(reduction_op) + + +def common_reduction_strategy( + mesh: DeviceMesh, + input_strategy: OpStrategy, + reduce_dims: List[int], + keep_dim: bool = False, + reduction_linear: bool = True, + reduction_op: ReductionOpType = c10d.ReduceOp.SUM, +) -> OpStrategy: + """ + reduction_linear means that the reduction `f` follows this rule: + f([f(a), f(b)]) = f([a, b]) + + reduction linear should be super set of linearity. 
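For example, max is reduction linear: the max of per-shard maxes equals the
max over the full tensor, so sharded inputs can be reduced locally and the
output placement becomes partial. Variance is not reduction linear, which is
why var_reduction_strategy below passes reduction_linear=False so that the
reduction dims are replicated first via replicate_reduction_dims.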
+ """ + # by default follow reduction input strategy + reduction_strategy = OpStrategy([]) + + for strtg in input_strategy.strategies: + if not reduction_linear: + # input placements for this strategy should clear out pending sum and sharding + # on the reduction dimension + input_placements = replicate_reduction_dims( + strtg.output_spec.placements, reduce_dims + ) + else: + input_placements = strtg.output_spec.placements + + input_spec = DTensorSpec( + mesh=mesh, + placements=input_placements, + tensor_meta=strtg.output_spec.tensor_meta, + ) + + reduce_dims_map = _infer_reduce_dims_map(reduce_dims, input_spec.ndim, keep_dim) + out_placements = map_placements_after_reduction( + input_spec.placements, reduce_dims, reduce_dims_map, reduction_op + ) + redistribute_cost = [generate_redistribute_costs(input_strategy, input_spec)] + reduction_strategy.strategies.append( + PlacementStrategy( + output_specs=DTensorSpec( + mesh=mesh, + placements=out_placements, + ), + input_specs=(input_spec,), + redistribute_cost=redistribute_cost, + ) + ) + + return reduction_strategy + + +LINEAR_REDUCTION_OP_MAP = { + aten.all.default: c10d.ReduceOp.SUM, + aten.all.dim: c10d.ReduceOp.SUM, + aten.sum.default: c10d.ReduceOp.SUM, + aten.sum.dim_IntList: c10d.ReduceOp.SUM, + aten.prod.default: c10d.ReduceOp.PRODUCT, + aten.prod.dim_int: c10d.ReduceOp.PRODUCT, + aten.prod.int_out: c10d.ReduceOp.PRODUCT, + aten.mean.default: c10d.ReduceOp.AVG, + aten.mean.dim: c10d.ReduceOp.AVG, + aten.mean.out: c10d.ReduceOp.AVG, + aten.max.default: c10d.ReduceOp.MAX, + aten.max.dim: c10d.ReduceOp.MAX, + aten.max.out: c10d.ReduceOp.MAX, + aten.min.default: c10d.ReduceOp.MIN, + aten.min.dim: c10d.ReduceOp.MIN, + aten.min.out: c10d.ReduceOp.MIN, +} + + +@register_op_strategy( + list(LINEAR_REDUCTION_OP_MAP.keys()), schema_info=RuntimeSchemaInfo(1) +) +def linear_reduction_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + args_schema = op_schema.args_schema + input_strategy = args_schema[0] + assert isinstance(input_strategy, OpStrategy) + dims = None + if len(op_schema.args_schema) > 1: + dims = _infer_reduction_dims(args_schema[1], input_strategy.output_ndim) + + reduce_dims = list(range(input_strategy.output_ndim)) if dims is None else dims + + keep_dim = len(op_schema.args_schema) > 2 and bool(op_schema.args_schema[2]) + reduction_op = LINEAR_REDUCTION_OP_MAP[op_schema.op] + return common_reduction_strategy( + mesh, + input_strategy, + reduce_dims, + keep_dim=keep_dim, + reduction_linear=True, + reduction_op=reduction_op, + ) + + +@register_op_strategy( + [aten.var.correction, aten.var.correction_out], + schema_info=RuntimeSchemaInfo(1, ["keepdim"]), +) +def var_reduction_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + args_schema = op_schema.args_schema + input_strategy = args_schema[0] + assert isinstance(input_strategy, OpStrategy) + dims = None + if len(op_schema.args_schema) > 1: + dims = _infer_reduction_dims(args_schema[1], input_strategy.output_ndim) + + reduce_dims = list(range(input_strategy.output_ndim)) if dims is None else dims + + keep_dim = cast(bool, op_schema.kwargs_schema.get("keepdim", False)) + return common_reduction_strategy( + mesh, input_strategy, reduce_dims, keep_dim=keep_dim, reduction_linear=False + ) + + +@register_op_strategy( + [aten.linalg_vector_norm.default], schema_info=RuntimeSchemaInfo(1) +) +def vector_norm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + args_schema = op_schema.args_schema + input_strategy = args_schema[0] + assert 
isinstance(input_strategy, OpStrategy) + norm_type = args_schema[1] if len(args_schema) > 1 else 2 + assert isinstance(norm_type, (int, float, str)), f"{norm_type}" + dim = args_schema[2] if len(args_schema) > 2 else None + keepdim = args_schema[3] if len(args_schema) > 3 else False + dims = _infer_reduction_dims(dim, input_strategy.output_ndim) + reduce_dims = list(range(input_strategy.output_ndim)) if dims is None else dims + return common_reduction_strategy( + mesh, + input_strategy, + reduce_dims, + keep_dim=cast(bool, keepdim), + reduction_linear=True, + reduction_op=NormReduction(norm_type), + ) + + +@register_op_strategy( + [aten._foreach_norm.Scalar], schema_info=RuntimeSchemaInfo(1, needs_pytree=True) +) +def foreach_norm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> TupleStrategy: + args_schema = op_schema.args_schema + input_tuple_strategy = args_schema[0] + assert isinstance(input_tuple_strategy, TupleStrategy) + norm_type = args_schema[1] + assert isinstance(norm_type, (int, float, str)), f"{norm_type}" + output_tuple_strategy_childs: List[OpStrategy] = [] + for op_strategy in input_tuple_strategy.childs: + assert isinstance(op_strategy, OpStrategy), f"{op_strategy}" + reduce_dims = list(range(op_strategy.output_ndim)) + output_strategy = common_reduction_strategy( + mesh, + op_strategy, + reduce_dims, + reduction_linear=True, + reduction_op=NormReduction(norm_type), + ) + output_tuple_strategy_childs.append(output_strategy) + return TupleStrategy(output_tuple_strategy_childs) + + +@register_op_strategy( + [aten._log_softmax.default, aten._softmax.default], schema_info=RuntimeSchemaInfo(1) +) +def softmax_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + input_strategy, softmax_dim, _ = op_schema.args_schema + input_strategy = cast(OpStrategy, input_strategy) + softmax_dim = cast(int, softmax_dim) + softmax_dim = normalize_dim(softmax_dim, input_strategy.output_ndim) + + output_strategy = OpStrategy([]) + for idx, input_placement_strategy in enumerate(input_strategy.strategies): + redistribute_costs = [] + input_src_spec = input_placement_strategy.output_spec + + # make sure input is replicated along the softmax dim + input_target_spec = DTensorSpec( + mesh=mesh, + placements=replicate_reduction_dims( + input_src_spec.placements, [softmax_dim] + ), + tensor_meta=input_src_spec.tensor_meta, + ) + redistribute_costs.append( + generate_redistribute_costs(input_strategy, input_target_spec) + ) + output_target_spec = input_target_spec + output_strategy.strategies.append( + PlacementStrategy( + output_specs=output_target_spec, + input_specs=[input_target_spec], + redistribute_cost=redistribute_costs, + ) + ) + + return output_strategy + + +@register_op_strategy( + [ + aten._log_softmax_backward_data.default, + aten._softmax_backward_data.default, + ], + schema_info=RuntimeSchemaInfo(2), +) +def softmax_backward_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + grad_out_strategy, out_strategy, softmax_dim, _ = op_schema.args_schema + grad_out_strategy = cast(OpStrategy, grad_out_strategy) + out_strategy = cast(OpStrategy, out_strategy) + softmax_dim = cast(int, softmax_dim) + softmax_dim = normalize_dim(softmax_dim, grad_out_strategy.output_ndim) + + grad_in_strategy = OpStrategy([]) + for grad_out_placement_strat, out_placement_strat in zip( + grad_out_strategy.strategies, out_strategy.strategies + ): + # follow the sharding of the grad_out or out depending on which has more shards + grad_out_src_spec = grad_out_placement_strat.output_spec + 
out_src_spec = out_placement_strat.output_spec + src_spec = ( + grad_out_src_spec + if grad_out_src_spec.num_shards >= out_src_spec.num_shards + else out_src_spec + ) + + # make sure inputs are replicated along the softmax dim + tgt_spec = DTensorSpec( + mesh=mesh, + placements=replicate_reduction_dims(src_spec.placements, [softmax_dim]), + ) + redist_grad_out_cost = generate_redistribute_costs(grad_out_strategy, tgt_spec) + redist_out_cost = generate_redistribute_costs(out_strategy, tgt_spec) + grad_in_strategy.strategies.append( + PlacementStrategy( + output_specs=tgt_spec, + redistribute_cost=[redist_grad_out_cost, redist_out_cost], + ) + ) + + return grad_in_strategy + + +@register_op_strategy( + [aten.nll_loss_forward.default, aten.nll_loss2d_forward.default], + schema_info=RuntimeSchemaInfo(3), +) +def nll_loss_forward_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + assert len(op_schema.args_schema) == 5 + ( + input_strategy, + target_strategy, + weight_strategy, + reduction, + _, + ) = op_schema.args_schema + input_strategy = cast(OpStrategy, input_strategy) + target_strategy = cast(OpStrategy, target_strategy) + reduction = cast(int, reduction) + + input_shape = input_strategy.output_shape + channel_dim = 1 if len(input_shape) >= 2 else 0 + + output_strategy = OpStrategy([]) + for idx, input_placement_strategy in enumerate(input_strategy.strategies): + op_args_target_specs = [] + redistribute_costs = [] + + # make sure input is replicated along the channel dim + input_src_spec = input_placement_strategy.output_spec + input_expected_spec = DTensorSpec( + mesh=mesh, + placements=replicate_reduction_dims( + input_src_spec.placements, [channel_dim] + ), + tensor_meta=input_src_spec.tensor_meta, + ) + op_args_target_specs.append(input_expected_spec) + redistribute_costs.append( + generate_redistribute_costs(input_strategy, input_expected_spec) + ) + + # target doesn't have channel dim, and it follows input on other dims + target_src_spec = target_strategy.strategies[idx].output_spec + target_expected_spec = DTensorSpec( + mesh=mesh, + placements=_skip_dim(input_expected_spec.placements, channel_dim), + tensor_meta=target_src_spec.tensor_meta, + ) + op_args_target_specs.append(target_expected_spec) + redistribute_costs.append( + generate_redistribute_costs(target_strategy, target_expected_spec) + ) + + # weight tensor, if given, has to be a Tensor of size input_shape[channel_dim] + # make sure it is replicated + if weight_strategy is not None: + assert isinstance(weight_strategy, OpStrategy) + weight_src_spec = weight_strategy.strategies[idx].output_spec + weight_expected_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(weight_src_spec.placements), + tensor_meta=weight_src_spec.tensor_meta, + ) + op_args_target_specs.append(weight_expected_spec) + redistribute_costs.append( + generate_redistribute_costs(weight_strategy, weight_expected_spec) + ) + + if reduction == Reduction.NONE.value: + output_expected_spec = target_expected_spec + total_weight_expected_spec = DTensorSpec( + mesh=mesh, placements=tuple([Replicate()] * mesh.ndim) + ) + else: + if reduction == Reduction.MEAN.value: + reduction_op = c10d.ReduceOp.AVG + if not is_tensor_evenly_shardable( + target_expected_spec.shape, target_expected_spec + ): + raise ValueError( + "The intermediate results of nll_loss cannot be evenly sharded, \ + resulting in biased mean result." 
+ ) + else: # reduction == Reduction.SUM.value: + reduction_op = c10d.ReduceOp.SUM + reduce_dims = list(range(target_expected_spec.ndim)) + reduce_dims_map = _infer_reduce_dims_map( + reduce_dims, target_expected_spec.ndim, keep_dim=False + ) + out_placements = map_placements_after_reduction( + target_expected_spec.placements, + reduce_dims, + reduce_dims_map, + reduction_op, + ) + output_expected_spec = DTensorSpec( + mesh=mesh, + placements=out_placements, + ) + + # whether reduction is sum or mean, the total weight has to be summed up if not replicated + total_weight_placements = map_placements_after_reduction( + target_expected_spec.placements, + reduce_dims, + reduce_dims_map, + c10d.ReduceOp.SUM, + ) + total_weight_expected_spec = DTensorSpec( + mesh=mesh, + placements=total_weight_placements, + ) + + output_strategy.strategies.append( + PlacementStrategy( + output_specs=(output_expected_spec, total_weight_expected_spec), + input_specs=op_args_target_specs, + redistribute_cost=redistribute_costs, + ) + ) + + return output_strategy + + +@register_op_strategy( + [aten.nll_loss_backward.default, aten.nll_loss2d_backward.default], + schema_info=RuntimeSchemaInfo(4), +) +def nll_loss_backward_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + assert len(op_schema.args_schema) == 7 + ( + grad_out_strategy, + input_strategy, + target_strategy, + weight_strategy, + reduction, + _, + total_weight_strategy, + ) = op_schema.args_schema + grad_out_strategy = cast(OpStrategy, grad_out_strategy) + input_strategy = cast(OpStrategy, input_strategy) + target_strategy = cast(OpStrategy, target_strategy) + reduction = cast(int, reduction) + total_weight_strategy = cast(OpStrategy, total_weight_strategy) + + input_shape = input_strategy.output_shape + channel_dim = 1 if len(input_shape) >= 2 else 0 + + grad_in_strategy = OpStrategy([]) + for idx, input_placement_strategy in enumerate(input_strategy.strategies): + op_args_target_specs = [] + redistribute_costs = [] + + # make sure input is replicated along the channel dim + input_src_spec = input_placement_strategy.output_spec + input_expected_spec = DTensorSpec( + mesh=mesh, + placements=replicate_reduction_dims( + input_src_spec.placements, [channel_dim] + ), + tensor_meta=input_src_spec.tensor_meta, + ) + op_args_target_specs.append(input_expected_spec) + redistribute_costs.append( + generate_redistribute_costs(input_strategy, input_expected_spec) + ) + + # target doesn't have channel dim, and it follows input on other dims + target_src_spec = target_strategy.strategies[idx].output_spec + target_expected_spec = DTensorSpec( + mesh=mesh, + placements=_skip_dim(input_expected_spec.placements, channel_dim), + tensor_meta=target_src_spec.tensor_meta, + ) + op_args_target_specs.append(target_expected_spec) + redistribute_costs.append( + generate_redistribute_costs(target_strategy, target_expected_spec) + ) + + # grad_out follows target if there is no reduction; + # otherwise, it should be a replicated scalar. 
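# For illustration: with reduction == Reduction.MEAN.value (1) or
# Reduction.SUM.value (2) the forward loss is a scalar, so grad_out is forced
# to full replication below; with Reduction.NONE.value (0) it simply follows
# the target sharding computed above.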
+ grad_out_src_spec = grad_out_strategy.strategies[idx].output_spec + if reduction == Reduction.NONE.value: + grad_out_expected_spec = target_expected_spec + else: + grad_out_expected_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(grad_out_src_spec.placements), + tensor_meta=grad_out_src_spec.tensor_meta, + ) + op_args_target_specs.insert(0, grad_out_expected_spec) + redistribute_costs.insert( + 0, generate_redistribute_costs(grad_out_strategy, grad_out_expected_spec) + ) + + # weight tensor, if given, has to be a Tensor of size input_shape[channel_dim] + # make sure it is replicated + if weight_strategy is not None: + assert isinstance(weight_strategy, OpStrategy) + weight_src_spec = weight_strategy.strategies[idx].output_spec + weight_expected_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(weight_src_spec.placements), + tensor_meta=weight_src_spec.tensor_meta, + ) + op_args_target_specs.append(weight_expected_spec) + redistribute_costs.append( + generate_redistribute_costs(weight_strategy, weight_expected_spec) + ) + + # total_weight should always be replicated + total_weight_src_spec = total_weight_strategy.strategies[idx].output_spec + total_weight_expected_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(total_weight_src_spec.placements), + tensor_meta=total_weight_src_spec.tensor_meta, + ) + op_args_target_specs.append(total_weight_expected_spec) + redistribute_costs.append( + generate_redistribute_costs( + total_weight_strategy, total_weight_expected_spec + ) + ) + + grad_in_expected_spec = input_expected_spec + grad_in_strategy.strategies.append( + PlacementStrategy( + output_specs=grad_in_expected_spec, + input_specs=op_args_target_specs, + redistribute_cost=redistribute_costs, + ) + ) + + return grad_in_strategy + + +@register_op_strategy( + [aten.native_layer_norm.default], + schema_info=RuntimeSchemaInfo(1), +) +def layer_norm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + # args must be: input, normalized_shape, weight, bias, eps + # for None weight and bias, their corresponding objects will + # be None as well. layer_norm_strategy returns one OpStrategy + # for the triple return values (out, mean, rstd). 
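# For illustration: a rank-3 input of shape (batch, seq, hidden) with
# normalized_shape=(hidden,) gives axis = 3 - 1 = 2, so the redistribution
# below keeps Shard(0)/Shard(1) on the input but turns Shard(2) (and any
# _Partial) into Replicate() via _replicate_dims_start_at, and the out/mean/
# rstd triple reuses that same target spec.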
+ assert len(op_schema.args_schema) == 5 + ( + input_strategy, + normalized_shape, + weight_strategy, + bias_strategy, + _, + ) = op_schema.args_schema + + # the current layer norm implementation requires that all + # input DTensor's sharding must be in form of OpStrategy + assert isinstance(input_strategy, OpStrategy) + assert isinstance(normalized_shape, (int, Sequence, torch.Size)) + normalized_size = normalize_to_torch_size(normalized_shape) + + input_ndim = input_strategy.output_ndim + axis = input_ndim - len(normalized_size) + + # we use OpStrategy because the output (out, mean, rstd) + # should have the same placements + output_strategy = OpStrategy([]) + for idx, input_placement_strategy in enumerate(input_strategy.strategies): + op_args_target_specs = [] + redistribute_costs = [] + input_src_spec = input_placement_strategy.output_spec + + # for the input tensor, we replicate it on the inner dims if necessary + # TODO: we can avoid forcing the redistribution once we figure out + # how to decompose layer norm + input_target_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(input_src_spec.placements, axis), + tensor_meta=input_src_spec.tensor_meta, + ) + op_args_target_specs.append(input_target_spec) + redistribute_costs.append( + generate_redistribute_costs(input_strategy, input_target_spec) + ) + + if weight_strategy is not None: + assert isinstance(weight_strategy, OpStrategy) + weight_src_spec = weight_strategy.strategies[idx].output_spec + + # for the weight tensor, we replicate it on all dims if necessary + # TODO: we can avoid forcing the redistribution once we figure out + # how to decompose layer norm + weight_target_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(weight_src_spec.placements), + tensor_meta=weight_src_spec.tensor_meta, + ) + op_args_target_specs.append(weight_target_spec) + redistribute_costs.append( + generate_redistribute_costs(weight_strategy, weight_target_spec) + ) + + if bias_strategy is not None: + assert isinstance(bias_strategy, OpStrategy) + bias_src_spec = bias_strategy.strategies[idx].output_spec + + # for the bias tensor, we replicate it on all dims if necessary + # TODO: we can avoid forcing the redistribution once we figure out + # how to decompose layer norm + bias_target_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(bias_src_spec.placements), + tensor_meta=bias_src_spec.tensor_meta, + ) + op_args_target_specs.append(bias_target_spec) + redistribute_costs.append( + generate_redistribute_costs(bias_strategy, bias_target_spec) + ) + + # the output spec is the same as input spec + output_target_spec = input_target_spec + output_strategy.strategies.append( + PlacementStrategy( + output_specs=output_target_spec, + input_specs=op_args_target_specs, + redistribute_cost=redistribute_costs, + ) + ) + + return output_strategy + + +@register_op_strategy( + [aten.native_layer_norm_backward.default], + schema_info=RuntimeSchemaInfo(2), +) +def layer_norm_bwd_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + # args must be: grad_out, input, normalized_shape, mean, rstd, + # weight, bias, output_mask. For None weight and bias, their + # corresponding objects will be None as well. 
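# Note, for illustration: output_mask is a 3-element bool list gating
# (d_input, d_weight, d_bias); whenever an entry is False the loop below
# appends None to output_specs_list for that slot instead of a DTensorSpec.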
+ assert len(op_schema.args_schema) == 8 + ( + grad_out_strategy, + input_strategy, + normalized_shape, + mean_strategy, + rstd_strategy, + weight_strategy, + bias_strategy, + output_mask, + ) = op_schema.args_schema + + assert isinstance(grad_out_strategy, OpStrategy) + assert isinstance(input_strategy, OpStrategy) + assert isinstance(mean_strategy, OpStrategy) + assert isinstance(rstd_strategy, OpStrategy) + + assert isinstance(normalized_shape, (int, Sequence, torch.Size)) + normalized_size = normalize_to_torch_size(normalized_shape) + input_ndim = input_strategy.output_ndim + axis = input_ndim - len(normalized_size) + outer_dims = list(range(axis)) + + assert isinstance(output_mask, List) and len(output_mask) == 3 + + # output triple: (d_input, d_weight, d_bias) + out_tuple_strategy = OpStrategy([]) + for idx, input_placement_strategy in enumerate(input_strategy.strategies): + # args for PlacementStrategy + output_specs_list: List[Optional[DTensorSpec]] = [] + op_args_target_specs = [] + redistribute_costs = [] + + input_src_spec = input_placement_strategy.output_spec + # arg: grad_out + # TODO: change the strategy to the following rule. + # d_input is basically a product of element-wise mul of + # grad_out, rstd, and normalized input, among which rstd + # and normalized input (x_hat) should have the same sharding + # placements, and grad_out's sharding is determined by the + # pointwise result of x_hat and weight/bias. + if output_mask[0]: + # TODO: now grad_out spec follows input spec. we may need + # to change it to apply a pointwise rule over grad_out, + # input, and weight. + grad_out_target_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(input_src_spec.placements, axis), + tensor_meta=input_src_spec.tensor_meta, + ) + op_args_target_specs.append(grad_out_target_spec) + redistribute_costs.append( + generate_redistribute_costs(grad_out_strategy, grad_out_target_spec) + ) + output_specs_list.append(grad_out_target_spec) + else: + output_specs_list.append(None) + + # arg: input + input_target_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(input_src_spec.placements, axis), + tensor_meta=input_src_spec.tensor_meta, + ) + op_args_target_specs.append(input_target_spec) + redistribute_costs.append( + generate_redistribute_costs(input_strategy, input_target_spec) + ) + + # arg: mean, rstd + mean_src_spec = mean_strategy.strategies[idx].output_spec + op_args_target_specs.append(mean_src_spec) + redistribute_costs.append([0.0 for _ in mean_strategy.strategies]) + rstd_src_spec = rstd_strategy.strategies[idx].output_spec + op_args_target_specs.append(rstd_src_spec) + redistribute_costs.append([0.0 for _ in rstd_strategy.strategies]) + + # arg: weight + # d_weight = sum(grad_out * (input - mean) / rstd, outer_dim, keepdim=False) + if output_mask[1]: + assert isinstance(weight_strategy, OpStrategy) + weight_src_spec = weight_strategy.strategies[idx].output_spec + # no need to redistribute weight since they should be replicated + # in forward pass + op_args_target_specs.append(weight_src_spec) + redistribute_costs.append([0.0 for _ in weight_strategy.strategies]) + # TODO: now d_weight spec follows input spec w/ a reduction. + # we may need to change to a pointwise rule over grad_out and + # input, then apply a reduction. 
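# A small worked trace of the reduction mapping used here: for a rank-3 input
# with axis = 2, outer_dims == [0, 1] and _infer_reduce_dims_map([0, 1], 3)
# returns [-1, -1, 0], so a Shard(0) or Shard(1) input placement maps to a
# partial-sum placement for d_weight, while Replicate() stays Replicate().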
+ inp_placements = _replicate_dims_start_at(input_src_spec.placements, axis) + reduce_dims_map = _infer_reduce_dims_map( + outer_dims, input_src_spec.ndim, False + ) + out_placements = map_placements_after_reduction( + inp_placements, outer_dims, reduce_dims_map, c10d.ReduceOp.SUM + ) + output_specs_list.append( + DTensorSpec( + mesh=mesh, + placements=out_placements, + tensor_meta=weight_src_spec.tensor_meta, + ) + ) + else: + output_specs_list.append(None) + + # arg: bias + # d_bias = sum(grad_out, outer_dim, keepdim=False) + if output_mask[2]: + assert isinstance(bias_strategy, OpStrategy) + bias_src_spec = bias_strategy.strategies[idx].output_spec + # no need to redistribute weight since they should be replicated + # in forward pass + op_args_target_specs.append(bias_src_spec) + redistribute_costs.append([0.0 for _ in bias_strategy.strategies]) + # Currently we do not support the case where output_mask[0] is False while + # output_mask[1] is True. But it's easy to support that by accessing + # grad_out_spec via a local variable rather than the list. We just don't + # see the case. + grad_out_spec = output_specs_list[0] + assert isinstance(grad_out_spec, DTensorSpec) + # d_bias spec follows a reduction over grad_out + inp_placements = _replicate_dims_start_at(grad_out_spec.placements, axis) + reduce_dims_map = _infer_reduce_dims_map( + outer_dims, grad_out_spec.ndim, False + ) + out_placements = map_placements_after_reduction( + inp_placements, outer_dims, reduce_dims_map, c10d.ReduceOp.SUM + ) + output_specs_list.append( + DTensorSpec( + mesh=mesh, + placements=out_placements, + tensor_meta=bias_src_spec.tensor_meta, + ) + ) + else: + output_specs_list.append(None) + + out_tuple_strategy.strategies.append( + PlacementStrategy( + output_specs=tuple(output_specs_list), + input_specs=op_args_target_specs, + redistribute_cost=redistribute_costs, + ) + ) + + return out_tuple_strategy + + +def _replicate_dims_start_at( + placements: Sequence[Placement], start_dim: int = 0 +) -> Tuple[Placement, ...]: + new_placements: List[Placement] = [] + for p in placements: + if p.is_partial() or (isinstance(p, Shard) and p.dim >= start_dim): + new_placements.append(Replicate()) # make it replicate + else: + new_placements.append(p) # keep the placement + return tuple(new_placements) + + +# return new_placements which align with placements but skip the skipped_dim +def _skip_dim( + placements: Tuple[Placement, ...], skipped_dim: int +) -> Tuple[Placement, ...]: + new_placements: List[Placement] = [] + for p in placements: + if isinstance(p, Shard) and p.dim >= skipped_dim: + new_placements.append(Shard(p.dim - 1)) + else: + new_placements.append(p) + return tuple(new_placements) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/matrix_ops.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/matrix_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..9ee8b3f2a22b6ce2e899228440eec5503a4c40f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/matrix_ops.py @@ -0,0 +1,226 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +# implement matrix related ops for distributed tensor +import itertools +from typing import List, Optional + +import torch +from torch.distributed._tensor.op_schema import ( + OpSchema, + OpStrategy, + OutputSharding, + PlacementStrategy, +) +from torch.distributed._tensor.ops.basic_strategy import gen_einsum_strategies +from torch.distributed._tensor.ops.common_rules import einop_rule +from torch.distributed._tensor.ops.utils import ( + generate_redistribute_costs, + infer_broadcast_dims_map, + is_tensor_shardable, + map_placements_after_broadcast, + register_op_strategy, + register_prop_rule, +) +from torch.distributed._tensor.placement_types import ( + DTensorSpec, + Placement, + Replicate, + Shard, +) + +from torch.distributed.device_mesh import DeviceMesh + +aten = torch.ops.aten + + +@register_prop_rule(aten.t.default) +def transpose_rule(op_schema: OpSchema) -> OutputSharding: + return einop_rule("ij->ji", op_schema, linearity=True) + + +def _mm_like_strategy( + mm_equation: str, mesh: DeviceMesh, op_schema: OpSchema +) -> OpStrategy: + self_strategy, mat2_strategy = op_schema.args_schema + assert isinstance(self_strategy, OpStrategy) + assert isinstance(mat2_strategy, OpStrategy) + # generate all possible strategies for mm + mm_strategy = gen_einsum_strategies(mm_equation, mesh) + # filter out invalid strategies and associate costs + strategies = mm_strategy.strategies + filtered_strategies = [] + for strtg in strategies: + assert strtg.input_specs is not None + self_spec = strtg.input_specs[0] + mat2_spec = strtg.input_specs[1] + if is_tensor_shardable( + self_strategy.output_shape, self_spec + ) and is_tensor_shardable(mat2_strategy.output_shape, mat2_spec): + redistribute_cost = [ + generate_redistribute_costs(self_strategy, self_spec), + generate_redistribute_costs(mat2_strategy, mat2_spec), + ] + strtg.redistribute_cost = redistribute_cost + filtered_strategies.append(strtg) + + mm_strategy.strategies = filtered_strategies + + return mm_strategy + + +def _addmm_like_strategy( + mm_equation: str, mesh: DeviceMesh, op_schema: OpSchema +) -> OpStrategy: + self_strategy, mat1_strategy, mat2_strategy = op_schema.args_schema + assert isinstance(self_strategy, OpStrategy) + assert isinstance(mat1_strategy, OpStrategy) + assert isinstance(mat2_strategy, OpStrategy) + self_shape = self_strategy.output_shape + mm_out_shape = torch.Size( + [ + mat2_strategy.output_shape[-1] + if i == len(mat1_strategy.output_shape) - 1 + else dim_size + for i, dim_size in enumerate(mat1_strategy.output_shape) + ] + ) + # generate all possible strategies for mm + mm_strategy = gen_einsum_strategies(mm_equation, mesh) + # filter out invalid strategies and associate costs + strategies = mm_strategy.strategies + filtered_strategies = [] + for strtg in strategies: + # construct new strategy by consider the self arg + assert strtg.input_specs is not None + mat1_spec = strtg.input_specs[0] + mat2_spec = strtg.input_specs[1] + out_spec = strtg.output_spec + + # self arg's spec should follow the output of mm, but need + # to consider broadcast for the self arg + broadcast_dims_map = infer_broadcast_dims_map(mm_out_shape, self_shape) + self_placements = map_placements_after_broadcast( + out_spec.placements, mm_out_shape, broadcast_dims_map + ) + self_spec = DTensorSpec(mesh=mesh, placements=self_placements) + + if is_tensor_shardable( + mat1_strategy.output_shape, mat1_spec + ) and is_tensor_shardable(mat2_strategy.output_shape, mat2_spec): + # update input specs with new self spec + 
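+ # Example: if the chosen mm output is Shard(0) (row-sharded) and self is a
+ # 1-D bias of shape (n,), infer_broadcast_dims_map only lines up the last
+ # output dim, so self_spec becomes Replicate(); for a Shard(1) output the
+ # bias instead follows it as Shard(0).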
strtg.input_specs = (self_spec, mat1_spec, mat2_spec) + + # associate costs + redistribute_cost = [ + generate_redistribute_costs(self_strategy, self_spec), + generate_redistribute_costs(mat1_strategy, mat1_spec), + generate_redistribute_costs(mat2_strategy, mat2_spec), + ] + strtg.redistribute_cost = redistribute_cost + filtered_strategies.append(strtg) + + mm_strategy.strategies = filtered_strategies + + return mm_strategy + + +@register_op_strategy(aten.mm.default) +def mm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + return _mm_like_strategy("mk,kn->mn", mesh, op_schema) + + +@register_op_strategy(aten.addmm.default) +def addmm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + return _addmm_like_strategy("mk,kn->mn", mesh, op_schema) + + +@register_op_strategy(aten.bmm.default) +def bmm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + return _mm_like_strategy("bmk,bkn->bmn", mesh, op_schema) + + +@register_op_strategy(aten.baddbmm.default) +def baddmm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + return _addmm_like_strategy("bmk,bkn->bmn", mesh, op_schema) + + +@register_op_strategy(aten._scaled_dot_product_flash_attention.default) +def scaled_dot_product_attention_strategy( + mesh: DeviceMesh, op_schema: OpSchema +) -> OpStrategy: + # NOTE: currently we only support some simple strategies to support tensor parallelism + # TODO: sdpa might be a good candidate for us to explore decomposed sharding propagation + # as it involves: matmul, pointwise, reduction ops together. + return_debug_mask = len(op_schema.args_schema) >= 6 and op_schema.args_schema[5] + q_input_strategy = op_schema.args_schema[0] + assert isinstance(q_input_strategy, OpStrategy) + # q/k/v have the same shape + qkv_shape = q_input_strategy.output_shape + + all_mesh_dim_strategies = [] + + for mesh_dim in range(mesh.ndim): + single_mesh_dim_strategies = [] + + # placement list stores placements of [outputs, inputs] + # in the spda case, we have 3 valid tensor outputs and 3 tensor inputs + # first we can always accept full replication for inputs and output + all_replicate: List[Placement] = [Replicate()] * 6 + single_mesh_dim_strategies.append(all_replicate) + + # second we can accept the sharding pattern of tensor parallelism, which + # shard on the num of head dim + qkv_sharding = Shard(1) # num head dim + output_sharding = Shard(1) # num head dim + logsumexp_sharding = Shard(1) # num head dim + if return_debug_mask: + debug_attn_mask_sharding: Placement = Shard(1) # num head dim + else: + # empty debug mask, replicated + debug_attn_mask_sharding = Replicate() + + num_heads_dim_sharding = [ + output_sharding, + logsumexp_sharding, + debug_attn_mask_sharding, + qkv_sharding, + qkv_sharding, + qkv_sharding, + ] + single_mesh_dim_strategies.append(num_heads_dim_sharding) + + all_mesh_dim_strategies.append(single_mesh_dim_strategies) + + strategy_combs = itertools.product(*all_mesh_dim_strategies) + + all_strategies = [] + for strategy_comb in strategy_combs: + spec_list = [] + for specs in zip(*strategy_comb): + spec_list.append(DTensorSpec(mesh, tuple(specs))) + + assert len(spec_list) == 6 + input_expected_specs = spec_list[3:] + output_specs: List[Optional[DTensorSpec]] = list(spec_list[:3]) + # fix up output_specs and fill in None for the int and empty tensor return values + for i in range(2, 8): + output_specs.insert(i, None) + if all(is_tensor_shardable(qkv_shape, spec) for spec in input_expected_specs): + # only add to the strategy list when all 
inputs are shardable + redistribute_cost = [] + for input_idx, spec in enumerate(input_expected_specs): + qkv_strategy = op_schema.args_schema[input_idx] + assert isinstance(qkv_strategy, OpStrategy) + qkv_tensor_meta = qkv_strategy.strategies[0].output_spec.tensor_meta + spec.tensor_meta = qkv_tensor_meta + redistribute_cost.append( + generate_redistribute_costs(qkv_strategy, spec) + ) + + strat = PlacementStrategy( + output_specs=tuple(output_specs), + input_specs=tuple(input_expected_specs), + redistribute_cost=redistribute_cost, + ) + all_strategies.append(strat) + + return OpStrategy(all_strategies) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/pointwise_ops.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/pointwise_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..dd9ca8ef66812a72fc5be30fef8f4a964b271780 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/pointwise_ops.py @@ -0,0 +1,629 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from typing import List, Sequence, Tuple + +import torch + +from torch.distributed._tensor.op_schema import ( + _is_inplace_op, + _is_out_variant_op, + OpSchema, + OpStrategy, + PlacementStrategy, + RuntimeSchemaInfo, + StrategyType, + TupleStrategy, +) + +from torch.distributed._tensor.ops.utils import ( + generate_redistribute_costs, + infer_broadcast_dims_map, + map_placements_after_broadcast, + normalize_dim, + register_op_strategy, +) +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) +from torch.distributed.device_mesh import DeviceMesh + + +aten = torch.ops.aten +# leave the remaining pointwise_ops list here for convenience, +# Below ops are some pointwise ops that are yet to be supported, +# they might not be a complete list. +# pointwise_ops = [ +# "fake_quantize_per_channel_affine", +# "fake_quantize_per_tensor_affine", +# "floor_divide", # floor_divide is deprecated +# "frexp", # multiple output pointwise op, need to add support +# "gradient", # need investigation on this op +# "imag", # complex data type only +# "quantized_batch_norm", +# "quantized_max_pool1d", +# "quantized_max_pool2d", +# "real", # complex data type only +# ] + + +linear_pointwise_ops = [ + aten.div.Scalar, # this op is linear on the first argument, and the second argument is scalar, so it fits as a linear op. + aten.div_.Scalar, # this op is linear on the first argument, and the second argument is scalar, so it fits as a linear op. 
+ aten.to.dtype, + aten.add.Tensor, + aten.add_.Tensor, +] + + +pointwise_ops = [ + # please keep the entries below alphabetically sorted + aten.abs.default, + aten.abs.out, + aten.abs_.default, + aten.acos.default, + aten.acos.out, + aten.acos_.default, + aten.acosh.default, + aten.acosh.out, + aten.acosh_.default, + aten.add.Scalar, + aten.add.out, + aten.add_.Scalar, + aten.addcdiv.default, + aten.addcdiv.out, + aten.addcdiv_.default, + aten.addcmul.default, + aten.addcmul.out, + aten.addcmul_.default, + aten.angle.default, + aten.angle.out, + aten.asin.default, + aten.asin.out, + aten.asin_.default, + aten.asinh.default, + aten.asinh.out, + aten.asinh_.default, + aten.atan.default, + aten.atan.out, + aten.atan2.default, + aten.atan2.out, + aten.atan2_.default, + aten.atan_.default, + aten.atanh.default, + aten.atanh.out, + aten.atanh_.default, + aten.bitwise_and.Scalar, + aten.bitwise_and.Scalar_Tensor, + aten.bitwise_and.Scalar_out, + aten.bitwise_and.Tensor, + aten.bitwise_and.Tensor_out, + aten.bitwise_and_.Scalar, + aten.bitwise_and_.Tensor, + aten.bitwise_left_shift.Scalar_Tensor, + aten.bitwise_left_shift.Tensor, + aten.bitwise_left_shift.Tensor_Scalar, + aten.bitwise_left_shift.Tensor_Scalar_out, + aten.bitwise_left_shift.Tensor_out, + aten.bitwise_left_shift_.Tensor, + aten.bitwise_left_shift_.Tensor_Scalar, + aten.bitwise_not.default, + aten.bitwise_not.out, + aten.bitwise_not_.default, + aten.bitwise_or.Scalar, + aten.bitwise_or.Scalar_Tensor, + aten.bitwise_or.Scalar_out, + aten.bitwise_or.Tensor, + aten.bitwise_or.Tensor_out, + aten.bitwise_or_.Scalar, + aten.bitwise_or_.Tensor, + aten.bitwise_right_shift.Scalar_Tensor, + aten.bitwise_right_shift.Tensor, + aten.bitwise_right_shift.Tensor_Scalar, + aten.bitwise_right_shift.Tensor_Scalar_out, + aten.bitwise_right_shift.Tensor_out, + aten.bitwise_right_shift_.Tensor, + aten.bitwise_right_shift_.Tensor_Scalar, + aten.bitwise_xor.Scalar, + aten.bitwise_xor.Scalar_Tensor, + aten.bitwise_xor.Scalar_out, + aten.bitwise_xor.Tensor, + aten.bitwise_xor.Tensor_out, + aten.bitwise_xor_.Scalar, + aten.bitwise_xor_.Tensor, + aten.ceil.default, + aten.ceil.out, + aten.ceil_.default, + aten.clamp.default, + aten.clamp.out, + aten.clamp_.default, + aten.clip.default, + aten.clip.out, + aten.clip_.default, + aten.conj_physical.default, + aten.conj_physical.out, + aten.conj_physical_.default, + aten.copysign.Scalar, + aten.copysign.Scalar_out, + aten.copysign.Tensor, + aten.copysign.out, + aten.copysign_.Scalar, + aten.copysign_.Tensor, + aten.cos.default, + aten.cos.out, + aten.cos_.default, + aten.cosh.default, + aten.cosh.out, + aten.cosh_.default, + aten.deg2rad.default, + aten.deg2rad.out, + aten.deg2rad_.default, + aten.digamma.default, + aten.digamma.out, + aten.digamma_.default, + aten.div.Tensor, + aten.div.Tensor_mode, + aten.div.out, + aten.div.out_mode, + aten.div_.Tensor, + aten.div_.Tensor_mode, + aten.eq.Tensor, + aten.eq.Tensor_out, + aten.eq.Scalar, + aten.eq.Scalar_out, + aten.erf.default, + aten.erf.out, + aten.erf_.default, + aten.erfc.default, + aten.erfc.out, + aten.erfc_.default, + aten.erfinv.default, + aten.erfinv.out, + aten.erfinv_.default, + aten.exp.default, + aten.exp.out, + aten.exp2.default, + aten.exp2.out, + aten.exp2_.default, + aten.exp_.default, + aten.expm1.default, + aten.expm1.out, + aten.expm1_.default, + aten.float_power.Scalar, + aten.float_power.Scalar_out, + aten.float_power.Tensor_Scalar, + aten.float_power.Tensor_Scalar_out, + aten.float_power.Tensor_Tensor, + aten.float_power.Tensor_Tensor_out, + 
aten.float_power_.Scalar, + aten.float_power_.Tensor, + aten.floor.default, + aten.floor.out, + aten.floor_.default, + aten.fmod.Scalar, + aten.fmod.Scalar_out, + aten.fmod.Tensor, + aten.fmod.Tensor_out, + aten.fmod_.Scalar, + aten.fmod_.Tensor, + aten.frac.default, + aten.frac.out, + aten.frac_.default, + aten.ge.Scalar, + aten.ge.Tensor, + aten.gelu.default, + aten.gt.Tensor, + aten.gt.Tensor_out, + aten.gt.Scalar, + aten.gt.Scalar_out, + aten.gt.Scalar, + aten.gt.Tensor, + aten.hypot.default, + aten.hypot.out, + aten.hypot_.default, + aten.i0.default, + aten.i0.out, + aten.i0_.default, + aten.igamma.default, + aten.igamma.out, + aten.igamma_.default, + aten.igammac.default, + aten.igammac.out, + aten.igammac_.default, + aten.isnan.default, + aten.ldexp.default, + aten.ldexp.out, + aten.ldexp_.default, + aten.lt.Tensor, + aten.lt.Tensor_out, + aten.lt.Scalar, + aten.lt.Scalar_out, + aten.le.Scalar, + aten.le.Tensor, + aten.lerp.Scalar, + aten.lerp.Scalar_out, + aten.lerp.Tensor, + aten.lerp.Tensor_out, + aten.lerp_.Scalar, + aten.lerp_.Tensor, + aten.lgamma.default, + aten.lgamma.out, + aten.lgamma_.default, + aten.log.default, + aten.log.out, + aten.log10.default, + aten.log10.out, + aten.log10_.default, + aten.log1p.default, + aten.log1p.out, + aten.log1p_.default, + aten.log2.default, + aten.log2.out, + aten.log2_.default, + aten.log_.default, + aten.logaddexp.default, + aten.logaddexp.out, + aten.logaddexp2.default, + aten.logaddexp2.out, + aten.logical_and.default, + aten.logical_and.out, + aten.logical_and_.default, + aten.logical_not.default, + aten.logical_not.out, + aten.logical_not_.default, + aten.logical_or.default, + aten.logical_or.out, + aten.logical_or_.default, + aten.logical_xor.default, + aten.logical_xor.out, + aten.logical_xor_.default, + aten.logit.default, + aten.logit.out, + aten.logit_.default, + aten.masked_fill.Scalar, + aten.maximum.out, + aten.mul.Scalar, + aten.mul.Tensor, + aten.mul.out, + aten.mul_.Scalar, + aten.mul_.Tensor, + aten.mvlgamma.default, + aten.mvlgamma.out, + aten.mvlgamma_.default, + aten.native_dropout_backward.default, + aten.native_dropout_backward.out, + aten.nan_to_num.default, + aten.nan_to_num.out, + aten.nan_to_num_.default, + aten.ne.Scalar, + aten.neg.default, + aten.neg.out, + aten.neg_.default, + aten.nextafter.default, + aten.nextafter.out, + aten.nextafter_.default, + aten.polygamma.default, + aten.polygamma.out, + aten.polygamma_.default, + aten.positive.default, + aten.pow.Scalar, + aten.pow.Scalar_out, + aten.pow.Tensor_Scalar, + aten.pow.Tensor_Scalar_out, + aten.pow.Tensor_Tensor, + aten.pow.Tensor_Tensor_out, + aten.pow_.Scalar, + aten.pow_.Tensor, + aten.reciprocal.default, + aten.reciprocal.out, + aten.reciprocal_.default, + aten.rad2deg.default, + aten.rad2deg.out, + aten.rad2deg_.default, + aten.relu.default, + aten.relu_.default, + aten.remainder.Scalar, + aten.remainder.Scalar_Tensor, + aten.remainder.Scalar_out, + aten.remainder.Tensor, + aten.remainder.Tensor_out, + aten.remainder_.Scalar, + aten.remainder_.Tensor, + aten.round.decimals, + aten.round.decimals_out, + aten.round.default, + aten.round.out, + aten.round_.decimals, + aten.round_.default, + aten.rsqrt.default, + aten.rsqrt.out, + aten.rsqrt_.default, + aten.rsub.Scalar, + aten.sgn.default, + aten.sgn.out, + aten.sgn_.default, + aten.sigmoid.default, + aten.sigmoid.out, + aten.sigmoid_.default, + aten.sign.default, + aten.sign.out, + aten.sign_.default, + aten.signbit.default, + aten.signbit.out, + aten.silu.default, + aten.silu.out, + aten.sin.default, 
+ aten.sin.out, + aten.sin_.default, + aten.sinc.default, + aten.sinc.out, + aten.sinc_.default, + aten.sinh.default, + aten.sinh.out, + aten.sinh_.default, + aten.sqrt.default, + aten.sqrt.out, + aten.sqrt_.default, + aten.square.default, + aten.square.out, + aten.square_.default, + aten.sub.Scalar, + aten.sub.Tensor, + aten.sub.out, + aten.sub_.Scalar, + aten.sub_.Tensor, + aten.tan.default, + aten.tan.out, + aten.tan_.default, + aten.tanh.default, + aten.tanh.out, + aten.tanh_.default, + aten.true_divide.Tensor, + aten.trunc.default, + aten.trunc.out, + aten.trunc_.default, + aten.where.self, + aten.where.self_out, + aten.xlogy.OutScalar_Self, + aten.xlogy.OutScalar_Other, + aten.xlogy.OutTensor, + aten.xlogy.Scalar_Other, + aten.xlogy.Scalar_Self, + aten.xlogy.Tensor, + aten.xlogy_.Scalar_Other, + aten.xlogy_.Tensor, + # backward point-wise ops + # please keep the entries below alphabetically sorted + aten.gelu_backward.default, + aten.sigmoid_backward.default, + aten.silu_backward.default, + aten.tanh_backward.default, + aten.threshold_backward.default, +] + + +def pointwise_strategy( + mesh: DeviceMesh, op_schema: OpSchema, linearity: bool = False +) -> OpStrategy: + max_shards_strategy_index = -1 + max_shards = -1 + + if _is_inplace_op(op_schema.op): + # inplace op should follow the first arg strategy + followed_strategy = op_schema.args_schema[0] + elif _is_out_variant_op(op_schema.op): + # out variant op should follow the out kwarg strategy + followed_strategy = op_schema.kwargs_schema["out"] + else: + # normal pointwise op, we choose to follow the arg with + # the max shards in case operands needs reshard + for idx, arg_strategy in enumerate(op_schema.args_schema): + if not isinstance(arg_strategy, OpStrategy): + continue + + arg_max_shards = arg_strategy.max_num_shards() + if arg_max_shards > max_shards: + max_shards_strategy_index = idx + max_shards = arg_max_shards + + followed_strategy = op_schema.args_schema[max_shards_strategy_index] + + assert isinstance( + followed_strategy, OpStrategy + ), f"no strategy to follow for {op_schema}!" 
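+ # Example: for aten.mul.Tensor(x, y) with equal shapes, if x is Shard(0) on a
+ # 4-way mesh and y is Replicate(), x has the larger max_num_shards, so the
+ # output follows Shard(0) and y is given a matching target spec along with
+ # its redistribute cost.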
+ return common_pointwise_strategy( + mesh, op_schema.args_schema, followed_strategy, linearity + ) + + +def common_pointwise_strategy( + mesh: DeviceMesh, + args_schema: Sequence[object], + followed_strategy: OpStrategy, + linearity: bool, +) -> OpStrategy: + # handle broadcasting + common_shape = torch.broadcast_shapes( + *[arg.output_shape for arg in args_schema if isinstance(arg, OpStrategy)] + ) + pointwise_strategy = OpStrategy([]) + + for placement_strategy in followed_strategy.strategies: + spec_to_follow = placement_strategy.output_spec + out_placements: List[Placement] = [] + for placement in spec_to_follow.placements: + if isinstance(placement, Shard): + shard_dim = normalize_dim(placement.dim, len(spec_to_follow.shape)) + common_ndim = len(common_shape) + new_shard_dim = common_ndim - len(spec_to_follow.shape) + shard_dim + out_placements.append(Shard(new_shard_dim)) + elif isinstance(placement, _Partial) and not linearity: + # clear the partial placemnet if op does not support linearity + # by default we just replicate the partial, need to see if this + # is optimal for all cases + out_placements.append(Replicate()) + else: + out_placements.append(placement) + + input_specs: List[DTensorSpec] = [] + redistribute_costs: List[List[float]] = [] + for idx, input_arg in enumerate(args_schema): + if isinstance(input_arg, OpStrategy): + # every arg follow the out_placements, but need to handle broadcasting + input_arg_spec = input_arg.strategies[0].output_spec + input_arg_dims_map = infer_broadcast_dims_map( + common_shape, input_arg_spec.shape + ) + input_target_placements = map_placements_after_broadcast( + tuple(out_placements), + common_shape, + input_arg_dims_map, + ) + input_arg_target_spec = DTensorSpec( + mesh=mesh, + placements=input_target_placements, + tensor_meta=input_arg_spec.tensor_meta, + ) + input_specs.append(input_arg_target_spec) + redistribute_costs.append( + generate_redistribute_costs(input_arg, input_arg_target_spec) + ) + + pointwise_strategy.strategies.append( + PlacementStrategy( + output_specs=DTensorSpec( + mesh=mesh, + placements=tuple(out_placements), + ), + input_specs=input_specs, + redistribute_cost=redistribute_costs, + ) + ) + return pointwise_strategy + + +def linear_pointwise_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + """ + Linear pointwise operators can propagate pending reductions. + For example, c = add(a, b); if a is pending sum, then c will be + pending sum as well without any communication overhead. 
+ """ + return pointwise_strategy(mesh, op_schema, linearity=True) + + +for op in linear_pointwise_ops: + register_op_strategy(op, schema_info=RuntimeSchemaInfo(static_kwargkey=["out"]))( + linear_pointwise_strategy + ) + +for op in pointwise_ops: + register_op_strategy(op, schema_info=RuntimeSchemaInfo(static_kwargkey=["out"]))( + pointwise_strategy + ) + + +# TODO: add all for_each ops +for_each_ops = [ + aten._foreach_abs_.default, + aten._foreach_addcdiv_.Scalar, + aten._foreach_addcdiv_.ScalarList, + aten._foreach_addcdiv_.Tensor, + aten._foreach_addcmul.Scalar, + aten._foreach_addcmul_.Scalar, + aten._foreach_addcmul_.ScalarList, + aten._foreach_addcmul_.Tensor, + aten._foreach_div_.List, + aten._foreach_div_.ScalarList, + aten._foreach_lerp_.Scalar, + aten._foreach_maximum_.List, + aten._foreach_mul.Scalar, + aten._foreach_mul.List, + aten._foreach_mul_.Scalar, + aten._foreach_mul_.ScalarList, + aten._foreach_mul_.Tensor, + aten._foreach_mul_.List, + aten._foreach_neg.default, + aten._foreach_neg_.default, + aten._foreach_reciprocal_.default, + aten._foreach_sub_.Scalar, + aten._foreach_sqrt.default, + aten._foreach_sqrt_.default, + aten._foreach_zero_.default, +] + +for_each_linearity_ops = [ + aten._foreach_add.Scalar, + aten._foreach_add_.Scalar, + aten._foreach_add_.ScalarList, + aten._foreach_add.List, + aten._foreach_add_.List, +] + + +def foreach_list_pointwise_strategy( + mesh: DeviceMesh, op_schema: OpSchema, linearity: bool = False +) -> StrategyType: + """ + Apply the pointwise strategy to the zipped arguments. For example, if we + run a foreach add of two lists l1 and l2, then we apply the pointwise + strategy on each pair (l1[i], l2[i]). If the first argument is a list but + the second (or later) one is a tensor, then we broadcast the tensor by + replicating it into a list with the length of the first argument. + """ + + def args_tuple_strategies(args_schema: Tuple[object, ...]) -> List[TupleStrategy]: + first_arg = args_schema[0] + assert isinstance(first_arg, TupleStrategy) + strategy_len = len(first_arg.childs) + tuple_strategies: List[TupleStrategy] = [] + for arg_idx, arg in enumerate(args_schema): + if isinstance(arg, TupleStrategy): + # every tuple strategy should have the same length + assert len(arg.childs) == strategy_len + tuple_strategies.append(arg) + elif isinstance(arg, OpStrategy): + if arg_idx > 0: # implicitly broadcast + tuple_strategies.append( + TupleStrategy([arg for _ in range(strategy_len)]) + ) + else: + raise RuntimeError( + f"foreach list op only supports tuple strategy! 
{op_schema}" + ) + return tuple_strategies + + args_strategies = args_tuple_strategies(op_schema.args_schema) + follow_strategy: TupleStrategy = args_strategies[0] + foreach_strategy_list: List[OpStrategy] = [] + for child_idx, child_strtgy in enumerate(follow_strategy.childs): + assert isinstance(child_strtgy, OpStrategy) + args_schema: List[StrategyType] = [ + arg_strategy.childs[child_idx] for arg_strategy in args_strategies + ] + pointwise_strategy: OpStrategy = common_pointwise_strategy( + mesh, args_schema, child_strtgy, linearity + ) + foreach_strategy_list.append(pointwise_strategy) + return TupleStrategy(foreach_strategy_list) + + +def foreach_list_linear_pointwise_strategy( + mesh: DeviceMesh, op_schema: OpSchema +) -> StrategyType: + """ + for each list op stratgy that supports linearity + """ + return foreach_list_pointwise_strategy(mesh, op_schema, linearity=True) + + +for op in for_each_ops: + register_op_strategy(op, schema_info=RuntimeSchemaInfo(needs_pytree=True))( + foreach_list_pointwise_strategy + ) + +for op in for_each_linearity_ops: + register_op_strategy(op, schema_info=RuntimeSchemaInfo(needs_pytree=True))( + foreach_list_linear_pointwise_strategy + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/random_ops.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/random_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..b666d1d8bcd0b26c65f77f85d00e78b8f9d17524 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/random_ops.py @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +import torch +from torch.distributed._tensor.op_schema import ( + OpSchema, + OpStrategy, + PlacementStrategy, + StrategyType, +) +from torch.distributed._tensor.ops.utils import is_tensor_partial, register_op_strategy +from torch.distributed.device_mesh import DeviceMesh + +aten = torch.ops.aten + + +@register_op_strategy( + [aten.normal_.default, aten.uniform_.default, aten.native_dropout.default] +) +def random_op_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + self_strategy = op_schema.args_schema[0] + assert isinstance(self_strategy, OpStrategy) + + random_strategy = OpStrategy([]) + for arg_strategy in self_strategy.strategies: + arg_spec = arg_strategy.output_spec + if is_tensor_partial(arg_spec): + # TODO: figure out how inplace random op should behave when it's partial + raise RuntimeError(f"{op_schema.op} with _Partial is not supported yet!") + random_strategy.strategies.append(PlacementStrategy(output_specs=arg_spec)) + + return random_strategy diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/tensor_ops.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/tensor_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..4bfc4c60e91026f35bc87ba5ed791c3e1d6abd6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/tensor_ops.py @@ -0,0 +1,826 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +import itertools +from typing import cast, List, Optional, Sequence, Tuple + +import torch + +from torch.distributed._tensor._utils import compute_local_shape +from torch.distributed._tensor.op_schema import ( + OpSchema, + OpStrategy, + OutputSharding, + PlacementStrategy, + RuntimeSchemaInfo, + StrategyType, + TupleStrategy, +) +from torch.distributed._tensor.ops.common_rules import pointwise_rule +from torch.distributed._tensor.ops.embedding_ops import _MaskPartial +from torch.distributed._tensor.ops.utils import ( + generate_redistribute_costs, + is_tensor_dim_sharded, + is_tensor_partial, + is_tensor_shardable, + normalize_dim, + prod, + register_op_strategy, + register_prop_rule, +) +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) +from torch.distributed.device_mesh import DeviceMesh + + +aten = torch.ops.aten + + +def default_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + # Default strategy by default just propagate the first input strategy + select_strategy = op_schema.args_schema[0] + assert isinstance(select_strategy, OpStrategy) + default_strategy = [] + for strategy in select_strategy.strategies: + # we create new DTensorSpecs even for default strategy to assure that + # the tensor metas are distinct between the arguments and outputs + default_strategy.append( + PlacementStrategy( + output_specs=DTensorSpec( + mesh=strategy.output_spec.mesh, + placements=strategy.output_spec.placements, + ) + ) + ) + return OpStrategy(default_strategy) + + +register_op_strategy( + [ + aten.clone.default, + aten.contiguous.default, + aten.copy_.default, + aten.detach.default, + aten.fill_.Scalar, + aten.zero_.default, + ] +)(default_strategy) + +register_op_strategy( + aten._to_copy.default, schema_info=RuntimeSchemaInfo(static_kwargkey=["dtype"]) +)(default_strategy) + + +@register_op_strategy( + [ + aten.equal.default, + aten.is_same_size.default, + ] +) +def equal_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + # equal_strategy deals with ops that comparing two tensor, we need to make sure + # sharding layout the same with two operands, we choose to follow the arg with max + # num of shards, still keep is_same_size here for completeness as they share the + # same strategy in theory. 
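+ # Example: comparing a DTensor placed as (Shard(0),) with one placed as
+ # (Replicate(),): the sharded operand has more shards, so the comparison is
+ # laid out as (Shard(0),); a _Partial placement would first be turned into
+ # Replicate() so that comparing local shards stays valid.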
+ self_strategy, other_strategy = op_schema.args_schema + assert isinstance(self_strategy, OpStrategy) + assert isinstance(other_strategy, OpStrategy) + + select_strategy = ( + self_strategy + if self_strategy.max_num_shards() >= other_strategy.max_num_shards() + else other_strategy + ) + equal_strategy = OpStrategy([]) + + for arg_strategy in select_strategy.strategies: + arg_spec = arg_strategy.output_spec + if is_tensor_partial(arg_spec): + # if the arg_spec have partial, reshard to replicate + # otherwise local shard tensor comparison would be invalid + output_spec = DTensorSpec( + mesh=arg_spec.mesh, + placements=tuple( + Replicate() if isinstance(p, _Partial) else p + for p in arg_spec.placements + ), + ) + equal_strategy.strategies.append( + PlacementStrategy(output_specs=output_spec) + ) + else: + equal_strategy.strategies.append(PlacementStrategy(arg_spec)) + return equal_strategy + + +@register_op_strategy( + [ + aten.empty_like.default, + aten.ones_like.default, + aten.rand_like.default, + aten.randn_like.default, + aten.zeros_like.default, + ], + schema_info=RuntimeSchemaInfo(1, ["dtype"]), +) +@register_op_strategy( + [aten.full_like.default], + schema_info=RuntimeSchemaInfo(2, ["dtype"]), +) +@register_op_strategy( + [ + aten.randint_like.default, + aten.randint_like.low_dtype, + aten.randint_like.low_dtype_out, + ], + schema_info=RuntimeSchemaInfo(3, ["dtype"]), +) +def create_like_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + # create_like_strategy deals with ops that creating tensors with same + # shape as input, but with specific content that does not depend on + # the input, we can propagate sharding, but we have to make sure we + # move from partial to replicated. + select_strategy = op_schema.args_schema[0] + create_like_strategy = OpStrategy([]) + assert isinstance(select_strategy, OpStrategy) + for arg_strategy in select_strategy.strategies: + arg_spec = arg_strategy.output_spec + if is_tensor_partial(arg_spec): + # if the arg_spec have partial, accept partial + # in the input_specs but output replicate for + # those corresponding mesh dims + output_spec = DTensorSpec( + mesh=arg_spec.mesh, + placements=tuple( + Replicate() if isinstance(p, _Partial) else p + for p in arg_spec.placements + ), + ) + create_like_strategy.strategies.append( + PlacementStrategy(output_specs=output_spec, input_specs=(arg_spec,)) + ) + + else: + create_like_strategy.strategies.append(PlacementStrategy(arg_spec)) + + return create_like_strategy + + +@register_op_strategy( + [ + aten.new_empty.default, + aten.new_full.default, + aten.new_ones.default, + aten.new_zeros.default, + aten.new_empty_strided.default, # TODO: re-think new_empty_strided + ], + schema_info=RuntimeSchemaInfo(1, ["dtype"]), +) +def new_factory_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + # TODO: maybe we should generate all possible shardings intead of just stay + # replicated for new factory methods + input_strategy = op_schema.args_schema[0] + new_factory_strategy = OpStrategy([]) + assert isinstance(input_strategy, OpStrategy) + for arg_strategy in input_strategy.strategies: + input_spec = arg_strategy.output_spec + replica_spec = DTensorSpec(mesh, tuple([Replicate()] * mesh.ndim)) + new_factory_strategy.strategies.append( + PlacementStrategy(output_specs=replica_spec, input_specs=(input_spec,)) + ) + + return new_factory_strategy + + +@register_op_strategy(aten.bucketize.Tensor) +def gen_bucketize_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + """Just 
propagate input sharding, but expect replicated for boundaries input.""" + input_strategy = op_schema.args_schema[0] + bucketize_strategy = OpStrategy([]) + assert isinstance(input_strategy, OpStrategy) + for arg_strategy in input_strategy.strategies: + arg_spec = DTensorSpec(mesh, arg_strategy.output_spec.placements) + replica_spec = DTensorSpec(mesh, tuple([Replicate()] * mesh.ndim)) + bucketize_strategy.strategies.append( + PlacementStrategy( + output_specs=arg_spec, input_specs=(arg_spec, replica_spec) + ) + ) + + return bucketize_strategy + + +@register_op_strategy(aten.slice.Tensor, schema_info=RuntimeSchemaInfo(1)) +def gen_slice_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + """Forward all shardings except the slice dimension.""" + defaults = (None, 0, None, None, 1) + input_strategy, dim, start, end, step = ( + op_schema.args_schema + defaults[len(op_schema.args_schema) :] + ) + assert isinstance(input_strategy, OpStrategy) + input_shape = input_strategy.output_shape + input_ndim = input_strategy.output_ndim + assert isinstance(dim, int) + if start is None: + start = 0 + if end is None or end > input_shape[dim]: + end = input_shape[dim] + assert isinstance(start, int) + assert isinstance(end, int) + assert isinstance(step, int) + + # normalize args + slice_dim = normalize_dim(dim, input_ndim) + start = normalize_dim(start, input_shape[dim]) + end = normalize_dim(end, input_shape[dim]) + + redundant_slice = start == 0 and end == input_shape[dim] and step == 1 + + slice_strategy = OpStrategy([]) + + for arg_strategy in input_strategy.strategies: + arg_spec = arg_strategy.output_spec + if not is_tensor_dim_sharded(arg_spec, dim=slice_dim) or redundant_slice: + # only add the strategy if the slice dim is not sharded + out_spec = DTensorSpec(mesh, arg_spec.placements) + slice_strategy.strategies.append(PlacementStrategy(output_specs=out_spec)) + if not slice_strategy.strategies: + # if all strategies are filtered out, unsharding all specs on slice dim + # of the input strategy, and use that as the op strategy + for arg_strategy in input_strategy.strategies: + arg_spec = arg_strategy.output_spec + unshard_spec = DTensorSpec( + mesh, unshard_tensor_dim(arg_spec.placements, dim=slice_dim) + ) + slice_strategy.strategies.append( + PlacementStrategy(output_specs=unshard_spec) + ) + return slice_strategy + + +def unshard_tensor_dim( + placements: Sequence[Placement], dim: int +) -> Tuple[Placement, ...]: + """Disallow the given tensor dimension to be sharded.""" + return tuple( + p if (not isinstance(p, Shard) or p.dim != dim) else Replicate() + for p in placements + ) + + +def replicate_tensor_dim( + placements: Sequence[Placement], dim: int +) -> Tuple[Placement, ...]: + """Force the given tensor dimension to be replicated.""" + # Not using p.is_shard() to avoid mypy complain about Placement not having + # attribute dim. + return tuple( + Replicate() if p.is_partial() or isinstance(p, Shard) and p.dim == dim else p + for p in placements + ) + + +@register_op_strategy(aten.slice_scatter.default, schema_info=RuntimeSchemaInfo(2)) +def gen_slice_scatter_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + # 1. number of dimensions in input and src need to match. + # 2. number of elements on all non-dim need to match between input and src. + # 3. numer of elements in src in dim need to match the slice size. 
+ # Given the above: + # - We suggest for src to follow the sharding of input, except on the scatter dimension, + # where our best bet for now is to make them replicated as a fall-back. + # TODO: Ideally we'd like to make sure the output is re-sharded afterwards to keep input sharding. + + input_strategy = op_schema.args_schema[0] + assert isinstance(input_strategy, OpStrategy) + input_ndim = input_strategy.output_ndim + slice_dim = ( + cast(int, op_schema.args_schema[2]) if len(op_schema.args_schema) > 2 else 0 + ) + slice_dim = normalize_dim(slice_dim, input_ndim) + + slice_scatter_strategy = OpStrategy([]) + # by default follow the input strategy for both input and src + for arg_strategy in input_strategy.strategies: + arg_spec = arg_strategy.output_spec + if not ( + is_tensor_dim_sharded(arg_spec, dim=slice_dim) + or is_tensor_partial(arg_spec) + ): + # only add the strategy if the slice_scatter dim is not sharded or partial + slice_scatter_strategy.strategies.append( + PlacementStrategy(output_specs=arg_spec) + ) + + if not slice_scatter_strategy.strategies: + # if all strategies are filtered out, replicating all specs on slice_scatter dim + # of the input strategy, and use that as the op strategy + for arg_strategy in input_strategy.strategies: + arg_spec = arg_strategy.output_spec + replicate_spec = DTensorSpec( + mesh, replicate_tensor_dim(arg_spec.placements, dim=slice_dim) + ) + slice_scatter_strategy.strategies.append( + PlacementStrategy(output_specs=replicate_spec) + ) + return slice_scatter_strategy + + +@register_op_strategy(aten._local_scalar_dense.default) +def replica_only_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + """Only allow replication on the input/output.""" + replicate_spec = DTensorSpec(mesh, tuple([Replicate()] * mesh.ndim)) + return OpStrategy([PlacementStrategy(replicate_spec)]) + + +@register_op_strategy(aten.gather.default) +def gather_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + input_strategy = cast(OpStrategy, op_schema.args_schema[0]) + dim = cast(int, op_schema.args_schema[1]) + index_strategy = cast(OpStrategy, op_schema.args_schema[2]) + + input_shape = input_strategy.output_shape + index_shape = index_strategy.output_shape + + all_mesh_dim_strategies = [] + + for mesh_dim in range(mesh.ndim): + single_mesh_dim_strategies = [] + + # placement list stores placements of [output, input, index] + # first we always have replicate all for inputs and output + all_replicate: List[Placement] = [Replicate()] * 3 + single_mesh_dim_strategies.append(all_replicate) + + # input sharding, input sharded, index accepts mask partial, output follows index + # this only works when the input is sharded on the gather dimension, and + # index has size 1 on the gather dimension + if index_shape[dim] == 1: + index_partial_placement = _MaskPartial(logical_dim_size=input_shape[dim]) + input_sharding = [ + index_partial_placement, + Shard(dim), + index_partial_placement, + ] + single_mesh_dim_strategies.append(input_sharding) + + # index sharding, input replicated, index sharded, output follows index + # this only works when the sharding dimension is the gather dimension + index_sharding = [Shard(dim), Replicate(), Shard(dim)] + single_mesh_dim_strategies.append(index_sharding) + + all_mesh_dim_strategies.append(single_mesh_dim_strategies) + + strategy_combs = itertools.product(*all_mesh_dim_strategies) + + all_strategies = [] + for strategy_comb in strategy_combs: + spec_list = [] + for specs in zip(*strategy_comb): + 
spec_list.append(DTensorSpec(mesh, tuple(specs))) + + if is_tensor_shardable(input_shape, spec_list[1]) and is_tensor_shardable( + index_shape, spec_list[2] + ): + input_spec, index_spec = spec_list[1:] + redistribute_cost = [ + generate_redistribute_costs(input_strategy, input_spec), + generate_redistribute_costs(index_strategy, index_spec), + ] + strat = PlacementStrategy( + output_specs=spec_list[0], + input_specs=spec_list[1:], + redistribute_cost=redistribute_cost, + ) + all_strategies.append(strat) + + return OpStrategy(all_strategies) + + +@register_op_strategy(aten.stack.default, RuntimeSchemaInfo(1, needs_pytree=True)) +def stack_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + args_schema = op_schema.args_schema + input_tuple_strategy = args_schema[0] + assert isinstance(input_tuple_strategy, TupleStrategy), f"{input_tuple_strategy}" + dim = cast(int, args_schema[1]) if len(args_schema) > 1 else 0 + + # Follow the 1st child strategy's placement strategies + child_strategy = input_tuple_strategy.childs[0] + assert isinstance(child_strategy, OpStrategy), f"{child_strategy}" + strategies: List[PlacementStrategy] = [] + + # For each arg strategy of the child to follow, we check if every other + # child has an equal strategy. If so, then that is a valid strategy. If + # there are no such valid strategies, then we replicate. + for arg_strategy in child_strategy.strategies: + arg_spec = arg_strategy.output_spec + # For each arg strategy (whether the one to follow or other), we + # replicate the stack dim since we cannot stack on a sharded dim + if is_tensor_dim_sharded(arg_spec, dim): + arg_spec = DTensorSpec( + mesh, unshard_tensor_dim(arg_spec.placements, dim=dim) + ) + all_compatible = True + for other_child_strategy in input_tuple_strategy.childs[1:]: + has_compatible_strategy = False + assert isinstance( + other_child_strategy, OpStrategy + ), f"{other_child_strategy}" + for other_arg_strategy in other_child_strategy.strategies: + other_arg_spec = other_arg_strategy.output_spec + if is_tensor_dim_sharded(other_arg_spec, dim): + other_arg_spec = DTensorSpec( + mesh, unshard_tensor_dim(other_arg_spec.placements, dim=dim) + ) + if other_arg_spec.placements == arg_spec.placements: + has_compatible_strategy = True + break + if not has_compatible_strategy: + all_compatible = False + break + if all_compatible: + input_specs = tuple( + arg_spec for _ in range(len(input_tuple_strategy.childs)) + ) + strategies.append( + PlacementStrategy( + output_specs=DTensorSpec(mesh, arg_spec.placements), + input_specs=input_specs, + ) + ) + if not strategies: + # Arbitrarily use each child strategy's 0th strategy's output spec + input_specs = tuple( + cast(OpStrategy, child_strategy).strategies[0].output_spec + for child_strategy in input_tuple_strategy.childs + ) + replicate_spec = DTensorSpec(mesh, tuple(Replicate() for _ in range(mesh.ndim))) + strategies.append(PlacementStrategy(output_specs=replicate_spec)) + return OpStrategy(strategies) + + +@register_prop_rule(aten.index_select.default, schema_info=RuntimeSchemaInfo(1)) +def prop_index_select(op_schema: OpSchema) -> OutputSharding: + values_spec, dim, indices_spec = op_schema.args_schema + + assert isinstance(values_spec, DTensorSpec) + assert isinstance(dim, int) + assert isinstance(indices_spec, DTensorSpec) + + all_indices_spec: List[Optional[DTensorSpec]] = [ + indices_spec if dim == i else None for i in range(values_spec.ndim) + ] + + result = prop_index( + OpSchema( + op=op_schema.op, + args_schema=(values_spec, 
all_indices_spec), + kwargs_schema=op_schema.kwargs_schema, + ) + ) + if result.schema_suggestions: + result.schema_suggestions = [ + OpSchema( + op=op_schema.op, + args_schema=(s.args_schema[0], dim, s.args_schema[1][dim]), + kwargs_schema=op_schema.kwargs_schema, + ) + for s in result.schema_suggestions + ] + return result + + +@register_prop_rule(aten.index.Tensor, schema_info=RuntimeSchemaInfo(needs_pytree=True)) +def prop_index(op_schema: OpSchema) -> OutputSharding: + """ + Expect replicated on the first input; _mostly_ pointwise on the second input. + + TODO: exception: when the dtype of second input is "bool", then a torch.nonzero needs to be triggered first. + """ + # Current sharding constraints: + # For values: + # 1. We currently require that the dimension of values_spec be replicated or partial + # if they are being indexed on. + # 2. Other dimensions of values_spec can remain sharded if they are so. + # For indices: + # Indices can be either sharded or replicated. All index tensors need to be sharded + # in a compatible way, following the pointwise rule (including resolving _Partial + # into either sharded or replicated) + + values_spec, multi_indices_spec = op_schema.args_schema + assert isinstance(values_spec, DTensorSpec) + assert isinstance(multi_indices_spec, list) + multi_indices_spec = cast(List[Optional[DTensorSpec]], multi_indices_spec) + valid_indices_spec: List[Tuple[int, DTensorSpec]] = [ + (i, a) for i, a in enumerate(multi_indices_spec) if a is not None + ] + + # 1. All indices have to be sharded equally. Moreover, indices can be broadcast. + # Here, we piggyback on the pointwise sharding rule for indices. + indices_out = pointwise_rule( + OpSchema( + op=op_schema.op, + args_schema=tuple(v[1] for v in valid_indices_spec), + kwargs_schema={}, + ) + ) + need_reshard_on_indices = indices_out.output_spec is None + + if not need_reshard_on_indices: + # this means that our inputs are already sharded properly and we will use that as our indices_spec + assert isinstance(indices_out.output_spec, DTensorSpec) + indices_spec: DTensorSpec = indices_out.output_spec + else: + assert indices_out.schema_suggestions is not None + valid_indices_suggestion = indices_out.schema_suggestions[0] + for i, v in enumerate(valid_indices_suggestion.args_spec): + multi_indices_spec[valid_indices_spec[i][0]] = v + # we'll need to call pointwise_rule again to see what's our ideal indices_spec and then + # use that to compute our ideal values_spec + indices_output_spec = pointwise_rule(valid_indices_suggestion).output_spec + assert isinstance(indices_output_spec, DTensorSpec) + indices_spec = indices_output_spec + + lookup_dims = {v[0] for v in valid_indices_spec} + + need_reshard_on_values = tuple( + (isinstance(vp, Shard) and (vp.dim in lookup_dims or isinstance(ip, Shard))) + for vp, ip in zip(values_spec.placements, indices_spec.placements) + ) + + if not need_reshard_on_indices and not any(need_reshard_on_values): + value_placements = values_spec.placements + + all_dims_consecutive = all( + b[0] - a[0] == 1 + for b, a in zip(valid_indices_spec[1:], valid_indices_spec[:-1]) + ) + if all_dims_consecutive: + # if all index vectors are consecutives, insert at the dimension of the first index + insert_dim: int = valid_indices_spec[0][0] + else: + # else, insert on the first dimension + insert_dim = 0 + + def place(vp: Placement, ip: Placement) -> Placement: + if isinstance(vp, Shard): + return Shard( + vp.dim + if vp.dim < insert_dim + # accounts for the offset in output dimensions + else 
vp.dim + + indices_spec.ndim + - sum(1 if vp.dim > v[0] else 0 for v in valid_indices_spec) + ) + if isinstance(ip, Shard): + return Shard(ip.dim + insert_dim) + # _Partial or Replicated + return vp + + value_placements = tuple( + place(vp, ip) + for vp, ip in zip(values_spec.placements, indices_spec.placements) + ) + result = OutputSharding( + output_spec=DTensorSpec( + mesh=values_spec.mesh, + placements=value_placements, + ) + ) + return result + else: + result = OutputSharding( + output_spec=None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=( + DTensorSpec( + mesh=values_spec.mesh, + placements=tuple( + [ + Replicate() if need_reshard_on_values[i] else v + for i, v in enumerate(values_spec.placements) + ] + ), + tensor_meta=values_spec.tensor_meta, + ), + multi_indices_spec, + ), + kwargs_schema=op_schema.kwargs_schema, + ) + ], + ) + return result + + +@register_prop_rule( + aten.cat.default, schema_info=RuntimeSchemaInfo(1, needs_pytree=True) +) +def cat_rule(op_schema: OpSchema) -> OutputSharding: + # torch.cat requires all tensors must either have the same shape (except + # in the concatenating dimension) or be "empty". "Empty" here strictly means + # tensor.shape is torch.Size([0]). When tensor.ndim > 1, it will be treated + # as a non-empty tensor and the shape must match on non-cat dimensions. + def is_empty(spec: DTensorSpec) -> bool: + return list(spec.shape) == [0] + + # the first arg is a list of input tensor specs + tensor_list_specs = cast(List[DTensorSpec], op_schema.args_schema[0]) + assert len(tensor_list_specs) > 0, "torch.cat expects a non-empty list of tensors" + non_empty_specs = [spec for spec in tensor_list_specs if not is_empty(spec)] + + if len(non_empty_specs) == 0: + # all tensors are empty, we can return any output sharding + return OutputSharding( + output_spec=DTensorSpec( + mesh=tensor_list_specs[0].mesh, + placements=tensor_list_specs[0].placements, + ) + ) + + assert all( + spec.ndim == non_empty_specs[0].ndim for spec in non_empty_specs + ), f"Expect all tensors to have same shape or empty, but got {tensor_list_specs}" + assert all( + spec.mesh == tensor_list_specs[0].mesh for spec in tensor_list_specs + ), f"Expect all tensors to have same mesh, but got {tensor_list_specs}" + + # ndim will also be the result's ndim + ndim = 1 + for spec in tensor_list_specs: + ndim = max(ndim, spec.ndim) + + dim = 0 # default dim = 0 + if len(op_schema.args_schema) > 1: + dim = cast(int, op_schema.args_schema[1]) + dim = normalize_dim(dim, ndim) + + # Make sure all tensors are replicated on cat dimension + need_reshard = False + tensor_list_specs_after: List[DTensorSpec] = [] + for spec in tensor_list_specs: + if not is_empty(spec) and ( + is_tensor_dim_sharded(spec, dim=dim) or is_tensor_partial(spec) + ): + need_reshard = True + tensor_list_specs_after.append( + DTensorSpec( + mesh=spec.mesh, + placements=replicate_tensor_dim(spec.placements, dim=dim), + tensor_meta=spec.tensor_meta, + ) + ) + else: + tensor_list_specs_after.append(spec) + + tensor_list_specs = tensor_list_specs_after + + # align non-cat dimensions placements based on reshard cost + non_empty_specs = [spec for spec in tensor_list_specs if not is_empty(spec)] + mesh = non_empty_specs[0].mesh + ndim = non_empty_specs[0].ndim + new_placements: List[Placement] = [] + for mesh_dim in range(mesh.ndim): + # compute the minimum cost of resharding on this mesh_dim + if any( + spec.placements[mesh_dim] != non_empty_specs[0].placements[mesh_dim] + for spec in non_empty_specs + ): + # 
only reshard if there is a mismatch + need_reshard = True + reshard_cost = [] + for shard_dim in range(ndim): + # compute the cost of resharding on this shard_dim + cost: float = 0.0 + for spec in non_empty_specs: + global_shape = spec.shape + if global_shape[shard_dim] < mesh.size(mesh_dim): + # found one tensor where the shard_dim is smaller than + # mesh_dim. In this case, we cannot shard on this shard_dim, + # and hence set cost to infinity. + cost = +float("inf") + elif ( + is_tensor_dim_sharded(spec, dim=shard_dim) + or prod(global_shape) == 0 + ): + continue + else: + local_shape = compute_local_shape( + global_shape, spec.mesh, spec.placements + ) + cost += prod(local_shape) * spec.mesh.size(mesh_dim) + reshard_cost.append(cost) + best_dim = reshard_cost.index(min(reshard_cost)) + new_placements.append(Shard(best_dim)) + else: + # no mismatch, keep the original placement + new_placements.append(non_empty_specs[0].placements[mesh_dim]) + + if need_reshard: + tensor_list_specs_after = [] + for spec in tensor_list_specs: + if is_empty(spec): + tensor_list_specs_after.append(spec) + else: + tensor_list_specs_after.append( + DTensorSpec( + mesh=spec.mesh, + placements=tuple(new_placements), + tensor_meta=spec.tensor_meta, + ) + ) + + return OutputSharding( + output_spec=None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=( + tuple(tensor_list_specs_after), + *op_schema.args_schema[1:], + ), + kwargs_schema=op_schema.kwargs_schema, + ), + ], + ) + else: + # at this point, the cat dim is not sharded, + return OutputSharding( + output_spec=DTensorSpec( + mesh=non_empty_specs[0].mesh, + placements=non_empty_specs[0].placements, + ), + ) + + +@register_prop_rule( + [ + aten.split.Tensor, + aten.split_with_sizes.default, + aten.split_with_sizes_copy.default, + ], + schema_info=RuntimeSchemaInfo(1), +) +def split_rule(op_schema: OpSchema) -> OutputSharding: + output_spec_list: List[DTensorSpec] = [] + input_spec = cast(DTensorSpec, op_schema.args_schema[0]) + ndim = input_spec.ndim + split_size_or_sections = op_schema.args_schema[1] + dim = cast(int, op_schema.args_schema[2]) if len(op_schema.args_schema) > 2 else 0 + dim = normalize_dim(dim, ndim) + + # TODO: tensor to split cannot have _Partial + # in its placements for now. Will need to + # support in future. + if input_spec.sums: + raise NotImplementedError( + f"splitting distributed tensor with " + f"_Partial placement is not implemented!\n" + f"DTensorSpec={input_spec}" + ) + + # TODO: just like slice op, split replicates before + # splitting on a sharded dimension + need_reshard = False + if is_tensor_dim_sharded(input_spec, dim=dim): + need_reshard = True + input_spec = DTensorSpec( + mesh=input_spec.mesh, + placements=unshard_tensor_dim(input_spec.placements, dim=dim), + tensor_meta=input_spec.tensor_meta, + ) + + if need_reshard: + return OutputSharding( + None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=(input_spec,) + op_schema.args_schema[1:], + kwargs_schema=op_schema.kwargs_schema, + ), + ], + ) + + def size_split(N, i): + # Last chunk will be smaller if the tensor size N + # along the given dimension dim is not divisible by i. 
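+ # e.g. size_split(10, 3) == [3, 3, 3, 1]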
+ assert i > 0 + return [i] * (N // i) + ([N % i] if N % i != 0 else []) + + output_size_list = ( + size_split(input_spec.shape[dim], split_size_or_sections) + if isinstance(split_size_or_sections, int) + else split_size_or_sections + ) + output_spec_list = [ + DTensorSpec( + mesh=input_spec.mesh, + placements=input_spec.placements, + ) + for _ in range(len(output_size_list)) + ] + return OutputSharding(output_spec_list) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/utils.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8fe15e3781e520846a3bcb272b2a08fedbbe6c12 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/utils.py @@ -0,0 +1,226 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +import functools +import operator +from typing import cast, Iterable, List, Sequence, Tuple, Union + +import torch +from torch.distributed._tensor._collective_utils import redistribute_cost +from torch.distributed._tensor.api import DTensor +from torch.distributed._tensor.op_schema import OpStrategy, RuntimeSchemaInfo +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) + + +# convenient wrapper to register sharding propagation rules +# pyre-fixme[3]: Return type must be annotated. +# pyre-fixme[2]: Parameter must be annotated. +def register_prop_rule(op, schema_info=None): + # pyre-fixme[53]: Captured variable `func` is not annotated. + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. + def wrapper(impl): + overloads = op if isinstance(op, list) else [op] + for overload in overloads: + DTensor._op_dispatcher.sharding_propagator.register_sharding_prop_rule( + overload, impl, schema_info + ) + return impl + + return wrapper + + +def register_op_strategy(op, schema_info=None): + # pyre-fixme[53]: Captured variable `func` is not annotated. + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. + + # For every ATen op that accepts any args in this list, + # the arg itself can impact the strides (and potentially the sharding strategy) + # of the output tensor. + # thus, we will detect ATen schemas with any of these args and ensure + # that they get specialized here. + arg_names_that_require_specializing_cache_strategy = [ + "memory_format", + ] + + def wrapper(impl): + if isinstance(op, list): + overloads = op + else: + overloads = [op] + + for overload in overloads: + curr_schema_info = None + if schema_info is None: + specialized_args = [ + a.name + for a in overload._schema.arguments + if a.name in arg_names_that_require_specializing_cache_strategy + ] + if any(specialized_args): + curr_schema_info = RuntimeSchemaInfo( + static_kwargkey=specialized_args + ) + else: + curr_schema_info = schema_info + DTensor._op_dispatcher.sharding_propagator.register_op_strategy( + overload, impl, curr_schema_info + ) + return impl + + return wrapper + + +def as_list( + x: Union[List[object], object] + # pyre-fixme[11]: Annotation `immutable_list` is not defined as a type. +) -> Union[List[object], torch.fx.immutable_collections.immutable_list]: # type: ignore[valid-type] + # During tracing, `aten.sum.dim_IntList` uses `immutable_list` for its args, + # which is an object but treated as a list by the tracer. Therefore, keep + # `immutable_list` intact here as well. 
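+ # e.g. as_list(2) == [2], while a plain list or an fx immutable_list is returned unchanged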
+ if type(x) is list or isinstance(x, torch.fx.immutable_collections.immutable_list): + return x + else: + return [x] + + +def normalize_dim(dim: int, ndim: int) -> int: + return dim if dim >= 0 else dim + ndim + + +def normalize_dims(dims: Union[int, Sequence[int]], ndim: int) -> Sequence[int]: + """Normalize a dim or a sequence of dims, so that they are all positive.""" + if isinstance(dims, int): + dims = (normalize_dim(dims, ndim),) + elif isinstance(dims, list): + dims = [normalize_dim(dim, ndim) for dim in dims] + elif isinstance(dims, tuple): + dims = tuple([normalize_dim(dim, ndim) for dim in dims]) + return dims + + +def normalize_to_torch_size(size) -> torch.Size: + """ + Unify variable types of size argument to torch.Size + Acceptable types include: + int, Sequence[int], Tuple[int], Tuple[Sequence[int]], + or torch.Size + """ + if isinstance(size, torch.Size): + return size + + if isinstance(size, int): + torch_size = [size] + elif len(size) == 1 and isinstance(size[0], Sequence): + torch_size = list(size[0]) + else: + torch_size = list(size) + return torch.Size(torch_size) + + +def prod(xs: Iterable[int]) -> int: + return functools.reduce(operator.mul, xs, 1) + + +def is_tensor_shardable(shape: Sequence[int], spec: DTensorSpec) -> bool: + """Check if the shape is shardable according to the spec.""" + # number of shards in each tensor dimension + shards_map = [1] * len(shape) + for i, placement in enumerate(spec.placements): + if placement.is_shard(): + shard_dim = cast(Shard, placement).dim + shards_map[shard_dim] *= spec.mesh.size(i) + + for i, dim_size in enumerate(shape): + # TODO: maybe we should determine is_shardable based on + # whether it's evenly sharded or not + if shards_map[i] > 1 and dim_size < shards_map[i]: + return False + + return True + + +def is_tensor_evenly_shardable(shape: Sequence[int], spec: DTensorSpec) -> bool: + """Check if the shape is evenly shardable according to the spec.""" + # number of shards in each tensor dimension + shards_map = [1] * len(shape) + for i, placement in enumerate(spec.placements): + if placement.is_shard(): + shard_dim = cast(Shard, placement).dim + shards_map[shard_dim] *= spec.mesh.size(i) + + for i, dim_size in enumerate(shape): + if shards_map[i] > 1 and (dim_size % shards_map[i] != 0): + return False + + return True + + +def is_tensor_dim_sharded(spec: DTensorSpec, dim: int) -> bool: + """Return True if tensor dim is sharded.""" + return any(p.is_shard(dim) for p in spec.placements) + + +def is_tensor_partial(spec: DTensorSpec) -> bool: + """Return True if tensor is partial on the mesh.""" + return any(p.is_partial() for p in spec.placements) + + +def infer_broadcast_dims_map( + common_shape: torch.Size, input_shape: torch.Size +) -> List[int]: + # infer the broadcast dims map, where it maps from the common shape dim to the input shape dim + # this is aligned with the broadcast semantics + common_ndim = len(common_shape) + input_ndim = len(input_shape) + broadcast_dims_map = [-1] * common_ndim + for idx in range(-1, -1 - input_ndim, -1): + if input_shape[idx] == common_shape[idx]: + broadcast_dims_map[common_ndim + idx] = input_ndim + idx + return broadcast_dims_map + + +def map_placements_after_broadcast( + placements: Tuple[Placement, ...], + shape: torch.Size, + broadcast_dims_map: List[int], +) -> Tuple[Placement, ...]: + """Map each placement based on the output shape after broadcast.""" + new_placements: List[Placement] = [] + for placement in placements: + if isinstance(placement, (Replicate, _Partial)): + 
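            # Replicate and _Partial placements are not tied to a tensor dim,
            # so broadcasting leaves them unchanged.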
new_placements.append(placement) + else: + assert isinstance(placement, Shard) + shard_dim = normalize_dim(placement.dim, len(shape)) + new_shard_dim = broadcast_dims_map[shard_dim] + if new_shard_dim != -1: + # there's a map from the common shape shard dim to + # the input shape shard dim before broadcasting, + # use that instead + new_placements.append(Shard(new_shard_dim)) + else: + # there's no map between common shape shard dim and + # the input shape shard dim before broadcasting, + # in this case it means implicit broadcasting happen + # in this dim, so we can just mark it as replicate + # and implict broadcast will broadcast automatically + # to the sharded shape + new_placements.append(Replicate()) + + return tuple(new_placements) + + +def generate_redistribute_costs( + src_strategy: OpStrategy, dst_spec: DTensorSpec +) -> List[float]: + redistribute_costs: List[float] = [] + for strat in src_strategy.strategies: + redistribute_costs.append(redistribute_cost(strat.output_spec, dst_spec)) + + return redistribute_costs diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/view_ops.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/view_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..25fb92f6fcebfbed101150875f2cfad6e85c7079 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/ops/view_ops.py @@ -0,0 +1,717 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from dataclasses import dataclass +from typing import Callable, cast, Dict, Iterable, Optional, Sequence, Set, Tuple, Union + +import torch + +from torch import Tensor +from torch._subclasses.fake_tensor import unset_fake_temporarily +from torch.distributed._tensor._utils import compute_local_shape +from torch.distributed._tensor.api import Shard +from torch.distributed._tensor.op_schema import ( + OpSchema, + OutputSharding, + RuntimeSchemaInfo, +) +from torch.distributed._tensor.ops.utils import ( + normalize_dim, + normalize_dims, + prod, + register_prop_rule, +) + +from torch.distributed._tensor.placement_types import DTensorSpec, Placement, Replicate +from torch.fx.experimental.proxy_tensor import disable_proxy_modes_tracing + +aten = torch.ops.aten + +Shape = Tuple[int, ...] + + +@dataclass +class DimSpec: + """Specifies how an output dimension maps to an input dimension.""" + + def inputs(self) -> Iterable["DimSpec"]: + return () + + +# Rules that map each dimension of the output to dimensions of the input tensor +DimMap = Tuple[DimSpec, ...] 
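
# Illustrative DimMaps (a sketch based on the rules defined further below, not
# part of the original file):
#   transpose of a 2-d tensor    -> (InputDim(1), InputDim(0))
#   flatten of a 2-d tensor      -> (Flatten((InputDim(0), InputDim(1))),)
#   unsqueeze(x, 0) on a 1-d x   -> (Singleton(), InputDim(0))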
+ + +@dataclass +class Singleton(DimSpec): + """Output dimension is a singleton.""" + + pass + + +@dataclass +class InputDim(DimSpec): + """Output dimension maps directly to an input dimension.""" + + input_dim: int + + +@dataclass +class Broadcast(DimSpec): + """Output is the broadcast of a singleton input dimension.""" + + dim: DimSpec + dim_size: int + + @classmethod + def new(cls, dim: DimSpec, dim_size: int) -> DimSpec: + return Broadcast(dim, dim_size) + + def inputs(self) -> Iterable[DimSpec]: + return (self.dim,) + + +@dataclass +class NewDim(DimSpec): + """This is a new dimension created by the op.""" + + size: int + + @classmethod + def new(cls, size: int) -> DimSpec: + return Singleton() if size == 1 else NewDim(size) + + +@dataclass +class Repeat(DimSpec): + """Output dimension is the input dimension repeated n-times.""" + + input_dim: DimSpec + times: int + + @classmethod + def new(cls, dim: DimSpec, times: int) -> DimSpec: + if times == 1: + return dim + elif isinstance(dim, Singleton): + # repeating a singleton is the same as broadcasting it + return Broadcast(dim, times) + else: + return Repeat(dim, times) + + def inputs(self) -> Iterable[DimSpec]: + return (self.input_dim,) + + +@dataclass +class Flatten(DimSpec): + """Flatten a set of input dimensions, ensuring right-most adjacent elements remain adjacent in the output.""" + + input_dims: Sequence[DimSpec] + + @classmethod + def new(cls, dims: Sequence[DimSpec]) -> DimSpec: + if len(dims) == 0: + # flattening a scalar leads to a singleton + return Singleton() + elif len(dims) == 1: + # flattening a single dimension is no-op + return dims[0] + else: + return Flatten(dims) + + def inputs(self) -> Iterable[DimSpec]: + return self.input_dims + + +@dataclass +class Split(DimSpec): + """ + This dimension is a member of a decomposition of the input dim. + + Note that input_dim itself could be a Flattened set of input dims. + """ + + input_dim: DimSpec + group_shape: Shape + split_id: int + + @classmethod + def new(cls, dim: DimSpec, group_shape: Tuple[int, ...], idx: int) -> DimSpec: + assert len(group_shape) > 0 + if len(group_shape) == 1: + # not really a group, just return the input dim back + assert idx == 0 + return dim + elif group_shape[idx] == 1: + return Singleton() + else: + # remove singletons from group + # group_mapping = [(new_index, (shape, old_index)) ...] + group_mapping = list( + enumerate((s, i) for i, s in enumerate(group_shape) if s != 1) + ) + new_group_shape = tuple(m[1][0] for m in group_mapping) + new_idx = next(filter(lambda x: x[1][1] == idx, group_mapping))[0] + return Split(dim, new_group_shape, new_idx) + + def inputs(self) -> Iterable[DimSpec]: + return (self.input_dim,) + + +def dim_pad_left(ndim: int, min_dims: int) -> DimMap: + return (Singleton(),) * max(0, min_dims - ndim) + tuple( + InputDim(i) for i in range(ndim) + ) + + +def dim_atleast_3d(ndim: int) -> DimMap: + if ndim == 0: + return (Singleton(), Singleton(), Singleton()) + elif ndim == 1: + return (Singleton(), InputDim(0), Singleton()) + elif ndim == 2: + return (InputDim(0), InputDim(1), Singleton()) + else: + return tuple(InputDim(i) for i in range(ndim)) + + +def expand(input_shape: Shape, shape: Shape) -> DimMap: + """Implement broadcast on multiple dimensions.""" + assert len(shape) >= len(input_shape) + + # 1. create padded input dimensions + padded_input = dim_pad_left(len(input_shape), len(shape)) + # 2. 
check that input shapes are compatible + mapping = [] + for p, desired_s in zip(padded_input, shape): + if isinstance(p, Singleton): + actual_s = 1 + assert desired_s >= 0 + else: + assert isinstance(p, InputDim), f"DimSpec not supported in expand: {p}" + actual_s = input_shape[p.input_dim] + assert actual_s == 1 or desired_s == -1 or desired_s == actual_s + mapping.append( + p + if desired_s in (1, -1) or desired_s == actual_s + else Broadcast.new(p, desired_s) + ) + return tuple(mapping) + + +def normalize_sizes(sizes: Union[Shape, Tuple[Shape]]) -> Shape: + if isinstance(sizes[0], int): + return cast(Shape, sizes) + elif len(sizes) == 1: + return cast(Shape, sizes[0]) # type: ignore[redundant-cast] + else: + raise RuntimeError("Size must be int... or tuple") + + +def dim_flatten(ndim: int) -> DimMap: + if ndim == 0: + return (Singleton(),) + elif ndim == 1: + return (InputDim(0),) + else: + return (Flatten.new(tuple(InputDim(i) for i in range(ndim))),) + + +def dim_movedim( + ndim: int, + input: Union[int, Sequence[int]], + destination: Union[int, Sequence[int]], +) -> DimMap: + input = normalize_dims(input, ndim) + destination = normalize_dims(destination, ndim) + + assert len(input) == len(destination) + input_set = set(input) + assert len(input_set) == len(input), "Found repeated input dims" + assert len(set(destination)) == len(destination), "Found repeated output dims" + assert max(input) < ndim + assert max(destination) < ndim + + dest = [-1] * ndim + for i, d in zip(input, destination): + dest[d] = i + + unused_inputs_iter = iter(i for i in range(ndim) if i not in input_set) + for i in range(ndim): + if dest[i] == -1: + dest[i] = next(unused_inputs_iter) + + return tuple(InputDim(i) for i in dest) + + +def dim_repeat(ndim: int, sizes: Shape) -> DimMap: + sizes = normalize_sizes(sizes) + assert ( + len(sizes) >= ndim + ), f"Number of dimensions of repeat dims {sizes} can not be smaller than number of dimensions of tensor {ndim}." + pad = len(sizes) - ndim + return tuple(Repeat.new(Singleton(), s) for s in sizes[:pad]) + tuple( + Repeat.new(InputDim(i), s) for i, s in enumerate(sizes[pad:]) + ) + + +def infer_size(total_size: int, sizes: Shape) -> Shape: + """ + One dimension input to view may be "-1". + + Infer the size of this dimension given the total_size. + """ + infers = [i for i, s in enumerate(sizes) if s == -1] + size = prod(sizes) + assert len(infers) <= 1, "can only infer one size" + if infers: + size = -size + missing_size = total_size // size + assert ( + total_size % size == 0 + ), f"size inferred for -1 is not integral {sizes} should have {total_size} elements." + return tuple(s if s != -1 else missing_size for s in sizes) + assert size == total_size, f"sizes do not match {total_size} vs {size}" + return sizes + + +def view_groups(from_size: Shape, to_size: Shape) -> DimMap: + """ + Decompose a reshape operation into forwarding, flattening, or splitting dimensions for each output dimension. + + A view or reshape operation can be decomposed into a set of 3 types of smaller operations: + 1) Forward a dimension from input to output + 2) Flatten a set of dimensions into a single dimension + 3) Split one dimension into multiple dimensions + + view_groups identifies these operations and returns, for each output dimension, what + is operation was performed in the input dimension. 
For example: + + view_groups([2, 3, 4], [2, 12]) -> ( + InputDim(0), + Flatten((InputDim(1), InputDim(2))) + ) + + - ouptut dimension 0 maps to input dimension 0 + - output dimension 1 maps to a flattened input dimensions 1 and 2 + + + view_groups([2, 3], [3, 2]) -> ( + Split(Flatten((InputDim(0), InputDim(1))), (3, 2), 0), + Split(Flatten((InputDim(0), InputDim(1))), (3, 2), 1), + ) + + - in the above, input is flattened into a single dimension and then split + into two separate dimensions with different sizes from the input. + """ + from_nelem = prod(from_size) + to_size = infer_size(from_nelem, normalize_sizes(to_size)) + + assert from_nelem == prod(to_size), "Total view shape does not add up" + + from_idx = 0 + to_idx = 0 + from_len = len(from_size) + to_len = len(to_size) + + result_pp = [] + + while from_idx < from_len or to_idx < to_len: + from_group_dim, to_group_shape = [], [] + + if from_idx >= from_len: + f = 1 + else: + f = from_size[from_idx] + from_group_dim.append(from_idx) + from_idx += 1 + + if to_idx >= to_len: + t = 1 + else: + t = to_size[to_idx] + to_group_shape.append(t) + to_idx += 1 + + # if any of the groups is singleton, great, we need to backtrack though + if f == 1 and t != 1: + # produces ([1], []) + to_idx -= 1 + to_group_shape = [] + elif f != 1 and t == 1: + # produces ([], [1]) + from_idx -= 1 + from_group_dim = [] + else: + # produces ([1], [1]), ([2], [2]), ([2,3], [6]) + while f != t: + if f < t: + nf = from_size[from_idx] + from_group_dim.append(from_idx) + from_idx += 1 + f *= nf + else: + nt = to_size[to_idx] + to_group_shape.append(nt) + to_idx += 1 + t *= nt + + if len(to_group_shape) > 0: + flattened = Flatten.new( + tuple(InputDim(fi) for fi in from_group_dim if from_size[fi] > 1) + ) + result_pp += [ + Split.new(flattened, tuple(to_group_shape), i) + for i in range(len(to_group_shape)) + ] + + return tuple(result_pp) + + +def dim_tile(ndim: int, dims: Tuple[int, ...]) -> DimMap: + if len(dims) < ndim: + dims = (1,) * (ndim - len(dims)) + dims + return dim_repeat(ndim, dims) + + +def dim_transpose(ndim: int, dim1: int, dim2: int) -> DimMap: + dim1 = normalize_dim(dim1, ndim) + dim2 = normalize_dim(dim2, ndim) + assert dim1 < ndim + assert dim2 < ndim + dimmap = [InputDim(i) for i in range(ndim)] + swapdim = dimmap[dim1] + dimmap[dim1] = dimmap[dim2] + dimmap[dim2] = swapdim + return tuple(dimmap) + + +def dim_squeeze(shape: Shape, dim: Optional[int] = None) -> DimMap: + # FIXME: this is wrong when dim=None and one of the dimensions + # equals size of the mesh. For example squeeze(DTensor(tensor(4), Shard[0])) could + # end up as squeeze(tensor(1)) if we have 4 devices; this would lead to + # removal of a dimension that is not actually a singleton. + return tuple( + InputDim(i) + for i, s in enumerate(shape) + if s > 1 or (dim is not None and i != normalize_dim(dim, len(shape))) + ) + + +def dim_unsqueeze(ndim: int, dim: int) -> DimMap: + dims = tuple(InputDim(i) for i in range(ndim)) + if dim < 0: + dim += ndim + 1 + return dims[:dim] + (Singleton(),) + dims[dim:] + + +def dim_reduction( + ndim: int, dim_or_dims: Optional[Union[int, Sequence[int]]], keepdim: bool +) -> DimMap: + """ + General fallback for reduction ops where _Partial() does not apply. + + This will cause incoming tensor to be replicated on the reducing dimensions. 
+ """ + if dim_or_dims is None: + dim_or_dims = tuple(range(ndim)) + if isinstance(dim_or_dims, int): + dim_or_dims = (dim_or_dims,) + dim_or_dims = tuple(d if d >= 0 else d + ndim for d in dim_or_dims) + return tuple( + InputDim(i) if i not in dim_or_dims else Singleton() + for i in range(ndim) + if i not in dim_or_dims or keepdim + ) + + +@dataclass +class Op: + dim_map: Callable[..., DimMap] + shape_argnum: Optional[int] = None + + +ops: Dict[Callable[..., torch.Tensor], Op] = { + torch.atleast_1d: Op(dim_map=lambda x: dim_pad_left(x.ndim, 1)), + torch.atleast_2d: Op(dim_map=lambda x: dim_pad_left(x.ndim, 2)), + torch.atleast_3d: Op(dim_map=lambda x: dim_atleast_3d(x.ndim)), + torch.broadcast_to: Op( + dim_map=lambda input, shape: expand(input.shape, shape), shape_argnum=1 + ), + Tensor.expand: Op( + dim_map=lambda self, *sizes: expand(self.shape, normalize_sizes(sizes)), + shape_argnum=1, + ), + torch.flatten: Op(dim_map=lambda tensor: dim_flatten(tensor.ndim)), + torch.movedim: Op( + dim_map=lambda input, source, destination: dim_movedim( + input.ndim, source, destination + ) + ), + torch.permute: Op( + dim_map=lambda input, dims: tuple( + InputDim(i) for i in normalize_dims(dims, input.ndim) + ) + ), + torch.ravel: Op(dim_map=lambda tensor: dim_flatten(tensor.ndim)), + Tensor.repeat: Op(dim_map=lambda self, *sizes: dim_repeat(self.ndim, sizes)), + torch.reshape: Op( + dim_map=lambda input, shape: view_groups(input.shape, shape), + shape_argnum=1, + ), + torch.squeeze: Op(dim_map=lambda input, dim=None: dim_squeeze(input.shape, dim)), + torch.tile: Op(dim_map=lambda input, dims: dim_tile(input.ndim, dims)), + torch.transpose: Op( + dim_map=lambda input, dim0, dim1: dim_transpose(input.ndim, dim0, dim1) + ), + torch.unsqueeze: Op(dim_map=lambda input, dim: dim_unsqueeze(input.ndim, dim)), + Tensor.view: Op( + dim_map=lambda input, *shape: view_groups(input.shape, shape), + shape_argnum=1, + ), +} + + +def propagate_shape_and_sharding( + in_shard: Sequence[Placement], + local_in_shape: Shape, + rule: DimMap, + mesh_sizes: Shape, +) -> Tuple[Shape, Optional[Sequence[Placement]], torch.Tensor]: + """ + Determine output sharding and tensor shape based on given global tensor shape and input sharding. + + Takes as input the global shape of the tensor, and the input sharding, + and produce corresponding output sharding and shape of the output tensor. + + Sharding propagation follows mapped dimensions: + - An output dimension that maps directly to an input dimension is sharded equally + - An output dimension that is a flattened set of input dimensions can only be + sharded if only the leftmost flattened dimension is sharded. + - An output dimension that is a split of the input dimension can only be sharded + if the leftmost split size is divisible by the mesh dimension + """ + assert len(in_shard) == len(mesh_sizes) + sharded_in_dims: Set[int] = {s.dim for s in in_shard if isinstance(s, Shard)} + # for each input dim, for each mesh dim, provides a list of possible shardable dimensions + shardable_dims: torch.Tensor = torch.ones( + (len(local_in_shape), len(mesh_sizes)), dtype=torch.bool + ) + + # in case an input dimension disappears (e.g. 
collapsing, reduction) + # we cannot shard in that dimension (we need a replication fall-back rule) + + seen_input_dims: Set[int] = set() + + def collect_used_inputs(cmd: DimSpec) -> None: + if isinstance(cmd, InputDim): + seen_input_dims.add(cmd.input_dim) + for inp in cmd.inputs(): + collect_used_inputs(inp) + + for cmd in rule: + collect_used_inputs(cmd) + for dim in range(len(local_in_shape)): + shardable_dims[dim, :] = dim in seen_input_dims + + def get_dim_size(cmd: DimSpec) -> Tuple[int, Optional[InputDim]]: + if isinstance(cmd, InputDim): + seen_input_dims.add(cmd.input_dim) + return ( + local_in_shape[cmd.input_dim], + cmd if cmd.input_dim in sharded_in_dims else None, + ) + elif isinstance(cmd, Flatten): + for dim in cmd.input_dims[1:]: + if isinstance(dim, InputDim): + shardable_dims[dim.input_dim, :] = False + dim0 = cmd.input_dims[0] + return ( + prod(get_dim_size(a)[0] for a in cmd.input_dims), + dim0 + if isinstance(dim0, InputDim) and dim0.input_dim in sharded_in_dims + else None, + ) + elif isinstance(cmd, Split): + _, in_dim = get_dim_size(cmd.input_dim) + out_size = cmd.group_shape[cmd.split_id] + if cmd.split_id == 0 and in_dim is not None: + # we need to check that the input dimension is divisible + # by the size of the submesh we're sharding it on + # NOTE: it would be possible to shard the same input dimension + # on more than one mesh dimension. In that case, the dimension + # needs to be divisible by the product of mesh sizes. + # In order to keep the problem more tractable, we will not consider + # double resharding as a suggestion (e.g. [Shard(0), Shard(0) ]) + # but we will allow it if that's the input and it's compatible + + # 1. is this dimension shardable on each individual mesh dim? + for mesh_dim, mesh_dim_size in enumerate(mesh_sizes): + shardable_dims[in_dim.input_dim, mesh_dim] = ( + out_size % mesh_dim_size == 0 + ) + + # 2. here we special case things like [Shard(0), Shard(0)] + submesh_size = 1 + for size, shard in zip(mesh_sizes, in_shard): + if isinstance(shard, Shard) and shard.dim == in_dim: + submesh_size *= size + assert ( + out_size % submesh_size == 0 + ), f"Resulting dimension size {out_size} is not divisible by its mesh dimension {submesh_size}." 
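                # Illustrative note (a sketch, not from the original file): with a
                # mesh dim of size 2, a split whose leftmost group size is 3 is
                # recorded as unshardable on that mesh dim (3 % 2 != 0), while a
                # leftmost group size of 4 is recorded as shardable.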
+ + # we will only shard our first component of the split + return out_size, in_dim if cmd.split_id == 0 else None + elif isinstance(cmd, Singleton): + return 1, None + elif isinstance(cmd, Broadcast): + return cmd.dim_size, None + elif isinstance(cmd, NewDim): + return cmd.size, None + elif isinstance(cmd, Repeat): + size, in_dim = get_dim_size(cmd.input_dim) + if in_dim is not None: + shardable_dims[in_dim.input_dim, :] = False + return size * cmd.times, None + else: + raise RuntimeError(f"cmd not found: {cmd}, in rule: {rule}") + + dim_map = {} + out_shape = [] + for dim, cmd in enumerate(rule): + out_size, in_dim = get_dim_size(cmd) + out_shape.append(out_size) + if in_dim is not None: + dim_map[in_dim.input_dim] = dim + + needs_reshard = any( + isinstance(placement, Shard) and not shardable_dims[placement.dim][mesh_dim] + for mesh_dim, placement in enumerate(in_shard) + ) + + output_placements = ( + None + if needs_reshard + else [Shard(dim_map[s.dim]) if isinstance(s, Shard) else s for s in in_shard] + ) + + return (tuple(out_shape), output_placements, shardable_dims) + + +def register_prop_rule_map( + aten_op_overload: torch._ops.OpOverload, + local_op_name: Callable[..., torch.Tensor], + schema_info: Optional[RuntimeSchemaInfo] = None, +) -> None: + spec: Op = ops[local_op_name] + + @register_prop_rule(aten_op_overload, schema_info=schema_info) + def reshape_prop(op_schema: OpSchema) -> OutputSharding: + rules = spec.dim_map(*op_schema.args_schema, **op_schema.kwargs_schema) + input_dtensor_spec = cast(DTensorSpec, op_schema.args_schema[0]) + mesh = input_dtensor_spec.mesh + + assert isinstance( + input_dtensor_spec, DTensorSpec + ), "Expected first input to be a DTensorSpec" + global_in_shape = input_dtensor_spec.shape + assert global_in_shape is not None, "Shape required." + + with disable_proxy_modes_tracing(), unset_fake_temporarily(): + ( + global_out_shape, + shard_out, + shardable_dims, + ) = propagate_shape_and_sharding( + input_dtensor_spec.placements, + tuple(global_in_shape), + rules, + mesh.shape, + ) + + if shard_out is not None: + # no reshard needed + output_dtensor_spec = DTensorSpec(mesh=mesh, placements=tuple(shard_out)) + + # We only need the local shape to lower the call into the local op + args = op_schema.args_schema + shape_argnum = spec.shape_argnum + if shape_argnum is not None: + # compute the local shape from the global shape, then return + # a resharding even if we don't really reshard, the only reason + # for this type of resharding is to lower the global shape to + # local shape + local_out_shape = compute_local_shape( + list(global_out_shape), mesh, shard_out + ) + + suggested_schema = OpSchema( + op=op_schema.op, + args_schema=args[:shape_argnum] + + (tuple(local_out_shape),) + + args[shape_argnum + 1 :], + kwargs_schema=op_schema.kwargs_schema, + ) + return OutputSharding( + output_spec=output_dtensor_spec, + schema_suggestions=[suggested_schema], + needs_redistribute=True, + ) + + return OutputSharding(output_spec=output_dtensor_spec) + + else: + # TODO: optimize this. we shouldn't simply blindly replicate + # unshardable dims ... 
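            # For instance (an illustrative case, not from the original file):
            # flattening a 2-d DTensor placed as [Shard(1)] cannot keep dim 1
            # sharded (only the leftmost flattened dim may stay sharded), so the
            # suggestion below falls back to [Replicate()] on that mesh dim and
            # relies on redistribution.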
+ # FIXME: this can be wrong for situations where we have + # [Shard(0), Shard(0)] + suggested_placements = [ + p + if not isinstance(p, Shard) or shardable_dims[p.dim][mesh_dim] + else Replicate() + for mesh_dim, p in enumerate(input_dtensor_spec.placements) + ] + return OutputSharding( + output_spec=None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=( + DTensorSpec( + placements=tuple(suggested_placements), + mesh=input_dtensor_spec.mesh, + tensor_meta=input_dtensor_spec.tensor_meta, + ), + ) + + op_schema.args_schema[1:], + kwargs_schema=op_schema.kwargs_schema, + ) + ], + ) + + +register_prop_rule_map(aten.squeeze.default, torch.squeeze) +register_prop_rule_map( + aten.squeeze.dim, torch.squeeze, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map(aten.view.default, Tensor.view, schema_info=RuntimeSchemaInfo(1)) +register_prop_rule_map( + aten.reshape.default, torch.reshape, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map( + aten._unsafe_view.default, Tensor.view, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map( + aten.unsqueeze.default, torch.unsqueeze, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map( + aten.expand.default, Tensor.expand, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map( + aten.permute.default, torch.permute, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map( + aten.repeat.default, Tensor.repeat, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map( + aten.transpose.int, torch.transpose, schema_info=RuntimeSchemaInfo(1) +) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ed1b42cbe1582f3b974ccbb1befc90637ba18e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/nn/__init__.py @@ -0,0 +1,4 @@ +import torch +if torch.distributed.rpc.is_available(): + from .api.remote_module import RemoteModule +from .functional import * # noqa: F403 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a043094bce4789715eb49d9923154df14c9ec5c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/__pycache__/functional.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/nn/__pycache__/functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aed999516cd3d94ca2f3ed58503aaac63fe863f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/nn/__pycache__/functional.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/api/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/nn/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/api/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/nn/api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a66ed0fba6e1f145fa973ce4be1aaf4da70418d4 Binary files /dev/null 
and b/venv/lib/python3.10/site-packages/torch/distributed/nn/api/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/api/__pycache__/remote_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/nn/api/__pycache__/remote_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..147281abccc450813f9daf619d7429690ead9119 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/nn/api/__pycache__/remote_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/api/remote_module.py b/venv/lib/python3.10/site-packages/torch/distributed/nn/api/remote_module.py new file mode 100644 index 0000000000000000000000000000000000000000..16e38b32712d70eab876d82c168b56b7a223914a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/nn/api/remote_module.py @@ -0,0 +1,760 @@ +#!/usr/bin/python3 +import collections +import io +import sys +import types +from typing import ( + Any, + Callable, + Dict, + Iterator, + List, + Mapping, + Optional, + Set, + Tuple, + Type, + TypeVar, + Union, +) + +import torch +import torch.distributed.rpc as rpc +from torch import Tensor, device, dtype, nn +from torch.distributed.nn.jit import instantiator +from torch.distributed import _remote_device +from torch.distributed.rpc.internal import _internal_rpc_pickler +from torch.nn import Module +from torch.nn.parameter import Parameter +from torch.utils.hooks import RemovableHandle + +__all__ = ["RemoteModule"] + +_grad_t = Union[Tuple[Tensor, ...], Tensor] +# See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use +# of `T` to annotate `self`. Many methods of `Module` return `self` and we want those return values to be +# the type of the subclass, not the looser type of `Module`. +T = TypeVar("T", bound="Module") + +_NON_SCRIPTABLE_REMOTE_MODULE_MODULE = ( + instantiator.instantiate_non_scriptable_remote_module_template() +) + +_REMOTE_MODULE_PICKLED_ATTRIBUTES = ( + "on", + "device", + "is_device_map_set", + "is_scriptable", + "generated_methods", + "module_rref", +) + +_SerializedRemoteModule = collections.namedtuple("_SerializedRemoteModule", _REMOTE_MODULE_PICKLED_ATTRIBUTES) # type: ignore[misc] + +# These attributes are mostly from RemoteModule's parent class and are intentionally not pickled. +# A new attribute of RemoteModule should be either in _REMOTE_MODULE_PICKLED_ATTRIBUTES +# or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING. +# Otherwise, it will not be pickled. +_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING = ( + "training", + "_parameters", + "_buffers", + "_non_persistent_buffers_set", + "_backward_hooks", + "_backward_pre_hooks", + "_is_full_backward_hook", + "_forward_hooks", + "_forward_hooks_with_kwargs", + "_forward_hooks_always_called", + "_forward_pre_hooks", + "_forward_pre_hooks_with_kwargs", + "_state_dict_hooks", + "_state_dict_pre_hooks", + "_load_state_dict_pre_hooks", + "_load_state_dict_post_hooks", + "_state_dict_pre_hooks", + "_modules", + # The two attributes below are generated methods, not available at pickling time. + "forward_async", + "forward", +) + + +# RPC handler. 
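# _instantiate_template below runs on the remote worker: it is invoked via
# rpc_async from _RemoteModule.__init__ so that the scripted template exists
# there before the module itself is created and scripted remotely.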
+def _instantiate_template(module_interface_cls, enable_moving_cpu_tensors_to_cuda): + instantiator.instantiate_scriptable_remote_module_template( + module_interface_cls, enable_moving_cpu_tensors_to_cuda + ) + + +def _create_module(module_cls, args, kwargs, device): + module = module_cls(*args, **kwargs) + if not isinstance(module, nn.Module): + raise ValueError( + "Expect `module_cls(*args, **kwargs)` returns an instance of , " + f"but it returns an instance of {type(module)}." + ) + module.to(device) + return module + + +def _create_module_with_interface( + module_cls, args, kwargs, device, module_interface_cls +): + module = _create_module(module_cls, args, kwargs, device) + if module_interface_cls is not None: + module = torch.jit.script(module) + return rpc.RRef(module, module_interface_cls) + + +def _param_rrefs(module_rref, recurse) -> List[rpc.RRef[Parameter]]: + ret: List[rpc.RRef[Parameter]] = [] + for param in module_rref.local_value().parameters(recurse): + ret.append(rpc.RRef(param)) + return ret + + +def _raise_not_supported(name: str) -> None: + raise ValueError(f"Method ``{name}`` not supported for RemoteModule") + + +class _RemoteModule(nn.Module): + + def __new__(cls, *args, **kwargs): + # Use __new__ for logging purposes. + torch._C._log_api_usage_once("torch.distributed.nn.api.remote_module") + return super().__new__(cls) + + def __init__( + self, + remote_device: str, + module_cls: Type[nn.Module], + args: Optional[Tuple] = None, + kwargs: Optional[Dict[str, Any]] = None, + _module_interface_cls: Any = None, + ): + """ + RemoteModule instance can only be created after RPC initialization. + + It creates a user-specified module on a specified remote node. + It behaves like a regular ``nn.Module`` except that the ``forward`` method is + executed on the remote node. + It takes care of autograd recording to ensure the backward pass propagates + gradients back to the corresponding remote module. + It can be shared across processors using `RPC framework `__, + without incurring any overheads of copying the actual module, + which is equivalent to an :class:`~torch.distributed.rpc.RRef` + pointing to the remote module. + + The arguments of ``forward_async`` and ``forward`` are the same as + the ``forward`` method of the module returned by the ``module_cls``. + + Apart from ``forward_async`` and ``forward``, no other methods are supported from nn.Module for now. + + Particularly, to create a hybrid model, typically the local modules should be + created outside of remote modules, rather than as submodules of any remote module (by calling ``add_module``). + Hybrid Example: + >>> class HybridModel(nn.Module): + >>> def __init__(self): + >>> nn.Module.__init__(self) + >>> self.remote_embedding = RemoteModule(...) + >>> self.local_linear = nn.Linear(...) + + For example, if ``module_cls`` returns an instance of ``nn.Linear``, + that has ``forward`` method signature, ``def forward(input: Tensor) -> Tensor:``, + the generated ``RemoteModule`` will have 2 methods in signature of + ``def forward(input: Tensor) -> Tensor:`` and + ``def forward_async(input: Tensor) -> Future[Tensor]:``. + + .. note:: + If the remote module is placed on a cuda device, + any input CPU tensors will be automatically moved to the same cuda device, + and GPU tensors are returned over the wire according to the device map of the remote worker on TensorPipe RPC backend. + + Args: + remote_device (str): Device on the destination worker where we'd like to place this module. 
+ The device can be a local device or a remote device specified by one of the following remote + formats: + + 1. "rank:/" (ex: "rank:0/cuda:0"). + 2. "/" (ex: "trainer0/cuda:0"). + + In addition, the device field can be optional and the default value is "cpu". + module_cls (nn.Module): For example, + >>> class MyModule(nn.Module): + >>> def forward(input): + >>> return input + 1 + >>> + >>> module_cls = MyModule + args (Sequence, optional): args to be passed to ``module_cls``. + kwargs (Dict, optional): kwargs to be passed to ``module_cls``. + _module_interface_cls (type, optional): The TorchScript interface type for the module + to be created. The type object should be decorated by @torch.jit.interface. + If not provided, the generated RemoteModule is not torchscript-able. + Warning, this is an experimental API and susceptible to frequent changes. + + Returns: + A remote module instance which wraps the :class:`~nn.Module` created by the + user-provided ``module_cls``, it has a blocking ``forward`` method and an + asynchronous ``forward_async`` method that returns a future of the ``forward`` call + on the user-provided module on the remote side. + + Example:: + Run the following code in two different processes: + + >>> # xdoctest: +SKIP("distributed") + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> from torch import nn, Tensor + >>> from torch.distributed.nn.api.remote_module import RemoteModule + >>> + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> remote_linear_module = RemoteModule( + >>> "worker1/cpu", nn.Linear, args=(20, 30), + >>> ) + >>> input = torch.randn(128, 20) + >>> ret_fut = remote_linear_module.forward_async(input) + >>> ret = ret_fut.wait() + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + """ + super().__init__() + + enable_moving_cpu_tensors_to_cuda = self._prepare_init(remote_device) + + # Default arguments preparation. + args = args if args is not None else () + kwargs = kwargs if kwargs is not None else {} + + if _module_interface_cls is not None: + # Users reply on this field to know if this generated RemoteModule is TorchScript-able. + self.is_scriptable = True + + # Instantiate template on remote side. + fut = rpc.rpc_async( + self.on, + _instantiate_template, + (_module_interface_cls, enable_moving_cpu_tensors_to_cuda), + ) + + self._init_template( + _module_interface_cls, enable_moving_cpu_tensors_to_cuda + ) + + # Instantiate template on remote side. + fut = rpc.rpc_async( + self.on, + _instantiate_template, + (_module_interface_cls, enable_moving_cpu_tensors_to_cuda), + ) + + # Create the module on the remote side. + fut.wait() # Ensure remote_module_cls is available on remote side. + + # TODO: We need to change this to rpc.remote, and make it async (see the else branch below). + # For that we need to be able to apply _module_interface_cls to the RRef returned by rpc.remote + # See https://github.com/pytorch/pytorch/issues/58098 for more context. + self.module_rref = rpc.rpc_sync( + self.on, + _create_module_with_interface, + (module_cls, args, kwargs, self.device, _module_interface_cls), + ) + else: + self.is_scriptable = False + self.generated_methods = ( + _NON_SCRIPTABLE_REMOTE_MODULE_MODULE._generated_methods + ) + # Create the module on the remote side. 
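            # Unlike the scriptable path above, which blocks in rpc_sync so the
            # TorchScript interface can be applied, rpc.remote returns an RRef
            # immediately and the module is constructed asynchronously on the
            # remote worker.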
+ self.module_rref = rpc.remote( + self.on, + _create_module, + (module_cls, args, kwargs, self.device), + ) + + self._install_generated_methods() + self._check_attribute_picklability() + + def remote_parameters(self, recurse: bool = True) -> List[rpc.RRef[Parameter]]: + """ + Return a list of :class:`~torch.distributed.rpc.RRef` pointing to the remote module's parameters. + + This can typically be used in conjunction + with :class:`~torch.distributed.optim.DistributedOptimizer`. + + Args: + recurse (bool): if True, then returns parameters of the remote + module and all submodules of the remote module. Otherwise, + returns only parameters that are direct members of the + remote module. + + Returns: + A list of :class:`~torch.distributed.rpc.RRef` (``List[RRef[nn.Parameter]]``) + to remote module's parameters. + """ + return rpc.rpc_sync(self.on, _param_rrefs, args=(self.module_rref, recurse)) + + def get_module_rref(self) -> rpc.RRef[nn.Module]: + """Return an :class:`~torch.distributed.rpc.RRef` (``RRef[nn.Module]``) pointing to the remote module.""" + return self.module_rref + + @torch.jit.export + def __getstate__(self): + raise RuntimeError( + "Cannot pickle RemoteModule in python pickler. RemoteModule can only be pickled when using RPC" + ) + + @torch.jit.export + def __setstate__(self, state): + raise RuntimeError( + "Cannot unpickle RemoteModule in python pickler. RemoteModule can only be unpickled when using RPC" + ) + + def register_buffer( + self, name: str, tensor: Optional[Tensor], persistent: bool = True + ) -> None: + _raise_not_supported(self.register_buffer.__name__) + + def register_parameter(self, name: str, param: Optional[Parameter]) -> None: + _raise_not_supported(self.register_parameter.__name__) + + def add_module(self, name: str, module: Optional[Module]) -> None: + _raise_not_supported(self.add_module.__name__) + + def apply(self: T, fn: Callable[[Module], None]) -> T: # type: ignore[return] + _raise_not_supported(self.apply.__name__) + + def cuda(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return] + _raise_not_supported(self.cuda.__name__) + + def ipu(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return] + _raise_not_supported(self.ipu.__name__) + + def xpu(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return] + _raise_not_supported(self.xpu.__name__) + + def cpu(self: T) -> T: # type: ignore[return] + _raise_not_supported(self.cpu.__name__) + + def type(self: T, dst_type: Union[dtype, str]) -> T: # type: ignore[return] + _raise_not_supported(self.type.__name__) + + def float(self: T) -> T: # type: ignore[return] + _raise_not_supported(self.float.__name__) + + def double(self: T) -> T: # type: ignore[return] + _raise_not_supported(self.double.__name__) + + def half(self: T) -> T: # type: ignore[return] + _raise_not_supported(self.half.__name__) + + def bfloat16(self: T) -> T: # type: ignore[return] + _raise_not_supported(self.bfloat16.__name__) + + def to(self, *args, **kwargs) -> T: # type: ignore[misc, return, type-var] + _raise_not_supported(self.to.__name__) + + def register_backward_hook( # type: ignore[return] + self, hook: Callable[[Module, _grad_t, _grad_t], Union[None, _grad_t]] + ) -> RemovableHandle: + _raise_not_supported(self.register_backward_hook.__name__) + + def register_forward_pre_hook( # type: ignore[return] + self, + hook: Union[ + Callable[[T, Tuple[Any, ...]], Optional[Any]], + Callable[[T, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Any, Dict[str, 
Any]]]], + ], + prepend: bool = False, + with_kwargs: bool = False, + ) -> RemovableHandle: + _raise_not_supported(self.register_forward_pre_hook.__name__) + + def register_forward_hook( # type: ignore[return, override] + self, + hook: Union[ + Callable[[T, Tuple[Any, ...], Any], Optional[Any]], + Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]], + ], + prepend: bool = False, + with_kwargs: bool = False, + ) -> RemovableHandle: + _raise_not_supported(self.register_forward_hook.__name__) + + def state_dict(self, *args, **kwargs): + _raise_not_supported(self.state_dict.__name__) + + def load_state_dict( + self, + state_dict: Mapping[str, Any], + strict: bool = True, + assign: bool = False, + ): + _raise_not_supported(self.load_state_dict.__name__) + + def parameters(self, recurse: bool = True) -> Iterator[Parameter]: + raise ValueError( + "Method ``parameters`` not supported for RemoteModule. Please use ``remote_parameters`` instead." + ) + + def named_parameters( # type: ignore[return] + self, + prefix: str = "", + recurse: bool = True, + remove_duplicate: bool = True + ) -> Iterator[Tuple[str, Parameter]]: + _raise_not_supported(self.named_parameters.__name__) + + def buffers(self, recurse: bool = True) -> Iterator[Tensor]: # type: ignore[return] + _raise_not_supported(self.buffers.__name__) + + def named_buffers( # type: ignore[return] + self, + prefix: str = "", + recurse: bool = True, + remove_duplicate: bool = True + ) -> Iterator[Tuple[str, Tensor]]: + _raise_not_supported(self.named_buffers.__name__) + + def children(self) -> Iterator[Module]: # type: ignore[return] + _raise_not_supported(self.children.__name__) + + def named_children(self) -> Iterator[Tuple[str, Module]]: # type: ignore[return] + _raise_not_supported(self.named_children.__name__) + + def modules(self) -> Iterator[Module]: # type: ignore[return] + _raise_not_supported(self.modules.__name__) + + def named_modules( + self, + memo: Optional[Set[Module]] = None, + prefix: str = "", + remove_duplicate: bool = True, + ): + _raise_not_supported(self.named_modules.__name__) + + def train(self: T, mode: bool = True) -> T: + return self.module_rref.rpc_sync().train() # type: ignore[operator, union-attr] + + def eval(self: T) -> T: + return self.module_rref.rpc_sync().eval() # type: ignore[operator, union-attr] + + def requires_grad_(self: T, requires_grad: bool = True) -> T: # type: ignore[return] + _raise_not_supported(self.requires_grad_.__name__) + + def zero_grad(self, set_to_none: bool = True) -> None: + _raise_not_supported(self.zero_grad.__name__) + + def share_memory(self: T) -> T: # type: ignore[return] + _raise_not_supported(self.share_memory.__name__) + + def extra_repr(self) -> str: # type: ignore[return] + _raise_not_supported(self.extra_repr.__name__) + + def _prepare_init(self, remote_device_str: str) -> bool: + """Prepare the initialization and returns whether to enable automatically moving CPU tensors to CUDA devices.""" + # Sanity check. + assert rpc._is_current_rpc_agent_set(), "RemoteModule only works in RPC." + + remote_device = _remote_device(remote_device_str) + self.on = remote_device.worker_name() if remote_device.worker_name() is not None else remote_device.rank() + self.device = str(remote_device.device()) + agent = rpc._get_current_rpc_agent() + # If the device map of the remote worker is set, + # then enable moving any input CPU tensors to the same cuda device. 
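        # (For example, a device map configured on the TensorPipe backend via
        # TensorPipeRpcBackendOptions.set_device_map; when no map is set, CUDA
        # outputs are copied back to CPU before being returned, as noted below.)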
+ self.is_device_map_set = bool( + agent._get_device_map(agent.get_worker_info(self.on)) # type: ignore[arg-type] + ) + # ``enable_moving_cpu_tensors_to_cuda`` is less strict than ``is_device_map_set``: + # If ``enable_moving_cpu_tensors_to_cuda`` is true, but the device map is not set, + # then any CPU tensors can still be moved to a cuda device to run forward, + # but the output must be moved back to CPU before being sent over the wire. + enable_moving_cpu_tensors_to_cuda = torch.device(self.device).type == "cuda" + return enable_moving_cpu_tensors_to_cuda + + def _init_template(self, module_interface_cls, enable_moving_cpu_tensors_to_cuda): + """Instantiate template on local side.""" + generated_module = instantiator.instantiate_scriptable_remote_module_template( + module_interface_cls, enable_moving_cpu_tensors_to_cuda + ) + self.generated_methods = generated_module._generated_methods + + def _check_attribute_picklability(self): + """Check if all the attribute has explicitly defined whether to be pickled (i.e., picklability).""" + for k in self.__dict__.keys(): + if ( + k not in _REMOTE_MODULE_PICKLED_ATTRIBUTES + and k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING + ): + raise AttributeError( + f"Attribute {k} must be either in ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` or " + "``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``." + ) + + def _install_generated_methods(self): + for method in self.generated_methods: + method_name = method.__name__ + method = torch.jit.export(method) + setattr(self, method_name, types.MethodType(method, self)) + + @staticmethod + def init_from_module_rref( + remote_device: str, + module_rref: rpc.RRef[nn.Module], + _module_interface_cls: Any = None, + ): + """ + Besides the constructor, a RemoteModule instance can also be initialized given a module RRef. + + This alternate initialization method can be particularly useful if we want to create multiple + RemoteModule instances that share the same underlying module and reduce memory consumption. + + Moreover, this also provides a workaround for passing script RemoteModule over RPC, + which is not supported. The recommended way is as follows: + + 1. the sender creates a RemoteModule; + 2. the sender sends its ``module_rref`` over RPC; + 3. the receiver calls this method to initialize another RemoteModule using the same ``module_rref``. + + Example:: + Run the following code in two different processes: + + >>> # xdoctest: +SKIP("distributed") + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> from torch import nn, Tensor + >>> from torch.distributed.nn.api.remote_module import RemoteModule + >>> + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> remote_module = RemoteModule( + >>> "worker1/cpu", nn.Linear, args=(20, 30), + >>> ) + >>> + >>> remote_module1 = rpc.rpc_sync( + >>> "worker1/cpu", + >>> RemoteModule.init_from_module_rref, + >>> ("worker1/cpu", remote_module1.get_module_rref()), + >>> ) + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + + Args: + remote_device (str): Device on the destination worker where we'd like to place this module. + The device can be a local device or a remote device specified by one of the following remote + formats: + + 1. "rank:/" (ex: "rank:0/cuda:0"). + 2. "/" (ex: "trainer0/cuda:0"). + + In addition, the device field can be optional and the default value is "cpu". 
+ module_rref (RRef[nn.Module]): The module reference shared by both the caller and + the created remote module. + _module_interface_cls (type, optional): The TorchScript interface type for the module + to be created. The type object should be decorated by @torch.jit.interface. + If not provided, the generated RemoteModule is not torchscript-able. + Warning, this is an experimental API and susceptible to frequent changes. + + Returns: + A remote module instance which wraps the :class:`~nn.Module` created by the + user-provided ``module_rref``, it has a blocking ``forward`` method and an + asynchronous ``forward_async`` method that returns a future of the ``forward`` call + on the user-provided module on the remote side. + """ + # NOTE: if a new attribute is added to this class, also need to add it + # to ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` for pickling/unpickling. + + remote_module = object.__new__(RemoteModule) + + enable_moving_cpu_tensors_to_cuda = remote_module._prepare_init(remote_device) + + if _module_interface_cls is not None: + # Users reply on this field to know if this generated RemoteModule is TorchScript-able. + remote_module.is_scriptable = True + + remote_module._init_template( + _module_interface_cls, enable_moving_cpu_tensors_to_cuda + ) + else: + remote_module.is_scriptable = False + remote_module.generated_methods = ( + _NON_SCRIPTABLE_REMOTE_MODULE_MODULE._generated_methods + ) + remote_module.module_rref = module_rref + + remote_module._install_generated_methods() + remote_module._check_attribute_picklability() + + return remote_module + + +class RemoteModule(_RemoteModule): + """ + A RemoteModule instance can only be created after RPC initialization. + + It creates a user-specified module on a specified remote node. + It behaves like a regular ``nn.Module`` except that the ``forward`` method is + executed on the remote node. + It takes care of autograd recording to ensure the backward pass propagates + gradients back to the corresponding remote module. + + It generates two methods ``forward_async`` and ``forward`` based on the + signature of the ``forward`` method of ``module_cls``. ``forward_async`` + runs asynchronously and returns a Future. The arguments of ``forward_async`` + and ``forward`` are the same as the ``forward`` method of the module + returned by the ``module_cls``. + + For example, if ``module_cls`` returns an instance of ``nn.Linear``, + that has ``forward`` method signature: ``def forward(input: Tensor) -> Tensor:``, + the generated ``RemoteModule`` will have 2 methods with the signatures: + + | ``def forward(input: Tensor) -> Tensor:`` + | ``def forward_async(input: Tensor) -> Future[Tensor]:`` + + Args: + remote_device (str): Device on the destination worker where we'd like to place this module. + The format should be "/", where the device field can be parsed as torch.device type. + E.g., "trainer0/cpu", "trainer0", "ps0/cuda:0". + In addition, the device field can be optional and the default value is "cpu". + module_cls (nn.Module): Class for the module to be created remotely. For example, + + >>> class MyModule(nn.Module): + >>> def forward(input): + >>> return input + 1 + >>> + >>> module_cls = MyModule + + args (Sequence, optional): args to be passed to ``module_cls``. + kwargs (Dict, optional): kwargs to be passed to ``module_cls``. 
+ + Returns: + A remote module instance which wraps the :class:`~nn.Module` created by the + user-provided ``module_cls``, it has a blocking ``forward`` method and an + asynchronous ``forward_async`` method that returns a future of the ``forward`` call + on the user-provided module on the remote side. + + Example:: + Run the following code in two different processes: + + >>> # xdoctest: +SKIP("distributed") + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> from torch import nn, Tensor + >>> from torch.distributed.nn.api.remote_module import RemoteModule + >>> + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> remote_linear_module = RemoteModule( + >>> "worker1/cpu", nn.Linear, args=(20, 30), + >>> ) + >>> input = torch.randn(128, 20) + >>> ret_fut = remote_linear_module.forward_async(input) + >>> ret = ret_fut.wait() + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + + Furthermore, a more practical example that is combined with + `DistributedDataParallel `__ (DDP) + can be found in this `tutorial `__. + """ + + def __init__( + self, + remote_device: str, + module_cls: Type[nn.Module], + args: Optional[Tuple] = None, + kwargs: Optional[Dict[str, Any]] = None, + ): + super().__init__(remote_device, module_cls, args, kwargs) + + +def _remote_module_receiver( + *remote_module_pickled_attrs, +): + """Deserializes a RemoteModule.""" + serialized_remote_module = _SerializedRemoteModule._make( + remote_module_pickled_attrs + ) + m = object.__new__(RemoteModule) + m.__dict__.update(serialized_remote_module._asdict()) + + # Unpickling the attribute `module_rref` must invoke RRef's `_deserialize()` method. + m.module_rref = rpc.PyRRef._deserialize(m.module_rref) + + # Install generated methods when unpickled. + for method in m.generated_methods: + method_name = method.__name__ + method = torch.jit.export(method) + setattr(m, method_name, types.MethodType(method, m)) + + return m + + +def _remote_module_reducer(remote_module): + """Serialize a RemoteModule.""" + pickled_attrs = {} + for k, v in remote_module.__dict__.items(): + # Pickling the attribute `module_rref` must invoke RRef's `_serialize()` method. + if k == "module_rref": + pickled_attrs[k] = v._serialize() + elif k in _REMOTE_MODULE_PICKLED_ATTRIBUTES: + pickled_attrs[k] = v + # Check if unpickled attributes are all in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING. + elif k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING: + print( + f"The new attribute ``{k}`` of RemoteModule is ignored during RPC pickling. " + "To pickle this attribute, please add it to ``_REMOTE_MODULE_PICKLED_ATTRIBUTES``. 
" + "Otherwise, please explicitly add it to ``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.", + file=sys.stderr, + ) + + return ( + _remote_module_receiver, + tuple(pickled_attrs.values()), + ) + + +def _recursive_script_module_receiver( + recursive_script_module_serialized, +): + """Deserializes a RecursiveScriptModule that does not contain a script RemoteModule.""" + f = io.BytesIO(recursive_script_module_serialized) + m = torch.jit.load(f) + return m + + +def _recursive_script_module_reducer(recursive_script_module): + """Serialize a RecursiveScriptModule that does not contain a script RemoteModule, and raises an error otherwise.""" + if hasattr(recursive_script_module._c, "module_rref"): + raise RuntimeError( + "Passing a script RemoteModule over RPC is not supported. Please create a RemoteModule in the sender, " + "send the `module_rref` to the receiver, and create a new instance on the receiver end by passing this `module_rref`." + ) + + f = io.BytesIO() + torch.jit.save(recursive_script_module, f) + return (_recursive_script_module_receiver, (f.getvalue(),)) + + +_internal_rpc_pickler._register_reducer(RemoteModule, _remote_module_reducer) +_internal_rpc_pickler._register_reducer( + torch.jit.RecursiveScriptModule, _recursive_script_module_reducer +) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/functional.py b/venv/lib/python3.10/site-packages/torch/distributed/nn/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..857d090dedbe17c739bbfb99efd963c0a5ea5bd9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/nn/functional.py @@ -0,0 +1,440 @@ +import torch +import torch.distributed as dist +from torch.autograd import Function +# The two imports below are not always available depending on the +# USE_DISTRIBUTED compile flag. Make sure they raise import error +# if we're trying to use them. +from torch.distributed import group, ReduceOp + +def broadcast(tensor, src, group=group.WORLD): + """ + Broadcasts the tensor to the whole group. + + ``tensor`` must have the same number of elements in all processes + participating in the collective. + + Arguments: + tensor (Tensor): Data to be sent if ``src`` is the rank of current + process. + src (int): Source rank. + group (ProcessGroup, optional): The process group to work on. + + Returns: + Tensor: Received tensor from the broadcast op. + + """ + return _Broadcast.apply(src, group, tensor) + + +def gather(tensor, dst=0, group=group.WORLD): + """ + Gathers a list of tensors in a single process. + + Arguments: + tensor (Tensor): Input tensor. + dst (int, optional): Destination rank (default is 0). + group (ProcessGroup, optional): The process group to work on. + + Returns: + tuple[Tensor]: List of appropriately-sized tensors with the gathered data. + """ + return _Gather.apply(dst, group, tensor) + + +def scatter(tensors, src=0, group=group.WORLD): + """ + Scatters a list of tensors to all processes in a group. + + Each process will receive exactly one tensor and store its data in the + ``tensor`` argument. + + Arguments: + tensors (list[Tensor]): List of tensors to scatter on the source rank. + Receivers must pass ``None`. + src (int, optional): Source rank (default is 0). + group (ProcessGroup, optional): The process group to work on. + + Returns: + Tensor: Output tensor from the scatter operation. + + """ + return _Scatter.apply(src, group, *tensors) + + +def reduce(tensor, dst, op=ReduceOp.SUM, group=group.WORLD): + """ + Reduces the tensor data across all machines. 
+ + +def reduce(tensor, dst, op=ReduceOp.SUM, group=group.WORLD): + """ + Reduces the tensor data across all machines. + + Only the process with rank ``dst`` is going to receive the final result. + + Arguments: + tensor (Tensor): Input of the collective. + dst (int): Destination rank. + op (optional): One of the values from + ``torch.distributed.ReduceOp`` + enum. Specifies an operation used for element-wise reductions. + group (ProcessGroup, optional): The process group to work on. + + Returns: + Tensor: Output of the collective. + + """ + return _Reduce.apply(dst, op, group, tensor) + + +def reduce_scatter(output, input_list, op=ReduceOp.SUM, group=group.WORLD): + """ + Reduces, then scatters a list of tensors to all processes in a group. + + Arguments: + output (Tensor): Output tensor. + input_list (list[Tensor]): List of tensors to reduce and scatter. + op (optional): One of the values from + ``torch.distributed.ReduceOp`` + enum. Specifies an operation used for element-wise reductions. + group (ProcessGroup, optional): The process group to work on. + + Returns: + Tensor: Output of the collective. + + """ + return _Reduce_Scatter.apply(op, group, output, *input_list) + + +def all_gather(tensor, group=group.WORLD): + """ + Gathers tensors from the whole group in a list. + + Arguments: + tensor (Tensor): Tensor to be broadcast from current process. + group (ProcessGroup, optional): The process group to work on. + + Returns: + tuple([Tensor]): Output of the collective. + + """ + return _AllGather.apply(group, tensor) + +def _all_gather_base(output_tensor, input_tensor, group=group.WORLD): + """ + Single tensor all gather. Gathers a single tensor from all ranks, and puts them in a single output tensor. + + Args: + output_tensor (Tensor): Output tensor. It should contain + correctly-sized tensors to be used for output of the collective. + input_tensor (Tensor): Tensor to be broadcast from current process. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + + Examples: + >>> # All tensors below are of torch.int64 dtype. + >>> # We have 2 process groups, 2 ranks. + >>> # xdoctest: +SKIP("incorrect want text") + >>> output_tensor = torch.zeros(2, dtype=torch.int64) + >>> output_tensor + [tensor([0, 0])] # Rank 0 and 1 + >>> tensor = torch.arange(1, dtype=torch.int64) + 1 + rank + >>> tensor + tensor([1]) # Rank 0 + tensor([2]) # Rank 1 + >>> dist.all_gather_base(output_tensor, tensor) + >>> output_tensor + tensor([1,2]) # Rank 0 + tensor([1,2]) # Rank 1 + + .. warning:: + `_all_gather_base` is experimental and subject to change. + It is the caller's responsibility to ensure the output_tensor + is correctly sized. + + """ + return _AllGatherBase.apply(output_tensor, input_tensor, group) + + +def all_to_all(output_tensor_list, input_tensor_list, group=group.WORLD): + """ + Each process scatters a list of input tensors to all processes in the group and returns a gathered list of tensors. + + Arguments: + output_tensor_list (list[Tensor]): List of tensors to gather, one per rank. + input_tensor_list (list[Tensor]): List of tensors to scatter, one per rank. + group (ProcessGroup, optional): The process group to work on. + + Returns: + tuple([Tensor]): Output of the collective. + + """ + return _AlltoAll.apply(group, output_tensor_list, *input_tensor_list)
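# --- Editorial usage sketch, not part of the upstream diff: all_gather above returns a
# differentiable tuple, so a loss computed from the gathered tensors back-propagates to the
# rank that produced each slice. Same assumptions as the sketch above (initialized process
# group, executed on every rank); ``_demo_all_gather_grad`` is a hypothetical name.
def _demo_all_gather_grad():
    rank = dist.get_rank()
    x = torch.full((2,), float(rank), requires_grad=True)
    gathered = all_gather(x)            # tuple of world_size tensors on every rank
    loss = torch.stack(gathered).sum()  # every rank's loss depends on every rank's x
    loss.backward()                     # gradients are summed across ranks on the way back
    # Each rank's loss contributes ones, so x.grad == dist.get_world_size() * ones.
    return x.grad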
+ + +def all_to_all_single( + output, + input, + output_split_sizes=None, + input_split_sizes=None, + group=group.WORLD, +): + """ + Each process splits the input tensor and then scatters the split list to all processes in the group. + + The received tensors from all processes in the group are then concatenated and returned as a single output tensor. + + Arguments: + output (Tensor): Gathered concatenated output tensor. + input (Tensor): Input tensor to scatter. + output_split_sizes (list[Int], optional): Output split sizes for dim 0. + If specified as None or empty, dim 0 of ``output`` tensor must divide + equally by ``world_size``. + input_split_sizes (list[Int], optional): Input split sizes for dim 0. + If specified as None or empty, dim 0 of ``input`` tensor must divide + equally by ``world_size``. + group (ProcessGroup, optional): The process group to work on. + + Returns: + Tensor: Output of the collective. + + """ + return _AlltoAllSingle.apply( + group, output, output_split_sizes, input_split_sizes, input + ) + + +def all_reduce(tensor, op=ReduceOp.SUM, group=group.WORLD): + """ + Reduces the tensor data across all machines in such a way that all get the final result. + + After the call the returned tensor is going to be bitwise + identical in all processes. + + Arguments: + tensor (Tensor): Input of the collective. + op (optional): One of the values from + ``torch.distributed.ReduceOp`` + enum. Specifies an operation used for element-wise reductions. + group (ProcessGroup, optional): The process group to work on. + + Returns: + Tensor: Output of the collective + + """ + return _AllReduce.apply(op, group, tensor) + + +class _Broadcast(Function): + @staticmethod + def forward(ctx, src, group, tensor): + ctx.src = src + ctx.group = group + ctx.rank = dist.get_rank(group=group) + # torch.distributed makes all the calls in place + # we allocate new tensors to avoid this + tensor = tensor.clone() + dist.broadcast(tensor, src, group=group) + return tensor + + @staticmethod + def backward(ctx, grad_output): + gx = _Reduce.apply(ctx.src, ReduceOp.SUM, ctx.group, grad_output) + if ctx.src != ctx.rank: + gx.zero_() + return (None, None, gx) + + +class _Gather(Function): + @staticmethod + def forward(ctx, dst, group, tensor): + ctx.dst = dst + ctx.group = group + # Create a list of correctly-sized tensors to receive the gathered data; + # the length of the list comes from the group size. + tensor_list = [ + torch.zeros_like(tensor) for i in range(dist.get_world_size(group=group)) + ] + + tensor = tensor.contiguous() + if dist.get_rank(group=group) == dst: + dist.gather(tensor, tensor_list, dst, group=group) + else: + dist.gather(tensor, None, dst, group=group) + return tuple(tensor_list) + + @staticmethod + def backward(ctx, *grad_outputs): + return (None, None) + (_Scatter.apply(ctx.dst, ctx.group, *grad_outputs),) + + +class _Scatter(Function): + @staticmethod + def forward(ctx, src, group, *tensors): + ctx.src = src + ctx.group = group + assert all(t.size() == tensors[0].size() for t in tensors) + output = torch.zeros_like(tensors[0]) + if dist.get_rank(group=group) == src: + dist.scatter(output, list(tensors), src, group=group) + else: + dist.scatter(output, None, src, group=group) + return output + + @staticmethod + def backward(ctx, grad_output): + return (None, None) + _Gather.apply(ctx.src, ctx.group, grad_output) + + +class _Reduce(Function): + @staticmethod + def forward(ctx, src, op, group, tensor): + ctx.src = src + ctx.group = group + tensor = tensor.clone() + dist.reduce(tensor, src, op=op, group=group) + return tensor + + @staticmethod + def backward(ctx, grad_output): + return (None, None, None) + (_Broadcast.apply(ctx.src, ctx.group, grad_output),)
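+ +# Note: each autograd Function in this file implements backward() with the adjoint of its +# forward collective: broadcast pairs with reduce, gather with scatter, and all_gather with +# reduce_scatter, while all_to_all, all_to_all_single and all_reduce reuse themselves to +# route gradients back.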
+ + +class _Reduce_Scatter(Function): + @staticmethod + def forward(ctx, op, group, tensor, *input_tensor_list): + ctx.group = group + # Need contiguous tensors for collectives. + tensor = tensor.contiguous() + input_tensor_list = tuple(t.contiguous() for t in input_tensor_list) + dist.reduce_scatter(tensor, list(input_tensor_list), op=op, group=group) + return tensor + + @staticmethod + def backward(ctx, grad_output): + return (None, None, None) + _AllGather.apply(ctx.group, grad_output) + + +class _AllGather(Function): + @staticmethod + def forward(ctx, group, tensor): + # Need contiguous tensors for collectives. + tensor = tensor.contiguous() + + ctx.group = group + out_tensor_list = [ + torch.empty_like(tensor) for _ in range(dist.get_world_size(group=group)) + ] + + dist.all_gather(out_tensor_list, tensor, group=group) + return tuple(out_tensor_list) + + @staticmethod + def backward(ctx, *grad_outputs): + if dist.get_backend(group=ctx.group) is dist.Backend.NCCL: + rank = dist.get_rank(group=ctx.group) + gx = torch.empty_like(grad_outputs[rank]) + gx = _Reduce_Scatter.apply(ReduceOp.SUM, ctx.group, gx, *grad_outputs) + else: + # As many backends don't support ReduceScatter, we use AlltoAll with .sum() + # to emulate the ReduceScatter behavior + tensor_list = [torch.empty_like(tensor) for tensor in grad_outputs] + gxs = _AlltoAll.apply(ctx.group, tensor_list, *grad_outputs) + gx = torch.sum(torch.stack(gxs), dim=0) + return (None, gx) + +class _AllGatherBase(Function): + @staticmethod + def forward(ctx, output_tensor, input_tensor, group): + ctx.group = group + dist._all_gather_base(output_tensor, input_tensor.contiguous(), group=group) + return output_tensor + + @staticmethod + def backward(ctx, grad_output): + if dist.get_backend(group=ctx.group) is dist.Backend.NCCL: + world_size = dist.get_world_size(group=ctx.group) + out_size = list(grad_output.size()) + if out_size[0] % world_size != 0: + raise RuntimeError( + f'Tensor with dimensions: {out_size} does ' + f'not have first dimension divisible by world_size: {world_size}' + ) + out_size[0] = out_size[0] // dist.get_world_size(group=ctx.group) + gx = torch.empty(out_size, device=grad_output.device, dtype=grad_output.dtype) + dist._reduce_scatter_base(gx, grad_output, ReduceOp.SUM, ctx.group) + else: + raise RuntimeError("Backend not supported!") + return (None, gx, None) + +class _AlltoAll(Function): + @staticmethod + def forward(ctx, group, out_tensor_list, *tensors): + ctx.group = group + ctx.input_tensor_size_list = [ + tensors[i].size() for i in range(dist.get_world_size(group=group)) + ] + my_rank = dist.get_rank(group=group) + tensors = tuple(t.contiguous() for t in tensors) + # Implement it by means of scatter/gather; async send/recv operations have issues + if dist.get_backend(group=group) is dist.Backend.GLOO: + for i in range(dist.get_world_size(group=group)): + to_send = None + if i == my_rank: + to_send = list(tensors) + dist.scatter(out_tensor_list[i], to_send, i, group=group) + else: + dist.all_to_all( + out_tensor_list, + list(tensors), + group=group, + ) + return tuple(out_tensor_list) + + @staticmethod + def backward(ctx, *grad_outputs): + tensor_list = [ + torch.empty(size, device=grad_outputs[0].device, dtype=grad_outputs[0].dtype) + for size in ctx.input_tensor_size_list + ] + return (None, None) + _AlltoAll.apply(ctx.group, tensor_list, *grad_outputs) + + +class _AlltoAllSingle(Function): + @staticmethod + def forward(ctx, group, output, output_split_sizes, input_split_sizes, input): + ctx.group = group + ctx.input_size = input.size() + ctx.output_split_sizes = input_split_sizes
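+ # Deliberately swapped: backward() runs the exchange in the opposite direction, so the + # forward input split sizes become the backward output split sizes (and vice versa). +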
ctx.input_split_sizes = output_split_sizes + dist.all_to_all_single( + output, + input, + output_split_sizes=output_split_sizes, + input_split_sizes=input_split_sizes, + group=group, + ) + return output + + @staticmethod + def backward(ctx, grad_output): + tensor = torch.empty(ctx.input_size, device=grad_output.device, dtype=grad_output.dtype) + return (None, None, None, None) + ( + _AlltoAllSingle.apply( + ctx.group, + tensor, + ctx.output_split_sizes, + ctx.input_split_sizes, + grad_output.contiguous(), + ), + ) + + +class _AllReduce(Function): + @staticmethod + def forward(ctx, op, group, tensor): + ctx.group = group + ctx.op = op + tensor = tensor.clone() + dist.all_reduce(tensor, op=op, group=group) + return tensor + + @staticmethod + def backward(ctx, grad_output): + return (None, None) + (_AllReduce.apply(ctx.op, ctx.group, grad_output),) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d60296d76244092d6e7dccda285a5b54db42f698 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/__pycache__/instantiator.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/__pycache__/instantiator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c7862c9dd4ac0ebf07ce855576d0c70e6465dbd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/__pycache__/instantiator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/instantiator.py b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/instantiator.py new file mode 100644 index 0000000000000000000000000000000000000000..24f53c4f1a607c152fa670ff3591874bf5a896fa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/instantiator.py @@ -0,0 +1,153 @@ +#!/usr/bin/python3 +import importlib +import logging +import os +import sys +import tempfile +from typing import Optional + +import torch +from torch.distributed.nn.jit.templates.remote_module_template import ( + get_remote_module_template, +) + + +logger = logging.getLogger(__name__) + + +_FILE_PREFIX = "_remote_module_" +_TEMP_DIR = tempfile.TemporaryDirectory() +INSTANTIATED_TEMPLATE_DIR_PATH = _TEMP_DIR.name +logger.info("Created a temporary directory at %s", INSTANTIATED_TEMPLATE_DIR_PATH) +sys.path.append(INSTANTIATED_TEMPLATE_DIR_PATH) + + +def get_arg_return_types_from_interface(module_interface): + assert getattr( + module_interface, "__torch_script_interface__", False + ), "Expect a TorchScript class interface decorated by @torch.jit.interface." 
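+ # Look up the compiled interface and read the schema of its ``forward`` method, so that the + # generated template code can reproduce its exact argument and return annotations.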
+ qualified_name = torch._jit_internal._qualified_name(module_interface) + cu = torch.jit._state._python_cu + module_interface_c = cu.get_interface(qualified_name) + assert ( + "forward" in module_interface_c.getMethodNames() + ), f"Expect forward in interface methods, while it has {module_interface_c.getMethodNames()}" + method_schema = module_interface_c.getMethod("forward") + + arg_str_list = [] + arg_type_str_list = [] + assert method_schema is not None + for argument in method_schema.arguments: + arg_str_list.append(argument.name) + + if argument.has_default_value(): + default_value_str = f" = {argument.default_value}" + else: + default_value_str = "" + arg_type_str = f"{argument.name}: {argument.type}{default_value_str}" + arg_type_str_list.append(arg_type_str) + + arg_str_list = arg_str_list[1:] # Remove "self". + args_str = ", ".join(arg_str_list) + + arg_type_str_list = arg_type_str_list[1:] # Remove "self". + arg_types_str = ", ".join(arg_type_str_list) + + assert len(method_schema.returns) == 1 + argument = method_schema.returns[0] + return_type_str = str(argument.type) + + return args_str, arg_types_str, return_type_str + + +def _write(out_path, text): + old_text: Optional[str] + try: + with open(out_path) as f: + old_text = f.read() + except OSError: + old_text = None + if old_text != text: + with open(out_path, "w") as f: + logger.info("Writing %s", out_path) + f.write(text) + else: + logger.info("Skipped writing %s", out_path) + + +def _do_instantiate_remote_module_template( + generated_module_name, str_dict, enable_moving_cpu_tensors_to_cuda +): + generated_code_text = get_remote_module_template( + enable_moving_cpu_tensors_to_cuda + ).format(**str_dict) + out_path = os.path.join( + INSTANTIATED_TEMPLATE_DIR_PATH, f"{generated_module_name}.py" + ) + _write(out_path, generated_code_text) + + # From importlib doc, + # > If you are dynamically importing a module that was created since + # the interpreter began execution (e.g., created a Python source file), + # you may need to call invalidate_caches() in order for the new module + # to be noticed by the import system. + importlib.invalidate_caches() + generated_module = importlib.import_module(f"{generated_module_name}") + return generated_module + + +def instantiate_scriptable_remote_module_template( + module_interface_cls, enable_moving_cpu_tensors_to_cuda=True +): + if not getattr(module_interface_cls, "__torch_script_interface__", False): + raise ValueError( + f"module_interface_cls {module_interface_cls} must be a type object decorated by " + "@torch.jit.interface" + ) + + # Generate the template instance name. + module_interface_cls_name = torch._jit_internal._qualified_name( + module_interface_cls + ).replace(".", "_") + generated_module_name = f"{_FILE_PREFIX}{module_interface_cls_name}" + + # Generate type annotation strs. 
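+ # Each string generated below fills one placeholder of the remote module template: the import + # alias for the interface class, the argument/return annotations parsed from ``forward``, and + # the ``@torch.jit.script`` decorator.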
+ assign_module_interface_cls_str = ( + f"from {module_interface_cls.__module__} import " + f"{module_interface_cls.__name__} as module_interface_cls" + ) + args_str, arg_types_str, return_type_str = get_arg_return_types_from_interface( + module_interface_cls + ) + kwargs_str = "" + arrow_and_return_type_str = f" -> {return_type_str}" + arrow_and_future_return_type_str = f" -> Future[{return_type_str}]" + + str_dict = dict( + assign_module_interface_cls=assign_module_interface_cls_str, + arg_types=arg_types_str, + arrow_and_return_type=arrow_and_return_type_str, + arrow_and_future_return_type=arrow_and_future_return_type_str, + args=args_str, + kwargs=kwargs_str, + jit_script_decorator="@torch.jit.script", + ) + return _do_instantiate_remote_module_template( + generated_module_name, str_dict, enable_moving_cpu_tensors_to_cuda + ) + + +def instantiate_non_scriptable_remote_module_template(): + generated_module_name = f"{_FILE_PREFIX}non_scriptable" + str_dict = dict( + assign_module_interface_cls="module_interface_cls = None", + args="*args", + kwargs="**kwargs", + arg_types="*args, **kwargs", + arrow_and_return_type="", + arrow_and_future_return_type="", + jit_script_decorator="", + ) + # For a non-scriptable template, always enable moving CPU tensors to a cuda device, + # because there is no syntax limitation on the extra handling caused by the script. + return _do_instantiate_remote_module_template(generated_module_name, str_dict, True) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c74de7af35a4a00acf2f96c2686fef7b9ae2e855 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/__pycache__/remote_module_template.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/__pycache__/remote_module_template.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df4194ca3b9438af75e2a58b1c885c24aee00ea4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/__pycache__/remote_module_template.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/remote_module_template.py b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/remote_module_template.py new file mode 100644 index 0000000000000000000000000000000000000000..ac731b434243721e1437a29ba4eed241b03e30ef --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/nn/jit/templates/remote_module_template.py @@ -0,0 +1,107 @@ +#!/usr/bin/python3 + + +def get_remote_module_template(enable_moving_cpu_tensors_to_cuda: bool): + return _TEMPLATE_PREFIX + ( + _REMOTE_FORWARD_TEMPLATE_ENABLE_MOVING_CPU_TENSORS_TO_CUDA + if enable_moving_cpu_tensors_to_cuda + else _REMOTE_FORWARD_TEMPLATE + ) + + +_TEMPLATE_PREFIX = """from typing import * + +import torch +import 
torch.distributed.rpc as rpc +from torch import Tensor +from torch._jit_internal import Future +from torch.distributed.rpc import RRef +from typing import Tuple # pyre-ignore: unused import + + +{assign_module_interface_cls} + + +def forward_async(self, {arg_types}){arrow_and_future_return_type}: + args = (self.module_rref, self.device, self.is_device_map_set, {args}) + kwargs = {{{kwargs}}} + return rpc.rpc_async( + self.module_rref.owner(), + _remote_forward, + args, + kwargs, + ) + + +def forward(self, {arg_types}){arrow_and_return_type}: + args = (self.module_rref, self.device, self.is_device_map_set, {args}) + kwargs = {{{kwargs}}} + ret_fut = rpc.rpc_async( + self.module_rref.owner(), + _remote_forward, + args, + kwargs, + ) + return ret_fut.wait() + + +_generated_methods = [ + forward_async, + forward, +] + + +{jit_script_decorator} +""" + +# This template may cause typing error (the mismatch between ``Tuple[()]`` and ``Tuple[Any]``) +# even if the code is only used for instantiation but not execution. +# Therefore, only include handling moving CPU tensors to a cuda device if necessary. +# TODO: Merge these two templates together in the future once TorchScript syntax is improved. +_REMOTE_FORWARD_TEMPLATE_ENABLE_MOVING_CPU_TENSORS_TO_CUDA = """ +def _remote_forward( + module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, {arg_types}){arrow_and_return_type}: + module = module_rref.local_value() + device = torch.device(device) + + if device.type != "cuda": + return module.forward({args}, {kwargs}) + + # If the module is on a cuda device, + # move any CPU tensor in args or kwargs to the same cuda device. + # Since torch script does not support generator expression, + # have to use concatenation instead of + # ``tuple(i.to(device) if isinstance(i, Tensor) else i for i in *args)``. + args = ({args},) + out_args: Tuple[()] = () + for arg in args: + arg = (arg.to(device),) if isinstance(arg, Tensor) else (arg,) + out_args = out_args + arg + + kwargs = {{{kwargs}}} + for k, v in kwargs.items(): + if isinstance(v, Tensor): + kwargs[k] = kwargs[k].to(device) + + if is_device_map_set: + return module.forward(*out_args, {kwargs}) + + # If the device map is empty, then only CPU tensors are allowed to send over wire, + # so have to move any GPU tensor to CPU in the output. + # Since torch script does not support generator expression, + # have to use concatenation instead of + # ``tuple(i.cpu() if isinstance(i, Tensor) else i for i in module.forward(*out_args, {kwargs}))``. + ret: Tuple[()] = () + for i in module.forward(*out_args, {kwargs}): + i = (i.cpu(),) if isinstance(i, Tensor) else (i,) + ret = ret + i + return ret +""" + +_REMOTE_FORWARD_TEMPLATE = """ +def _remote_forward( + module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, {arg_types}){arrow_and_return_type}: + module = module_rref.local_value() + + return module.forward({args}, {kwargs}) +"""
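A minimal editorial sketch, not part of the diff above, of how this template is consumed: the instantiator formats the template with a ``str_dict`` and writes the result to a temporary module before importing it. The dict below is the non-scriptable configuration copied from ``instantiate_non_scriptable_remote_module_template`` in instantiator.py; printing the formatted string shows the generated ``forward``/``forward_async``/``_remote_forward`` source.

from torch.distributed.nn.jit.templates.remote_module_template import (
    get_remote_module_template,
)

str_dict = dict(
    assign_module_interface_cls="module_interface_cls = None",
    args="*args",
    kwargs="**kwargs",
    arg_types="*args, **kwargs",
    arrow_and_return_type="",
    arrow_and_future_return_type="",
    jit_script_decorator="",
)
# get_remote_module_template(True) selects the variant whose _remote_forward moves
# CPU tensors to the module's CUDA device before calling forward.
print(get_remote_module_template(True).format(**str_dict))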