# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# We're not responsible for pytest decorators
# mypy: disallow_untyped_decorators = False
"""
Collection of some testing utilities for the Fairscale library. Please complement as
you see fit, but refrain from ad-hoc test utils within the different feature sets and
relative imports.
"""
import contextlib
import functools
import gc
import inspect
import logging
import multiprocessing
import os
import random
from statistics import mean
import subprocess
import sys
import tempfile
from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, List, Optional, Tuple, Union
import numpy
import pytest
import torch
from torch import Tensor
import torch.distributed as dist
from torch.distributed import rpc
import torch.multiprocessing as mp
import torch.nn as nn
from fairscale.internal import torch_version
from fairscale.nn.model_parallel import destroy_model_parallel, initialize_model_parallel
from fairscale.nn.model_parallel.random import model_parallel_cuda_manual_seed
if TYPE_CHECKING:
Base = nn.Module[Tensor]
else:
Base = nn.Module
skip_if_cuda = pytest.mark.skipif(torch.cuda.is_available(), reason="Testing only on CPUs to save time")
skip_if_no_cuda = pytest.mark.skipif(
not torch.cuda.is_available() or torch.cuda.device_count() < 1, reason="CUDA required"
)
skip_if_single_gpu = pytest.mark.skipif(
not torch.cuda.is_available() or torch.cuda.device_count() < 2, reason="multiple GPUs required"
)
skip_if_less_than_four_gpu = pytest.mark.skipif(
not torch.cuda.is_available() or torch.cuda.device_count() < 4, reason="4 GPUs or more required"
)
skip_if_py38 = pytest.mark.skipif(
sys.version_info.major == 3 and sys.version_info.minor == 8, reason="Python3.8 is skipped"
)
skip_if_py39_no_cuda = pytest.mark.skipif(
not torch.cuda.is_available() and sys.version_info.major == 3 and sys.version_info.minor == 9,
reason="Python3.9 without CUDA is skipped",
)
skip_due_to_flakyness = pytest.mark.skip(
reason="Flaky test to be fixed or removed",
)
available_devices = ["cpu"]
if torch.cuda.is_available():
available_devices.append("cuda")
filename_mpi: Optional[str] = None
class IdentityLayer(Base):
def __init__(self, size: int, scale: float = 1.0) -> None:
super(IdentityLayer, self).__init__()
self.weight = torch.nn.Parameter(scale * torch.randn(size))
def forward(self, *_: Any, **__: Any) -> Tensor:
return self.weight
def set_random_seed(seed: int, model_parallel: bool = True) -> None:
"""Set random seed for reproducibility."""
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
if model_parallel:
model_parallel_cuda_manual_seed(seed)
def in_circle_ci() -> bool:
return os.path.exists("/home/circleci")
# Global variable to cache the results from the first nvidia-smi execution.
_smi_ver: Optional[str] = None
def torch_cuda_version(compiled: bool = False) -> Tuple[int, ...]:
if compiled:
numbering = torch.version.cuda.split(".")[:2]
else:
global _smi_ver
if _smi_ver is None:
def get_smi_ver() -> str:
"""Get CUDA version from nvidia-smi"""
for line in subprocess.check_output("nvidia-smi".split()).decode("utf-8").split("\n"):
if "CUDA Version" in line:
res = line.split()[8]
assert res.startswith("10.") or res.startswith("11."), res
return res
assert False
_smi_ver = get_smi_ver()
numbering = _smi_ver.split(".")[:2]
return tuple(int(n) for n in numbering)
def make_cudnn_deterministic() -> None:
"""Make cudnn (matmul) deterministic"""
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# TF32 also makes things nondeterministic. Disable it.
torch.backends.cuda.matmul.allow_tf32 = False # type: ignore
torch.backends.cudnn.allow_tf32 = False # type: ignore
def dist_init(rank: int, world_size: int, filename: str, filename_rpc: str = "") -> bool:
"""
Initialize torch distributed, based on a temporary file shared across ranks, which makes it possible for unrelated
tests to be run concurrently.
Return False if there are not enough GPUs present in the system.
.. warning:: This limits the use case to all ranks being on the same node
"""
try:
torch.distributed.rpc.shutdown()
except Exception:
pass
print(f"dist init r={rank}, world={world_size}")
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["RANK"] = str(rank)
url = "file://" + filename
url_rpc = "file://" + filename_rpc
if torch_version() >= (1, 6, 0):
backend = "nccl" if torch.cuda.is_available() else "gloo"
if backend == "nccl" and torch.cuda.device_count() < world_size:
logging.warning("Requested world size cannot be reached on this machine, not enough GPUs")
return False
torch.distributed.init_process_group(backend=backend, rank=rank, world_size=world_size, init_method=url)
tp_options = {"init_method": url_rpc}
# Workaround for bug in torch v1.8.0. Should be fixed in v1.8.1
if torch_version() == (1, 8, 0):
if torch.cuda.is_available():
# Workaround for https://github.com/pytorch/pytorch/issues/53844
tp_options["_transports"] = ["ibv", "uv"] # type: ignore
else:
# Workaround for https://github.com/pytorch/pytorch/issues/54266
tp_options["_channels"] = ["mpt_uv", "basic", "cuda_ipc", "cuda_gdr", "cuda_xth", "cuda_basic"] # type: ignore
rpc.init_rpc(
f"Test{rank}",
rank=rank,
world_size=world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(**tp_options),
)
else:
if world_size > 1:
# TensorPipe is not available in Torch 1.5
rpc.init_rpc(
name=f"Test{rank}",
rank=rank,
world_size=world_size,
rpc_backend_options=rpc.ProcessGroupRpcBackendOptions(init_method=url_rpc),
)
elif torch.cuda.is_available():
torch.distributed.init_process_group(backend="nccl", rank=rank, world_size=world_size, init_method=url)
else:
return False
if torch.cuda.is_available() and torch.cuda.device_count():
torch.cuda.set_device(rank % torch.cuda.device_count())
return True
def get_worker_map() -> Dict[Any, Any]:
return {rank: f"Test{rank}" for rank in range(dist.get_world_size())}
def get_world_sizes() -> List[int]:
limit = torch.cuda.device_count()
return [x for x in [1, 2, 4, 8] if x <= limit]
def test_runner(
rank: int, test_func: Callable, deterministic: bool = False, *args: List[Any], **kwargs: Dict[str, Any]
) -> None:
# At this point we're in a new process, torch options need to be set again
if deterministic:
make_cudnn_deterministic()
torch.manual_seed(1357)
test_func(rank, *args, **kwargs)
def spawn_for_all_world_sizes(
test_func: Callable, world_sizes: List[int] = get_world_sizes(), args: Any = [], deterministic: bool = False
) -> None:
for world_size in world_sizes:
_, filename = tempfile.mkstemp()
_, filename_rpc = tempfile.mkstemp()
try:
# (lefaudeux) Let mp handle the process joining, join=False and handling context has
# been unstable in the past.
mp.spawn(
test_runner,
args=(test_func, deterministic, world_size, filename, filename_rpc, *args),
nprocs=world_size,
join=True,
)
finally:
rmf(filename)
rmf(filename_rpc)
def worker_process(
rank: int, world_size: int, filename: str, filename_rpc: str, func: Callable, args: Any, error_queue: Any
) -> None:
"""Main function for unit tests launched with torch_spawn"""
if not dist_init(rank, world_size, filename, filename_rpc):
logging.warning("failed initializing torch distributed")
teardown()
return
kwargs = {}
if "OMPI_COMM_WORLD_RANK" not in os.environ:
kwargs["pipeline_backend"] = "gloo"
initialize_model_parallel(1, world_size, **kwargs)
# Make sure that CUDA operations are repeatable
context = (
torch.backends.cudnn.flags(benchmark=False, deterministic=True) # type: ignore
if torch.cuda.is_available() and hasattr(torch.backends.cudnn, "flags")
else contextlib.suppress()
)
if torch.cuda.is_available() and not hasattr(torch.backends.cudnn, "flags"):
make_cudnn_deterministic()
try:
with context:
func(*args)
teardown()
except BaseException as e:
logging.warning(f" Rank {rank}: {e}")
# Make sure that the group is properly destroyed, even for tests which check for exceptions being raised
teardown()
# If the function raises 'Skipped', this indicates pytest.skip(), so
# forward it to parent so we can call pytest.skip() there
if e.__class__.__name__ == "Skipped":
error_queue.put(str(e))
return
raise e
def teardown() -> None:
destroy_model_parallel()
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
try:
# torch 1.5 hangs on shutdown if waiting for all processes
torch.distributed.rpc.shutdown(graceful=False)
except Exception:
pass
def torch_spawn(world_sizes: Optional[List[int]] = None) -> Callable:
if world_sizes is None:
world_sizes = get_world_sizes()
def prepare_test(func: Callable) -> Callable:
"""Function called with the test function as the argument. Generates a
replacement which serves as the actual test function."""
name = func.__name__
parameters = inspect.signature(func).parameters
if name.startswith("test"):
raise ValueError(
f"Tests marked with @torch_spawn (i.e. '{name}') should not have names beginning in 'test' as they will"
" be picked up by pytest without running the spawn wrapper"
)
@functools.wraps(func)
def replacement(*args: Any, **kwargs: Any) -> None:
assert args == tuple()
assert world_sizes is not None # mypy crutch
args = tuple(
kwargs[p] for p in parameters if p != "rank"
) # converting named parameters to positional parameters to pass to `spawn`
error_queue = multiprocessing.get_context("spawn").SimpleQueue()
if "OMPI_COMM_WORLD_RANK" in os.environ:
# TODO (Min): this global used to be assigned every time this file is imported.
# I changed it to be assigned on first use. Should be the same, but I am not
# sure this is used or is correct since different processes would have different
# file names to init_process_group below. By initing, here, we don't leave
# a temp file behind on importing time.
global filename_mpi
if filename_mpi is None:
filename_mpi = tempfile.mkstemp()[1]
os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
torch.distributed.init_process_group("mpi", init_method=f"file://{filename_mpi}")
world_size = torch.distributed.get_world_size()
destroy_model_parallel()
initialize_model_parallel(1, world_size)
torch.cuda.set_device(torch.distributed.get_rank() % torch.cuda.device_count())
if world_size in world_sizes:
try:
func(*args)
teardown()
except BaseException as e:
teardown()
import traceback
print(f"{traceback.format_exc()}")
raise e
else:
pytest.skip("Requested world size doesn't match current world size")
else:
spawn_for_all_world_sizes(worker_process, world_sizes, (func, args, error_queue))
if not error_queue.empty():
msg = error_queue.get()
pytest.skip(msg)
# Register a function with the same name, prefixed with "test_" in the
# calling module, so it will be picked up by pytest
current_frame = inspect.currentframe()
assert current_frame is not None
caller_module = inspect.getmodule(current_frame.f_back)
setattr(caller_module, f"test_{name}", replacement)
return func
return prepare_test
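# Editor's note: a minimal usage sketch (not part of the library) of the
# @torch_spawn decorator above. The function names below are hypothetical.
# The decorated function must NOT be named "test_*"; torch_spawn registers a
# "test_"-prefixed wrapper with pytest, and the body runs once per spawned rank
# after dist_init() and initialize_model_parallel() have been called.
def _example_torch_spawn_usage() -> None:
    @torch_spawn([2])
    def two_rank_allreduce() -> None:
        # Inside the spawned workers the default process group is initialized,
        # so collectives can be used directly.
        t = torch.ones(1)
        if torch.cuda.is_available():
            t = t.cuda()
        dist.all_reduce(t)
        assert t.item() == 2.0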
class _Block(Base):
def __init__(self, embed_dim: int, num_heads: int) -> None:
super().__init__()
self.ln_1 = nn.LayerNorm(embed_dim)
self.ln_2 = nn.LayerNorm(embed_dim)
self.attn = nn.MultiheadAttention(embed_dim, num_heads) # type: ignore
self.mlp = nn.Sequential(
nn.Linear(embed_dim, embed_dim * 4),
nn.GELU(),
nn.Linear(embed_dim * 4, embed_dim),
)
def forward(self, *inputs: Any, **kwargs: Any) -> Tensor:
x = inputs[0]
attn_mask = torch.full((len(x), len(x)), -float("Inf"), device=x.device, dtype=x.dtype)
attn_mask = torch.triu(attn_mask, diagonal=1)
x = self.ln_1(x)
a, _ = self.attn(x, x, x, attn_mask=attn_mask, need_weights=False)
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
return x
class GPT2(Base):
"""
GPT2 pytorch implementation, for testing purposes in the image-GPT context
Credits: https://github.com/teddykoker/image-gpt"""
def __init__(
self, embed_dim: int, num_heads: int, num_layers: int, num_positions: int, num_vocab: int, num_classes: int
) -> None:
super().__init__()
self.embed_dim = embed_dim
# start of sequence token
self.sos = torch.nn.Parameter(torch.zeros(embed_dim))
nn.init.normal_(self.sos)
self.token_embeddings = nn.Embedding(num_vocab, embed_dim)
self.position_embeddings = nn.Embedding(num_positions, embed_dim)
self.layers = nn.ModuleList()
for _ in range(num_layers):
self.layers.append(_Block(embed_dim, num_heads))
self.ln_f = nn.LayerNorm(embed_dim)
self.head = nn.Linear(embed_dim, num_vocab, bias=False)
self.clf_head = nn.Linear(embed_dim, num_classes)
def forward(self, x: Tensor, classify: bool = False) -> Any: # type: ignore
"""
Expect input as shape [sequence len, batch]
If classify, return classification logits
"""
length, batch = x.shape
h = self.token_embeddings(x)
# prepend sos token
sos = torch.ones(1, batch, self.embed_dim, device=x.device) * self.sos
h = torch.cat([sos, h[:-1, :, :]], dim=0)
# add positional embeddings
positions = torch.arange(length, device=x.device).unsqueeze(-1)
h = h + self.position_embeddings(positions).expand_as(h)
# transformer
for layer in self.layers:
h = layer(h)
h = self.ln_f(h)
logits = self.head(h)
if not classify:
# return logits
return logits
h = torch.mean(h, dim=0) # average pool over sequence
# return classification logits and generative logits
return self.clf_head(h), logits
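# Editor's note: a small, hypothetical smoke test (not part of the library) of the
# GPT2 model above. Inputs are integer token ids shaped [sequence_len, batch]; the
# generative head returns logits shaped [sequence_len, batch, num_vocab].
def _example_gpt2_forward() -> None:
    model = GPT2(embed_dim=16, num_heads=2, num_layers=2, num_positions=32, num_vocab=50, num_classes=4)
    tokens = torch.randint(0, 50, (8, 3))  # [seq=8, batch=3]
    logits = model(tokens)
    assert logits.shape == (8, 3, 50)
    clf_logits, gen_logits = model(tokens, classify=True)
    assert clf_logits.shape == (3, 4) and gen_logits.shape == (8, 3, 50)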
def objects_are_equal(
a: Any,
b: Any,
raise_exception: bool = False,
dict_key: Optional[str] = None,
rtol: Optional[float] = None,
atol: Optional[float] = None,
) -> bool:
"""
Test that two objects are equal. Tensors are compared to ensure matching
size, dtype, device and values.
"""
if type(a) is not type(b):
if raise_exception:
raise ValueError(f"type mismatch {type(a)} vs. {type(b)}")
return False
if isinstance(a, dict):
if set(a.keys()) != set(b.keys()):
if raise_exception:
raise ValueError(f"keys mismatch {a.keys()} vs. {b.keys()}")
return False
for k in a.keys():
if not objects_are_equal(a[k], b[k], raise_exception, k):
return False
return True
elif isinstance(a, (list, tuple, set)):
if len(a) != len(b):
if raise_exception:
raise ValueError(f"length mismatch {len(a)} vs. {len(b)}")
return False
return all(objects_are_equal(x, y, raise_exception) for x, y in zip(a, b))
elif torch.is_tensor(a):
try:
# assert_close doesn't strictly test shape, dtype and device
shape_dtype_device_match = a.size() == b.size() and a.dtype == b.dtype and a.device == b.device
if not shape_dtype_device_match:
if raise_exception:
msg = f"sizes: {a.size()} vs. {b.size()}, "
msg += f"types: {a.dtype} vs. {b.dtype}, "
msg += f"device: {a.device} vs. {b.device}"
raise AssertionError(msg)
else:
return False
# assert_close.
if torch_version() < (1, 12, 0):
torch.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
else:
torch.testing.assert_close(a, b, rtol=rtol, atol=atol)
return True
except (AssertionError, RuntimeError) as e:
if raise_exception:
if dict_key and isinstance(e, AssertionError):
# Add dict key to the assertion error.
msg = e.args[0]
new_msg = f"For dict key '{dict_key}': {msg}"
raise AssertionError(new_msg) from None
else:
raise e
else:
return False
else:
return a == b
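# Editor's note: a tiny illustrative check (not part of the library) of
# objects_are_equal(). Containers are compared recursively; tensors must also
# match in size, dtype and device, not just in value.
def _example_objects_are_equal() -> None:
    a = {"w": torch.ones(2), "meta": [1, 2]}
    b = {"w": torch.ones(2), "meta": [1, 2]}
    assert objects_are_equal(a, b)
    # Same values but different dtypes -> not equal.
    assert not objects_are_equal({"w": torch.ones(2)}, {"w": torch.ones(2, dtype=torch.float64)})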
def check_same_model_params(model_a: torch.nn.Module, model_b: torch.nn.Module, message: str = "") -> None:
for p_a, p_b in zip(model_a.parameters(), model_b.parameters()):
assert torch.allclose(p_a, p_b, atol=1e-3), f"Model parameters differ\n{p_a} {p_b}\n" + message
for b_a, b_b in zip(model_a.buffers(), model_b.buffers()):
assert torch.allclose(b_a, b_b), f"Model buffers differ {b_a} - {b_b}\n" + message
def check_same_models_across_ranks(
model: torch.nn.Module, process_group: Any, params_should_be_equal: bool, check_broadcast_buffers: bool
) -> None:
world_size = dist.get_world_size(process_group)
rank = dist.get_rank(process_group)
for param in model.parameters():
# collect the params across the rank
receptacle = [param.clone() for _ in range(world_size)]
dist.all_gather(receptacle, param, group=process_group)
if rank == 0:
for sync_p in receptacle[1:]:
assert not params_should_be_equal or torch.all(
torch.eq(receptacle[0], sync_p)
), f"Models differ in between ranks {receptacle[0]} - {sync_p}"
# Check that all the buffers are in sync (authoritative rank is 0, its buffer is 0)
if check_broadcast_buffers:
for buffer in model.buffers():
receptacle = [buffer.clone() for _ in range(world_size)]
dist.all_gather(receptacle, buffer, group=process_group)
if rank == 0:
for sync_b in receptacle[1:]:
assert not params_should_be_equal or torch.all(
torch.eq(receptacle[0], sync_b)
), f"Models differ in between ranks {receptacle[0]} - {sync_b}"
class DeviceAndTypeCheckModule(Base):
"""A simple module for checking Tensor devices and dtypes."""
def __init__(
self,
expected_input_dtype: Optional[torch.dtype] = None,
expected_input_device: Optional[torch.device] = None,
expected_param_dtype: Optional[torch.dtype] = None,
expected_param_device: Optional[torch.device] = None,
expected_loss_dtype: Optional[torch.dtype] = None,
expected_loss_device: Optional[torch.device] = None,
expected_buffer_dtype: Optional[torch.dtype] = None,
):
super().__init__()
self.expected_input_dtype = expected_input_dtype
self.expected_input_device = expected_input_device
self.expected_param_dtype = expected_param_dtype
self.expected_param_device = expected_param_device
self.expected_loss_dtype = expected_loss_dtype
self.expected_loss_device = expected_loss_device
self.expected_buffer_dtype = expected_buffer_dtype
self.linear = nn.Linear(5, 5)
self.register_buffer("buffer", torch.rand((5,)))
def _check(
self,
key: str,
x: Union[torch.device, torch.dtype],
expected: Union[Optional[torch.device], Optional[torch.dtype]],
) -> None:
assert expected in {None, x}, f"{key} ({x}) != expected ({expected})"
def forward(self, *input: Tensor, **kwargs: Any) -> Tensor:
x = input[0]
self._check("input.dtype", x.dtype, self.expected_input_dtype)
self._check("input.device", x.device, self.expected_input_device)
param = self.linear.weight
self._check("param.dtype", param.dtype, self.expected_param_dtype)
self._check("param.device", param.device, self.expected_param_device)
self._check("buffer.dtype", self.buffer.dtype, self.expected_buffer_dtype) # type: ignore
x = x + self.buffer
loss = (self.linear(x) + self.buffer).sum()
self._check("loss.dtype", loss.dtype, self.expected_loss_dtype)
self._check("loss.device", loss.device, self.expected_loss_device)
return loss
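# Editor's note: an illustrative sketch (not part of the library) of how
# DeviceAndTypeCheckModule is typically used: declare the dtypes/devices you
# expect, run a forward/backward pass, and the module asserts on any mismatch.
def _example_device_and_type_check() -> None:
    module = DeviceAndTypeCheckModule(
        expected_input_dtype=torch.float32,
        expected_param_dtype=torch.float32,
        expected_loss_dtype=torch.float32,
    )
    loss = module(torch.rand(2, 5))
    loss.backward()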
@functools.lru_cache()
def get_cycles_per_ms() -> float:
"""Measure and return approximate number of cycles per millisecond for torch.cuda._sleep
Copied from: github.com/pytorch/pytorch/blob/master/test/test_cuda.py
"""
def measure() -> float:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
return cycles_per_ms
# Get 10 values and remove the 2 max and 2 min and return the avg.
# This is to avoid system disturbance that skew the results, e.g.
# the very first cuda call likely does a bunch of init, which takes
# much longer than subsequent calls.
#
# Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs
# and seems to return stable values. Therefore, we enable caching
# using lru_cache decorator above.
num = 10
vals = []
for _ in range(num):
vals.append(measure())
vals = sorted(vals)
return mean(vals[2 : num - 2])
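# Editor's note: an illustrative sketch (not part of the library). The usual
# pattern is to convert a wall-clock delay in milliseconds into a cycle count
# for torch.cuda._sleep, e.g. to keep the current CUDA stream busy for ~10 ms.
def _example_sleep_for_ms(ms: float = 10.0) -> None:
    if torch.cuda.is_available():
        torch.cuda._sleep(int(ms * get_cycles_per_ms()))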
class DummyProcessGroup:
def __init__(self, rank: int, size: int):
self._rank = rank
self._size = size
def rank(self) -> int:
return self._rank
def size(self) -> int:
return self._size
class SGDWithPausingCompute(torch.optim.SGD):
def __init__(self, *args, **kwargs) -> None: # type: ignore
self.rank = kwargs["rank"]
del kwargs["rank"]
super().__init__(*args, **kwargs)
def step(self, closure: Optional[Any] = None) -> Any:
loss = super().step(closure=closure)
# This is used to make sure that OSS and ShardedDDP enforce a proper stream synchronization
# - Add a long cuda wait on a compute stream, non blocking from the CPU perspective
with torch.cuda.stream(torch.cuda.Stream()):
torch.cuda._sleep(100000000)
# - optionally change the params on a per rank basis
with torch.no_grad():
for param_group in self.param_groups:
for param in param_group["params"]:
param *= 1.0 + self.rank / 10.0
return loss
def state_dict_norm(state: Dict[str, torch.Tensor]) -> torch.Tensor:
"""Compute the norm from a state_dict for simple comparison."""
norm = torch.zeros(1)
for v in state.values():
if not v.is_floating_point():
v = v.float()
norm += v.norm()
return norm
def rmf(filename: str) -> None:
"""Remove a file like rm -f."""
try:
os.remove(filename)
except FileNotFoundError:
pass
@contextlib.contextmanager
def in_temporary_directory() -> Generator:
"""
Context manager to create a temporary directory and remove
it at the end of the context
"""
old_cwd = os.getcwd()
with tempfile.TemporaryDirectory() as temp_dir:
os.chdir(temp_dir)
try:
yield temp_dir
finally:
os.chdir(old_cwd)
@contextlib.contextmanager
def temp_files_ctx(num: int) -> Generator:
"""A context to get tempfiles and ensure they are cleaned up."""
files = [tempfile.mkstemp()[1] for _ in range(num)]
try:
yield tuple(files)
finally:
# temp files could have been removed, so we use rmf.
for name in files:
rmf(name)
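# Editor's note: an illustrative sketch (not part of the library) of temp_files_ctx,
# which is the usual way tests obtain the two rendezvous files that dist_init()
# expects (one for the process group, one for RPC).
def _example_temp_files_usage() -> None:
    with temp_files_ctx(num=2) as (file_pg, file_rpc):
        assert os.path.exists(file_pg) and os.path.exists(file_rpc)
        # e.g. dist_init(rank=0, world_size=1, filename=file_pg, filename_rpc=file_rpc)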
def dump_all_tensors(rank: int) -> None:
"""Useful tool for debugging memory issues from the python side."""
if rank != 0:
return
for obj in gc.get_objects():
try:
ttype = str(type(obj))
if torch.is_tensor(obj) or (hasattr(obj, "data") and torch.is_tensor(obj.data)):
print(ttype, obj.shape, obj.dtype, obj.device, obj.storage().size())
except Exception:
pass
print(torch.cuda.memory_summary())
def get_smi_memory() -> float:
"""Return process's GPU memory in MB."""
pid = os.getpid()
info_string = torch.cuda.list_gpu_processes()
for line in info_string.splitlines():
if str(pid) in line:
toks = line.split()
return float(toks[3])
# If the process is not in the list, we are not using the GPU.
return 0.0
def skip_a_test_if_in_CI() -> None:
"""Skip a test in circle CI"""
if os.path.exists("/home/circleci"):
pytest.skip("Sometimes a CI test failure is not reproducible locally, we skip them")
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
""" Golden data used in unit tests. """
adascale_test_data = [
# "input" value is a list of input tensors for micro-batch/rank 0 and micro-batch/rank 1.
{
"input": [[1.0, 0], [0, 1.0]],
"expected_gain": 4.0 / 3,
"expected_grad": [[0.5, 0.5], [0.5, 0.5]],
"expected_bias_grad": [1.0, 1.0],
},
{
"input": [[1.0, 1.0], [1.0, 1.0]],
"expected_gain": 1.0000001249999846,
"expected_grad": [[1.0, 1.0], [1.0, 1.0]],
"expected_bias_grad": [1.0, 1.0],
},
{
"input": [[-1.0, 1.0], [1.0, -1.0]],
"expected_gain": 2.0,
"expected_grad": [[0.0, 0.0], [0.0, 0.0]],
"expected_bias_grad": [1.0, 1.0],
},
{
"input": [[1.0, 4.0], [5.0, 0.5]],
"expected_gain": 1.4688796680497926,
"expected_grad": [[3.0, 2.25], [3.0, 2.25]],
"expected_bias_grad": [1.0, 1.0],
},
{
"input": [[-0.2, 3.0], [5.0, 0.5]],
"expected_gain": 1.8472893901708,
"expected_grad": [[2.4000000953674316, 1.75], [2.4000000953674316, 1.75]],
"expected_bias_grad": [1.0, 1.0],
},
# "inputs" to trigger multiple iteration tests, which make sure the
# smoothing factor calculation is also covered.
{
"inputs": [[[-0.2, 3.3], [5.2, 0.7]], [[1.0, 4.0], [3.1, 0.1]]],
"expected_gain": 1.6720968158031417,
"expected_grad": [[2.049999952316284, 2.049999952316284], [2.049999952316284, 2.049999952316284]],
"expected_bias_grad": [1.0, 1.0],
},
]
corr_mean_test_data = [
{
"inputs": [
[[1.0, 0.0, 2.0], [2.0, 0.0, 1.0]],
[[0.0, 1.0, 2.0], [2.0, 1.0, 0]],
[[3.0, 1.0, 2.0], [2.0, 1.0, -1.0]],
],
"expected_grad": [[1.5, 0.0, 1.5], [1.0, 1.0, 1.0], [2.5, 1.0, 0.5]],
# expected pearson correlation of two micro-batches
"expected_corr": [0.5, -1.0, 0.327327],
"expected_cos_similarity": [float("nan"), 0.8165, 0.8433],
}
]
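# Editor's note: an illustrative, commented-out sketch (not part of this file) of
# how the golden data above is typically consumed by a parametrized unit test;
# the test name and helper functions are hypothetical.
#
# @pytest.mark.parametrize("case", adascale_test_data)
# def test_adascale_gain(case):
#     optim = build_adascale_optimizer()          # hypothetical helper
#     run_micro_batches(optim, case["input"])     # hypothetical helper
#     assert optim.gain() == pytest.approx(case["expected_gain"])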
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch.distributed as dist
from .checkpoint import checkpoint_wrapper
from .data_parallel import FullyShardedDataParallel
if dist.is_available():
# Prevent import failure if dist is not available. #1057
from .data_parallel import ShardedDataParallel
from .moe import MOELayer, Top2Gate
from .pipe import Pipe, PipeRPCWrapper
from .misc import FlattenParamsWrapper
from .wrap import auto_wrap, config_auto_wrap_policy, default_auto_wrap_policy, enable_wrap, wrap
__all__: List[str] = []
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Tongzhou Wang
# Licensed under the MIT License.
from contextlib import contextmanager
from itertools import chain
import typing
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Iterator,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import torch
from torch import Tensor
import torch.nn as nn
from fairscale.internal.state_dict import replace_by_prefix_
if TYPE_CHECKING:
from collections import OrderedDict # noqa: F401
# See no_pre_load_state_dict_hook context manager function in FSDP for more details.
_enable_pre_load_state_dict_hook = True
class FlatParameter(nn.Parameter):
"""A parameter that is initialized from a list of parameters and can be
turned into a list of views as needed.
"""
def __new__(cls, params: Sequence[nn.Parameter], requires_grad: bool = True) -> "FlatParameter":
"""Make an object using the parent's __new__ function."""
# An empty or non-list input doesn't make sense.
if not isinstance(params, (list, tuple)) or len(params) == 0:
raise ValueError("An non-empty list or tuple argument is needed")
# Normally, all items are Parameters. But during pickling, we will have a single
# Tensor as the input and later in __init__, the correct _param_numels and _param_shapes
# are set.
if not all(isinstance(p, (nn.Parameter, Tensor)) for p in params):
raise ValueError("List items need to be Parameter types")
# Flattening involves (1) making a tensor flat (i.e. single dimensional) and (2) making a module
# hierarchy flat (using a single tensor to replace a tree of tensors). Therefore,
# adding back nesting and hierarchy is counter-productive. If nesting is encountered
# in the future, the reasonable thing to do is likely for the top level FlatParameter to
# absorb the nested one and keep the result flat, free from hierarchy.
if any(isinstance(p, FlatParameter) for p in params):
raise ValueError("Nesting FlatParameter is not supported")
data = torch.cat([p.detach().reshape(-1) if isinstance(p, nn.Parameter) else p.reshape(-1) for p in params], 0)
return super(FlatParameter, cls).__new__(cls, data, requires_grad=requires_grad)
def __init__(self, params: Sequence[nn.Parameter], requires_grad: bool = True):
"""Initialize the _param_numels and _param_shapes lists."""
self._param_numels = [p.numel() for p in params]
assert self.numel() <= sum(
self._param_numels
), f"Something wrong with __new__ method, {self.numel()} vs. {sum(self._param_numels)}"
self._param_shapes = [p.size() for p in params]
# These are set by FPW class below, not by this class itself.
self._param_infos: List[Tuple[str, nn.Module, str]] = []
self._shared_param_infos: List[Tuple[str, str, nn.Module, str, nn.Module, str]] = []
def get_param_views(self, external_data: Optional[Tensor] = None) -> Iterator[Tensor]:
"""Return a generator of views that map to the original parameters."""
# Note, self.data could be sharded, so its numel is <= to the sum.
assert self.data.numel() <= sum(
self._param_numels
), f"Incorrect internal state {self.data.numel()} vs. {sum(self._param_numels)}"
data = external_data if external_data is not None else self
if data.numel() != sum(self._param_numels):
raise ValueError(
f"Incorrect numel of supplied data: got {data.numel()} but expected {sum(self._param_numels)}"
)
return (t.view(s) for (t, s) in zip(data.split(self._param_numels), self._param_shapes))
def metadata(self) -> Tuple[List[str], List[torch.Size], List[int]]:
"""Return tuple of (names, shapes, numels) metadata for this flat parameter."""
names = [".".join([m, n]) if m else n for (m, _, n) in self._param_infos]
return names, self._param_shapes, self._param_numels
def __setstate__(self, state: Tuple[Any, Any, Any, Any]) -> None:
"""Use by pickle to set the internal states."""
(self._param_numels, self._param_shapes, self._param_infos, self._shared_param_infos) = state
assert self.numel() <= sum(
self._param_numels
), f"Incorrect pickling {self.numel()} vs. {sum(self._param_numels)}"
def __reduce_ex__(self, proto: int) -> Tuple[Any, Any, Any]:
"""Support pickling between ranks."""
return (
FlatParameter, # Callable
# Args to the callable above
([self.data], self.requires_grad),
# Args to __setstate__
(self._param_numels, self._param_shapes, self._param_infos, self._shared_param_infos),
)
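# Editor's note: a minimal illustrative sketch (not part of the library) of the
# FlatParameter class above: two parameters are flattened into one 1-D parameter
# and get_param_views() recovers correctly-shaped views onto that storage.
def _example_flat_parameter() -> None:
    p1, p2 = nn.Parameter(torch.zeros(2, 3)), nn.Parameter(torch.ones(4))
    flat = FlatParameter([p1, p2])
    assert flat.numel() == 2 * 3 + 4
    v1, v2 = flat.get_param_views()
    assert v1.shape == (2, 3) and v2.shape == (4,)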
# Static types.
ParamGroups = Optional[Union[List[List[nn.Parameter]], List[nn.Parameter]]]
class FlattenParamsWrapper(nn.Module):
"""
A wrapper for transparently flattening a Module's parameters.
Compared to the original implementation [1], this version:
- removes tracing
- supports shared parameters
- handles state_dict/load_state_dict transparently
- is renamed to FlattenParamsWrapper
- refactored to use the FlatParameter class
- extended to support flattening multiple groups of params (useful
when different groups of params need different hyperparameters, like
learning rate or weight decay)
[1] https://github.com/SsnL/PyTorch-Reparam-Module
Args:
module (nn.Module):
The module to wrap.
param_list (Optional[List[List[nn.Parameter]]]):
Only flatten parameters appearing in the given groups.
If the param_list is an empty list, then no parameters will get flattened.
Note, if a single param is in one of the lists, it still gets flattened and the
original param is removed and replaced with the flattened one.
Default: None, flatten all parameters (if any)
flat_param_names (Optional[List[str]]):
Optionally, give each flat_param a unique name. Note a "flat_param_"
prefix will be added to those names.
"""
def __init__(
self,
module: nn.Module,
param_list: ParamGroups = None,
flat_param_names: Optional[List[str]] = None,
):
super().__init__()
self._fpw_module = module
self.is_flattened = False
# Handle param_list being None.
if param_list is None:
param_list = list(module.parameters())
# Be backward compatible and turn a single param list into a list of
# a single list.
if len(param_list) > 0 and isinstance(param_list[0], nn.Parameter):
param_list = [cast(List[nn.Parameter], param_list)]
# Since the parameters will be deleted, let's record the number of original
# parameters managed by this class. This and get_param_views function
# below are used by fsdp_optim_utils.py to save/restore optimizer state,
# which mirrors the flattened parameters here.
self.num_params_managed = 0
self._param_sets = []
overall_param_set: Set[nn.Parameter] = set()
for p_list in param_list:
# Remove any duplicates from the list.
p_set: Set[nn.Parameter] = set(cast(List[nn.Parameter], p_list))
self.num_params_managed += len(p_set)
overall_param_set = overall_param_set.union(p_set)
# Convert from list of Parameters to set of (Module, name) tuples,
# which will survive in case the parameter instances are reset.
# Also, a shared param will correctly appear under multiple modules
# as they should.
new_p_set_with_names = set()
for m in self.modules():
for n, p in m.named_parameters(recurse=False):
if p in p_set:
new_p_set_with_names.add((m, n))
if new_p_set_with_names:
self._param_sets.append(new_p_set_with_names)
if len(overall_param_set) != self.num_params_managed:
# Each p_list above could have shared params. However, you can't
# have shared params across different p_lists. That means part of
# the flattened parameter must be shared, which is impossible to
# support.
raise ValueError(f"Incorrect param groups {len(overall_param_set)} vs {self.num_param_managed}")
self.flat_params: List[nn.Parameter] = []
# Prepare flat param names.
if flat_param_names is None:
flat_param_names = [f"{i}" for i, _ in enumerate(self._param_sets)]
if len(flat_param_names) != len(self._param_sets):
raise ValueError("Names and number of param lists must be equal")
if len(flat_param_names) != len(set(flat_param_names)):
raise ValueError("Each flat param must be given a unique name")
self.flat_param_names = [f"flat_param_{n}" for n in flat_param_names]
flat_param: Optional[nn.Parameter] = None
# Init all flat_params.
for new_p_set in self._param_sets:
params, param_infos, shared_param_infos = self._init_flatten_params(new_p_set)
flat_param = FlatParameter(params, params[0].requires_grad)
flat_param._param_infos = param_infos
flat_param._shared_param_infos = shared_param_infos
self.flat_params.append(flat_param)
self._flatten_params(self.flat_params)
# Register hook to be called after state_dict() to remove the
# "_fpw_module." prefix and before load_state_dict() to add it back.
self._register_state_dict_hook(_post_state_dict_hook)
self._register_load_state_dict_pre_hook(_pre_load_state_dict_hook)
# Flag to indicate whether state_dict() should automatically unflatten
# params. This defaults to True, but may be set to False if the user
# explicitly requests a flat state dict via flat_state_dict().
self._auto_unflatten_state_dict = True
@property
def module(self) -> Any:
"""Support fpw.module in case we are immitating DDP, which has .module
property to the underlying module.
"""
return self._fpw_module
@property
def flat_param(self) -> nn.Parameter:
"""We used to support only a single flat_param. This allows us to
be backward compatible.
"""
assert (
len(self.flat_params) == 1
), f"Incorrect access to flat_param: len(self.flat_params)={len(self.flat_params)}"
return self.flat_params[0]
def _init_flatten_params(
self, p_set: Set[Tuple[nn.Module, str]]
) -> Tuple[
List[nn.Parameter], List[Tuple[str, nn.Module, str]], List[Tuple[str, str, nn.Module, str, nn.Module, str]]
]:
"""Build metadata for need-to-be-flatten parameters and returns a list
contains the need-to-be-flatten parameters.
This also returns param_infos and shared_param_infos, which
will be attached to the flat parameter object.
Args:
p_set (set):
A set of (module, param_name) for a set of params that needed
to be flattened. There could be shared params in this set.
"""
param_infos = []
shared_param_memo: Dict[nn.Parameter, Tuple[str, nn.Module, str]] = {}
shared_param_infos = []
params = []
fp32 = []
fp16 = []
for module_name, m in self.named_modules():
for n, p in m.named_parameters(recurse=False):
if p.dtype != torch.float16:
fp32.append(module_name)
else:
fp16.append(module_name)
if p is not None and (m, n) in p_set:
if p in shared_param_memo:
mname, shared_m, shared_n = shared_param_memo[p]
shared_param_infos.append((module_name, mname, m, n, shared_m, shared_n))
else:
shared_param_memo[p] = (module_name, m, n)
param_infos.append((module_name, m, n))
params.append(p)
del shared_param_memo
fp16_msg, fp32_msg = ",".join(fp16), ",".join(fp32)
assert (
len(set(p.dtype for p in params)) == 1
), f"expects all parameters to have same dtype: fp32: {fp32_msg} \n fp16: {fp16_msg} "
assert (
len(set(p.requires_grad for p in params)) == 1
), f"expects all parameters to have same requires_grad {p_set}"
assert len(params) == len(set(params)), "params list should not have dups"
return params, param_infos, shared_param_infos
@property
def _param_infos(self) -> Iterator[Tuple[str, nn.Module, str]]:
return chain(*[p._param_infos for p in self.flat_params]) # type: ignore
@property
def _shared_param_infos(self) -> Iterator[Tuple[str, str, nn.Module, str, nn.Module, str]]:
return chain(*[p._shared_param_infos for p in self.flat_params]) # type: ignore
def _flatten_params(self, flat_params: List[nn.Parameter]) -> None:
"""Flatten the managed parameters and replaced the original
attributes with views to the flat params.
"""
assert not self.is_flattened
self.is_flattened = True
# register the flatten ones and save it to self.
assert len(self.flat_param_names) == len(flat_params), f"{len(self.flat_param_names)} vs. {len(flat_params)}"
for n, flat_param in zip(self.flat_param_names, flat_params):
self.register_parameter(n, flat_param)
self.flat_params = flat_params
# deregister the names as parameters
for _, m, n in self._param_infos:
delattr(m, n)
for _, _, m, n, _, _ in self._shared_param_infos:
delattr(m, n)
# register the views as plain attributes
self._unflatten_params_as_views()
def _unflatten_params(self, external_data: Optional[List[Optional[Tensor]]] = None) -> None:
"""Undo flattening and create separate parameters from the already flattened
self.flat_params or user-supplied external data.
"""
assert self.is_flattened or external_data is not None
self.is_flattened = False
ps = self.get_param_views(external_data)
for (_, m, n), p in zip(self._param_infos, ps):
if hasattr(m, n):
delattr(m, n)
m.register_parameter(n, nn.Parameter(p))
for (_, _, m, n, shared_m, shared_n) in self._shared_param_infos:
if hasattr(m, n):
delattr(m, n)
m.register_parameter(n, getattr(shared_m, shared_n))
# Delete the param views into the flat params since we will delete the
# flat params next
if hasattr(self._fpw_module, "_unflattened_param_views"):
delattr(self._fpw_module, "_unflattened_param_views")
for n in self.flat_param_names:
# This ensures the flat params are removed from the module.
delattr(self, n)
self.flat_params = []
def _unflatten_params_as_views(self) -> None:
"""Unlike ``_unflatten_params``, this function unflatten into views and keep
self.flat_param unchanged.
"""
assert self.is_flattened
ps = self.get_param_views()
param_views = []
for (_, m, n), p in zip(self._param_infos, ps):
setattr(m, n, p) # This will set as plain attr
param_views.append(p)
# Save param views for easy access if anyone still wants to access
# parameters of the module.
setattr(self._fpw_module, "_unflattened_param_views", param_views)
for (_, _, m, n, shared_m, shared_n) in self._shared_param_infos:
setattr(m, n, getattr(shared_m, shared_n))
@contextmanager
def unflatten_params(self, flat_params: Optional[List[Tensor]] = None) -> Generator:
"""
Unflatten params. If the current instance is already unflattened, then
it will remain unflattened after the context manager exits.
Args:
flat_params (List[Tensor], Optional):
flat params to use for unflattening.
If provided, the current instance must be in a flattened state
at the start of the context manager. The provided Tensor must be
appropriately sized and will only be used within the context
manager. After the context manager exits, we will revert to
using ``self.flat_params``
Default: None.
"""
assert (
flat_params is None or self.is_flattened
), "Unflattening with external flat_param requires current instance to be flattened"
orig_flattened = self.is_flattened
if orig_flattened:
orig_flat_params = self.flat_params
self._unflatten_params(cast(Optional[List[Optional[Tensor]]], flat_params))
# Put yield in a try...finally in case the caller catches the exception and handles
# it. In that case, we need to properly handle the undoing of state here.
try:
yield
finally:
if orig_flattened:
self._flatten_params(orig_flat_params)
def __getattr__(self, name: str) -> Any:
"""Forward missing attributes to wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.module, name) # fallback to wrapped module
def __getitem__(self, key: int) -> Any:
"""Forward indexing calls in case the module is a nn.Sequential."""
return self.module.__getitem__(key)
@typing.overload
def state_dict(
self, destination: Mapping[str, Tensor], prefix: str = ..., keep_vars: bool = ...
) -> Mapping[str, Tensor]:
...
@typing.overload
def state_dict(self, prefix: str = ..., keep_vars: bool = ...) -> "OrderedDict[str, Tensor]":
...
# Since we have overloads above, we can use Any here.
def state_dict(self, *args: Any, **kwargs: Any) -> Any:
"""Return the wrapped module's state_dict."""
if self.is_flattened and self._auto_unflatten_state_dict:
# Returns the original version.
with self.unflatten_params():
return super().state_dict(*args, **kwargs)
else:
# Returns flattened version.
return super().state_dict(*args, **kwargs)
def flat_state_dict(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
"""Return the flattened state_dict."""
assert self.is_flattened
with self._no_auto_unflatten_state_dict():
return self.state_dict(*args, **kwargs)
@contextmanager
def _no_auto_unflatten_state_dict(self) -> Generator:
backup = self._auto_unflatten_state_dict
self._auto_unflatten_state_dict = False
# Put yield in a try...finally in case the caller catches the exception and handles
# it. In that case, we need to properly handle the undoing of state.
try:
yield
finally:
self._auto_unflatten_state_dict = backup
def load_state_dict(
self, state_dict: Union[Dict[str, Tensor], "OrderedDict[str, Tensor]"], strict: bool = True
) -> NamedTuple:
"""
Load a state dict. If necessary, ``unflatten_params`` will be called to
match the input state_dict.
"""
# Unflatten the module automatically if the state_dict is non-flat.
# Note, we check the flat_param_ prefix since custom names can be given and flat_param_0 is
# not always in the state dict's key list.
if (
self.num_params_managed > 0
and self.is_flattened
and not any(k.startswith("flat_param_") for k in state_dict.keys())
):
# This object is flattened but the state_dict is not, so we unflatten and load.
with self.unflatten_params():
return super().load_state_dict(state_dict, strict)
else:
# Otherwise, load it as is but make older state dict compatible.
if "flat_param" in state_dict:
state_dict["flat_param_0"] = state_dict["flat_param"]
del state_dict["flat_param"]
return super().load_state_dict(state_dict, strict)
def forward(self, *inputs: Any, **kwinputs: Any) -> Any:
self._unflatten_params_as_views()
return self.module(*inputs, **kwinputs)
def get_param_views(self, external_data_list: Optional[List[Optional[Tensor]]] = None) -> Iterator[Tensor]:
"""Used to get a generator over all views from a list of external data list."""
params = self.flat_params
if external_data_list is None:
external_data_list = [None] * len(params)
assert len(external_data_list) == len(
params
), f"Incorrect external data list: {len(external_data_list)} vs. {len(params)}"
gens = []
for p, data in zip(params, external_data_list):
gens.append(p.get_param_views(data)) # type: ignore
return chain(*gens)
def metadata(self, flat_param_idx: int) -> Tuple[List[str], Sequence[torch.Size], List[int]]:
"""Return metadata for a flat param given its index in the flat_params list."""
return self.flat_params[flat_param_idx].metadata() # type: ignore
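# Editor's note: a minimal illustrative sketch (not part of the library) of
# FlattenParamsWrapper. The wrapped module's parameters are replaced by a single
# "flat_param_0"; state_dict() still returns the original (unflattened) keys,
# while flat_state_dict() returns the flattened form.
def _example_flatten_params_wrapper() -> None:
    module = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
    fpw = FlattenParamsWrapper(module)
    assert [n for n, _ in fpw.named_parameters()] == ["flat_param_0"]
    assert "0.weight" in fpw.state_dict()  # unflattened keys of the wrapped module
    assert "flat_param_0" in fpw.flat_state_dict()
    out = fpw(torch.rand(3, 4))
    assert out.shape == (3, 2)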
def _post_state_dict_hook(
module: nn.Module, state_dict: "OrderedDict[str, Tensor]", prefix: str, *args: Any
) -> "OrderedDict[str, Tensor]":
# Move everything from ._fpw_module up one level.
replace_by_prefix_(state_dict, prefix + "_fpw_module.", prefix)
return state_dict
def _pre_load_state_dict_hook(
state_dict: Union[Dict[str, Tensor], "OrderedDict[str, Tensor]"], prefix: str, *args: Any
) -> None:
if not _enable_pre_load_state_dict_hook:
return
# Push everything down to ._fpw_module level.
replace_by_prefix_(state_dict, prefix, prefix + "_fpw_module.")
# The flat_param_* keys actually need to move one level up.
flat_param_key = prefix + "_fpw_module.flat_param"
for k in list(state_dict.keys()):
if k.startswith(flat_param_key):
last_part = k.split(".")[-1]
assert last_part.startswith("flat_param_"), last_part
replace_by_prefix_(state_dict, k, prefix + last_part)
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
# TODO(anj-s): Remove this once we have deprecated fairscale.nn.misc.checkpoint_wrapper path
# in favor of fairscale.nn.checkpoint.checkpoint_wrapper.
from fairscale.nn.checkpoint import checkpoint_wrapper
from .flatten_params_wrapper import FlattenParamsWrapper, _enable_pre_load_state_dict_hook
from .param_bucket import GradBucket, ParamBucket
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, List, Optional, Union
import torch
class Bucket:
"""
Helper class to simplify the handling of buckets, which unify the underlying storage of multiple tensors
"""
def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None:
self._params: List[torch.Tensor] = []
self._param_ids: List[int] = []
self._fill = 0
# The actual flat tensor
self.buffer: torch.Tensor = torch.zeros(size, dtype=dtype, device=device)
def to( # type: ignore
self,
device: Optional[Union[int, torch.device]],
dtype: Optional[torch.dtype] = None,
non_blocking: bool = False,
keep_param_alignment: bool = True,
) -> "ParamBucket":
"""
Move the underlying buffer
"""
assert self.buffer is not None, "Cannot move a collapsed bucket, please rebuild it"
self.buffer = self.buffer.to(device, dtype, non_blocking)
class ParamBucket(Bucket):
"""
Helper class to simplify the handling of parameter buckets
"""
def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None:
super().__init__(size, dtype, device)
def to( # type: ignore
self,
device: Optional[Union[int, torch.device]],
dtype: Optional[torch.dtype] = None,
non_blocking: bool = False,
keep_param_alignment: bool = True,
) -> "ParamBucket":
"""
Move the underlying buffer
"""
super().to(device, dtype, non_blocking)
if keep_param_alignment:
self._reattach_params()
@torch.no_grad()
def add_param(self, param: torch.Tensor) -> None:
"""
Add a new parameter to the bucket. param.data becomes a view of this bucket's buffer
"""
assert id(param) not in self._param_ids, "The same param cannot be checked in twice"
self._add_param_as_view(param)
self._params.append(param)
self._param_ids.append(id(param))
@torch.no_grad()
def _add_param_as_view(self, param: torch.Tensor, keep_existing_value: bool = True) -> None:
assert self.buffer is not None
assert (
param.dtype == self.buffer.dtype
), f"Different types for the bucket and the param, cannot proceed: {param.dtype} - {self.buffer.dtype}"
assert (
param.device == self.buffer.device
), f"Different devices for the bucket and the param, cannot proceed: {param.device} - {self.buffer.device}"
fill_next = self._fill + param.numel()
assert fill_next <= self.buffer.numel()
# Copy the current param value
if keep_existing_value:
self.buffer[self._fill : fill_next].copy_(param.data.flatten())
param.data = self.buffer[self._fill : fill_next].view_as(param.data)
self._fill = fill_next
@torch.no_grad()
def _reattach_params(self) -> None:
"""
Given the parameters which have been registered previously, rebuild the whole bucket
"""
assert len(self._params) > 0
self._fill = 0
for p in self._params:
if p.dtype != self.buffer.dtype:
p.data = p.data.to(self.buffer.dtype)
self._add_param_as_view(p, keep_existing_value=False)
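# Editor's note: a minimal illustrative sketch (not part of the library) of
# ParamBucket: after add_param(), each parameter's .data becomes a view into the
# bucket's flat buffer, so the storage of all added params is unified.
def _example_param_bucket() -> None:
    p1, p2 = torch.rand(3), torch.rand(2)
    bucket = ParamBucket(size=5, dtype=p1.dtype, device=p1.device)
    bucket.add_param(p1)
    bucket.add_param(p2)
    # p1 now starts at offset 0 of the bucket's buffer.
    assert p1.data.data_ptr() == bucket.buffer.data_ptr()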
class GradBucket(Bucket):
"""
Helper class to simplify the handling of gradient buckets
"""
def __init__(self, size: int, dtype: torch.dtype, device: torch.device, destination: int) -> None:
super().__init__(size, dtype, device)
self._max_size = size
self._is_collapsed = False
self.params_checked_in = 0
self.destination = destination
self.sent = True
self.callback: Optional[Callable[[Any], None]] = None
def reset_checked_in(self) -> None:
"""Reset the counter of the parameter grads which have been checked in"""
self.params_checked_in = 0
self.sent = False
@property
def all_checked_in(self) -> bool:
"""Have all the expected gradient check-in happened ?"""
return len(self._params) == self.params_checked_in
def can_add_grad_view(self, param: torch.Tensor) -> bool:
"""Is there enough room in the bucket to add this parameter gradient, and is this param not already checked in ?"""
return self._fill + param.numel() < self._max_size and id(param) not in self._param_ids
def to( # type: ignore
self,
device: Optional[Union[int, torch.device]],
dtype: Optional[torch.dtype] = None,
non_blocking: bool = False,
keep_param_alignment: bool = True,
) -> "GradBucket":
"""
Move the underlying buffer
"""
if self._is_collapsed:
self.rebuild()
super().to(device, dtype, non_blocking)
if keep_param_alignment:
self._reattach_grads()
def zero(self) -> None:
"""
Set all the grads to zero
"""
self.buffer.fill_(0.0)
@torch.no_grad()
def add_grad(self, param: torch.Tensor) -> None:
"""
Add a new parameter gradient to the bucket. param.grad becomes a view of this bucket's buffer
"""
assert id(param) not in self._param_ids, "The same gradients cannot be checked in twice"
if param.grad is None:
param.grad = torch.zeros_like(param)
self._add_grad_as_view(param)
self._params.append(param)
self._param_ids.append(id(param))
@torch.no_grad()
def collapse(self) -> None:
"""
Release the buffer from memory. The bucket will need to be rebuilt before use
"""
if not self._is_collapsed:
for p in self._params:
assert p.grad is not None
p.grad.detach_()
p.grad = None
self.buffer = torch.zeros(0, dtype=self.buffer.dtype, device=self.buffer.device)
self._fill = 0
self.params_checked_in = 0
self._is_collapsed = True
@torch.no_grad()
def rebuild(self) -> None:
"""
Given the parameter gradients which have been registered previously, rebuild the whole bucket
"""
assert len(self._params) > 0
if self._is_collapsed:
self.buffer = torch.zeros(self._max_size, dtype=self._params[0].dtype, device=self._params[0].device)
for p in self._params:
self._add_grad_as_view(p)
self._is_collapsed = False
@torch.no_grad()
def shrink(self) -> None:
"""
Shrink the buffer to the size of the parameter gradients currently checked in, release the extra memory
"""
assert self.buffer.numel() > 0, "Cannot shrink a collapsed bucket, please rebuild"
self.buffer = self.buffer.resize_(self._fill).clone()
self._fill = 0
for p in self._params:
self._add_grad_as_view(p)
self._max_size = self._fill
@torch.no_grad()
def _reattach_grads(self) -> None:
"""
Given the parameter gradients which have been registered previously, rebuild the whole bucket
"""
assert len(self._params) > 0
self._fill = 0
for p in self._params:
self._add_grad_as_view(p, keep_existing_value=False)
@torch.no_grad()
def _add_grad_as_view(self, param: torch.Tensor, keep_existing_value: bool = True) -> None:
assert self.buffer.numel() > 0, "Cannot add a gradient to a collapsed bucket, please rebuild"
assert param.dtype == self.buffer.dtype
assert param.device == self.buffer.device
fill_next = self._fill + param.numel()
assert fill_next <= self.buffer.numel()
# Copy the current grad value, if any
if param.grad is not None:
# keep param.grad in place
if keep_existing_value:
self.buffer[self._fill : fill_next].copy_(param.grad.data.flatten())
param.grad.data = self.buffer[self._fill : fill_next].view_as(param.data)
else:
param.grad = self.buffer[self._fill : fill_next].view_as(param.data)
self._fill = fill_next
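# Editor's note: a minimal illustrative sketch (not part of the library) of
# GradBucket: add_grad() turns each param.grad into a view of the flat buffer,
# so one buffer can be communicated instead of many small gradient tensors.
def _example_grad_bucket() -> None:
    p = torch.nn.Parameter(torch.rand(4))
    bucket = GradBucket(size=8, dtype=p.dtype, device=p.device, destination=0)
    bucket.add_grad(p)
    assert p.grad is not None and p.grad.data_ptr() == bucket.buffer.data_ptr()
    p.grad.fill_(1.0)
    assert bucket.buffer[:4].sum().item() == 4.0  # writes to the grad land in the buffer
    bucket.zero()
    assert p.grad.sum().item() == 0.0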
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
from enum import Enum, auto
import functools
import logging
from math import inf
import os
import time
import traceback
import typing
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterator,
List,
Mapping,
NamedTuple,
Optional,
Set,
Tuple,
Union,
cast,
)
import torch
from torch.autograd import Variable
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from fairscale.internal.containers import apply_to_tensors
from fairscale.internal.parallel import (
ProcessGroupName,
chunk_and_pad,
enable_pytorch_sync_bn,
get_process_group_cached,
validate_process_group,
)
from fairscale.internal.params import calc_grad_norm, recursive_copy_to_device
from fairscale.internal.reduce_scatter_bucketer import ReduceScatterBucketer
from fairscale.internal.state_dict import replace_by_prefix_
from fairscale.nn.misc import FlattenParamsWrapper, _enable_pre_load_state_dict_hook
from fairscale.nn.wrap import auto_wrap, config_auto_wrap_policy, enable_wrap
from . import fsdp_optim_utils as ou
if TYPE_CHECKING:
from collections import OrderedDict # noqa: F401
# See #1057. On some platform, torch.distributed may not have ProcessGroup
# So we only import it during type checking, which is not done on a default
# import and is only done by developers (presumably on supported platforms).
from torch.distributed import ProcessGroup
# TODO: Remove the toggle here when github open issue #801 is resolved.
if os.getenv("ENABLE_NCCL_BASE_COLLECTIVES", "1") == "0":
enable_nccl_base_collectives = False
else:
enable_nccl_base_collectives = True
class TrainingState(Enum):
"""
Simple enum to indicate what state FSDP is in. Used for asserting
to make sure APIs are called in the correct state.
.. note::
BACKWARD_PRE and BACKWARD_POST states are used to ensure we
receive backward hooks in the correct order. It is used to catch
unexpected order of hooks being called (likely due to our
hook registration logic or autograd engine logic changes).
TODO (Min): It would be nice to capture the stepping state as well.
Maybe we can use the model.zero_grad() call, but not sure if it
is called if optim.zero_grad() is used instead.
It would be nice to have clear state transition be explicit like:
zero_grad -> fwd -> bwd -> optionally accum grad by repeating
fwd/bwd -> stepping -> loop back to zero_grad
"""
IDLE = auto()
FORWARD = auto()
BACKWARD_PRE = auto()
BACKWARD_POST = auto()
SUMMON_FULL_PARAMS = auto()
class FullyShardedDataParallel(nn.Module):
"""
A wrapper for sharding Module parameters across data parallel workers. This
is inspired by `Xu et al.`_ as well as the ZeRO Stage 3 from DeepSpeed_.
FullyShardedDataParallel is commonly shortened to FSDP.
.. _`Xu et al.`: https://arxiv.org/abs/2004.13336
.. _DeepSpeed: https://www.deepspeed.ai/
Pseudo-code usage::
import torch
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
torch.cuda.set_device(device_id)
sharded_module = FSDP(my_module)
optim = torch.optim.Adam(sharded_module.parameters(), lr=0.0001)
x = sharded_module(x, y=3, z=torch.Tensor([1]))
loss = x.sum()
loss.backward()
optim.step()
It is also possible to shard individual layers separately and have an outer
wrapper handle any leftover parameters. This can be helpful to further
reduce GPU memory usage, reduce system memory usage when initializing large
models and to improve training speed by overlapping the all-gather step
across the forward pass. For example::
import torch
from fairscale.nn.wrap import wrap, enable_wrap, auto_wrap
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from fairscale.utils.testing import dist_init, teardown, rmf
result = dist_init(0, 1, "/tmp/t1", "/tmp/t2")
assert result
fsdp_params = dict(wrapper_cls=FSDP, mixed_precision=True, flatten_parameters=True)
with enable_wrap(**fsdp_params):
l1 = wrap(torch.nn.Linear(5, 5))
assert isinstance(l1, FSDP)
# Wraps layer in FSDP by default if within context
# Separately Wraps children modules with more than 1e8 params
large_tfmr = torch.nn.Transformer(d_model=2048, num_encoder_layers=12,
num_decoder_layers=12)
l2 = auto_wrap(large_tfmr)
assert isinstance(l2.encoder, FSDP)
assert isinstance(l2.decoder, FSDP)
print(l2) # You can print the model to examine FSDP wrapping.
teardown()
rmf("/tmp/t1")
rmf("/tmp/t2")
.. warning::
The optimizer must be initialized *after* the module has been wrapped,
since FSDP will shard parameters in-place and this will break any
previously initialized optimizers.
.. warning::
If you wrap every parameter inside a nested FSDP and leave the outer
FSDP empty without any parameters, activation checkpointing may trigger
an assert on the backward pass. The solution is to leave some parameters
to the outer FSDP.
.. warning::
If activation checkpointing is used with FSDP, it is strongly encouraged
to use ``checkpoint_wrapper`` function from FairScale instead of the
``checkpoint`` function from PyTorch.
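For instance, a minimal sketch of combining the two (the layer type, sizes, and
the ``from fairscale.nn import checkpoint_wrapper`` import path are illustrative
assumptions, not a prescribed recipe)::
    from fairscale.nn import checkpoint_wrapper
    from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
    layer = torch.nn.TransformerEncoderLayer(d_model=512, nhead=8)
    layer = checkpoint_wrapper(layer)  # apply activation checkpointing first
    layer = FSDP(layer)                # then shard the checkpointed module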
Args:
module (nn.Module):
module to be wrapped with FSDP.
process_group (Optional):
process group for sharding
process_group_reduce_scatter (Optional):
process group for the reduce-scatter operation.
It defaults to ProcessGroupName.reduce_scatter, in which case a separate process
group is initialized and assigned to the reduce_scatter operation so that
reduce_scatter overlaps with other operations in the backward propagation.
If it is a specific ProcessGroup, the reduce_scatter operates on that ProcessGroup
and the overlap still happens.
To disable the overlap feature, set the process group to ProcessGroupName.default;
in this case, the reduce_scatter operation uses the same process group as the
default group.
If the reduce-scatter process group size differs from the default process group
size, the reduce_scatter operation falls back to using the default process group.
reshard_after_forward (bool, Optional):
if ``True``, reshard parameters after the forward pass. This saves
memory but slows training. This is only relevant when resharding
individual layers.
disable_reshard_on_root (bool, Optional):
If ``True``, ``reshard_after_forward`` will be set to ``False`` if the module is an
FSDP root module, to improve performance. In that case, we do not reshard the full
parameters of an FSDP root module since those parameters are typically needed
immediately for the backward pass.
If ``False``, performance will be lower, but memory is saved. Consider the case where
an FSDP root module is a submodule of a larger model: the backward pass may not start
immediately after the FSDP root module finishes its forward, so resharding the root
module's parameters helps save memory in the meantime.
In certain cases, the performance is not even slower, because the cached full-param
state may be stale anyway due to load_local_state_dict() calls.
Default: True.
mixed_precision (bool, Optional):
if ``True``, inputs, activations and gradients will be kept in FP16;
computation and communication will occur in FP16; and a (sharded)
master copy of the model weights will be maintained in FP32.
fp32_reduce_scatter (bool, Optional):
if ``True``, then reduce-scatter gradients in FP32. This is only
relevant when *``mixed_precision``* is ``True``.
flatten_parameters (bool, Optional):
if ``True``, flatten parameters into a single contiguous tensor,
which improves training speed.
move_params_to_cpu (bool, Optional):
if ``True``, offload params to CPU.
Default: False
compute_dtype (torch.dtype, Optional):
dtype for full parameters for computation. This defaults to
``torch.float32`` unless *``mixed_precision``* is set, in which case
it defaults to ``torch.float16``.
buffer_dtype (torch.dtype, Optional):
dtype for buffers for computation. This defaults to ``compute_dtype``.
move_grads_to_cpu (bool, Optional):
move gradient shard to CPU after reduction. This is useful when
combined with CPU-based optimizers. It defaults to the value of
*``move_params_to_cpu``*.
bucket_cap_mb (int, Optional):
FSDP will bucket parameters so that gradient reduction can
be more efficient for small parameters.
``bucket_cap_mb`` controls the bucket size in MegaBytes (MB). Buckets
are sub-divided based on world_size, so the max shard size is roughly
``bucket_cap_mb / world_size``. There is one bucketer (with potentially
multiple ``bucket_cap_mb``-sized buffers) shared by all FSDP instances.
Large gradient tensors are directly reduced without using the buffers.
The buffers are there to reduce communication overhead for small tensors.
Overlapping with computation happens due to use of a different CUDA stream
than the computation CUDA stream. The total memory overhead per buffer is around
``bucket_cap_mb / world_size * (world_size + 1)``.
The buffers are allocated during the backward pass and freed at the end
of the backward pass to save more memory for other phases of the
training process.
Note that the memory vs. speed tradeoff of bucket size is very different
from that of the DDP engine. In DDP, the buffer size is ``1MB + n*cap_mb``,
with n growing until the buffers cover the entire model. The order in
which buffers become ready is more rigid there, and DDP requires all
gradients to be computed in the backward pass. In FSDP, the buffer size
does not change with model size (it changes based on the number of
<dtype, device, process_group> tuples), and the gradient-ready order matters
little since FSDP has a final flush call that ensures everything is reduced;
not all gradients need to be known upfront. Overlapping with compute is
done differently too.
Values <= 0 disable bucketing.
Default: 25.
compute_device (torch.device, Optional):
device for computation. If not given and module params are on a CUDA
device, the param's device will be used. If not given and module
params are on CPU, then the current CUDA device (as indicated by
``torch.cuda.current_device()``) will be used.
no_broadcast_optim_state: (bool, Optional)
do not broadcast this module's optimizer state when ``gather_full_optim_state_dict`` is called.
If you set this to ``True``, you are expected to overwrite the relevant state entries of the
returned optimizer state dict with the proper state at each rank. This is useful for
situations, like Mixture of Experts, where all but a few parameters can fit on one node.
Default: False
state_dict_device (torch.device, Optional):
device for parameters returned by :func:`state_dict`. If not given,
this will default to ``compute_device``. Note that only the device
type will be respected (e.g., "cuda:0" and "cuda:1" are the same).
clear_autocast_cache (bool):
When using mixed precision training with `torch.amp.autocast`, if the model weights
are in FP32, autocast maintains a cache for downcasted weights. The cache can cause
GPU OOM during the forward pass. Setting this flag to ``True`` helps clear this
cache as inner FSDP instances finish their part of the forward pass, saving GPU memory.
Default: False
force_input_to_fp32 (bool):
Set to ``True`` to force input floating point tensors to be FP32 (if they are FP16)
when the FSDP instance is in full precision mode. This helps avoid issues of running
SyncBatchNorm with AMP and checkpoint_wrapper.
Default: False
verbose (bool):
Set this to ``True`` to turn on verbose output for model's string representation.
Default: False
cpu_offload (bool, Optional):
if ``True``, offload params to CPU. Note: This arg will be deprecated in favor of
*``move_params_to_cpu``* in an upcoming release.
state_dict_on_rank_0_only (bool):
When set to ``True``, ``model.state_dict()`` will only return the full state dict on
rank 0 and return an empty dict on other ranks, which allows FullyShardedDataParallel to
skip the GPU -> CPU copy on non-zero ranks altogether and prevents OOM.
Default: False
gradient_predivide_factor (float, optional):
If supplied, pre-divide the gradients before scatter-reduce.
Default: None
allow_reset_parameters (bool):
If True, allow ``reset_parameters`` API to be proxied to the wrapped module.
Default: False
"""
def __init__(
self,
module: nn.Module,
process_group: Optional["ProcessGroup"] = None,
# The type of process_group_reduce_scatter can only be either ProcessGroup or ProcessGroupName
process_group_reduce_scatter: Any = ProcessGroupName.reduce_scatter,
reshard_after_forward: bool = True,
disable_reshard_on_root: bool = True,
mixed_precision: bool = False,
fp32_reduce_scatter: bool = False,
flatten_parameters: bool = True,
move_params_to_cpu: bool = False,
compute_dtype: Optional[torch.dtype] = None,
buffer_dtype: Optional[torch.dtype] = None,
move_grads_to_cpu: Optional[bool] = None,
bucket_cap_mb: int = 25,
compute_device: Optional[torch.device] = None,
no_broadcast_optim_state: Optional[bool] = False,
state_dict_device: Optional[torch.device] = None,
clear_autocast_cache: bool = False,
force_input_to_fp32: bool = False,
verbose: bool = False,
cpu_offload: bool = False,
state_dict_on_rank_0_only: bool = False,
gradient_predivide_factor: Optional[float] = None,
allow_reset_parameters: bool = False,
):
init_start = time.time()
super().__init__()
self.process_group = process_group or get_process_group_cached()
# If ProcessGroupName.default is passed in, the reduce_scatter will use the same process group as
# the rest of the operations. The overlap feature in the backward propagation is disabled.
if process_group_reduce_scatter == ProcessGroupName.default:
self.process_group_reduce_scatter = self.process_group
# If ProcessGroupName.reduce_scatter is passed in, the reduce_scatter uses a separate process group
# so that the overlap feature in the backward propagation is enabled.
elif process_group_reduce_scatter == ProcessGroupName.reduce_scatter:
self.process_group_reduce_scatter = get_process_group_cached(ProcessGroupName.reduce_scatter)
else:
# If a specific process group is passed in, the reduce_scatter will use the passed in process group.
# Delay the import here since this type may not be available on certain platforms.
from torch.distributed import ProcessGroup
if isinstance(process_group_reduce_scatter, ProcessGroup):
self.process_group_reduce_scatter = process_group_reduce_scatter
else:
if not hasattr(process_group_reduce_scatter, "allgather") and hasattr(
process_group_reduce_scatter, "rank"
):
# Likely a dummy pg for unit test
self.process_group_reduce_scatter = process_group_reduce_scatter
else:
raise TypeError("unsupported type for reduce_scatter process group")
self.rank = self.process_group.rank()
self.world_size = self.process_group.size()
# In a dummy unit-test environment, the process_group_reduce_scatter can be None.
if self.process_group_reduce_scatter is not None:
reduce_scatter_group_size = self.process_group_reduce_scatter.size()
# Roll back to using the default process group for the reduce-scatter operation when
# the world size and the reduce-scatter process group size are different.
if self.world_size != reduce_scatter_group_size:
self.process_group_reduce_scatter = self.process_group
logging.warning(
"Rolled back to use the default process group for the reduce scatter "
"operation because the reduce_scatter process group "
f"size is {reduce_scatter_group_size}, which is different with the "
f"world size {self.world_size}. Please make sure the process_group "
"parameter uses all the available ranks for the optimal performance."
)
self.reshard_after_forward = self._orig_reshard_after_forward = reshard_after_forward
self.disable_reshard_on_root = disable_reshard_on_root
self.mixed_precision = mixed_precision
self.fp32_reduce_scatter = fp32_reduce_scatter
self.flatten_parameters = flatten_parameters
self.move_params_to_cpu = move_params_to_cpu or cpu_offload
self.compute_dtype = compute_dtype or (torch.float16 if mixed_precision else torch.float32)
self.buffer_dtype = buffer_dtype or self.compute_dtype
self.move_grads_to_cpu = self.move_params_to_cpu if move_grads_to_cpu is None else move_grads_to_cpu
self.bucket_cap_mb = bucket_cap_mb
self.compute_device = compute_device or _get_default_cuda_device(module)
self.uncollected_opt_state: Dict[int, Dict] = {}
self.no_broadcast_optim_state = no_broadcast_optim_state
self.state_dict_device = state_dict_device or self.compute_device
self.clear_autocast_cache = clear_autocast_cache
self.force_input_to_fp32 = force_input_to_fp32
self.verbose = verbose
self.state_dict_on_rank_0_only = state_dict_on_rank_0_only
self.gradient_predivide_factor: float = gradient_predivide_factor or self._get_gradient_predivide_factor(
self.world_size
)
self.gradient_postdivide_factor: float = self.world_size / self.gradient_predivide_factor
self.allow_reset_parameters = allow_reset_parameters
self.numel_padded_per_param: List[int] = []
self._tstart = time.time()
if self.fp32_reduce_scatter and not self.mixed_precision:
raise ValueError("fp32_reduce_scatter requires mixed_precision=True")
# skip validation if the process group was created above
if process_group:
validate_process_group(self.compute_device, self.process_group)
# enable pytorch sync_bn just in case model contains sync_bn layers.
enable_pytorch_sync_bn(module)
# Only handle params which are not already sharded. This enables
# sharding individual layers of a Module, with an outer wrapper to
# shard any leftover parameters.
param_names = []
params = []
for param_name, param in module.named_parameters():
if not hasattr(param, "_is_sharded"):
param_names.append(param_name)
params.append(param)
self._has_params = len(params) > 0
self._has_shared_params = False
self.buffer_size = sum(p.numel() for p in params)
# For now, either all parameters are flattened or none are. This will be extended to
# multiple flatten groups in a future PR.
to_be_flatten_params: List[List[Parameter]] = [[]]
non_flatten_params = params
param_name_groups = [[n] for n in param_names]
if self.flatten_parameters:
to_be_flatten_params = [params]
non_flatten_params = []
param_name_groups = [param_names]
del param_names
self._fsdp_wrapped_module: nn.Module = FlattenParamsWrapper(module, param_list=to_be_flatten_params)
del module # free original module in case it helps garbage collection
# Now, in this FSDP wrapper class, we keep a list of to-be-flattened and not-to-be-flattened
# params for doing sharding, gradient hooks, etc. Note, the ordering of the
# list matters: flattened params are always at the front.
#
# The self._num_flatten_params and self._param_name_groups are computed
# and kept here to support summon_full_params and shard-to-full weight
# consolidation.
self.params = cast(List[Parameter], self._fsdp_wrapped_module.flat_params) + non_flatten_params
self._num_flatten_params = len(self._fsdp_wrapped_module.flat_params)
self._param_name_groups = param_name_groups
# Check to see if the mixed precision setting is correct.
if self.compute_dtype is torch.float16 and self.mixed_precision is False:
for p in self.params:
if p.dtype is not torch.float16:
raise ValueError("Expecting FP16 param type in pure FP16 mode.")
else:
for p in self.params:
if p.dtype is not torch.float32:
raise ValueError("Expecting FP16 param type in FP32 & MP modes.")
# Shard module parameters in place
self._shard_parameters_()
# Make sure all parameters are sharded.
for n, p in self.named_parameters():
assert hasattr(p, "_is_sharded"), f"found unsharded parameter: {n} ; {p.size()}"
self._reset_lazy_init()
# Flag to indicate if we require gradient reduction in the backward
# pass. This will be False when inside the no_sync context manager.
self._require_backward_grad_sync: bool = True
# Enum to indicate if we're in the forward/backward pass, idle, etc.
self.training_state = TrainingState.IDLE
# Flag to indicate if the full params are gathered.
self.has_full_params: bool = False
# Register hook after state_dict() to remove the "_fsdp_wrapped_module."
# prefix and before load_state_dict() to add it back.
self._register_state_dict_hook(functools.partial(_post_state_dict_hook, self.state_dict_on_rank_0_only))
self._register_load_state_dict_pre_hook(_pre_load_state_dict_hook)
# Flag to indicate whether state_dict() should automatically summon the
# full params. This defaults to True, but may be set to False if the
# user explicitly requests the local state dict via local_state_dict().
self._return_full_state_dict = True
init_end = time.time()
logging.debug(
f"FSDP.__init__(done): total_init_time: {(init_end - init_start): .4f} num_params: {(sum(p.numel() for p in self.params))}"
)
# Flag to guard against preparing gradients multiple times per iteration.
# This is reset at the end of the backward pass.
self._pre_backward_hook_has_run = False
def _get_gradient_predivide_factor(self, world_size: int) -> float:
factor: int = 1
while world_size % factor == 0 and world_size / factor > factor:
factor *= 2
return float(factor)
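# Illustration (hand-traced from the loop above, not executed here): the pre/post
# factors always multiply to world_size, split roughly evenly, which likely helps
# reduce the risk of FP16 overflow/underflow during reduction. For example:
# world_size=4 -> pre=2 (post=2), world_size=8 -> pre=4 (post=2),
# world_size=16 -> pre=4 (post=4).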
def set_gradient_divide_factors(self, pre: float, post: float, recursive: bool) -> None:
"""Allowing user to override the pre and post divide factors.
Args:
pre (float): divide factor before the reduction.
post (float): divide factor after the reduction.
recursive (bool): recursively set it for all child FSDP instances or not.
"""
self.assert_state(TrainingState.IDLE)
if recursive:
for module in self.modules():
if isinstance(module, FullyShardedDataParallel) and module != self:
module.set_gradient_divide_factors(pre, post, False)
self.gradient_predivide_factor = pre
self.gradient_postdivide_factor = post
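# Example usage (a hypothetical sketch; ``fsdp_model`` stands for a root FSDP
# instance): disable the automatic pre-division and divide only once, after the
# reduction, by the full world size:
#   fsdp_model.set_gradient_divide_factors(1.0, float(fsdp_model.world_size), True)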
@property
def module(self) -> FlattenParamsWrapper:
"""make model.module accessible, just like DDP."""
assert isinstance(self._fsdp_wrapped_module, FlattenParamsWrapper)
return self._fsdp_wrapped_module
def append_shared_param(self, p: Parameter) -> None:
"""Add a param that's already owned by another FSDP wrapper.
.. warning:: This is experimental!
This only works when all sharing FSDP modules are un-flattened.
``p`` must already be sharded by the owning module.
Check the corresponding unit tests to see how it is used and tested.
In particular, the sharing FSDP wrappers are "siblings" not "parent"
and "child" of each other in the nested module structure.
Args:
p (Parameter):
The shared parameter.
"""
assert self._is_root is None
assert not self.flatten_parameters
assert isinstance(p, Parameter)
assert p._is_sharded
p._is_shared = True
assert (
len(list(filter(lambda p: not (hasattr(p, "_is_shared") and p._is_shared), self.params))) > 0
), "Must have at least 1 non-shared param."
self.params.append(p)
self._has_shared_params = True
def non_shared_params(self) -> List[nn.Parameter]:
"""Return the list of non-shared parameters."""
if self._has_shared_params:
return list(filter(lambda p: not (hasattr(p, "_is_shared") and p._is_shared), self.params))
else:
return self.params
def apply(self, fn: Callable[[nn.Module], None]) -> "FullyShardedDataParallel":
"""
Applies ``fn`` recursively to every submodule (as returned by
``.children()``) as well as self. Typical use includes initializing the
parameters of a model.
Compared to ``torch.nn.Module.apply``, this version additionally gathers
the full parameters before applying ``fn``. It should not be called from
within another ``summon_full_params`` context.
Args:
fn (Callable[[nn.Module], None]): function to be applied to each submodule
Returns:
Module: self
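Example (an illustrative sketch; ``sharded_module`` is from the class-level usage
example and ``init_weights_fn`` is a hypothetical user-defined callable that
re-initializes ``torch.nn.Linear`` weights)::
    sharded_module.apply(init_weights_fn)  # fn runs with full params gathered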
"""
is_uninitialized = self._is_root is None
self.assert_state(TrainingState.IDLE)
with self.summon_full_params(recurse=False):
return_value = super().apply(fn)
# summon_full_params will call _lazy_init, which sets _is_root. However,
# apply() may be called directly on children instances to do weight
# init, so we should reset the _is_root flag in this case.
if is_uninitialized and self._is_root:
for module in self.modules():
if isinstance(module, FullyShardedDataParallel):
module._reset_lazy_init()
return return_value
def _cast_buffers(
self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, memo: Optional[Set] = None
) -> None:
"""Move all buffers to the given *device* and *dtype*.
If *device* or *dtype* are not given, then they will default to
``self.compute_device`` and ``self.buffer_dtype``, respectively. In the
case of nested FSDP instances, we will respect the child instance's
``compute_device`` and ``buffer_dtype`` configuration.
Args:
device (torch.device, Optional):
device to cast buffers to (defaults to compute_device)
dtype (torch.dtype, Optional):
dtype to cast buffers to (defaults to buffer_dtype)
memo (Set, Optional):
set of modules that have already been processed
"""
if memo is None:
memo = set()
for module in self.modules():
if module is not self and isinstance(module, FullyShardedDataParallel):
# Allow any child FSDP instances to handle their own buffers.
module._cast_buffers(device=device, dtype=dtype, memo=memo)
elif module not in memo:
memo.add(module)
for name, buf in module.named_buffers(recurse=False):
if buf is None:
continue
buf = buf.to(device=device or self.compute_device)
if torch.is_floating_point(buf):
buf = buf.to(dtype=dtype or self.buffer_dtype)
setattr(module, name, buf)
@property
def params_with_grad(self) -> List[Parameter]:
"""[p for p in self.parameters() if p.grad is not None]"""
return [p for p in self.parameters() if p.grad is not None]
@torch.no_grad()
def clip_grad_norm_(
self,
max_norm: Union[float, int],
norm_type: Union[float, int] = 2.0,
# filter_params_fn: Callable[[Any], Any] = None,
) -> torch.Tensor:
"""
Clip all gradients at this point in time. The norm is computed over all
gradients together, as if they were concatenated into a single vector.
Gradients are modified in-place.
Args:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'``
for infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
.. note:: This is analogous to `torch.nn.utils.clip_grad_norm_` but
handles the partitioning and multiple devices per rank under the
hood. The default torch util is not applicable here, because each
rank only has a partial view of all the grads in the model, so
calling it in the OSS context would lead to different scaling being
applied per subset of model parameters.
.. warning:: This needs to be called on all ranks, since synchronization
primitives will be used.
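Example (a sketch; assumes ``sharded_module``, ``loss`` and ``optim`` from the
class-level usage example, called identically on every rank)::
    loss.backward()
    sharded_module.clip_grad_norm_(max_norm=1.0)
    optim.step()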
"""
# We don't call torch.cuda.synchronize() here, since clipping can be
# inside the train loop and we probably don't want to force a GPU-CPU sync.
# _lazy_init should be sufficient, since it will force the other streams
# to sync with the default stream (via _wait_for_previous_optim_step).
self._lazy_init()
assert self._is_root, "clip_grad_norm should only be called on the root (parent) instance"
self.assert_state(TrainingState.IDLE)
max_norm = float(max_norm)
norm_type = float(norm_type)
params_with_grad = self.params_with_grad
if not self.children_share_process_group:
raise NotImplementedError(
"clip_grad_norm requires that all params share one process group. clip_grad_by_value_ should work"
)
# Computes the max norm for this shard's gradients and sync's across workers
local_norm = calc_grad_norm(params_with_grad, norm_type).cuda()
if norm_type == inf:
total_norm = local_norm
dist.all_reduce(total_norm, op=torch.distributed.ReduceOp.MAX, group=self.process_group)
else:
total_norm = local_norm**norm_type
dist.all_reduce(total_norm, group=self.process_group)
total_norm = total_norm ** (1.0 / norm_type)
if self.move_grads_to_cpu:
total_norm = total_norm.cpu()
# Now multiply each grad by (max_norm/total_norm), same as torch 1.7 (https://tinyurl.com/3wtxhhqq)
clip_coef = torch.tensor(max_norm, dtype=total_norm.dtype, device=total_norm.device) / (total_norm + 1e-6)
if clip_coef < 1:
# multiply by clip_coef
for p in params_with_grad:
assert p.grad is not None
p.grad.detach().mul_(clip_coef.to(p.grad.device))
return total_norm
@torch.no_grad()
def _shard_parameters_(self) -> None:
"""
At initialization we wrap a module with full parameters and shard the
parameters in-place. Sharding is implemented by viewing each parameter
as a 1D Tensor and retaining only a single slice, where the slice size
is determined by the number of data parallel workers.
Wrapping modules with many small parameters (or with a very large data
parallel world size) will result in many small parameter shards and slow
performance. In this case it's better to set *``flatten_parameters``* to
``True``, so that all of the small parameters in the module are combined
into a single contiguous Tensor and sharded once.
After this initial sharding is complete, the user can initialize a
``torch.optim.Optimizer`` in the usual way, i.e.::
.. code-block:: python
optim = torch.optim.Adam(sharded_module.parameters(), lr=0.0001)
The optimizer will see only a single slice of parameters and will thus
allocate less memory for optimizer state, avoiding redundancy across
data parallel workers.
"""
self.numel_padded_per_param = []
for p in self.params:
assert not hasattr(p, "_is_sharded")
assert p.is_floating_point()
if self.mixed_precision:
assert p.dtype == torch.float32
# If world_size is 1, then we all-reduce grads instead of sharding.
p._is_sharded = self.world_size > 1
p._orig_size = p.data.size()
if not p._is_sharded:
p._is_sharded = False
self.numel_padded_per_param.append(0)
continue
p._is_sharded = True
# TODO (Min): broadcast from rank 0 to avoid each rank need to init with the same seed?
# Replace p.data with the relevant shard.
orig_data = p.data
p.data, num_padded = self._get_shard(p.data)
self.numel_padded_per_param.append(num_padded)
free_storage_(orig_data)
assert len(self.numel_padded_per_param) == len(self.params)
def _get_shard(self, tensor: torch.Tensor) -> Tuple[torch.Tensor, int]:
"""Return the local shard of a full tensor."""
# Shard using torch.chunk to match all-gather/reduce-scatter.
chunks = list(torch.flatten(tensor).chunk(self.world_size))
while len(chunks) < self.world_size:
chunks.append(chunks[0].new_empty(0))
# Determine number of padding elements.
num_to_pad = chunks[0].numel() - chunks[self.rank].numel()
assert num_to_pad >= 0, num_to_pad
shard = chunks[self.rank].clone()
if num_to_pad > 0:
shard = F.pad(shard, [0, num_to_pad])
return shard, num_to_pad
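# Illustration (hand-traced, not executed here): with world_size=3 and a
# flattened parameter of 8 elements, torch.chunk yields chunks of sizes
# [3, 3, 2]; rank 2's shard is zero-padded by 1 element so that every rank
# holds an equally sized shard for all-gather / reduce-scatter.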
def extra_repr(self) -> str:
repr = (
f"world_size={self.world_size}, "
f"flatten_parameters={self.flatten_parameters}, "
f"mixed_precision={self.mixed_precision}, "
)
if self.verbose:
repr = (
f"self={id(self)} is_root={self._is_root}, "
f"rank={self.rank}, " + repr + f"reshard_after_forward={self.reshard_after_forward}, "
f"compute_dtype={self.compute_dtype}, "
f"buffer_dtype={self.buffer_dtype}, "
f"fp32_reduce_scatter={self.fp32_reduce_scatter}, "
f"compute_device={self.compute_device}, "
f"move_params_to_cpu={self.move_params_to_cpu}, "
f"move_grads_to_cpu={self.move_grads_to_cpu}, "
f"bucket_cap_mb={self.bucket_cap_mb}, "
f"clear_autocast_cache={self.clear_autocast_cache}, "
f"force_input_to_fp32={self.force_input_to_fp32}, "
)
return repr
def __getattr__(self, name: str) -> Any:
"""Forward missing attributes to wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.module, name)
def __getstate__(self) -> Dict[str, str]:
"""Serialize the state of the current FSDP instance.
Some properties are not serializable (e.g., process groups, streams), so
we remove them and try to reconstruct them in :func:`__setstate__`.
"""
state = copy.copy(self.__dict__)
state["is_sharded"] = [p._is_sharded for p in self.params]
state["orig_sizes"] = [p._orig_size for p in self.params]
if state["process_group"] is not None:
state["process_group"] = "MISSING" # process_group isn't pickleable
if state["process_group_reduce_scatter"] is not None:
state["process_group_reduce_scatter"] = "MISSING" # process_group_reduce_scatter isn't pickleable
self._reset_lazy_init()
return state
def __setstate__(self, state: Dict[str, Any]) -> None:
"""Intercept state setting and perform needed changes on params."""
super().__setstate__(state)
def fixup(p: Parameter, is_sharded: bool, size: torch.Size) -> Parameter:
assert isinstance(p, Parameter)
p.data = p.data.clone() # move tensors out of shared memory
p._is_sharded = is_sharded
p._orig_size = size
return p
self.params = [
fixup(p, is_sharded, size) for p, is_sharded, size in zip(self.params, self.is_sharded, self.orig_sizes)
]
del self.is_sharded
del self.orig_sizes
self._reset_lazy_init()
def parameters(self, recurse: bool = True) -> Iterator[Parameter]:
"""Returns an iterator over the module parameters, yielding all the parameters
part of the model.
"""
return super().parameters(recurse=recurse)
def named_parameters(self, *args: Any, **kwargs: Any) -> Iterator[Tuple[str, Parameter]]:
"""Returns an iterator over the module parameters, yielding both the name of the
parameter as well as the parameter.
With FSDP, the `named_parameters` function implemented in `nn.Module` will not
be able to return the name and param when we use flattened parameters unless
we call this function under a `summon_full_params` context.
If you want the full param to be returned, you should call this function
under a `summon_full_params` context when using flattened or original params.
.. warning:: This overloaded method will *not* be called in the case of a parent module
containing an FSDP-wrapped child module. Calling parent.named_parameters()
*will* return the original *unclean* key strings, i.e. _fsdp_wrapped_module and
_fpw_module are included in the key strings.
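Example (a sketch of getting clean names with full, unsharded params)::
    with sharded_module.summon_full_params():
        names = [n for n, _ in sharded_module.named_parameters()]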
"""
named_param = super().named_parameters(*args, **kwargs)
for name, param in named_param:
if (
hasattr(self, "flatten_parameters")
and self.flatten_parameters
and hasattr(self, "training_state")
and self.training_state != TrainingState.SUMMON_FULL_PARAMS
):
yield name, param
else:
yield _clean_path(name), param
def __getitem__(self, key: int) -> Any:
"""Forward indexing calls in case the module is a nn.Sequential."""
return self.module.__getitem__(key)
@typing.overload
def state_dict(
self, destination: Mapping[str, torch.Tensor], prefix: str = ..., keep_vars: bool = ...
) -> Mapping[str, torch.Tensor]:
...
@typing.overload
def state_dict(self, prefix: str = ..., keep_vars: bool = ...) -> "OrderedDict[str, torch.Tensor]":
...
# Since we have overloads above, we can use Any here.
def state_dict(self, *args: Any, **kwargs: Any) -> Any:
"""
Returns the whole (unsharded) state of the module. Parameters are not
sharded, so the resulting state_dict can be loaded directly by the
wrapped Module without any sharding-specific logic. Returned tensors
will be full precision (e.g., FP32).
.. warning:: This needs to be called on all ranks, since synchronization
primitives will be used.
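Example (a sketch; must run on every rank, saving to an illustrative path on
rank 0 only)::
    full_sd = sharded_module.state_dict()
    if sharded_module.rank == 0:
        torch.save(full_sd, "/tmp/full_checkpoint.pt")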
"""
if torch.cuda.is_available():
torch.cuda.synchronize()
is_uninitialized = self._is_root is None # See comment below on why we use this.
self._lazy_init()
def maybe_cast_buffers(dtype: Optional[torch.dtype] = None) -> None:
if self.mixed_precision:
self._cast_buffers(dtype=dtype)
if self._return_full_state_dict:
if self.training_state != TrainingState.SUMMON_FULL_PARAMS:
with self.summon_full_params(recurse=False, volatile=True):
maybe_cast_buffers(torch.float32)
state_dict = super().state_dict(*args, **kwargs)
else:
maybe_cast_buffers(torch.float32)
state_dict = super().state_dict(*args, **kwargs)
else:
maybe_cast_buffers(torch.float32)
state_dict = self.module.flat_state_dict(*args, **kwargs)
if self.move_params_to_cpu:
for k in state_dict.keys():
state_dict[k] = state_dict[k].cpu()
# In case we are in mixed precision, restore buffers back to buffer_dtype.
maybe_cast_buffers()
# We shouldn't change the init state in case this was an inner module and
# users simply wanted to get state_dict before training.
if is_uninitialized and self._is_root:
for module in self.modules():
if isinstance(module, FullyShardedDataParallel):
module._reset_lazy_init()
return state_dict
@typing.overload
def local_state_dict(
self, destination: Mapping[str, torch.Tensor], prefix: str = ..., keep_vars: bool = ...
) -> Mapping[str, torch.Tensor]:
...
@typing.overload
def local_state_dict(self, prefix: str = ..., keep_vars: bool = ...) -> "OrderedDict[str, torch.Tensor]":
...
# Since we have overloads above, we can use Any here.
def local_state_dict(self, *args: Any, **kwargs: Any) -> Any:
"""
Returns the local (sharded) state of the module. Parameters are sharded,
so the resulting state_dict can only be loaded after the Module has been
wrapped with FSDP.
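Example (a sketch; every rank saves and later reloads its own shard, using an
illustrative per-rank filename)::
    shard = sharded_module.local_state_dict()
    torch.save(shard, f"/tmp/shard_rank{sharded_module.rank}.pt")
    sharded_module.load_local_state_dict(torch.load(f"/tmp/shard_rank{sharded_module.rank}.pt"))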
"""
# Check state, specifically, we shouldn't be in SUMMON_FULL_PARAMS since
# that will produce full state, not sharded state.
self.assert_state(
[TrainingState.IDLE, TrainingState.FORWARD, TrainingState.BACKWARD_PRE, TrainingState.BACKWARD_POST]
)
with contextlib.ExitStack() as stack:
# Tell any nested FSDP instances not to auto summon full params.
for module in get_fsdp_instances(self):
stack.enter_context(module._no_return_full_state_dict())
# We need to specially call FSDP's state_dict function in case
# self.state_dict is a function from a child class of FSDP.
return FullyShardedDataParallel.state_dict(self, *args, **kwargs)
@contextlib.contextmanager
def _no_return_full_state_dict(self) -> Generator:
backup = self._return_full_state_dict
self._return_full_state_dict = False
try:
yield
finally:
self._return_full_state_dict = backup
def _load_state_dict(
self, state_dict: Union[Dict[str, torch.Tensor], "OrderedDict[str, torch.Tensor]"], strict: bool = True
) -> NamedTuple:
"""
Load a whole (unsharded) state_dict.
.. warning:: This needs to be called on all ranks, since synchronization
primitives will be used.
"""
if self._return_full_state_dict:
with self.summon_full_params():
return self.module.load_state_dict(state_dict, strict)
else:
torch.cuda.synchronize()
self._lazy_init()
return self.module.load_state_dict(state_dict, strict)
def load_state_dict(
self, state_dict: Union[Dict[str, torch.Tensor], "OrderedDict[str, torch.Tensor]"], strict: bool = True
) -> NamedTuple:
is_uninitialized = self._is_root is None # See comment below on why we use this.
sd = self._load_state_dict(state_dict, strict)
# We shouldn't change the init state in case this was an inner module and
# users simply wanted to load_state_dict before training.
if is_uninitialized and self._is_root:
for module in self.modules():
if isinstance(module, FullyShardedDataParallel):
module._reset_lazy_init()
return sd
def load_local_state_dict(
self, state_dict: Union[Dict[str, torch.Tensor], "OrderedDict[str, torch.Tensor]"], strict: bool = True
) -> NamedTuple:
"""Load a local (sharded) state_dict."""
# Check state, specifically, we shouldn't be in SUMMON_FULL_PARAMS since
# that will load full state, not sharded state.
self.assert_state(
[TrainingState.IDLE, TrainingState.FORWARD, TrainingState.BACKWARD_PRE, TrainingState.BACKWARD_POST]
)
with contextlib.ExitStack() as stack:
# Tell any nested FSDP instances not to auto summon full params.
for module in get_fsdp_instances(self):
stack.enter_context(module._no_return_full_state_dict())
output = self._load_state_dict(state_dict, strict)
# After loading the local state, if an FSDP wrapper has the full
# params built, it will not use the updated values. Therefore we call
# _free_full_params() here to force the full params to be rebuilt the next
# time they are needed.
#
# There are 2 cases why this can happen:
# 1. in training, the outermost wrapper may have reshard_after_forward set to
# False (_is_root is True); therefore, the full param is built and kept.
# 2. in eval, inner modules may get called directly, hence having multiple
# "root" instances; therefore, we need to loop over all instances
# below to free the full params.
for module in get_fsdp_instances(self):
module._free_full_params()
return output
@contextlib.contextmanager
def no_sync(self) -> Generator:
"""
A context manager to disable gradient synchronizations across FSDP
processes. Within this context, gradients will be accumulated on module
variables, which will later be synchronized in the first
forward-backward pass after exiting the context.
.. note:: This likely results in higher memory usage because FSDP will
accumulate the full model gradients (instead of gradient shards)
until the eventual sync.
.. note:: Gradient accumulation can also be done without this context,
avoiding the extra GPU memory overhead but incurring extra
networking overhead. I.e., the training loop simply runs
multiple fwd/bwd passes without step() and without the no_sync context.
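Example (a sketch of two-step gradient accumulation; ``batch1`` and ``batch2``
are illustrative inputs)::
    with sharded_module.no_sync():
        sharded_module(batch1).sum().backward()  # grads accumulate locally
    sharded_module(batch2).sum().backward()      # grads are synchronized here
    optim.step()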
"""
self._lazy_init()
assert self._is_root, "no_sync on inner FSDP is not supported"
self.assert_state(TrainingState.IDLE)
# This instance may wrap other FSDP instances and we
# need to set all of them to accumulate gradients.
old_flags = []
for m in get_fsdp_instances(self):
old_flags.append((m, m._require_backward_grad_sync))
m._require_backward_grad_sync = False
try:
yield
finally:
for m, old_flag in old_flags:
assert m._require_backward_grad_sync is False
m._require_backward_grad_sync = old_flag
def reset_parameters(self) -> None:
"""Special reset_parameters API handling.
We don't by default allow this API because it has at least 2 issues:
1. calling it after wrapping can crash due to unexpected tensor size
and dimensions due to flattening and sharding. So summon_full_params
context might be required.
2. calling it after wrapping can result in incorrect init values due
to flattening.
See this gist for an example of the init issue when parameters are
flattened.
https://gist.github.com/407bb158f0d0612e157c2cbcf5c8b76a
Or, as in 1, the init function can silently initialize the weights differently
because of the changed dimensions.
Finally, be advised that init on CPU vs. on GPU can produce different
values. If a model is originally on CPU and is moved to GPU after wrapping,
calling this will again be problematic.
"""
if self.allow_reset_parameters:
self.module.reset_parameters()
else:
raise RuntimeError("reset parameters after FSDP wrapping is not allowed")
def _apply(self, fn: Callable[[nn.Module], None]) -> "FullyShardedDataParallel":
"""Hook into model conversion, like .half() and .float()
When users call module.half() or module.float() after FSDP wrapping,
we need to update some internal states here.
Args:
fn (Callable):
same as nn.Module's _apply.
Returns:
(Any):
same as nn.Module's _apply.
"""
# Just a precaution. Conversion is safest while the state is IDLE.
self.assert_state(TrainingState.IDLE)
# In order to determine how to change compute_dtype, we need to
# remember the dtype before this call.
if len(self.params):
dtype_before = self.params[0].dtype
# Call nn.Module's _apply.
ret = super()._apply(fn)
# Make sure we update p._full_param_padded according to the new dtype if we are
# not in mixed_precision. In mixed precision, doing m.half() or m.float() really
# doesn't make much sense. But we need to allow it in case the user just wanted to
# temporarily call .half() and then .float() back for some reason.
if not self.mixed_precision:
for p in self.params:
if hasattr(p, "_full_param_padded"):
allocated = False
if p._full_param_padded.storage().size() == 0:
allocated = True
alloc_storage_(p._full_param_padded, size=p._full_param_padded.size())
p._full_param_padded = p._full_param_padded.to(dtype=p.data.dtype, device=p.data.device)
if allocated:
free_storage_(p._full_param_padded)
# Update compute_dtype because otherwise, p._full_param_padded will
# still be in that dtype.
if len(self.params):
dtype_after = self.params[0].dtype
if dtype_before != dtype_after:
# There are 4 cases below. Only 2 result in compute_dtype change
# to the dtype_after.
# 16 -> 32, 32 -> 16
# mixed n/a no change
# not mixed change change
if not self.mixed_precision:
self.compute_dtype = dtype_after
return ret
@contextlib.contextmanager
def summon_full_params(self, recurse: bool = True, volatile: bool = False) -> Generator:
"""
A context manager to expose full params for the current FSDP instance.
Can be useful *after* forward/backward for a model to get the params for
additional processing or checking. Parameters will be gathered in full
precision (e.g., FP32).
.. note:: This can be used on inner FSDPs.
.. note:: This can *not* be used within a forward or backward pass. Nor
can forward and backward be started from within this context.
.. note:: The full parameters will be freed after the context manager
exits; it is up to the caller to clone them if needed.
.. note:: The full parameters can be modified, but only the portion
corresponding to the local param shard will persist after the
context manager exits (unless ``volatile=True``, in which case there
are no guarantees about persistence).
Args:
recurse (bool, Optional): recursively summon all params for nested
FSDP instances (default: True)
volatile (bool, Optional): if ``True``, modifications to params are
not guaranteed to persist after the context manager exits;
enabling this can be slightly more efficient (default: False)
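Example (a sketch; must be called outside of forward/backward)::
    with sharded_module.summon_full_params():
        for name, param in sharded_module.named_parameters():
            print(name, param.shape)  # full, unsharded shapes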
"""
if recurse:
with contextlib.ExitStack() as stack:
# Summon all params for any nested FSDP instances.
for module in self.modules():
if isinstance(module, FullyShardedDataParallel):
stack.enter_context(module.summon_full_params(recurse=False, volatile=volatile))
# Yield to the caller, with full params in all nested instances.
yield
# Exiting from the ExitStack will re-shard params.
return
else:
torch.cuda.synchronize()
self._lazy_init()
self.assert_state(TrainingState.IDLE)
# Set the state so that we assert when trying to go into fwd/bwd.
self.training_state = TrainingState.SUMMON_FULL_PARAMS
full_tensors = self._rebuild_full_params(force_full_precision=True)
assert full_tensors is not None
with contextlib.ExitStack() as stack:
if self.module.is_flattened:
# Update flattened views to point to fully-sized tensors. We
# use self.params instead of full_tensors since the
# latter may contain padding.
stack.enter_context(
self.module.unflatten_params(
flat_params=[p.data for p in self.params[: self._num_flatten_params]]
)
)
try:
yield
finally:
stack.close()
non_shared_params = self.params
# filter out shared params for all but the owner FSDP module.
if len(full_tensors) < len(non_shared_params):
non_shared_params = self.non_shared_params()
assert len(full_tensors) == len(
non_shared_params
), f"{len(full_tensors)} vs. {len(non_shared_params)}"
for p, (full_tensor, safe_to_free) in zip(non_shared_params, full_tensors):
if not volatile:
# Copy any changes made to the full params back into
# the corresponding local shards.
local_shard, _ = self._get_shard(full_tensor)
p._fp32_shard.copy_(local_shard.view_as(p._fp32_shard))
if safe_to_free:
free_storage_(full_tensor)
self.has_full_params = False
self._use_fp32_param_shard()
self.training_state = TrainingState.IDLE
def _reset_lazy_init(self) -> None:
"""Reset instance so :func:`_lazy_init` will run on the next forward."""
self._is_root: Optional[bool] = None
self._streams: Dict[str, torch.cuda.Stream] = {}
self._reducer: Optional[ReduceScatterBucketer] = None
for p in self.params:
if hasattr(p, "_fp32_shard"):
del p._fp32_shard # reset _init_param_attributes
self._output_pre_backward_hook_registered: Optional[List] = None
self.reshard_after_forward = self._orig_reshard_after_forward
def _lazy_init(self) -> None:
"""Initialization steps that should happen lazily, typically right
before the first forward pass.
"""
# Initialize param attributes lazily, in case the param's dtype or
# device changes after __init__.
for p in self.params:
self._init_param_attributes(p)
# Initialize _is_root and setup streams. These steps would ideally
# happen in __init__, but _is_root can only be determined after the
# entire model hierarchy is setup, thus we run it lazily.
if self._is_root is None:
self._set_is_root()
self._setup_streams()
self._setup_output_hook_list()
if self._is_root:
# Buffers stay on GPU, and don't get sharded. Since _cast_buffers
# applies recursively, we only call this from the root instance.
self._cast_buffers()
if self.disable_reshard_on_root:
# Don't free the full params for the outer-most (root) instance,
# since those params will be needed immediately after for the
# backward pass.
self.reshard_after_forward = False
# Due to the use of streams, we need to make sure the previous
# ``optim.step()`` is done before we all-gather parameters.
self._wait_for_previous_optim_step()
@torch.no_grad()
def _init_param_attributes(self, p: Parameter) -> None:
"""
We manage several attributes on each Parameter instance. The first two
are set by :func:`_shard_parameters_`:
``_is_sharded``: ``True`` if the Parameter is sharded or ``False``
if the Parameter is intentionally not sharded (in which case we
will all-reduce grads for this param).
``_orig_size``: the size of the original Parameter (before sharding)
The remaining attributes are set here:
``_fp32_shard``: a single shard of the parameters in full precision
(typically FP32, but this is dependent on the dtype of the model
as it's passed in by the user). This can be on CPU or GPU
depending on the value of *``move_params_to_cpu``*.
``_fp16_shard``: a single shard of the parameters in reduced precision, used for all-gather.
Despite the name, it can be in FP16 or FP32 depending on the value of *``compute_dtype``*
and whether params are offloaded to CPU.
``_full_param_padded``: the full weight (padded to be evenly
divisible by ``world_size``), used for computation in the
forward and backward pass. This will be resized in place and
only materialized (via all-gather) as needed.
"""
assert hasattr(p, "_is_sharded") and hasattr(p, "_orig_size")
if hasattr(p, "_fp32_shard"):
return
# A single shard of the parameters in full precision.
p._fp32_shard = p.data
if self.mixed_precision:
assert p._fp32_shard.dtype == torch.float32, self
if self.move_params_to_cpu:
assert p._fp32_shard.device == torch.device("cpu"), self
# If we plan to keep the FP32 parameters on CPU, then pinning
# memory allows us to later use non-blocking transfers when moving
# the FP32 param shard to compute_device.
p._fp32_shard = p._fp32_shard.pin_memory()
p.data = p._fp32_shard
if self.move_params_to_cpu or self.mixed_precision:
# In mixed precision mode, we maintain a reduced precision
# (typically FP16) parameter shard on compute_device for performing
# the computation in the forward/backward pass. We resize the
# storage to size 0 at init (here) and re-materialize (by copying
# from _fp32_shard) as needed. If offloading params to CPU, the
# dtype of the fp16 shard will depend on the *`compute_dtype`*.
p._fp16_shard = torch.zeros_like(p._fp32_shard, device=self.compute_device, dtype=self.compute_dtype)
free_storage_(p._fp16_shard)
if self.mixed_precision:
assert p._fp32_shard.dtype == torch.float32
if not self.mixed_precision and not self.move_params_to_cpu:
# Use _fp32_shard if we are not using mixed precision and not
# offloading params and grads to CPU.
p._fp16_shard = None
# We also maintain a full-sized parameter of type self.compute_dtype
# (FP16 for mixed_precision or FP32 otherwise). We resize the
# storage to size 0 at init (here) and only materialize as needed. The
# storage may contain padding elements so that it is evenly divisible by
# world_size, although these padding elements will be removed before the
# relevant computation.
if p._is_sharded:
p._full_param_padded = torch.zeros(
p.data.numel() * self.world_size, device=self.compute_device, dtype=self.compute_dtype
)
free_storage_(p._full_param_padded)
if self.move_grads_to_cpu and self.training:
# We can optionally move the grad shard to CPU during the backward
# pass. In this case, it's important to pre-allocate the CPU grad
# shard in pinned memory so that we can do a non-blocking transfer.
# This is only needed during training and not evaluation.
p._cpu_grad = torch.zeros_like(p.data, device="cpu").pin_memory()
def _set_is_root(self) -> None:
"""If ``True``, implies that no other :class:`FullyShardedDataParallel`
instance wraps this one. Called once by :func:`_lazy_init`.
Also sets self.children_share_process_group = True if all child
instances share the same process group. If some child instances use a
different process group, self.clip_grad_norm_ will raise an error.
"""
if self._is_root is not None:
return
# No FSDP instance wraps this, else _is_root would be set to False.
self._is_root = True
# If the final backward callback was never queued, the state should be IDLE.
# If the final backward callback was queued, the callback should have finished
# and the state should have been reset to IDLE.
# This should be asserted at the beginning of forward pass in the root instance only.
# For children instances, if they are checkpointed, state will not be reset to
# IDLE after each inner forward/backward.
self.assert_state(TrainingState.IDLE)
# As the root, we now set all children instances to False and
# give them a closure to try to queue a wait_for_post_backward.
self.children_share_process_group = True
for n, m in self.named_modules():
# `n != ""` excludes self.
if n != "" and isinstance(m, FullyShardedDataParallel):
# We set inner FSDP instances to non-root, but they could have the value of True
# if, for example, an inner module is called first (like inference, EMA)
# and then later we call an outer FSDP for state dict load/save.
m._is_root = False
if m.process_group != self.process_group:
self.children_share_process_group = False
# If a child instance is in its own (smaller) world, that was probably an attempt to avoid OOM.
# Therefore gathering this child's optim state will probably cause OOM, so we won't do it.
m.no_broadcast_optim_state = m.no_broadcast_optim_state or (
(m.world_size == 1) and (m.world_size < self.world_size) and (m.process_group != self.process_group)
)
def _setup_streams(self) -> None:
"""Create streams to overlap data transfer and computation."""
if len(self._streams) > 0 or not self._is_root:
return
if torch.cuda.is_available():
# Stream to move main FP32 params (may be on CPU) to FP16 for forward.
self._streams["fp32_to_fp16"] = torch.cuda.Stream()
# Stream for all-gathering parameters.
self._streams["all_gather"] = torch.cuda.Stream()
# Stream for overlapping grad reduction with the backward pass.
self._streams["post_backward"] = torch.cuda.Stream()
# Helper for bucketing reduce-scatter ops. This is also shared with
# children instances to improve bucket utilization.
self._reducer = ReduceScatterBucketer(self.bucket_cap_mb)
# We share streams with all children instances, which allows them to
# overlap transfers across the forward pass without synchronizing with
# the default stream.
for n, m in self.named_modules():
if n != "" and isinstance(m, FullyShardedDataParallel):
m._streams = self._streams
m._reducer = self._reducer
def _setup_output_hook_list(self) -> None:
"""set up a list to avoid registering pre-backward hooks
incorrectly.
"""
assert self._is_root, "This should only be called on the root"
self._output_pre_backward_hook_registered = []
for n, m in self.named_modules():
if n != "" and isinstance(m, FullyShardedDataParallel):
m._output_pre_backward_hook_registered = self._output_pre_backward_hook_registered
def _wait_for_previous_optim_step(self) -> None:
"""
The outer-most :class:`FullyShardedDataParallel` instance (i.e., the root
instance) needs to synchronize with the default stream to ensure the
previous optimizer step is done.
"""
if not torch.cuda.is_available():
return
if self.mixed_precision or self.move_params_to_cpu:
self._streams["fp32_to_fp16"].wait_stream(torch.cuda.current_stream())
else:
self._streams["all_gather"].wait_stream(torch.cuda.current_stream())
def forward(self, *args: Any, **kwargs: Any) -> torch.Tensor:
self._lazy_init()
# Start of a forward pass.
self.training_state = TrainingState.FORWARD
# For root and mixed precision, we convert the input to FP16 (no_grad is needed for
# the conversion).
if self._is_root and self.mixed_precision:
args, kwargs = cast_floats_to_right_precision(True, True, *args, **kwargs)
# If enabled, convert the input to FP32 if we are in full precision.
# no_grad is not used because the input might be for a non-root instance,
# which means autograd needs to go through the conversion.
if self.force_input_to_fp32 and not self.mixed_precision:
args, kwargs = cast_floats_to_right_precision(False, False, *args, **kwargs)
# All-gather full parameters. This will also transfer FP32 parameters to
# ``self.compute_dtype`` (e.g., FP16 if *mixed_precision* is ``True``).
self._rebuild_full_params()
# Register backward hooks to reshard params and reduce-scatter grads.
# These need to be re-registered every forward pass.
self._register_post_backward_hooks()
outputs = self.module(*args, **kwargs)
if self.reshard_after_forward:
self._free_full_params()
if self.mixed_precision or self.move_params_to_cpu:
self._free_fp16_param_shard()
# Switch to main FP32 param shard. We maintain this invariant throughout
# the code, i.e., ``p.data == p._fp32_shard`` after each function. This
# also ensures that after the first forward, the optimizer state will be
# initialized with the correct dtype and (sharded) size, since optimizer
# state is typically initialized lazily in ``optim.step()``.
self._use_fp32_param_shard()
# Register pre-backward hooks to all-gather the params for the backward
# pass (if output's grad was needed). This won't register anything if
# we are in eval mode.
#
# Some models run the forward pass multiple times; we need to register the
# pre-backward hook on every output since the last output's hook has to
# fire first to set up for backward. However, we use ``self._pre_backward_hook_has_run``
# to prevent repeated overhead from multiple hook callbacks.
outputs = self._register_pre_backward_hooks(outputs)
# Done with a forward pass.
self.training_state = TrainingState.IDLE
# Only need to clear cache during forward. During backward, the cache is not used.
# TODO (Min): Future PyTorch versions may provide a way to completely disable this
# cache. Update this when that's available.
if self.clear_autocast_cache:
torch.clear_autocast_cache()
return outputs
def _register_pre_backward_hooks(self, outputs: Any) -> Any:
"""Register pre-backward hook to run before the wrapped module's
backward. Hooks should be attached to all outputs from the forward.
Returns:
outputs: new outputs with hooks registered if they requires gradient.
"""
if not torch.is_grad_enabled():
return outputs # don't register hooks if grad isn't enabled
if self._is_root:
# This actually means that only root instance has
# _post_backward_callback_queued defined. Accidentally accessing this field
# will assert on all other instances, giving us a nice bug checker.
self._post_backward_callback_queued = False
def _pre_backward_hook(*unused: Any) -> None:
# try to queue final backward callback only once for root, so
# that final backward callback is attached to the outer most
# backward graph task and called after all the backward
# calls are completed.
if self._is_root:
self._queue_wait_for_post_backward()
# All-gather full parameters or switching to the full params.
#
# This needs to be done on every pre_backward hook, even within the same
# iteration (i.e. for checkpointed, multiple forward pass modules). This is
# because after the forward pass (i.e. in checkpoint inner graph), we always
# switch to fp32_shard in the ``forward`` function.
#
# We used to do this only after the ``self._pre_backward_hook_has_run``
# boolean guard below, which is incorrect. It worked in pytorch < 1.9 for
# some unknown reason, but pytorch 1.10 nightly exposed this bug.
#
# Note, both ``self._rebuild_full_params`` and ``self._use_full_params`` are
# idempotent. So in case they are called unnecessarily, they don't incur much
# overhead.
if self.reshard_after_forward:
self._rebuild_full_params()
else:
self._use_full_params()
# Only run the ``self._prep_grads_for_backward`` once per iteration (i.e. in case
# it is multiple outputs or multiple forward passes).
if not self._pre_backward_hook_has_run:
self._pre_backward_hook_has_run = True
# Start of a backward pass for the first time in an iteration.
self.assert_state([TrainingState.IDLE, TrainingState.BACKWARD_PRE])
# Prepare p.grad so that it is in the right shape, device, accumulated values, etc.
self._prep_grads_for_backward()
# Transition to BACKWARD_PRE state if currently IDLE. We can transition from BACKWARD_POST
# to IDLE when FSDP is within activation checkpointing and called multiple times, due to the
# extra forward pass for re-computation.
if self.training_state == TrainingState.IDLE:
self.training_state = TrainingState.BACKWARD_PRE
self.assert_state([TrainingState.BACKWARD_PRE, TrainingState.BACKWARD_POST])
_registered = 0
def _register_hook(t: torch.Tensor) -> torch.Tensor:
# We don't register the pre_backward hook on the same tensor that has been
# returned from an inner FSDP, unless it is the first one. This does
# not cover all problematic cases though. A tensor not from an inner
# FSDP can cause problems too:
# ```
# x = layer1(input)
# state = [x] # better change to x.detach(), not fixed by the following if-condition
# x = inner_fsdp_module_layer2(x)
# state.append(x) # better change to x.detach(), but fixed by the following if-condition
# x = layer3(x)
# return x, state
# ```
# The tensors in `state`, if not detached, can be registered with
# backward hooks (in addition to the `x` on the last line). In that case,
# pre-backward hook can fire multiple times in the order that causes
# the outer FSDP to crash.
#
# The best practice is for modules wrapped by FSDP to return one and only
# one tensor to be used for backward. All other tensors returned should be
# detached.
nonlocal _registered
assert self._output_pre_backward_hook_registered is not None
if t.requires_grad and (_registered == 0 or id(t) not in self._output_pre_backward_hook_registered):
t.register_hook(_pre_backward_hook)
self._output_pre_backward_hook_registered.append(id(t))
_registered += 1
return t
# Attach hooks to Tensor outputs.
outputs = apply_to_tensors(_register_hook, outputs)
return outputs
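# Illustrative sketch (hypothetical module, following the example in the comment above):
# the safe pattern for a module wrapped by FSDP that also exposes intermediate
# activations is to detach every extra output, so that only one tensor carries the
# backward graph and therefore only one pre-backward hook can fire:
#
#     def forward(self, x):
#         h1 = self.layer1(x)
#         state = [h1.detach()]            # detached: no pre-backward hook attached
#         h2 = self.inner_fsdp_module_layer2(h1)
#         state.append(h2.detach())        # detached as well
#         out = self.layer3(h2)
#         return out, state                # only ``out`` participates in backward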
def _register_post_backward_hooks(self) -> None:
"""
Register backward hooks to reshard params and reduce-scatter grads.
This is called during forward pass. The goal is to attach a hook
on each parameter's gradient-generating function (``grad_acc``
below) so that the hook is called *after* all gradients for that
param are computed.
Goals:
1. We want the hook to fire once and only once *after* all gradients
are accumulated for a param.
2. If it fires more than once, we end up incorrectly sharding the grad
multiple times. (could lead to dimension too small)
3. If it fires once but too early or doesn't fire, we leave gradients
unsharded. (could lead to dimension too large)
There are several cases here:
1. We can call the same module multiple times in a single outer forward
pass. We register multiple hooks but autograd should fire the last
one after the total gradient is computed and accumulated. If it does
fire multiple times, we may have a crash due to gradient being already
sharded and shape mismatch.
On the other hand, due to _saved_grad_shard, this case may also work
but with extra grad scatter-gather.
2. With activation checkpointing and case 1.
3. The same outer forward can be called multiple times before any backward
is called (within the no_sync context) for a special way of gradient
accumulation. (see test_fsdp_fwd_fwd_bwd_bwd.py)
4. When a param is shared by multiple FSDP wrapper instances, this can
register multiple times. (See test_fsdp_shared_weights.py)
It appears that registering the hook every time, letting the hooks fire, and
having them removed/freed automatically is the correct thing to do. But this
is purely based on experiments.
"""
if not torch.is_grad_enabled():
return # don't register grad hooks if grad isn't enabled
for p in self.params:
if p.requires_grad:
# Register a hook.
p_tmp = p.expand_as(p) # Get a grad_fn on p_tmp.
assert p_tmp.grad_fn is not None
grad_acc = p_tmp.grad_fn.next_functions[0][0] # Gets its GradAccumulation object.
handle = grad_acc.register_hook(functools.partial(self._post_backward_hook, p))
# Important, we need to save the hook, otherwise, it appears to be
# deleted/freed/unregistered.
# However, we don't free/unhook at the end of bwd (as we used to do
# in _finalize_parameters below). If we do, that may unregister the wrong hook.
p._shard_bwd_hook = (grad_acc, handle)
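# Illustrative sketch (generic PyTorch pattern, hypothetical hook body): how the
# ``grad_acc`` node above is reached. A leaf parameter has no ``grad_fn``, so we take
# a trivial view to get at its AccumulateGrad node:
#
#     p_tmp = p.expand_as(p)                          # non-leaf view of p
#     grad_acc = p_tmp.grad_fn.next_functions[0][0]   # AccumulateGrad node for p
#     handle = grad_acc.register_hook(lambda *unused: print("grads for p accumulated"))
#
# The hook fires after autograd has finished accumulating gradients into ``p.grad``.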
@torch.no_grad()
def _post_backward_hook(self, param: Parameter, *unused: Any) -> None:
"""
At the start of :func:`_post_backward_hook`, ``param.grad`` contains the
full gradient for the local batch. The reduce-scatter op will replace
``param.grad`` with a single shard of the summed gradient across all
GPUs. This shard will align with the current GPU rank. For example::
before reduce_scatter:
param.grad (GPU #0): [1, 2, 3, 4]
param.grad (GPU #1): [5, 6, 7, 8]
after reduce_scatter:
param.grad (GPU #0): [6, 8] # 1+5, 2+6
param.grad (GPU #1): [10, 12] # 3+7, 4+8
The local GPU's ``optim.step`` is responsible for updating a single
shard of params, also corresponding to the current GPU's rank. This
alignment is created by :func:`_shard_parameters_`, which ensures that
the local optimizer only sees the relevant parameter shard.
"""
# First hook callback will see PRE state. If we have multiple params,
# then subsequent hook callbacks will see POST state.
self.assert_state([TrainingState.BACKWARD_PRE, TrainingState.BACKWARD_POST])
self.training_state = TrainingState.BACKWARD_POST
if hasattr(param, "_linked_param"):
# This links to a shared param. We should try to finalize the linked param here.
# This is done by module code to ensure correct gradient computation.
# p._is_shared and p._linked_param are closely related but not the same.
# See fairscale/experimental/nn/mevo.py.
assert param.shape == (1,), param.shape # This param should have this special dim.
# If the _is_shared flag is set, then this shared weight is indeed being
# shared between different FSDP wrappers. Otherwise, they are linked but
# likely in the same FSDP wrapper, which means we shouldn't finalize the
# linked param.
if hasattr(param._linked_param, "_is_shared") and param._linked_param._is_shared:
# param._linked_param may or may not have .grad since this callback
# could happen multiple times to support #918. Since we check `if param.grad is None`
# below anyway, this is OK.
param = param._linked_param
if param.grad is None:
return
if param.grad.requires_grad:
raise RuntimeError("FSDP only works with gradients that don't require gradients")
if self._require_backward_grad_sync or self.reshard_after_forward:
# Free full params. As a special case, we don't free the full params
# when in a ``no_sync`` context (as inversely indicated by
# ``self._require_backward_grad_sync``), since the params will not
# get updated before the next forward. This saves networking
# bandwidth but uses more GPU memory.
self._free_full_params([param])
if self.mixed_precision:
# This is a no-op if reshard_after_forward is True, since we already
# free the param shard when rebuilding the full params in the
# pre_backward_hook.
self._free_fp16_param_shard([param])
# Switch to FP32 shard after backward.
self._use_fp32_param_shard([param])
if not self._require_backward_grad_sync:
return
# Wait for all work in the current stream to finish, then start the
# reductions in post_backward stream.
self._streams["post_backward"].wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self._streams["post_backward"]):
orig_grad_data = param.grad.data
if self.mixed_precision and self.fp32_reduce_scatter:
# Cast grad to FP32.
param.grad.data = param.grad.data.to(param.dtype)
if self.gradient_predivide_factor > 1:
# Average grad by world_size for consistency with PyTorch DDP.
param.grad.data.div_(self.gradient_predivide_factor)
if param._is_sharded:
assert self._reducer is not None
# Save the unsharded grad for reduction. We will asynchronously accumulate the reduced gradient into
# param._saved_grad_shard. If this FSDP module was called multiple times it's possible that multiple
# gradient reductions will happen in an undefined order. But addition commutes, so this order doesn't
# matter, neglecting rounding.
grad = param.grad.data
# Clear grad on the tensor, so any repeated gradient computations do not interfere with this reduction.
#
# The effect on memory consumption is not usually significant. No extra memory is allocated if this
# module is called only once, reduction happens quickly, or the tensor is bucketed. If the module is
# called multiple times, and the backwards pass runs far enough ahead of the `post_backward` stream,
# then we can end up with multiple unsharded gradients allocated and queued for reduction.
#
# We could guard against this by using CUDA events (see record_event, wait_event in torch.cuda.Stream).
# This ensures the `default` stream will wait for the `post_backward` stream to complete the last
# reduction for this module, before scheduling additional reduction work. Then at most there are two
# unsharded gradients allocated; one for a pending reduction, and one for gradient computation.
param.grad = None
callback_fn = functools.partial(self._post_reduction_hook, param)
grad_chunks = chunk_and_pad(grad, self.process_group_reduce_scatter.size())
self._reducer.reduce_scatter_async(
grad_chunks, group=self.process_group_reduce_scatter, callback_fn=callback_fn
)
else:
# Currently the only way for _is_sharded to be False is if
# world_size == 1. This could be relaxed in the future, in which
# case grads should be all-reduced here.
assert self.world_size == 1
self._post_reduction_hook(param, param.grad.data)
# After _post_backward_hook returns, orig_grad_data will eventually
# go out of scope, at which point it could otherwise be freed for
# further reuse by the main stream while the div/reduce_scatter/copy
# are underway in the post_backward stream. See:
# github.com/NVIDIA/apex/blob/master/apex/parallel/distributed.py
orig_grad_data.record_stream(self._streams["post_backward"])
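# Illustrative sketch (assumption about ``chunk_and_pad``): the unsharded grad is split
# into ``world_size`` equally sized chunks, zero-padding the last one if needed, e.g.
# with world_size == 2:
#
#     grad = [g0, g1, g2]  ->  chunks = [[g0, g1], [g2, 0]]
#
# ``reduce_scatter_async`` then sums chunk i across ranks and leaves it on rank i, and
# ``_post_reduction_hook`` accumulates the result into ``param._saved_grad_shard``.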
def _post_reduction_hook(self, param: Parameter, reduced_grad: torch.Tensor) -> None:
"""Hook to call on each param after the reduce-scatter."""
assert torch.cuda.current_stream() == self._streams["post_backward"]
self.assert_state(TrainingState.BACKWARD_POST)
if self.gradient_postdivide_factor > 1:
# Average grad by world_size for consistency with PyTorch DDP.
reduced_grad.data.div_(self.gradient_postdivide_factor)
# Cast grad to param's dtype (typically FP32). Note: we do this
# before the move_grads_to_cpu step so that this entire hook remains
# non-blocking. The downside is a bit more D2H transfer in that case.
if self.mixed_precision:
orig_param_grad_data = reduced_grad.data
reduced_grad.data = reduced_grad.data.to(dtype=param.data.dtype)
# Don't let this memory get reused until after the transfer.
orig_param_grad_data.record_stream(torch.cuda.current_stream())
if param._is_sharded:
# Accumulate into the gradient shard.
if getattr(param, "_saved_grad_shard", None) is None:
param._saved_grad_shard = reduced_grad.data
else:
assert (
param._saved_grad_shard.shape == reduced_grad.shape
), f"{param._saved_grad_shard.shape} vs {reduced_grad.shape}"
param._saved_grad_shard.data += reduced_grad.data
reduced_grad = param._saved_grad_shard.data
# Optionally move gradients to CPU, typically used if one is running the optimizer on the CPU. Once the full
# backwards pass completes, we will set `.grad` to the CPU copy.
if self.move_grads_to_cpu:
param._cpu_grad.copy_(reduced_grad.data, non_blocking=True)
# Don't let this memory get reused until after the transfer.
reduced_grad.data.record_stream(torch.cuda.current_stream())
def _queue_wait_for_post_backward(self) -> None:
"""Try to queue a `wait_for_post_backward` callback.
Only called on root and only queue one callback at the beginning of
outer most backward.
"""
assert self._is_root
if not self._post_backward_callback_queued:
self.assert_state([TrainingState.IDLE])
self._post_backward_callback_queued = True
Variable._execution_engine.queue_callback(self._wait_for_post_backward)
@torch.no_grad()
def _wait_for_post_backward(self) -> None:
"""Wait for post-backward to finish. Only called on root instance."""
# Note: the backward runtime swallows assert errors, so we use p_assert() here.
p_assert(self._is_root, "WFPB not called on root")
# Check if the root module has params and if any of them has
# the `requires_grad` field set. If `requires_grad=False` for
# all the params, the post_backward hook will not fire and the
# state will remain in `TrainingState.BACKWARD_PRE`.
if any([p.requires_grad for p in self.params]):
self.assert_state(TrainingState.BACKWARD_POST)
else:
self.assert_state(TrainingState.BACKWARD_PRE)
if self._require_backward_grad_sync:
# Flush any unreduced buckets in the post_backward stream.
with torch.cuda.stream(self._streams["post_backward"]):
p_assert(self._reducer is not None, "WFPB: reducer is None")
assert self._reducer is not None # make mypy happy
self._reducer.flush()
torch.cuda.current_stream().wait_stream(self._streams["post_backward"])
if self.move_grads_to_cpu:
# Wait for the non-blocking GPU -> CPU grad transfers to finish.
torch.cuda.current_stream().synchronize()
# A backward pass is done, clean up below.
# Free reducer buffers.
if self._reducer is not None:
self._reducer.teardown()
def _finalize_parameters(fsdp_module: FullyShardedDataParallel) -> None:
"""Helper used below on all fsdp modules."""
if not fsdp_module._is_root and self._require_backward_grad_sync:
# We make sure to switch to fp32 shards here because there might be
# params lingering in full_param mode, if the following firing order happens:
# pre-bwd: rebuild and use full for p1 and p2
# post-bwd for p1: free and switch to fp32 shard for p1
# pre-bwd: rebuild again for p1 and p2
# post-bwd for p2: free and switch to fp32 shard for p2
# In the end, p1 will be left in full param mode.
#
# We need to switch to fp32 *and* free the full params. If we don't free,
# we end up reusing a potentially *stale* full param after the fp32
# shard is updated (e.g. by optimizer.step()).
#
# We skip the root because it may have reshard=False, which means
# we want to keep the speed benefit of that. I haven't seen a case
# where this is needed on the root module.
#
# We also skip this during grad accumulation steps since we want to keep the full
# params, which haven't been updated yet. See the comment of ``no_sync``
# for when to use no_sync-style grad accumulation. For FSDP, it is more likely
# that you want grad accumulation without no_sync.
fsdp_module._free_full_params()
fsdp_module._use_fp32_param_shard()
for p in fsdp_module.params:
if not p.requires_grad:
continue
# Leave the gradient accumulation state as-is if not synchronizing this pass. This ensures p.grad
# remains the unsharded gradient accumulated from prior no-sync passes, and p._saved_grad_shard
# remains the sharded gradient from the last synchronized pass. This also allows interleaved no-sync and
# sync passes.
if not self._require_backward_grad_sync:
continue
# Parameter and gradient devices must match.
if hasattr(p, "_cpu_grad"):
p_assert(p.device == torch.device("cpu"), f"WFPB: incorrect cpu_grad device {p.device}")
p.grad = p._cpu_grad
elif hasattr(p, "_saved_grad_shard"):
p_assert(
p.device == p._saved_grad_shard.device,
f"WFPB: incorrect saved_grad_shard device p.device={p.device} "
f"vs p._saved_grad_shard.device={p._saved_grad_shard.device}",
)
p_assert(
p.shape == p._saved_grad_shard.shape,
f"WFPB: incorrect saved_grad_shard shape p.shape={p.shape} "
f"vs p._saved_grad_shard.shape={p._saved_grad_shard.shape}",
)
p.grad = p._saved_grad_shard
if hasattr(p, "_saved_grad_shard"):
delattr(p, "_saved_grad_shard")
# Update root and nested FSDP's hooks and flags.
for m in get_fsdp_instances(self):
_finalize_parameters(m)
m._pre_backward_hook_has_run = False
if any(p.requires_grad for p in m.parameters()):
# Check if the module has params and if any of them has
# the `requires_grad` field set. If `requires_grad=False` for
# all the params, the post_backward hook will not fire and the
# state will remain in `TrainingState.BACKWARD_PRE`.
if any([p.requires_grad for p in m.params]):
m.assert_state(TrainingState.BACKWARD_POST)
else:
m.assert_state(TrainingState.BACKWARD_PRE)
else:
# When `m` and its children has no params or has params but
# none with `requires_grad==True`, there are two cases:
# 1. output tensors are `requires_grad==True`. In this case,
# pre-backward hook is still registered, so it is in BACKWARD_PRE state.
# 2. output tensors are `requires_grad==False`. In this case,
# pre-backward hook is not registered, so it is in IDLE state.
m.assert_state([TrainingState.BACKWARD_PRE, TrainingState.IDLE])
m.training_state = TrainingState.IDLE
if m._is_root:
# reset this flag for cases like "one forward pass + multiple backward passes"
self._post_backward_callback_queued = False
# clear this list for next iteration
p_assert(
self._output_pre_backward_hook_registered is not None,
"WFPB: self._output_pre_backward_hook_registered should not be None",
)
assert self._output_pre_backward_hook_registered is not None # make mypy happy
self._output_pre_backward_hook_registered.clear()
@torch.no_grad()
def _rebuild_full_params(self, force_full_precision: bool = False) -> Optional[List[Tuple[torch.Tensor, bool]]]:
"""
Gather all shards of params.
Note, this is idempotent if full params are already gathered. Callers
assume the idempotency. So please keep it that way.
Args:
force_full_precision (bool, Optional): by default params will be gathered
in ``compute_dtype`` (e.g., FP16), unless *force_full_precision* is
``True``, in which case they will be gathered in full precision
(e.g., FP32), possibly in fresh storage. The parameter that's being
rebuilt will end up in full precision as well.
Returns:
A list of tuples, where the first element is the full-sized param
and the second element is a bool indicating if it's safe for the
caller to free the full-sized param. This will be ``None`` if
``force_full_precision=False`` and the full params are already gathered.
"""
output_tensors: List[Tuple[torch.Tensor, bool]] = []
def update_p_data(custom_output_tensor: Optional[torch.Tensor] = None) -> None:
"""
Helper function to update p.data pointer.
Args:
custom_output_tensor (torch.Tensor, Optional): if not None, this
tensor contains the data we just gathered.
"""
if custom_output_tensor is not None:
assert p._is_sharded
p.data = custom_output_tensor
output_tensors.append((p.data, True))
elif not p._is_sharded:
if (self.mixed_precision or self.move_params_to_cpu) and not force_full_precision:
assert p._fp16_shard is not None
p.data = p._fp16_shard
output_tensors.append((p.data, True))
else:
# Here p.data == p._fp32_shard, so it's not safe to free.
output_tensors.append((p.data, False))
else:
p.data = p._full_param_padded
output_tensors.append((p.data, True))
# Trim any padding and reshape to match original size.
p.data = p.data[: p._orig_size.numel()].view(p._orig_size)
if self._has_shared_params:
# self.has_full_params flag can be out of sync if a shared param is
# sharded by another FSDP instance. An example is that in eval case
# with reshard_after_forward=False but the sharing instance has
# reshard_after_forward=True. Then, on the second forward, the
# other instance can shard the shared param, but this instance
# can mistakenly think the full param is already gathered from the
# has_full_params flag.
#
# Therefore, we update the flag accordingly here.
self.has_full_params = not any(p._full_param_padded.storage().size() == 0 for p in self.params)
# Early exit if we already have full params and don't need full precision.
if self.has_full_params and not force_full_precision:
for p in self.params:
update_p_data()
return output_tensors
self.has_full_params = True
with torch.cuda.stream(self._streams["all_gather"]):
if (self.mixed_precision or self.move_params_to_cpu) and not force_full_precision:
self._cast_fp32_param_shards_to_fp16()
if self.move_params_to_cpu:
if force_full_precision:
# If the compute_dtype and storage dtype are the same,
# use pinned memory. Otherwise move p.data to the compute
# device.
if self.params[0].dtype == self.compute_dtype:
self._cast_fp32_param_shards_to_fp16()
else:
for p in self.params:
p.data = p.data.to(self.compute_device)
for p in self.params:
if not p._is_sharded: # e.g., when world_size == 1
update_p_data()
else:
# Skip if already built.
#
# case 1: shared param can be rebuilt multiple times.
# A corner case is p._orig_size = (1,), which means the shape equality is
# not a perfect check. But we assume we don't share a param with shape (1,).
# We do use size (1,) in unit testing at least.
# case 2: with multiple params (like non-flatten, or multiple flatten groups)
# we may have pre & post bwd firing order issues. See comments in the
# _finalize_parameters function for such case.
if p.data.shape == p._orig_size and p._orig_size != (1,):
assert p.data.storage().data_ptr() == p._full_param_padded.storage().data_ptr(), (
f"p.data {p.data.storage().data_ptr()} "
f"p._fp32_shard {p._fp32_shard.storage().data_ptr()} "
f"p._fp16_shard {p._fp16_shard.storage().data_ptr() if p._fp16_shard is not None else None} "
f"p._full_params_padded {p._full_param_padded.storage().data_ptr()} "
)
continue
# If self.move_params_to_cpu and force_full_precision, we need to cast
# the FP32 CPU param to CUDA for the all-gather.
p_data = p.data.to(p._full_param_padded.device, non_blocking=True)
full_p_size = p._full_param_padded.size()
assert full_p_size.numel() % self.world_size == 0
if self.mixed_precision and force_full_precision:
# Allocate fresh tensor in full precision since we are in
# mixed precision and full precision rebuild is asked.
output_tensor = p_data.new_zeros(full_p_size)
else:
if p._full_param_padded.storage().size() != full_p_size.numel():
# Allocate based on full size from all shards.
alloc_storage_(p._full_param_padded, size=full_p_size)
output_tensor = p._full_param_padded
# Fill output_tensor with (p.data for each shard in self.world_size)
if hasattr(dist, "_all_gather_base") and enable_nccl_base_collectives:
# New version of PyTorch has all_gather_base, which is faster than chunk and then all_gather.
dist._all_gather_base(output_tensor, p_data, group=self.process_group)
else:
chunks = list(output_tensor.chunk(self.world_size))
dist.all_gather(chunks, p_data, group=self.process_group)
# Set p.data = output_tensor (with padding trimmed)
update_p_data(output_tensor)
if (self.mixed_precision or self.move_params_to_cpu) and not force_full_precision:
self._free_fp16_param_shard([p])
if self.move_params_to_cpu and (self.params[0].dtype == self.compute_dtype):
self._free_fp16_param_shard([p])
torch.cuda.current_stream().wait_stream(self._streams["all_gather"])
return output_tensors
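# Illustrative sketch (mirrors the two code paths above; ``shard``, ``pg`` and
# ``world_size`` are stand-ins for ``p_data``, ``self.process_group`` and
# ``self.world_size``): the flat all-gather and the chunked all-gather fill the same
# padded output buffer:
#
#     # newer PyTorch: gather every rank's shard directly into one flat tensor
#     dist._all_gather_base(output_tensor, shard, group=pg)
#     # older equivalent: gather into world_size views of that same tensor
#     chunks = list(output_tensor.chunk(world_size))
#     dist.all_gather(chunks, shard, group=pg)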
@torch.no_grad()
def _use_full_params(self) -> None:
"""
Switch p.data pointers to use the full params.
Note: this assumes full params are already gathered.
Note: this might be called when full_params is already in use. So please
make sure it is idempotent in that case.
"""
assert self.has_full_params
for p in self.params:
if not p._is_sharded:
if self.mixed_precision or self.move_params_to_cpu:
assert p._fp16_shard is not None
assert p._fp16_shard.storage().size() != 0
p.data = p._fp16_shard
else:
assert p._full_param_padded.storage().size() != 0, f"{p._orig_size} {id(self)}"
p.data = p._full_param_padded[: p._orig_size.numel()].view(p._orig_size)
@torch.no_grad()
def _prep_grads_for_backward(self) -> None:
"""Make sure p.grad is correctly prepared for the backward with
right shape, device, accumulated values, etc.
"""
for p in self.params:
if p.grad is not None:
if p.grad.device != p.data.device:
p.grad = None
elif p.grad.size() == p._orig_size:
# This is gradient accumulation with no_sync context.
pass
elif p.grad.size() == p._fp32_shard.shape:
# This is gradient accumulation without no_sync context.
# We save the grad shard and set p.grad to None for this backward pass.
# We will accumulate after this pass's grad is generated and reduced and
# sharded.
p._saved_grad_shard = p.grad.data
p.grad = None
else:
raise AssertionError(f"unexpected grad shape: {p.grad.size()}")
@torch.no_grad()
def _free_full_params(self, params: Optional[List[Parameter]] = None) -> None:
"""Free up storage for full parameters."""
if params is None:
params = self.params
self.has_full_params = False
current_stream = torch.cuda.current_stream()
for p in params:
if not p._is_sharded: # e.g., world_size == 1
if self.mixed_precision or self.move_params_to_cpu:
self._free_fp16_param_shard([p])
continue
# Don't let PyTorch reuse this memory until all work in the current
# stream is complete.
p._full_param_padded.record_stream(current_stream)
# There may be external references to the Tensor Storage that we
# can't modify, such as references that are created by
# ctx.save_for_backward in the forward pass. Thus when we
# unshard parameters, we should reuse the original Tensor
# Storage object and unshard it in-place. For now, just resize
# the Storage to 0 to save memory.
free_storage_(p._full_param_padded)
def local_metadata_dict(self) -> Dict[str, Any]:
"""
Get the information needed to reconstruct the model from shards offline.
See the `consolidate_shard_weights` method below.
"""
param_metadata = []
for path, m in self.named_modules():
if isinstance(m, FullyShardedDataParallel):
metadata: Dict[str, Any] = {}
metadata["fsdp_path"] = _clean_path(path)
metadata["params"] = {}
metadata["no_broadcast_optim_state"] = m.no_broadcast_optim_state
shared_param_info = []
for (mpath_dst, mpath_src, _, src_name, _, dst_name) in m._shared_param_infos:
src_param_path = _clean_path(mpath_src + "." + src_name if mpath_src else src_name)
dst_param_path = _clean_path(mpath_dst + "." + dst_name if mpath_dst else dst_name)
shared_param_info.append((src_param_path, dst_param_path))
metadata["shared_param_info"] = shared_param_info
for i, p in enumerate(m.params):
if i < m._num_flatten_params:
backing_param_name = m.module.flat_param_names[i]
names, shapes, numels = m.module.metadata(i)
else:
assert len(m._param_name_groups[i]) == 1
backing_param_name = m._param_name_groups[i][0]
names = [backing_param_name]
shapes = [p._orig_size]
numels = [p._orig_size.numel()]
backing_param_name = _clean_path(backing_param_name)
metadata["params"][backing_param_name] = {
"names": [_clean_path(n) for n in names], # A list of str.
"shapes": shapes, # A list of torch.Size.
"numels": numels, # A list of int.
"padding": m.numel_padded_per_param[i], # An int for padding added to the backing parameter.
}
param_metadata.append(metadata)
buffer_names = [_clean_path(buffer_name) for buffer_name, _ in self.named_buffers(recurse=True)]
return dict(param_metadata=param_metadata, buffer_names=buffer_names)
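# Illustrative shape of the dict returned above (all values are made up):
#
#     {
#         "param_metadata": [
#             {
#                 "fsdp_path": "",                   # "" for the root wrapper
#                 "params": {
#                     "flat_param_0": {
#                         "names": ["layer1.weight", "layer1.bias"],
#                         "shapes": [torch.Size([4, 4]), torch.Size([4])],
#                         "numels": [16, 4],
#                         "padding": 0,
#                     },
#                 },
#                 "no_broadcast_optim_state": False,
#                 "shared_param_info": [],
#             },
#         ],
#         "buffer_names": ["bn.running_mean", "bn.running_var"],
#     }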
@staticmethod
def consolidate_shard_weights(
shard_weights: List[Dict[str, torch.Tensor]],
shard_metadata: List[Dict[str, Any]],
with_module_buffers: bool = True,
strict: bool = True,
) -> Dict[str, torch.Tensor]:
"""
Given a list of weights and meta data associated to N shards, reconstruct
the weights of an equivalent consolidated (non-sharded) state dict.
Module parameters are consolidated using the shard metadata.
Module buffers are taken from shard 0: this assumes that module buffers
are either synchronized or that the shard 0 value is valid for all shards.
If this behavior is not correct for your module (for instance if buffers
need to be all-reduced instead), you can disable it with `with_module_buffers=False`.
This method is used to re-assemble checkpoints of shards without
having to instantiate FSDP wrappers with the world size (i.e. large
number of GPUs) originally used to save the shards.
Args:
shard_weights (List[Dict[str, torch.Tensor]]):
List of dictionaries that contains sharded weights from
each rank.
shard_metadata (List[Dict[str, Any]]):
List of dictionaries that contains metadata from each shard.
See `local_metadata_dict` above.
with_module_buffers (bool):
If shard 0's buffer should be returned in the consolidated
weight dict.
Default: True.
strict (bool):
If True, every key in the metadata must be present in the weights; if False, incomplete shard weights are allowed.
"""
if len(shard_weights) != len(shard_metadata) or not len(shard_weights):
raise ValueError("Require metadata for each shard and non-empty shards")
consolidated_weights = {}
original_world_size = len(shard_weights)
# For every FSDP instance.
for fsdp_obj_idx, metadata in enumerate(shard_metadata[0]["param_metadata"]):
fsdp_path = metadata["fsdp_path"]
params = metadata["params"]
# For every param owned by this FSDP instance, flattened or not.
for backing_param_name, v in params.items():
in_state_dict_key = ".".join([fsdp_path, backing_param_name]) if fsdp_path else backing_param_name
# Get full param back with pad removed.
if in_state_dict_key not in shard_weights[0] and (not strict):
continue
shards = []
for rank in range(original_world_size):
shard = shard_weights[rank][in_state_dict_key]
pad = shard_metadata[rank]["param_metadata"][fsdp_obj_idx]["params"][backing_param_name]["padding"]
shards.append(_unpad(shard, pad))
if metadata["no_broadcast_optim_state"]:
break
full_param = torch.cat(shards, dim=0)
# (Potentially), split the full param and create original params.
names, shapes, numels, _ = v.values()
assert sum(numels) == full_param.size(0)
for n, t, s in zip(names, full_param.split(numels), shapes):
out_state_dict_key = ".".join([fsdp_path, n]) if fsdp_path else n
consolidated_weights[out_state_dict_key] = t.view(s)
# copy shared parameters
for src_path, dest_path in metadata["shared_param_info"]:
consolidated_weights[dest_path] = consolidated_weights[src_path]
# Deal with the buffers, which are not parameters and are not sharded by FSDP
# and therefore are replicated among the different shards.
# We take the values of the first shard (this assumes that there is some form
# of synchronization between shards or that all shards buffers are equivalent).
if with_module_buffers:
for buffer_name in shard_metadata[0]["buffer_names"]:
if buffer_name not in shard_weights[0] and (not strict):
continue
consolidated_weights[buffer_name] = shard_weights[0][buffer_name]
return consolidated_weights
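# Illustrative usage (hypothetical file names): re-assembling a full state dict from
# per-rank checkpoints saved together with ``local_metadata_dict()``:
#
#     shard_weights = [torch.load(f"shard_{r}.pt")["weights"] for r in range(world_size)]
#     shard_metadata = [torch.load(f"shard_{r}.pt")["metadata"] for r in range(world_size)]
#     full_sd = FullyShardedDataParallel.consolidate_shard_weights(shard_weights, shard_metadata)
#     unwrapped_model.load_state_dict(full_sd)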
@torch.no_grad()
def _use_fp32_param_shard(self, params: Optional[List[Parameter]] = None) -> None:
"""Use FP32 shard for a list of params."""
if params is None:
params = self.params
for p in params:
p.data = p._fp32_shard
@torch.no_grad()
def _cast_fp32_param_shards_to_fp16(self, params: Optional[List[Parameter]] = None) -> None:
"""Cast FP32 param shard to FP16 for a list of params."""
if params is None:
params = self.params
with torch.cuda.stream(self._streams["fp32_to_fp16"]):
for p in params:
assert p._fp16_shard is not None
alloc_storage_(p._fp16_shard, size=p._fp32_shard.size())
p._fp16_shard.copy_(
# If move_params_to_cpu is True, this will be non-blocking
# because _fp32_shard is pinned, otherwise it's a no-op.
p._fp32_shard.to(p._fp16_shard.device, non_blocking=True)
)
p.data = p._fp16_shard
torch.cuda.current_stream().wait_stream(self._streams["fp32_to_fp16"])
@torch.no_grad()
def _free_fp16_param_shard(self, params: Optional[List[Parameter]] = None) -> None:
"""Free storage for FP16 shards for a list of params."""
if params is None:
params = self.params
current_stream = torch.cuda.current_stream()
for p in params:
if p._fp16_shard is not None:
# _fp16_shard is allocated in "fp32_to_fp16" stream, so we can't
# free it until the work in the current stream completes.
p._fp16_shard.record_stream(current_stream)
free_storage_(p._fp16_shard)
def assert_state(self, state: Union[TrainingState, List[TrainingState]]) -> None:
"""Assert we are in the given state."""
# Since assert can be turned off and this error checking
# is really important, we use explicit error checking
# and raise a ValueError if needed.
if isinstance(state, TrainingState):
state = [state]
if self.training_state not in state:
msg = f"expected to be in states {state} but current state " f"is {self.training_state}"
# In case we are failing in the context of autograd hook, asserting
# may not generate useful msg. So, let's print it to be sure.
if self.rank == 0:
print(f"Asserting FSDP instance is: {self}")
print(f"ERROR: {msg}")
traceback.print_stack()
raise ValueError(msg)
def _broadcast_pad_info_to_r0(self) -> List[List[List[int]]]:
"""Collect [x.numel_padded_per_param for x in get_fsdp_instances(self)] from each rank."""
world_pad_info: List[List[List[int]]] = [] # this will contain values from the whole world.
my_pad_info: List[List[int]] = [
cast(List[int], m.numel_padded_per_param) for m in get_fsdp_instances(self, skip_empty=True)
]
for rank in range(self.world_size):
if rank == self.rank:
pad_info = my_pad_info
else:
pad_info = [[0]] * len(my_pad_info)
dist.broadcast_object_list(pad_info, src=rank, group=self.process_group)
if self.rank == 0:
world_pad_info.append(pad_info)
return world_pad_info
def _gather_optim_state(
self, sd_state: Dict[int, Dict[str, Any]]
) -> Tuple[Dict[int, Dict[str, List]], Dict[int, Dict[str, List]]]:
"""For each value in state[i], if the value is a tensor, collect it from the world. Else use rank 0's entry."""
gathered_state: Dict[int, Dict[str, List[Any]]] = {}
singleton_state: Dict[int, Dict[str, List[Any]]] = {} # Dimensionless tensor
# Non-empty FSDP instance and sd_state item number must match.
fsdp_instances = get_fsdp_instances(self, skip_empty=True)
assert len(fsdp_instances) >= len(sd_state), f"{len(fsdp_instances)} vs. {len(sd_state)}"
for k, v in sd_state.items():
gathered_state[k] = {}
singleton_state[k] = {}
# For shared params, we are not flattening. We have only 1 non-shared
# param that has the optimizer state. So we handle it with the correct
# parameter list.
non_shared_params = fsdp_instances[k].non_shared_params()
# This is the world size and process group of the FSDP submodule which can be
# different than the parent module. For example, when FSDP is used with MoE.
non_shared_world_size = fsdp_instances[k].world_size
non_shared_process_group = fsdp_instances[k].process_group
assert (
len(non_shared_params) == 1
), f"Only flatten param or a single non-shared param is supported: len={len(non_shared_params)} FSDP={self}"
desired_buffer_size = non_shared_params[0]._full_param_padded.size()
buffer = None # for sharded tensors
singleton_buffer = None # for singleton tensors
for buffer_name, t in v.items():
if torch.is_tensor(t):
t = t.to(self.compute_device)
if ou.is_singleton_tensor(t):
if singleton_buffer is None:
singleton_buffer = list(t.new_zeros(non_shared_world_size).chunk(non_shared_world_size))
dist.all_gather(singleton_buffer, t, group=non_shared_process_group)
if self.rank == 0:
singleton_state[k][buffer_name] = [x.cpu().squeeze() for x in singleton_buffer]
assert ou.is_singleton_tensor(singleton_state[k][buffer_name][0])
elif torch.is_tensor(t):
if buffer is None:
buffer = list(t.new_zeros(*desired_buffer_size).chunk(non_shared_world_size))
dist.all_gather(buffer, t, group=non_shared_process_group)
if self.rank == 0:
gathered_state[k][buffer_name] = [x.cpu() for x in buffer]
elif self.rank == 0: # Add non tensor state
gathered_state[k][buffer_name] = [t]
return gathered_state, singleton_state
def gather_full_optim_state_dict(self, optim: torch.optim.Optimizer, **ignored: Dict) -> Optional[Dict[str, Any]]:
"""Return the last known global optimizer state. The returned state is compatible with Pytorch, in that the
sharded properties are not exposed. Multiple parameter groups are not yet supported.
This should be called only on the root FSDP instance.
Nested FSDP instances are supported as long as they have the same world_size as the parent or world_size=1.
Args:
optim (Optimizer): an optimizer instance for this FSDP rank. Its state_dict is
used in the consolidation. However, its state is not modified.
Returns:
* A dict with four entries (On rank zero, other workers return ``None``)
* state - a dict holding gathered optimization state, 1 entry per unflat parameter
* param_groups - a dict containing the 1 parameter group
* param_id_map - global (unflat) to local (flat) id mapping
* uncollected_local_ids - keys in the state dict that were not broadcast
"""
if not self.flatten_parameters:
raise NotImplementedError("optim state dict requires flatten_parameters=True")
self._lazy_init()
sd = self._remove_uncollectable_params_from_optim_state_dict(optim.state_dict())
assert {"param_groups", "state"}.issubset(
set(sd.keys())
), f'{set(sd.keys())} not a superset of {"param_groups", "state"}'
assert len(sd["param_groups"]) == 1, "Param groups are not supported"
# We use all_gather to consolidate OSD['state'] and broadcast to consolidate the other keys (like param_groups)
state, singleton_state = self._gather_optim_state(sd.pop("state"))
pad_info = self._broadcast_pad_info_to_r0()
if self.rank != 0:
return None
# Unify the shard states by concatenating tensors and unflattening params
new_state_dict = ou.build_unflat_state_dict(
get_fsdp_instances(self, skip_empty=True),
pad_info,
state,
singleton_state,
self.uncollected_opt_state,
sd,
)
self.uncollected_opt_state = {}
assert "uncollected_local_ids" in new_state_dict
return new_state_dict
def _remove_uncollectable_params_from_optim_state_dict(self, osd: Dict) -> Dict:
"""Return a new state dict filtering out the ones like MoE layers, which has
``no_broadcast_optim_state`` flag set.
We also make rooms for the optimizer state on rank 0.
Args:
osd (Dict):
Optimizer state dict from a rank. osd["state"] is what we mainly
care. Osd may contain other keys and values, we need to keep. Therefore,
we only change osd["state"] and not returning a new copy of osd
which is slower and may also lose extra fields, like "loss_scale"
used by fairseq.
"""
# In PyTorch version 1.12, Adam's `step` state changed from an int to a singleton
# tensor. We convert it back here. Otherwise, the step counter will be treated
# like a singleton tensor and comparison with original state dict would fail.
for _, bufs in osd["state"].items():
if "step" in bufs.keys():
assert type(bufs["step"]) is int or ou.is_singleton_tensor(bufs["step"])
if ou.is_singleton_tensor(bufs["step"]):
bufs["step"] = bufs["step"].item()
# Get uncollected_ids.
uncollected_ids = [i for i, m in enumerate(get_fsdp_instances(self)) if m.no_broadcast_optim_state]
new_state_value = {k: v for k, v in osd["state"].items() if k not in uncollected_ids}
if self.rank == 0:
# Save placeholders for uncollected opt state to keep the same unflat OSD format, and move them to CPU.
self.uncollected_opt_state = {
k: recursive_copy_to_device(v, non_blocking=False, device=torch.device("cpu"))
for k, v in osd["state"].items()
if k in uncollected_ids
}
osd["state"] = new_state_value
return osd
def get_shard_from_optim_state_dict(self, full_optim_state_dict: Dict[str, Any]) -> Dict[str, Any]:
"""Get the portion of the optimizer state dict associated with the shard
This can be used to get the right sharded optimizer state to be loaded
into the sharded optimizer for this FSDP rank.
..warning:: The input state dict is modified in-place assuming the original
full state isn't going to be used anymore. This is done so that
we don't need to copy extra state in it. It is caller's responsibility
to make copies if it doesn't want the original state dict modified.
Args:
full_optim_state_dict (dict):
consolidated optimizer state returned by ``gather_full_optim_state``,
or loaded from a checkpoint.
Returns:
(dict): a shard of the optimizer state.
"""
# Assert nesting is the same as it was at save time
instance_list = get_fsdp_instances(self, skip_empty=True)
ou.check_param_counts_before_sharding(full_optim_state_dict, len(instance_list))
ids_not_to_shard = copy.deepcopy(full_optim_state_dict["uncollected_local_ids"])
if self.flatten_parameters:
full_optim_state_dict = ou.flatten_optim_state_dict(full_optim_state_dict)
# Due to unused params, the length of the state can be anywhere between
# 0 and the number of params/FSDP instances.
assert len(full_optim_state_dict["state"]) <= len(
instance_list
), f'{len(full_optim_state_dict["state"])}, {len(instance_list)}'
# get the portion of dict associated with the shard, in place
for _id, s in full_optim_state_dict["state"].items():
for k, v in s.items():
if torch.is_tensor(v) and _id not in ids_not_to_shard:
v_shard, _ = self._get_shard(v)
elif isinstance(v, list) and ou.is_singleton_tensor(v[0]):
# if we are resuming on larger world size, take first entry
v_shard = v[0] if self.rank >= len(v) else v[self.rank]
assert ou.is_singleton_tensor(v_shard)
else:
v_shard = v # don't shard entries that are not tensors
full_optim_state_dict["state"][_id][k] = v_shard
return full_optim_state_dict
def _print_r0(self, msg: str, restart: bool = False) -> None:
"""Debugging utility to print memory usage stats nicely on rank 0"""
if restart:
self._tstart = time.time()
if self.rank == 0:
gb_denom = 1024**3
logging.info(
f"{msg} cur={torch.cuda.memory_allocated()/gb_denom: .4f} GB, max={torch.cuda.max_memory_allocated()/gb_denom: .4f} GB, t={time.time()-self._tstart: .1f}"
)
# Note: This property will be deprecated in an upcoming release in favor of `move_params_to_cpu`.
@property
def cpu_offload(self) -> bool:
return self.move_params_to_cpu
def p_assert(cond: Any, s: Any) -> None:
"""Used in backward context to make sure error is printed."""
if not cond:
print(s)
raise AssertionError
def _get_default_cuda_device(module: nn.Module) -> torch.device:
"""Try to infer CUDA device from module parameters."""
try:
compute_device = next(module.parameters()).device
if compute_device.type == "cuda":
return compute_device
except StopIteration:
pass
# Fall back to current CUDA device
return torch.device("cuda")
def cast_floats_to_right_precision(to_fp16: bool, no_grad: bool, *args: Any, **kwargs: Any) -> Tuple[Any, Any]:
"""
Cast floating point Tensors in *args or **kwargs to FP16 or FP32 if they are not.
We also retain the requires_grad flag so that casting doesn't affect the autograd graph.
"""
def fn_fp16(x: torch.Tensor) -> torch.Tensor:
if x.dtype is torch.float32:
y = x.half()
if x.is_leaf:
y.requires_grad = x.requires_grad
return y
return x
def fn_fp32(x: torch.Tensor) -> torch.Tensor:
if x.dtype is torch.float16:
y = x.float()
if x.is_leaf:
y.requires_grad = x.requires_grad
return y
return x
fn = fn_fp16 if to_fp16 else fn_fp32
context = torch.no_grad() if no_grad else contextlib.suppress()
with context: # type: ignore
return apply_to_tensors(fn, args), apply_to_tensors(fn, kwargs)
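# Illustrative usage (hypothetical tensors ``x`` and ``mask``):
#
#     new_args, new_kwargs = cast_floats_to_right_precision(True, True, x, mask=mask)
#     # float32 tensors in ``new_args``/``new_kwargs`` are now float16; requires_grad
#     # is preserved on leaf tensors, and non-float entries pass through unchanged.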
def free_storage_(data: torch.Tensor) -> None:
"""Free underlying storage of a Tensor."""
if data.storage().size() > 0:
# Since we're modifying the Tensor's Storage directly, make sure the Tensor
# is the sole occupant of the Storage.
assert data.storage_offset() == 0
data.storage().resize_(0)
@torch.no_grad()
def alloc_storage_(data: torch.Tensor, size: torch.Size) -> None:
"""Allocate storage for a tensor."""
if data.storage().size() == size.numel(): # no need to reallocate
return
assert data.storage().size() == 0
data.storage().resize_(size.numel())
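# Illustrative sketch: resizing the underlying Storage releases or restores memory
# while keeping the Tensor object (and any external references to it) alive:
#
#     t = torch.empty(1024)
#     free_storage_(t)                        # t.storage().size() == 0, memory released
#     alloc_storage_(t, torch.Size([1024]))   # capacity restored; contents are undefined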
def _post_state_dict_hook(
state_dict_on_rank_0_only: bool,
module: FullyShardedDataParallel,
state_dict: "OrderedDict[str, torch.Tensor]",
prefix: str,
*args: Any,
) -> "OrderedDict[str, torch.Tensor]":
# When state_dict_on_rank_0_only is ``True``, ``model.state_dict()`` will only
# return the full state dict on rank 0 and return an empty dict on non-zero ranks,
# which allows FullyShardedDataParallel to skip the GPU -> CPU copy on
# non-zero ranks altogether and prevents OOM.
if state_dict_on_rank_0_only and dist.get_rank() != 0:
state_dict.clear()
return state_dict
def apply_to_tensor(obj: torch.Tensor) -> torch.Tensor:
"""Apply needed operations on a tensor."""
assert isinstance(obj, torch.Tensor), f"Expect a tensor, got {type(obj)}"
# Already applied?
if getattr(obj, "_has_been_cloned", False):
return obj
if obj.device.type != module.state_dict_device.type:
# Move to right device. This is often used to save GPU memory.
obj = obj.to(device=module.state_dict_device)
elif module.training_state == TrainingState.SUMMON_FULL_PARAMS:
# If we are in a ``summon_full_params()`` context, we need to clone
# each tensor so that it does not get freed (in-place) when the context
# exits. At the same time, this hook can be called multiple times
# recursively, so we need to make sure that we only clone each tensor at
# most once. Thus we add an attribute on the tensor called "_has_been_cloned"
# which keeps track of tensors that are no longer at risk of being freed.
#
# "elif" because .to() clones the object too.
obj = obj.clone()
# Both .to() and .clone() copies a new object. So we set this flag.
obj._has_been_cloned = True
return obj
# State_dict is supposed to be a flat dict (not nested). The
# keys are encoded with hierarchy. Therefore, we can loop
# over the dict here. (See else case below for additional notes.)
for key in state_dict.keys():
# Skip keys without right prefix.
if not key.startswith(prefix):
continue
elif isinstance(state_dict[key], torch.Tensor):
state_dict[key] = apply_to_tensor(state_dict[key])
else:
# For example, EMA module from data2vec is a dict of tensors.
logging.warning(
f"Got an unexpected data type in state_dict" f"key={key} value_type={type(state_dict[key])}"
)
# Remove "_fsdp_wrapped_module." prefix
replace_by_prefix_(state_dict, prefix + "_fsdp_wrapped_module.", prefix)
return state_dict
@contextlib.contextmanager
def no_pre_load_state_dict_hook() -> Generator:
"""Disable the pre-load hook.
This is needed if we are loading a state_dict that was not produced by
a root FSDP instance.
"""
global _enable_pre_load_state_dict_hook
bak = _enable_pre_load_state_dict_hook
_enable_pre_load_state_dict_hook = False
yield
_enable_pre_load_state_dict_hook = bak
def _pre_load_state_dict_hook(
state_dict: Union[Dict[str, torch.Tensor], "OrderedDict[str, torch.Tensor]"], prefix: str, *args: Any
) -> None:
if _enable_pre_load_state_dict_hook:
replace_by_prefix_(state_dict, prefix, prefix + "_fsdp_wrapped_module.")
def _clean_path(path: str) -> str:
"""Remove FSDP related wrapper modules from a given state dict key str path."""
return ".".join([split for split in path.split(".") if split not in {"_fsdp_wrapped_module", "_fpw_module"}])
def _unpad(shard: torch.Tensor, pad: int) -> torch.Tensor:
if pad > 0:
shard = shard[:-pad]
return shard
########################################################################################
# Below are APIs used together with FSDP, but not directly part of FSDP.
########################################################################################
def auto_wrap_bn(
module: nn.Module,
single_rank_pg: bool = False,
process_group: Optional["ProcessGroup"] = None,
fsdp_config: Optional[Dict[str, Any]] = None,
wrap_it: bool = True,
assert_on_collision: bool = True,
) -> nn.Module:
"""
Auto wrap all BatchNorm (BN) instances with a safer FSDP, esp. when conversion
to SyncBatchNorm is used and the outer FSDP is flattening.
We put each BN in its own full-precision, unflattened, single-GPU-group FSDP. Note, SyncBNs still have
a group size == world_size. The input and output for BN are still FP16 in mixed precision mode.
See ``keep_batchnorm_fp32`` here: https://nvidia.github.io/apex/amp.html
This needs to be done at each rank, like models being wrapped by FSDP at each rank.
Args:
module (nn.Module):
The model (or part of the model) in which BN to be pre-wrapped.
single_rank_pg (bool):
If true, put BNs in a single-rank process group. Default False.
This might be needed for Apex sync BN support. Still under construction.
process_group (ProcessGroup):
Optional process group to be used.
fsdp_config (Dict):
Optional fsdp_config to be used.
wrap_it (bool):
Whether or not wrap the module after setting the config.
Default: True
assert_on_collision (bool):
Whether or not assert if a wrapper_config already exists on the module.
Default: True
Returns:
Processed module, where BNs are wrapped with a special FSDP instance.
"""
# Prepare a fsdp_config dict for BNs.
pg = process_group
if single_rank_pg:
# No sharding with this single member group.
my_rank = dist.get_rank()
pg = get_process_group_cached(ranks=[my_rank])
if fsdp_config is None:
fsdp_config = {
"process_group": pg,
"mixed_precision": False, # Keep the weights in FP32.
"flatten_parameters": False, # Do not flatten.
# Reshard==False is good for performance. When FSDP(checkpoint(FSDP(bn))) is used, this
# **must** be False because BN's FSDP wrapper's pre-backward callback isn't called
# within the checkpoint's outer backward when multiple forward passes are used.
"reshard_after_forward": False,
# No bucketing or small bucketing should be enough for BNs.
"bucket_cap_mb": 0,
# Setting this for SyncBatchNorm. This may have a performance impact. If
# SyncBatchNorm is used, this can be enabled by passing in the `fsdp_config` argument.
"force_input_to_fp32": False,
}
# Assign the config dict to BNs.
for m in module.modules():
if isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
if assert_on_collision:
assert not hasattr(
m, "wrapper_config"
), "Module shouldn't already have a wrapper_config. Is it tagged already by another policy?"
m.wrapper_config = fsdp_config
# Wrap it.
with (
enable_wrap(config_auto_wrap_policy, wrapper_cls=FullyShardedDataParallel) if wrap_it else contextlib.suppress()
):
return auto_wrap(module)
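# Illustrative usage (hypothetical model class): pre-wrap BN layers before wrapping
# the rest of the model with a mixed-precision FSDP:
#
#     model = MyModelWithBatchNorm()
#     model = auto_wrap_bn(model)    # BNs get their own full-precision FSDP wrappers
#     model = FullyShardedDataParallel(model, mixed_precision=True)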
def get_fsdp_instances(mod: nn.Module, skip_empty: bool = False) -> List[FullyShardedDataParallel]:
"""Return, a list, if any, of the module/submodule is wrapped by FSDP within another module.
Args:
mod (nn.Module):
A nn.Module module to be scanned.
skip_empty (bool):
If True, skip wrappers without any parameters.
Default: False
"""
ret: List[FullyShardedDataParallel] = []
for m in mod.modules(): # including mod itself
if isinstance(m, FullyShardedDataParallel):
ret.append(m)
if skip_empty:
ret = list(filter(lambda x: len(cast(FullyShardedDataParallel, x).non_shared_params()) > 0, ret))
return ret
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch.distributed as dist
from .fully_sharded_data_parallel import (
FullyShardedDataParallel,
TrainingState,
auto_wrap_bn,
get_fsdp_instances,
no_pre_load_state_dict_hook,
)
if dist.is_available():
# Prevent import failure if dist is not available. #1057
from .sharded_ddp import ShardedDataParallel
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""These functions are used by FullyShardedDataParallel to help consolidate and shard optimizer states."""
import copy
from itertools import groupby
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Tuple, cast
import torch
from fairscale.nn.misc import FlattenParamsWrapper
if TYPE_CHECKING:
from fairscale.nn.data_parallel import FullyShardedDataParallel
# This function helps shard a full optimizer state dict
def flatten_optim_state_dict(sd: Dict) -> Dict:
"""Shard a full optimizer state dict (called by FSDP.get_shard_from_optim_state_dict)"""
param_id_map = sd["param_id_map"]
# Get a set of local ids, like {0, None, 2}, then we remove None from it.
local_ids = set(param_id_map.values())
if None in local_ids:
local_ids.remove(None)
if sd["state"]:
new_state: Dict = {local_id: {} for local_id in local_ids}
singleton_state: Dict = copy.deepcopy(new_state)
else:
new_state = {}
non_tensor_state = {}
# Populate `new_state["state"]`. (Assuming sd is sorted)
for global_id, buffers in sd["state"].items():
local_id = param_id_map[global_id]
for buffer_name, p in buffers.items():
if is_singleton_tensor(p):
singleton_state[local_id][buffer_name] = p
elif torch.is_tensor(p):
if buffer_name not in new_state[local_id]:
new_state[local_id][buffer_name] = []
new_state[local_id][buffer_name].append(p.reshape(-1))
elif isinstance(p, list):
singleton_state[local_id][buffer_name] = p
else:
non_tensor_state[buffer_name] = p
# Now combine all tensors in each buffer using torch.cat().
for local_id, state in new_state.items():
for buffer_name, tensors in state.items():
new_state[local_id][buffer_name] = torch.cat(tensors)
new_state[local_id].update(non_tensor_state)
new_state[local_id].update(singleton_state[local_id])
# Now make a new param_groups copy and update it.
new_sd_pg = copy.deepcopy(sd["param_groups"])
# add pointers from the `params` dict.
for pg_id, _ in enumerate(sd["param_groups"]):
# The values() list may look like [0,0,None,None,2,2]. We use
# groupby to remove the duplicates and then count the length of
# the resulting iterator.
num_local_params = sum(1 for _ in groupby(param_id_map.values()))
new_sd_pg[pg_id]["params"] = list(range(num_local_params))
# update the original sd so that we don't lose extra keys, like loss_scale.
sd["state"] = new_state
sd["param_groups"] = new_sd_pg
# delete extra keys we have added to match the original state.
del sd["uncollected_local_ids"]
del sd["param_id_map"]
return sd
def check_param_counts_before_sharding(full_optim_state_dict: Dict, n_instances: int) -> None:
n_local_params_in_opt = len(set(full_optim_state_dict["param_id_map"].values()))
msg = (
f"Including itself, this model has {n_instances} nested instances. When the optimizer state was saved "
f"there were {n_local_params_in_opt}"
)
stateless = len(full_optim_state_dict["state"]) == 0
assert stateless or (n_instances == n_local_params_in_opt), msg
# All functions below here help saving the list of optimizer states, one from each rank
# build_unflat_state_dict is the interface used by FSDP
def _extract_non_tensor_state(combined_state: Dict[int, Dict[str, List]], param_id: int) -> Dict:
non_tensor_state = {}  # This state is like the `step` count in Adam, not a tensor, so we don't unpad or cat it.
for k, v in combined_state[param_id].items():
if torch.is_tensor(v[0]):
continue
elif len(set(v)) == 1:
non_tensor_state[k] = v[0]
else:
raise TypeError(f"Dont know how to consolidate optimizer param {k} with values {v}")
return non_tensor_state
def _unflatten_optim_state(
combined_state: Dict[int, Dict],
instance_list: List["FullyShardedDataParallel"],
world_pad_info: List[List[List[int]]],
singleton_state: Dict[int, Dict],
) -> Tuple[Dict[int, Dict], Dict[int, int]]:
"""Convert optimizer state for flattened parameters into original, unflattened ones.
Args:
combined_state: all-gathered state with tensors
instance_list: list of FSDP wrapper object instances
world_pad_info: [param_id][fsdp_instance_id][bytes_padded_per_rank]
singleton_state: all-gathered dimensionless tensors
Returns:
state: unflattened state dict
idx_mapping: a mapping from global ID to local ID
"""
# local ids are the keys in the current state (combined_state), (usually fewer)
# global ids will be the keys in the unflattened state
next_global_id = 0 # gets incremented
pad_info = {id: [s[id][0] for s in world_pad_info] for id in combined_state}
local_ids = [id for id in sorted(combined_state.keys())]
# non_tensor_state refers to entries in sd[state][param_id] that are not tensors, like "step".
# we check that these are identical across workers and then take the first
non_tensor_state = {id: _extract_non_tensor_state(combined_state, id) for id in combined_state}
# Local corresponds to flattened, global corresponds to unflattened.
# Casting needed only for mypy.
num_global_params: List[int] = []
for m in instance_list:
if m.flatten_parameters:
num_flatten = cast(int, m.num_params_managed)
num_global_params.append(num_flatten)
else:
num_global_params.append(len(m.non_shared_params()))
global_to_local_id = {}
for local_id, num_unflat in enumerate(num_global_params):
for _ in range(num_unflat):
# Some params could be unused, which means the optimizer
# hasn't created their state. Therefore, `local_id` obtained
# by enumerating the params above could be out of the range
# of keys in `combined_state` above. Here is an example:
#
# global local notes
# 0 0 FC1's weight, first flat buffer
# 1 0 FC1's bias, first flat buffer
# 2 None FC2's weight, no flat state
# 3 None FC2's bias, no flat state
# 4 2 FC3's weight, second flat buffer (but with id 2)
# 5 2 FC3's bias, second flat buffer (but with id 2)
global_to_local_id[next_global_id] = local_id if local_id in local_ids else None
next_global_id += 1
if not combined_state:
return {}, global_to_local_id
# copy non tensor state (like the "step" count) to all global entries
unflat_state = {i: copy.deepcopy(non_tensor_state[0]) for i in range(sum(num_global_params))}
# remove the global entries that don't have optim state because pytorch
# optimizer's state_dict() function returns a state_dict without the missing
# param, so we shouldn't have things like "1:{}" for missing params.
for g, l in global_to_local_id.items():
if l is None:
del unflat_state[g]
if non_tensor_state[0].keys() == combined_state[0].keys():
# Early return if there are no tensors in the state dict.
return unflat_state, global_to_local_id
local_to_global: Dict[int, List] = {i: [] for i in local_ids}
for g, l in global_to_local_id.items():
if l is not None:
local_to_global[l].append(g)
# loop over parameters in state.
# Tensor state will be padded, concatenated, and restored to original shape with FlattenParamsWrapper.get_views
# get_views returns multiple tensors, each of which is a new parameter with a new "global" id.
for local_id in local_ids:
# undo the work of shard_parameters
for k, v in combined_state[local_id].items():
if k in non_tensor_state[local_id]:
continue
assert isinstance(v, list), f"got {k}: {v} for {local_id}"
v_unpad = [t[:-np] if np > 0 else t for t, np in zip(v, pad_info[local_id])]
flat_buffer = torch.cat(v_unpad)
if instance_list[local_id].flatten_parameters:
# Unflatten. Casting needed only for mypy.
param_views: Iterator = cast(FlattenParamsWrapper, instance_list[local_id]).get_param_views(
[flat_buffer]
)
for global_id, param_view in zip(sorted(local_to_global[local_id]), param_views):
assert k not in unflat_state[global_id], f"already added {k} to {global_id} {local_id}"
unflat_state[global_id][k] = param_view
else:
# Copy non-flatten state directly.
assert len(local_to_global[local_id]) == 1, "Only support a single non-flatten parameter"
global_id = local_to_global[local_id][0]
unflat_state[global_id][k] = flat_buffer
unflat_state[global_id].update(singleton_state[local_id])
return unflat_state, global_to_local_id
def build_unflat_state_dict(
instance_list: List["FullyShardedDataParallel"],
world_pad_info: List[List[List[int]]],
state: Dict[int, Dict[str, List[torch.Tensor]]],
singleton_state: Dict[int, Dict[str, List[torch.Tensor]]],
uncollected_opt_state: Dict[int, Dict],
original_sd: Dict,
) -> Dict:
"""Build an unflattened optimizer state dict given a list of flattened optimizer state dicts
from each rank. This is only called on rank 0.
Args:
instance_list: list of FSDP wrapper objects
world_pad_info: [param_id][fsdp_instance_id][bytes_padded_per_rank]
state: all-gathered combined/local/flatten state_dict
singleton_state: all-gathered singleton_state (dimensionless tensors)
uncollected_opt_state: non-tensor and not-gathered state
original_sd: the original rank 0's sd
Returns:
dict: an unflattened, nonsharded optimizer state, as if FSDP was not there.
"""
assert all(len(s) == len(instance_list) for s in world_pad_info)
assert all(len(s[0]) == 1 for s in world_pad_info)
# Use uncollected_opt_state to update tensor_state, singleton_state
for local_id, v in uncollected_opt_state.items():
assert local_id not in state
state[local_id] = {buffer_name: [x] for buffer_name, x in v.items() if not is_singleton_tensor(x)}
singleton_state[local_id] = {buffer_name: [x] for buffer_name, x in v.items() if is_singleton_tensor(x)}
# local ids are in the current state, global_ids will be in returned state.
unflat_state, global_to_local_id = _unflatten_optim_state(state, instance_list, world_pad_info, singleton_state)
# Since there are no tensors in param_groups, deepcopy is fine.
param_groups = copy.deepcopy(original_sd["param_groups"])
# Casting needed only for mypy.
num_params = sum([cast(int, m.num_params_managed) for m in instance_list])
param_groups[0]["params"] = list(range(num_params))
    # Update the original sd so we don't lose extra state like loss_scale.
original_sd["state"] = dict(sorted(unflat_state.items())) # NOTE: this is probably already sorted
original_sd["param_id_map"] = global_to_local_id
original_sd["param_groups"] = param_groups
original_sd["uncollected_local_ids"] = list(uncollected_opt_state.keys())
return original_sd
def is_singleton_tensor(x: Any) -> bool:
"""Is x a dimensionless tensor?"""
return torch.is_tensor(x) and x.dim() == 0
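# A minimal sketch (not part of the original module): the hypothetical helper
# below just exercises is_singleton_tensor() on a few inputs.
def _demo_is_singleton_tensor() -> None:
    assert is_singleton_tensor(torch.tensor(3.0))        # 0-dim tensor -> True
    assert not is_singleton_tensor(torch.tensor([3.0]))  # 1-dim tensor -> False
    assert not is_singleton_tensor(3.0)                  # not a tensor -> False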
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
An nn.Module wrapper to go with a Sharded Optimizer in order to handle targeted gradient
reduction automatically.
"""
from collections import deque
import contextlib
import functools
from itertools import chain
import logging
from typing import Any, Callable, Deque, Dict, Generator, List, Optional, Union
import torch
from torch import nn
from torch.autograd import Variable
import torch.autograd.profiler as profiler
import torch.distributed as dist
from fairscale.internal.params import Workhandle, get_global_rank
from fairscale.nn.misc import GradBucket
from fairscale.optim import OSS
def _trainable(param: torch.Tensor) -> bool:
return param.requires_grad
class ShardedDataParallel(nn.Module):
"""Wrap the model, and reduce the gradients to the right rank during the backward pass.
- the partition is given by the sharded optimizer
- wrap the base model with a model which knows where to reduce each gradient
- add an autograd function which calls the model grad dispatch on the way back
Args:
module (nn.Module):
model to be wrapped
sharded_optimizer (OSS, or list of OSS):
the sharded optimizer(s) which will decide the gradient partitioning
Keyword Args:
process_group (group):
torch.distributed group (default: group.WORLD)
broadcast_buffers (bool):
Whether to additionally broadcast model buffers in between ranks at the beginning of each forward pass.
            Same setting as in PyTorch DDP; this is in addition to the broadcast and reduction of the model parameters.
sync_models_at_startup (bool):
Synchronize the models in between the ranks when starting up. Not needed if each rank has the same seed,
or the training restarts from a saved state
reduce_buffer_size (int):
            The max size of the buffer used to batch the small parameter tensors, in number of elements (default: 2**23, i.e. ~8M elements).
            This will impact the long-term memory consumption, because these buckets correspond to parameters which will not be sharded.
            Set to 0 to remove all bucketing; 1M to 8M is usually reasonable.
auto_refresh_trainable (bool):
            (default: True) Check whether the parameters' trainability (`requires_grad`) has changed and update both ShardedDDP
and OSS automatically if this is the case. If set to False, `refresh_trainable()` needs to be called anytime
a parameter is frozen or unfrozen.
reduce_fp16 (bool):
            Cast the grads to fp16 before reducing. Not needed if the model is already in fp16, but will probably improve performance
            for multi-node jobs using PyTorch AMP. The effect is similar to DDP's fp16_compress_hook_ and will also save some memory.
warn_on_trainable_params_changed (bool):
When set to False no warning will be logged whenever a parameter trainability change has been detected.
Default is True.
.. _fp16_compress_hook: https://pytorch.org/docs/1.8.0/ddp_comm_hooks.html?highlight=fp16#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
.. warning:
ShardedDDP implements gradient sharding, meaning that each rank only owns a unique shard of the model gradients
after the backward pass, in order to save memory and some communication bandwidth.
.. warning:
As a consequence of sharding:
* in case of gradient clipping, one has to use the `clip_grad_norm` exposed by
the `optimizer state sharding wrapper <fairscale.optim.OSS>`
* after loss.backward() (or equivalent) each rank will have `None` in place of some param.grad
* Pytorch and Apex AMP implementations will hang when used in conjunction with `ShardedDDP`.
One needs a `shard-aware grad scaler<ShardedGradScaler>`, which is proposed in `fairscale.optim.grad_scaler`,
            compatible with PyTorch AMP.
.. warning:
If `auto_refresh_trainable` is set to `True` (this is the default) then any trainability change in the model graph will be handled
automatically.
If `auto_refresh_trainable` is set to `False`, ShardedDDP will not refresh its assumptions with respect to trainable parameters
for every forward pass, in the hope of saving some time. If some parameters are frozen or unfrozen over time, please refresh
ShardedDDP assumptions by calling `refresh_trainable()` just after said change (before the next forward pass).
"""
def __init__(
self,
module: nn.Module,
sharded_optimizer: Union[OSS, List[OSS]],
process_group: Any = None,
broadcast_buffers: bool = True,
sync_models_at_startup: bool = True,
reduce_buffer_size: int = 2**23,
auto_refresh_trainable: bool = True,
reduce_fp16: bool = False,
warn_on_trainable_params_changed: bool = True,
):
super().__init__()
        # This field needs to be exposed to ensure interface parity with DDP
self.module = module
self._sharded_optimizers = [sharded_optimizer] if not isinstance(sharded_optimizer, list) else sharded_optimizer
self._enable_broadcast_buffers = broadcast_buffers
self._auto_refresh_trainable = auto_refresh_trainable
self._reduce_fp16 = reduce_fp16
if reduce_buffer_size > 0 and reduce_fp16:
self._reduce_fp16 = False
logging.warning(
"fp16 gradient reduction is not compatible with reduction buffers, which are requested. fp16 grad reduction is deactivated."
)
self._warn_on_trainable_params_changed = warn_on_trainable_params_changed
# Handle a no_sync() context which prevents the gradient synchronization,
# accumulate in place
self._should_accumulate_grads = False
self._accumulate_grads_flipped = False
# Communication related attributes
self._process_group = process_group if process_group is not None else dist.group.WORLD
self._backend = dist.get_backend(self._process_group)
self._world_size_scaling = 1.0 / dist.get_world_size(self._process_group) # > 0
self._reference_global_rank = get_global_rank(self._process_group, 0) # picking rank 0 as the reference
self._rank = dist.get_rank(self._process_group)
self._global_rank = get_global_rank(self._process_group, self._rank)
self._local_to_global_rank = [
get_global_rank(self._process_group, i) for i in range(dist.get_world_size(self._process_group))
]
# Expose some of the PytorchDDP attributes, some frameworks rely on them.
# See https://pytorch.org/docs/stable/_modules/torch/nn/parallel/distributed.html#DistributedDataParallel
# device_id related logic is not present, this is not handled
devices = {p.device for p in self.module.parameters()}
self.is_multi_device_module = len(devices) > 1
distinct_device_types = {p.device.type for p in self.module.parameters()}
assert len(distinct_device_types) == 1, (
"ShardedDataParallel's input module must be on "
"the same type of devices, but input module parameters are located on {} different device types."
).format(distinct_device_types)
self.device_type = list(distinct_device_types)[0]
        # Scaffolding to be able to reduce the grads during the BW pass
        # several optimizers can be present, each working on a separate parameter set which is spread across multiple ranks
        # - we build an iterator which goes through all the parameters involved globally
self._all_params = list(
chain(
*[
sum([sum(p, []) for p in optim._per_device_params.values()], [])
for optim in self._sharded_optimizers
]
)
)
self._trainable_params: List[torch.Tensor] = []
self._grad_to_be_reduced: List[bool] = []
self._trainable_param_to_rank: Dict[torch.Tensor, int] = {}
self._reference_trainable_mask = list(map(_trainable, self._all_params))
# - setup buckets and tensor views
model_size = sum([p.numel() for p in self.module.parameters()])
self._buffer_max_size = min(reduce_buffer_size, model_size)
if dist.get_world_size(self._process_group) == 1:
self._buffer_max_size = 0
logging.info("Training is not really distributed, single rank. Deactivating buckets")
logging.info(
"ShardedDDP bucket size: {:.2f}M parameters, model size {:.2f}M parameters".format(
self._buffer_max_size / 2**20, model_size / 2**20
)
)
self._use_buckets = self._buffer_max_size > 0
self._buckets: Dict[torch.device, Dict[int, GradBucket]] = {}
self._should_bucket_grad: List[bool] = []
self._bucket_list: List[GradBucket] = []
# - setup backward hooks which will be called by Torch's autograd in due time
self._grad_accs: List[Callable] = []
self._grad_hooks: List[Any] = []
self._manual_reduce: List[Callable] = []
# passing a handle to torch.nn.SyncBatchNorm layer
self._passing_sync_batchnorm_handle(self.module)
# Make sure that all ranks start with the same model
if sync_models_at_startup:
self._sync_params_and_buffers()
self._work_handles: Deque[Workhandle] = deque()
self._bucket_flush_callback_set = False
def forward(self, *inputs: Any, **kwargs: Any) -> Any:
"""
Module forward pass, handles any DDP-specific work in the background. Primes the
backward pass for gradient reduction to the proper ranks.
"""
with profiler.record_function("fairscale::sdp::forward"):
# Deferred initialization, or change detection
needs_setup = len(self._grad_hooks) == 0 and self.training
if self._auto_refresh_trainable:
needs_setup |= self._detect_train_change()
if needs_setup:
self.refresh_trainable()
if self._enable_broadcast_buffers and not self._should_accumulate_grads:
# NCCL communications are on a different stream, needs to be blocking
# for the subsequent FW to be correct
self.sync_buffers(blocking=True)
# Reset all the grad reduce and bucket state flags
self._clear_counters()
# Normal FW on the base model
return self.module(*inputs, **kwargs)
def to( # type: ignore
self,
device: Optional[torch.device],
dtype: Optional[torch.dtype] = None,
non_blocking: bool = False,
) -> "ShardedDataParallel":
"""
Moves and/or casts the parameters and buffers.
Its signature is similar to :meth:`torch.Tensor.to`, but only accepts
floating point desired :attr:`dtype` s. In addition, this method will
only cast the floating point parameters and buffers to :attr:`dtype`
(if given). The integral parameters and buffers will be moved
:attr:`device`, if that is given, but with dtypes unchanged. When
:attr:`non_blocking` is set, it tries to convert/move asynchronously
with respect to the host if possible, e.g., moving CPU Tensors with
pinned memory to CUDA devices.
.. note::
This method modifies the module in-place.
.. warning:
Device changes are not supported, and this will raise an exception. The issue in that case is not
really ShardedDDP, but OSS which will not be aware of the device change, and whose buffers will be
in a broken state.
Arguments:
device (:class:`torch.device`): the desired device of the parameters and buffers in this module.
dtype (:class:`torch.dtype`): the desired floating point type of the floating point parameters and buffers.
non_blocking (bool): make it an asynchronous call.
Returns:
Module: self.
"""
if isinstance(device, str):
device = torch.device(device)
assert (
device is None
or len(self._buckets.keys()) == 0
or device.type in map(lambda x: x.type, self._buckets.keys())
), "Changing devices is not supported, because this would break OSSs state"
assert (
len(self._buckets.keys()) < 2
), "Several devices specified to begin with, incompatible with setting a single device here"
self.module.to(device=device, dtype=dtype, non_blocking=non_blocking)
# Re-build the buckets, hooks, etc..
self.refresh_trainable()
def refresh_trainable(self) -> None:
"""If the module trainability has changed, update all the assumptions"""
# Make sure that this is not done while gradients are waiting to be reduced (if no_sync context for instance)
if functools.reduce(lambda x, y: x or y, self._grad_to_be_reduced, False):
logging.warning(
"Grads waiting to be reduced. If this is on purpose (grad accumulation), please use a no_sync() context"
)
with profiler.record_function("fairscale::sdp::refresh_trainable"):
self._trainable_params = list(filter(lambda x: x.requires_grad, self._all_params))
self._trainable_params.sort(key=lambda x: x.numel())
self._trainable_param_to_rank = {}
for optim in self._sharded_optimizers:
# OSS may need to change the communication pattern
optim.refresh_trainable()
# Update ShardedDDP given the new partitions
for (
device_per_rank_params
) in optim._per_device_params.values(): # all the params on this device (inc all ranks)
for device_params in device_per_rank_params:
for param in filter(lambda x: x.requires_grad, device_params):
self._trainable_param_to_rank[param] = optim._param_to_rank[param]
self._setup_bucket_strategy()
self._setup_backward_hooks()
def reduce(self) -> None:
"""
        This does not *need* to be called; the gradient reduction is done automatically during the BW pass.
        Use this method to reduce the gradients manually.
"""
# Check that this is not a mistake, if there's nothing to reduce
assert functools.reduce(
lambda x, y: x or y, self._grad_to_be_reduced, False
), "No grads waiting to be reduced, maybe that this was called twice or there was no BW pass ?"
# Trigger all the current BW hooks
self._bucket_flush_callback_set = True # no need to flush in the end, we own the callback execution
_ = list(map(lambda x: x(), self._manual_reduce))
# Make sure that all the futures are consumed
self._consume_work_handles()
@torch.no_grad()
def sync_buffers(self, blocking: bool = False) -> None:
"""
Sync all the param buffers in between ranks (including for instance batch norm statistics).
Arguments:
blocking (bool): wait for the operation to conclude.
"""
with profiler.record_function("fairscale::sdp::sync_buffers"):
work_handles = []
for buffer in self.module.buffers(recurse=True):
work_handles.append(
dist.broadcast(buffer.data, self._reference_global_rank, self._process_group, async_op=True)
)
if blocking and work_handles:
if self._backend != dist.Backend.NCCL:
_ = list(filter(lambda x: x.wait(), work_handles))
else:
work_handles[-1].wait()
def zero_grad(self, set_to_none: bool = False) -> None:
r"""Sets gradients of all model parameters to zero. See similar function
under :class:`torch.optim.Optimizer` for more context.
Arguments:
set_to_none (bool): instead of setting to zero, set the grads to None.
See :meth:`torch.optim.Optimizer.zero_grad` for details.
"""
for index, trainable_param in enumerate(self._trainable_params):
if set_to_none and (len(self._should_bucket_grad) == 0 or not self._should_bucket_grad[index]):
trainable_param.grad = None
elif trainable_param.grad is not None:
trainable_param.grad.zero_()
for bucket in self._bucket_list:
bucket.zero()
def __getattr__(self, name: str) -> Any:
"""Forward missing attributes to wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.module, name)
@contextlib.contextmanager
def no_sync(self) -> Generator:
"""A context manager to disable gradient synchronization."""
old_should_accumulate_grads = self._should_accumulate_grads
self._should_accumulate_grads = True
yield
self._accumulate_grads_flipped = self._should_accumulate_grads != old_should_accumulate_grads
self._should_accumulate_grads = old_should_accumulate_grads
@torch.no_grad()
def _clear_counters(self) -> None:
"""Reset all the grad reduce and call counters"""
if self.training:
self._grad_to_be_reduced = [True for _ in self._trainable_params]
self._bucket_flush_callback_set = False
if self._use_buckets:
for bucket in self._bucket_list:
bucket.reset_checked_in()
if not self._should_accumulate_grads:
self._accumulate_grads_flipped = False
def _get_reduce_fn(self, index: int, param: torch.Tensor, dst_rank: int) -> Callable:
"""
Two possible backward hooks for a given parameter: either directly reduce to the appropriate rank,
or contribute to a bucket and reduce when the bucket is full.
Either way a delayed action is necessary and is passed as a callback.
"""
if not self._use_buckets or not self._should_bucket_grad[index]:
# Direct reduction
@torch.no_grad()
def reduce(*_: Any) -> None:
# Skip gradient reduction, do not alter status flags
if not self._should_accumulate_grads and self._grad_to_be_reduced[index]:
assert param.grad is not None, "Reducing gradients during backward pass, cannot be None"
if not self._bucket_flush_callback_set:
Variable._execution_engine.queue_callback(self._flush_reduce_calls)
self._bucket_flush_callback_set = True
# Make sure that this is not fired twice
self._grad_to_be_reduced[index] = False
param.grad.mul_(self._world_size_scaling)
if self._reduce_fp16:
param.grad.data = param.grad.data.half()
# Future work includes clearing up the buffer if possible
def cleanup() -> None:
if dst_rank != self._global_rank:
param.grad = None
else:
assert param.grad is not None
param.grad.data = param.grad.data.to(dtype=param.dtype)
# Async reduce for this buffer, log the future
self._work_handles.append(
Workhandle(
handle=dist.reduce(
tensor=param.grad.data,
dst=self._local_to_global_rank[dst_rank],
group=self._process_group,
async_op=True,
),
callback=cleanup,
)
)
# Opportunistically try to empty the queue, free memory
self._try_consume_work_handle()
else:
@torch.no_grad()
def reduce(*_: Any) -> None:
# Skip gradient reduction, do not alter status flags
if not self._should_accumulate_grads and self._grad_to_be_reduced[index]:
assert param.grad is not None, "Reducing gradients during backward pass, cannot be None"
if not self._bucket_flush_callback_set:
Variable._execution_engine.queue_callback(self._flush_reduce_calls)
self._bucket_flush_callback_set = True
# Make sure that this is not fired twice
self._grad_to_be_reduced[index] = False
bucket = self._buckets[param.device][dst_rank]
bucket.params_checked_in += 1
if bucket.all_checked_in:
assert bucket.buffer is not None
# Normalize the bucket in one go
bucket.buffer.mul_(self._world_size_scaling)
# Reduce the bucket
bucket.sent = True
self._work_handles.append(
Workhandle(
handle=dist.reduce(
tensor=bucket.buffer,
dst=bucket.destination,
group=self._process_group,
async_op=True,
),
callback=None,
)
)
# Opportunistically try to empty the queue
self._try_consume_work_handle()
return reduce
def _setup_backward_hooks(self) -> None:
"""
Attach a reduce function to each grad-requiring parameter.
This makes the gradient reduction automatic whenever there's a backward pass
"""
with profiler.record_function("fairscale::sdp::setup_backward_hooks"):
# Detach possible pre-existing hooks
while len(self._grad_hooks) > 0:
self._grad_hooks.pop().remove()
# Go through the parameters, attach the hook
self._grad_accs = []
self._manual_reduce = []
if not self.training:
return
for index, param in enumerate(self._trainable_params):
if param.grad is not None and param.grad.requires_grad:
raise RuntimeError("ShardedDataParallel only works with gradients that don't require grad")
p_tmp = param.expand_as(param)
# See https://pytorch.org/docs/stable/tensors.html?highlight=grad_fn
# We're interested in the tensors which will be tracked by Autograd
                # Some tensors can have gradients independent of the inputs (a pooling layer, for instance);
                # these do not need to be synced
if p_tmp.grad_fn is not None:
# Register the hook to the next function in line,
# so that the hook is fired when this grad has properly been computed
# (by default the hook with Pytorch is a pre-grad, not a post-grad)
grad_acc = p_tmp.grad_fn.next_functions[0][0]
dst_rank = self._trainable_param_to_rank[param]
reduce_function = self._get_reduce_fn(index, param, dst_rank)
self._grad_hooks.append(grad_acc.register_hook(reduce_function))
self._grad_accs.append(grad_acc) # keep this hook in scope
self._manual_reduce.append(reduce_function)
@torch.no_grad()
def _sync_params_and_buffers(self) -> None:
"""
Sync the complete model states in between the ranks
"""
work_handles = []
for t in self.module.state_dict().values():
work_handles.append(
dist.broadcast(t, src=self._reference_global_rank, group=self._process_group, async_op=True)
)
# gloo does not guarantee inlining like NCCL, wait for all requests
if self._backend != dist.Backend.NCCL:
_ = list(filter(lambda x: x.wait(), work_handles))
elif work_handles:
work_handles[-1].wait()
def _passing_sync_batchnorm_handle(self, module: nn.Module) -> None:
"""
Passes handle required for ``torch.nn.modules.SyncBatchNorm``.
        Adapted from ``torch.nn.parallel.DistributedDataParallel``.
"""
for layer in module.modules():
if isinstance(layer, torch.nn.modules.SyncBatchNorm) and hasattr(layer, "_specify_ddp_gpu_num"):
assert self.device_type != "cpu", "SyncBatchNorm layers only work with GPU modules"
# device_id logic has not been handled, assume single-process single-device
                # SyncBatchNorm only supports DDP with single-process single-device anyway.
                # This function was removed from PyTorch in 1.9.
layer._specify_ddp_gpu_num(1) # type: ignore
def _setup_bucket_strategy(self) -> None:
"""Devise a bucketing strategy on a per-rank ownership level.
These buckets will not be sharded, since the gradients would be re-allocated during the backward in that case.
        This method can be slow for big models, but it is not typically called often (not for every forward pass, for instance)
"""
with profiler.record_function("fairscale::sdp::setup_buckets"):
if not self._use_buckets:
return
# Devise the bucketing strategy. Parameters are already sorted, in that:
# - these are only the trainable parameters, so they should produce grads
# - they are sorted by increasing size
self._buckets = {}
self._should_bucket_grad = [False for _ in self._trainable_params]
for i, param in enumerate(self._trainable_params):
device = param.device
dst_rank = self._trainable_param_to_rank[param]
if param.device not in self._buckets.keys():
self._buckets[param.device] = {}
if dst_rank not in self._buckets[param.device].keys():
self._buckets[param.device][dst_rank] = GradBucket(
self._buffer_max_size,
dtype=param.dtype,
device=param.device,
destination=self._local_to_global_rank[dst_rank],
)
# Criteria to decide whether this parameter is to be bucketed or not:
# - enough room in the bucket
if self._buckets[device][dst_rank].can_add_grad_view(param):
self._buckets[device][dst_rank].add_grad(param)
self._should_bucket_grad[i] = True
self._bucket_list = list(chain(*[self._buckets[device].values() for device in self._buckets.keys()]))
# Resize the buckets to remove lost space in the end
for bucket in self._bucket_list:
bucket.shrink()
def _consume_work_handles(self) -> None:
"""Consume all the futures which are tied to this optimizer's buckets.
        We start from the first/oldest ones, since they are the most likely to be ready and non-blocking
"""
while len(self._work_handles) > 0:
work_handle = self._work_handles.popleft()
work_handle.handle.wait()
if work_handle.callback is not None:
work_handle.callback()
def _try_consume_work_handle(self) -> None:
"""Try to consume the oldest future. This is non blocking, if not ready we'll pass"""
while len(self._work_handles) > 0 and self._work_handles[0].handle.is_completed():
work_handle = self._work_handles.popleft()
if work_handle.callback is not None:
work_handle.callback()
def _flush_reduce_calls(self) -> None:
for bucket in self._bucket_list:
if not bucket.sent:
assert bucket.buffer is not None
# Normalize the bucket in one go
bucket.buffer.mul_(self._world_size_scaling)
# Reduce the bucket
self._work_handles.append(
Workhandle(
handle=dist.reduce(
tensor=bucket.buffer,
dst=bucket.destination,
group=self._process_group,
async_op=True,
),
callback=None,
)
)
bucket.sent = True
self._consume_work_handles()
def _detect_train_change(self) -> bool:
with profiler.record_function("fairscale::sdp::detect_train_changes"):
# Optionally check whether the trainable parameters have changed
trainable_mask = list(map(_trainable, self._all_params))
# - one or more parameters trainability changed
trainability_changed = trainable_mask != self._reference_trainable_mask
# - the whole model is not trainable but we still have grad hooks
trainability_changed |= not self.training and len(self._grad_hooks) > 0
if self._warn_on_trainable_params_changed and trainability_changed:
logging.warning(
"ShardedDDP detected that the trainable params changed, "
"either because of eval/train mode or parameter freezing/unfreeze."
)
self._reference_trainable_mask = trainable_mask
return trainability_changed
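# A minimal end-to-end sketch (illustration only, not part of the module) of the
# usual OSS + ShardedDataParallel pairing. It assumes torch.distributed has been
# initialized beforehand; the model, data and hyper-parameters are placeholders.
def _demo_sharded_ddp_usage() -> None:
    model = torch.nn.Linear(10, 10)
    optimizer = OSS(model.parameters(), optim=torch.optim.SGD, lr=1e-3)
    ddp_model = ShardedDataParallel(model, optimizer)

    loss = ddp_model(torch.randn(8, 10)).sum()
    loss.backward()   # gradients are reduced to their owning ranks here
    optimizer.step()  # each rank only updates the shard it owns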
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multithreading in pipeline parallelism."""
from contextlib import contextmanager
from queue import Queue
import sys
from threading import Thread
from types import TracebackType
from typing import TYPE_CHECKING, Callable, Dict, Generator, List, Optional, Tuple, Type, Union, cast
import torch
from .microbatch import Batch
from .stream import AbstractStream, use_device, use_stream
__all__: List[str] = []
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
# Queue is generic only in stubs.
# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime
if TYPE_CHECKING:
InQueue = Queue[Optional["Task"]]
OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]]
else:
InQueue = Queue
OutQueue = Queue
class Task:
"""A task represents how to compute a micro-batch on a partition.
It consists of two parts: :meth:`compute` and :meth:`finalize`.
:meth:`compute` should be executed in worker threads concurrently.
    :meth:`finalize` should be executed after the worker threads have finished
    executing :meth:`compute`.
    :meth:`compute` benefits from worker threads because the user code it runs
    issues several CUDA API calls, and in PyTorch parallel CUDA API calls are
    not serialized through the GIL, so more than one CUDA API call can be
    issued at the same time.
"""
def __init__(
self,
stream: Optional[AbstractStream],
*,
compute: Callable[[], Batch],
finalize: Optional[Callable[[Batch], None]],
) -> None:
self.stream = stream
self._compute = compute
self._finalize = finalize
self._grad_enabled = torch.is_grad_enabled()
def compute(self) -> Batch:
with use_stream(self.stream), torch.set_grad_enabled(self._grad_enabled):
return self._compute()
def finalize(self, batch: Batch) -> None:
if self._finalize is None:
return
with use_stream(self.stream), torch.set_grad_enabled(self._grad_enabled):
self._finalize(batch)
def worker(in_queue: InQueue, out_queue: OutQueue, device: torch.device) -> None:
"""The main loop of a worker thread."""
with use_device(device):
while True:
task = in_queue.get()
if task is None:
break
try:
batch = task.compute()
except Exception:
exc_info = cast(ExcInfo, sys.exc_info())
out_queue.put((False, exc_info))
continue
out_queue.put((True, (task, batch)))
done = (False, None)
out_queue.put(done)
def create_workers(
devices: List[torch.device],
) -> Tuple[List[InQueue], List[OutQueue]]:
"""Spawns worker threads. A worker thread is bound to a device."""
in_queues: List[InQueue] = []
out_queues: List[OutQueue] = []
# Spawn workers.
workers: Dict[torch.device, Tuple[InQueue, OutQueue]] = {}
def normalize_device(device: torch.device) -> torch.device:
if device.type == "cuda" and device.index is None:
return torch.device("cuda", index=torch.cuda.current_device())
if device.type == "cpu" and device.index is not None:
return torch.device("cpu")
return device
for device in devices:
device = normalize_device(device)
try:
in_queue, out_queue = workers[device]
except KeyError:
in_queue = Queue()
out_queue = Queue()
workers[device] = (in_queue, out_queue)
t = Thread(
target=worker,
args=(in_queue, out_queue, device),
daemon=True,
)
t.start()
in_queues.append(in_queue)
out_queues.append(out_queue)
return (in_queues, out_queues)
def join_workers(in_queues: List[InQueue], out_queues: List[OutQueue]) -> None:
# Close workers.
for in_queue in set(in_queues):
in_queue.put(None)
# Join running workers.
running = set(out_queues)
while running:
out_queue = running.pop()
ok, payload = out_queue.get()
done = (False, None)
if (ok, payload) == done:
continue
running.add(out_queue)
@contextmanager
def spawn_workers(
devices: List[torch.device],
) -> Generator[Tuple[List[InQueue], List[OutQueue]], None, None]:
try:
(in_queues, out_queues) = create_workers(devices)
yield (in_queues, out_queues)
finally:
join_workers(in_queues, out_queues)
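# A minimal sketch (not part of the original file): pushing a single Task through
# a CPU worker thread via spawn_workers(). It relies on Batch, imported at the
# top of this file; the demo function name is hypothetical.
def _demo_spawn_workers() -> None:
    device = torch.device("cpu")
    with spawn_workers([device]) as (in_queues, out_queues):
        task = Task(None, compute=lambda: Batch(torch.ones(2), 0), finalize=None)
        in_queues[0].put(task)
        ok, payload = out_queues[0].get()
        assert ok, payload
        finished_task, batch = payload
        finished_task.finalize(batch)  # no-op here since finalize=None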
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides phony for arbitrary dependency in a autograd graph."""
from typing import Dict, List, Tuple
import torch
from torch import Tensor
from .stream import default_stream, use_stream
__all__: List[str] = []
_phonies: Dict[Tuple[torch.device, bool], Tensor] = {}
def get_phony(device: torch.device, *, requires_grad: bool) -> Tensor:
"""Gets a phony. Phony is tensor without space. It is useful to make
arbitrary dependency in a autograd graph because it doesn't require any
gradient accumulation.
.. note::
Phonies for each device are cached. If an autograd function gets a phony
internally, the phony must be detached to be returned. Otherwise, the
autograd engine will mutate the cached phony in-place::
class Phonify(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
phony = get_phony(input.device, requires_grad=False)
return phony.detach() # detach() is necessary.
"""
key = (device, requires_grad)
try:
phony = _phonies[key]
except KeyError:
with use_stream(default_stream(device)):
# Creating phony with size 1 instead of zero, since currently
# tensorpipe does not work with tensors of size zero.
phony = torch.empty(1, device=device, requires_grad=requires_grad)
_phonies[key] = phony
return phony
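# A small sketch (not part of the original file): phonies are cached per
# (device, requires_grad) key, so repeated calls return the very same tensor.
def _demo_phony_cache() -> None:
    device = torch.device("cpu")
    a = get_phony(device, requires_grad=False)
    b = get_phony(device, requires_grad=False)
    assert a is b                                          # same cache entry
    assert get_phony(device, requires_grad=True) is not a  # different cache key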
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkpointing with preceding recomputation.
PyTorch already provides the official checkpointing utilities in
:mod:`torch.utils.checkpoint`. The official checkpointing combines
recomputation and recursive backpropagation into one autograd function named
``CheckpointFunction``. Hence, the recomputation can be started only when the
gradients arrive at the function. In Pipe, the recomputation needs to precede
the gradient arrival to minimize the GPU idle time.
We solve this problem by introducing separate autograd functions named
:class:`Recompute` and :class:`Checkpoint`. Each function represents
recomputation and recursive backpropagation, respectively. With this pair of
functions, we can manipulate the control flow from the point of view of both
the autograd engine and CUDA.
Specifically, we place CUDA stream synchronization between :class:`Recompute`
and :class:`Checkpoint` to delay only :class:`Checkpoint` until the gradient is
copied entirely.
"""
from collections import deque
from contextlib import contextmanager
import threading
from typing import TYPE_CHECKING, Deque, Generator, List, Optional, Tuple, Union
import torch
from torch import ByteTensor, Tensor
import torch.autograd
from .dependency import fork, join
from .microbatch import Batch
from .phony import get_phony
__all__ = ["is_checkpointing", "is_recomputing"]
Tensors = Tuple[Tensor, ...]
TensorOrTensors = Union[Tensor, Tensors]
# Types for shared memory between Checkpoint and Recompute.
Recomputed = Tuple[TensorOrTensors, Tensors] # (output, input_leaf)
RNGStates = Tuple[ByteTensor, Optional[ByteTensor]] # (cpu_rng_state, gpu_rng_state)
if TYPE_CHECKING:
from typing_extensions import Protocol
else:
Protocol = object
# Protocol with __call__ instead of Callable can be used as an attribute type.
# See: https://github.com/python/mypy/issues/708#issuecomment-561735949
class Function(Protocol):
def __call__(self, input: TensorOrTensors) -> TensorOrTensors:
...
class Checkpointing:
"""Generates a pair of :class:`Checkpoint` and :class:`Recompute`."""
def __init__(self, function: Function, batch: Batch) -> None:
self.function = function
self.batch = batch
# Shared memory between Checkpoint and Recompute. 1-length deque is
# used for mutability and length limitation.
self.recomputed: Deque[Recomputed] = deque(maxlen=1)
self.rng_states: Deque[RNGStates] = deque(maxlen=1)
def checkpoint(self) -> Batch:
"""Returns a batch applied by :class:`Checkpoint`."""
input_atomic = self.batch.atomic
input = tuple(self.batch)
# Use a phony which requires grad to ensure that Checkpoint can be
# tracked by the autograd engine even when none of the input tensors
# require grad.
phony = get_phony(self.batch[0].device, requires_grad=True)
output = Checkpoint.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *input)
# Gradients are only supported for float Tensors.
if isinstance(output, tuple):
output = tuple([x if x.is_floating_point() else x.detach() for x in output])
return Batch(output, self.batch.index)
def recompute(self, batch: Batch) -> None:
"""Applies :class:`Recompute` to the batch in place."""
input_atomic = self.batch.atomic
input = tuple(self.batch)
        # batch[0] always requires grad, because it has been passed through
        # checkpoint with a phony requiring grad.
batch[0], phony = fork(batch[0])
phony = Recompute.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *input)
batch[0] = join(batch[0], phony)
class ThreadLocal(threading.local):
def __init__(self) -> None:
self.is_checkpointing = False
self.is_recomputing = False
thread_local = ThreadLocal()
@contextmanager
def enable_checkpointing() -> Generator[None, None, None]:
"""Makes :func:`is_checkpointing` return :data:`True` within a context."""
orig = thread_local.is_checkpointing
thread_local.is_checkpointing = True
try:
yield
finally:
thread_local.is_checkpointing = orig
@contextmanager
def enable_recomputing() -> Generator[None, None, None]:
"""Makes :func:`is_recomputing` return :data:`True` within a context."""
orig = thread_local.is_recomputing
thread_local.is_recomputing = True
try:
yield
finally:
thread_local.is_recomputing = orig
def is_checkpointing() -> bool:
"""Whether the current forward propagation is under checkpointing.
Returns:
bool: :data:`True` if it's under checkpointing.
"""
return thread_local.is_checkpointing
def is_recomputing() -> bool:
"""Whether the current forward propagation is under checkpoint
recomputation. Use this to prevent duplicated side-effects at forward
propagation::
class Counter(nn.Module):
def __init__(self):
super().__init__()
self.counter = 0
def forward(self, input):
if not is_recomputing():
self.counter += 1
return input
Returns:
bool: :data:`True` if it's under checkpoint recomputation.
.. seealso:: :ref:`Detecting Recomputation`
"""
return thread_local.is_recomputing
class Context:
"""The common interface between the :class:`Checkpoint` and
:class:`Recompute` context.
"""
recomputed: Deque[Recomputed]
rng_states: Deque[RNGStates]
function: Function
input_atomic: bool
saved_tensors: Tuple[Tensor, ...]
def save_for_backward(self, *tensors: Tensor) -> None: # pragma: no cover
pass
def save_rng_states(
device: torch.device,
rng_states: Deque[RNGStates],
) -> None:
""":meth:`Checkpoint.forward` captures the current PyTorch's random number
generator states at CPU and GPU to reuse in :meth:`Recompute.backward`.
.. seealso:: :ref:`Referential Transparency`
"""
cpu_rng_state = torch.get_rng_state()
gpu_rng_state: Optional[ByteTensor]
if device.type == "cuda":
gpu_rng_state = torch.cuda.get_rng_state(device)
else:
gpu_rng_state = None
rng_states.clear()
rng_states.append((cpu_rng_state, gpu_rng_state))
@contextmanager
def restore_rng_states(
device: torch.device,
rng_states: Deque[RNGStates],
) -> Generator[None, None, None]:
""":meth:`Recompute.backward` restores the random number generator states
captured by :func:`save_rng_states` within its context.
.. seealso:: :ref:`Referential Transparency`
"""
cpu_rng_state, gpu_rng_state = rng_states[0]
gpu_devices: List[torch.device] = []
if device.type == "cuda":
gpu_devices.append(device)
with torch.random.fork_rng(gpu_devices):
torch.set_rng_state(cpu_rng_state)
if gpu_rng_state is not None:
torch.cuda.set_rng_state(gpu_rng_state, device)
yield
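# A minimal sketch (not part of the original file): save_rng_states() followed by
# restore_rng_states() replays the same random draw, which is what makes the
# recomputation referentially transparent. CPU-only for simplicity.
def _demo_rng_round_trip() -> None:
    rng_states: Deque[RNGStates] = deque(maxlen=1)
    device = torch.device("cpu")
    save_rng_states(device, rng_states)
    first = torch.rand(3)
    with restore_rng_states(device, rng_states):
        replayed = torch.rand(3)
    assert torch.equal(first, replayed)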
class Checkpoint(torch.autograd.Function):
@staticmethod
# type: ignore
def forward(
ctx: Context,
phony: Tensor,
recomputed: Deque[Recomputed],
rng_states: Deque[RNGStates],
function: Function,
input_atomic: bool,
*input: Tensor,
) -> TensorOrTensors:
ctx.recomputed = recomputed
ctx.rng_states = rng_states
save_rng_states(input[0].device, ctx.rng_states)
ctx.function = function
ctx.input_atomic = input_atomic
ctx.save_for_backward(*input)
with torch.no_grad(), enable_checkpointing():
output = function(input[0] if input_atomic else input)
return output
@staticmethod
def backward(
ctx: Context,
*grad_output: Tensor,
) -> Tuple[Optional[Tensor], ...]: # pragma: no cover
output, input_leaf = ctx.recomputed.pop()
if isinstance(output, tuple):
tensors = output
else:
tensors = (output,)
if any(y.requires_grad for y in tensors):
tensors = tuple([x for x in tensors if x.requires_grad])
torch.autograd.backward(tensors, grad_output)
grad_input: List[Optional[Tensor]] = [None, None, None, None, None]
grad_input.extend(x.grad for x in input_leaf)
return tuple(grad_input)
class Recompute(torch.autograd.Function):
@staticmethod
# type: ignore
def forward(
ctx: Context,
phony: Tensor,
recomputed: Deque[Recomputed],
rng_states: Deque[RNGStates],
function: Function,
input_atomic: bool,
*input: Tensor,
) -> Tensor:
ctx.recomputed = recomputed
ctx.rng_states = rng_states
ctx.function = function
ctx.input_atomic = input_atomic
ctx.save_for_backward(*input)
return phony
@staticmethod
def backward(ctx: Context, *grad_output: Tensor) -> Tuple[None, ...]: # pragma: no cover
input = ctx.saved_tensors
input_leaf = tuple(x.detach().requires_grad_(x.requires_grad) for x in input)
with restore_rng_states(input[0].device, ctx.rng_states):
with torch.enable_grad(), enable_recomputing():
output = ctx.function(input_leaf[0] if ctx.input_atomic else input_leaf)
ctx.recomputed.append((output, input_leaf))
grad_input: List[None] = [None, None, None, None, None]
grad_input.extend(None for _ in ctx.saved_tensors)
return tuple(grad_input)
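# A minimal sketch (not part of the original file) pairing checkpoint() and
# recompute() by hand, the way a pipeline partition would. The layer, shapes and
# demo function name are arbitrary placeholders.
def _demo_checkpointing() -> None:
    layer = torch.nn.Linear(4, 4)
    batch = Batch(torch.randn(2, 4, requires_grad=True), 0)
    chk = Checkpointing(layer, batch)
    out = chk.checkpoint()   # forward pass without storing activations
    chk.recompute(out)       # schedule recomputation on the backward path
    out[0].sum().backward()  # Recompute runs first, then Checkpoint's backward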
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tracks the running statistics per mini-batch instead of micro-batch."""
from typing import Optional, TypeVar, cast
import torch
from torch import Tensor, nn
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
from .checkpoint import is_recomputing
__all__ = ["DeferredBatchNorm"]
TModule = TypeVar("TModule", bound=nn.Module)
class DeferredBatchNorm(_BatchNorm):
"""A BatchNorm layer tracks multiple micro-batches to update running
statistics per mini-batch.
"""
sum: Tensor
sum_squares: Tensor
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: Optional[float] = 0.1,
affine: bool = True,
chunks: int = 1,
) -> None:
super().__init__(num_features, eps, momentum, affine, track_running_stats=True)
self.register_buffer("sum", torch.zeros_like(self.running_mean))
self.register_buffer("sum_squares", torch.zeros_like(self.running_var))
self.counter = 0
self.tracked = 0
self.chunks = chunks
def _check_input_dim(self, input: Tensor) -> None:
# It's the typical _check_input_dim() implementation in PyTorch.
if input.dim() <= 2:
raise ValueError("expected at least 3D input (got %dD input)" % input.dim())
def _track(self, input: Tensor) -> bool:
"""Tracks statistics of a micro-batch."""
# Dimensions except channel. For example, (0, 2, 3) is for BatchNorm2d.
dim = [0]
dim.extend(range(2, input.dim()))
with torch.no_grad():
self.sum += input.sum(dim)
self.sum_squares += (input**2).sum(dim)
size = input.size().numel() // input.size(1)
self.counter += size
self.tracked += 1
return self.tracked == self.chunks
def _commit(self) -> None:
"""Updates the running statistics of a mini-batch."""
exponential_average_factor = 0.0
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
mean = self.sum / self.counter
var = self.sum_squares / self.counter - mean**2
# Calculate the exponential moving average here.
m = exponential_average_factor
self.running_mean *= 1 - m
self.running_mean += mean * m
self.running_var *= 1 - m
self.running_var += var * m
self.sum.zero_()
self.sum_squares.zero_()
self.counter = 0
self.tracked = 0
def forward(self, input: Tensor) -> Tensor: # type: ignore
if not self.training:
# Don't train parameters on the evaluation mode.
return F.batch_norm(
input,
running_mean=self.running_mean,
running_var=self.running_var,
weight=self.weight,
bias=self.bias,
training=False,
momentum=0.0,
eps=self.eps,
)
if not is_recomputing():
# Track a micro-batch on the training mode
# but not under a recomputation.
tracked_enough = self._track(input)
# Update the running statistics for a mini-batch
# if it has tracked enough micro-batches.
if tracked_enough:
self._commit()
# Normalize a micro-batch and train the parameters.
return F.batch_norm(
input,
running_mean=None,
running_var=None,
weight=self.weight,
bias=self.bias,
training=True,
momentum=0.0,
eps=self.eps,
)
@classmethod
def convert_deferred_batch_norm(cls, module: TModule, chunks: int = 1) -> TModule:
"""Converts a :class:`nn.BatchNorm` or underlying
:class:`nn.BatchNorm`s into :class:`DeferredBatchNorm`::
from torchvision.models.resnet import resnet101
from torchpipe.batchnorm import DeferredBatchNorm
model = resnet101()
model = DeferredBatchNorm.convert_deferred_batch_norm(model)
"""
if isinstance(module, DeferredBatchNorm) and module.chunks is chunks:
return cast(TModule, module)
module_output: nn.Module = module
if isinstance(module, _BatchNorm) and module.track_running_stats:
module_output = DeferredBatchNorm(module.num_features, module.eps, module.momentum, module.affine, chunks)
if module.affine:
module_output.register_parameter("weight", module.weight)
module_output.register_parameter("bias", module.bias)
module_output.register_buffer("running_mean", module.running_mean)
module_output.register_buffer("running_var", module.running_var)
module_output.register_buffer("num_batches_tracked", module.num_batches_tracked)
for name, child in module.named_children():
module_output.add_module(name, cls.convert_deferred_batch_norm(child, chunks))
return cast(TModule, module_output)
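# A minimal sketch (not part of the original file): with chunks=2 the running
# statistics are only committed once two micro-batches have been tracked.
def _demo_deferred_batch_norm() -> None:
    dbn = DeferredBatchNorm.convert_deferred_batch_norm(nn.BatchNorm2d(3), chunks=2)
    x = torch.randn(4, 3, 8, 8)
    dbn(x)  # first micro-batch: statistics tracked, running stats untouched
    dbn(x)  # second micro-batch: _commit() updates running_mean / running_var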
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Pipe implementation in PyTorch."""
from .async_pipe import AsyncPipe
from .checkpoint import is_checkpointing, is_recomputing
from .pipe import Pipe
from .rpc import PipeRPCWrapper
from .types import LazyModule
__all__ = ["Pipe", "is_checkpointing", "is_recomputing", "LazyModule"]
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Autograd functions for stream-aware CUDA copy. It is used to overlap copy
and computation on the same GPU.
"""
from collections import deque
from typing import Deque, List, Optional, Tuple
import torch
from torch import Tensor
from .stream import AbstractStream, current_stream, get_device, record_stream, use_stream, wait_stream
__all__: List[str] = []
Tensors = Tuple[Tensor, ...]
# Common interface between :class:`Copy` and :class:`Wait`.
class Context:
prev_stream: AbstractStream
next_stream: AbstractStream
class Copy(torch.autograd.Function):
"""Copies tensors on specific streams."""
@staticmethod
# type: ignore
def forward(
ctx: Context,
prev_stream: AbstractStream,
next_stream: AbstractStream,
*input: Tensor,
) -> Tensors:
ctx.prev_stream = prev_stream
ctx.next_stream = next_stream
output = []
output_stream = current_stream(get_device(next_stream))
with use_stream(prev_stream), use_stream(next_stream):
for x in input:
y = x.to(get_device(next_stream), non_blocking=True)
output.append(y)
# 'prev_stream' is not where 'x' has been allocated.
record_stream(x, prev_stream)
# 'y' has been allocated on 'next_stream'.
# It might be used on the current stream captured as 'output_stream'.
record_stream(y, output_stream)
return tuple(output)
@staticmethod
def backward(
ctx: Context,
*grad_output: Tensor,
) -> Tuple[Optional[Tensor], ...]:
prev_stream = ctx.prev_stream
next_stream = ctx.next_stream
grad_input: Deque[Tensor] = deque(maxlen=len(grad_output))
input_stream = current_stream(get_device(prev_stream))
with use_stream(prev_stream), use_stream(next_stream):
for x in reversed(grad_output):
y = x.to(get_device(prev_stream), non_blocking=True)
grad_input.appendleft(y)
# 'next_stream' is not where 'x' has been allocated.
record_stream(x, next_stream)
# 'y' has been allocated on 'prev_stream'.
# It might be used on the current stream captured as 'input_stream'.
record_stream(y, input_stream)
grad_streams: Tuple[Optional[Tensor], ...] = (None, None)
return grad_streams + tuple(grad_input)
class Wait(torch.autograd.Function):
"""Synchronizes a stream to another stream.
Place it just before you want to start an operation on the next stream,
provided that all operations on the previous stream are done.
"""
@staticmethod
# type: ignore
def forward(
ctx: Context,
prev_stream: AbstractStream,
next_stream: AbstractStream,
*input: Tensor,
) -> Tensors:
ctx.prev_stream = prev_stream
ctx.next_stream = next_stream
wait_stream(next_stream, prev_stream)
return tuple(x.detach() for x in input)
@staticmethod
def backward(
ctx: Context,
*grad_input: Tensor,
) -> Tuple[Optional[Tensor], ...]:
prev_stream = ctx.prev_stream
next_stream = ctx.next_stream
wait_stream(prev_stream, next_stream)
grad_streams: Tuple[Optional[Tensor], ...] = (None, None)
return grad_streams + grad_input
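# A minimal sketch (not part of the original file): with the CPU placeholder
# stream, Wait degrades to a no-op synchronization but still passes gradients
# straight through in backward. Importing CPUStream here is an assumption about
# the sibling stream module.
def _demo_wait_on_cpu() -> None:
    from .stream import CPUStream

    x = torch.randn(3, requires_grad=True)
    (y,) = Wait.apply(CPUStream, CPUStream, x)
    y.sum().backward()
    assert x.grad is not None and torch.equal(x.grad, torch.ones(3))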
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Any, Callable, List, Optional, Tuple, Union
import torch
from torch import Tensor, nn
ACTIVATIONS_GRADS_QUEUE = 0
SKIP_TENSOR_QUEUE = 1
PORTAL_QUEUE = 2
EVENT_LOOP_QUEUE = 3
EVENT_LOOP_ACTIVATIONS_QUEUE = 4
EVENT_LOOP_GRADIENTS_QUEUE = 5
MESSAGE_GENERATION_START = 6
MessageGeneration = MESSAGE_GENERATION_START
Tensors = Tuple[Tensor, ...]
TensorOrTensors = Union[Tensor, Tensors]
InputDevice = Union[None, int, str, torch.device]
class LazyModule:
def __init__(self, function: Callable[[], nn.Module]):
self.function = function
def __call__(self) -> nn.Module:
return self.function()
@dataclass(init=False)
class PipeMessage:
src: int
dest: int
queue_name: int
args: Any
tensors: Tensors
tensor_shapes: List[torch.Size]
tensor_dtypes: List[torch.dtype]
tag: int = 0
def __init__(
self,
src: int,
dest: int,
queue_name: int,
args: Any = None,
tensors: Optional[Tensors] = None,
tensor_count: int = 0,
):
self.src = src
self.dest = dest
self.queue_name = queue_name
self.args = args
self.tensors = tensors or tuple()
self.tensor_shapes = []
self.tensor_dtypes = []
global MessageGeneration
self.tag = MessageGeneration
if tensors is None:
MessageGeneration += tensor_count
else:
MessageGeneration += len(self.tensors)
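# A minimal sketch (not part of the original file): each message reserves a tag
# range sized by its tensor count through the module-level MessageGeneration.
def _demo_message_tags() -> None:
    m1 = PipeMessage(0, 1, queue_name=EVENT_LOOP_QUEUE, tensors=(torch.ones(2),))
    m2 = PipeMessage(0, 1, queue_name=EVENT_LOOP_QUEUE, tensor_count=3)
    assert m2.tag == m1.tag + 1  # m1 reserved exactly one tag for its single tensor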
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for eliminating boilerplate code to handle abstract streams with
CPU device.
"""
from contextlib import contextmanager
from typing import Generator, List, Optional, Union, cast
import torch
__all__: List[str] = []
class CPUStreamType:
pass
# The placeholder used in place of a CUDA stream for the CPU device.
CPUStream = CPUStreamType()
# It represents both CUDA streams and the CPU stream.
AbstractStream = Union[torch.cuda.Stream, CPUStreamType]
def new_stream(device: torch.device) -> AbstractStream:
"""Creates a new stream for either CPU or CUDA device."""
if device.type != "cuda":
return CPUStream
return torch.cuda.Stream(device)
def current_stream(device: torch.device) -> AbstractStream:
""":func:`torch.cuda.current_stream` for either CPU or CUDA device."""
if device.type != "cuda":
return CPUStream
return torch.cuda.current_stream(device)
def default_stream(device: torch.device) -> AbstractStream:
""":func:`torch.cuda.default_stream` for either CPU or CUDA device."""
if device.type != "cuda":
return CPUStream
return torch.cuda.default_stream(device)
@contextmanager
def use_device(device: torch.device) -> Generator[None, None, None]:
""":func:`torch.cuda.device` for either CPU or CUDA device."""
if device.type != "cuda":
yield
return
with torch.cuda.device(device):
yield
@contextmanager
def use_stream(stream: Optional[AbstractStream]) -> Generator[None, None, None]:
""":func:`torch.cuda.stream` for either CPU or CUDA stream."""
if not stream:
yield
return
if not is_cuda(stream):
yield
return
with torch.cuda.stream(as_cuda(stream)):
yield
def get_device(stream: AbstractStream) -> torch.device:
"""Gets the device from CPU or CUDA stream."""
if is_cuda(stream):
return as_cuda(stream).device
return torch.device("cpu")
def wait_stream(source: AbstractStream, target: AbstractStream) -> None:
""":meth:`torch.cuda.Stream.wait_stream` for either CPU or CUDA stream. It
makes the source stream wait until the target stream completes work queued.
"""
if is_cuda(target):
if is_cuda(source):
            # A CUDA stream waits for another CUDA stream.
as_cuda(source).wait_stream(as_cuda(target))
else:
            # The CPU waits for a CUDA stream.
as_cuda(target).synchronize()
# If the target is CPU, synchronization is not required.
def record_stream(tensor: torch.Tensor, stream: AbstractStream) -> None:
""":meth:`torch.Tensor.record_stream` for either CPU or CUDA stream."""
if is_cuda(stream):
# NOTE(sublee): record_stream() on a shifted view tensor throws
# RuntimeError in PyTorch 1.1.0, and does nothing in 1.2.0. To safely
# protect the tensor against unexpected reallocation, here we use a
        # temporary tensor associated with the same storage without shifting as
# a workaround.
#
# Issue: https://github.com/pytorch/pytorch/issues/27366
#
tensor = tensor.new_empty([0]).set_(tensor.storage())
tensor.record_stream(as_cuda(stream))
def is_cuda(stream: Optional[AbstractStream]) -> bool:
"""Returns ``True`` if the given stream is a valid CUDA stream."""
return stream is not CPUStream
def as_cuda(stream: AbstractStream) -> torch.cuda.Stream:
"""Casts the given stream as :class:`torch.cuda.Stream`."""
return cast(torch.cuda.Stream, stream)
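# A minimal sketch (not part of the original file): on a CPU device every helper
# collapses to the CPUStream placeholder, so the API can be exercised without CUDA.
def _demo_cpu_streams() -> None:
    device = torch.device("cpu")
    stream = new_stream(device)
    assert stream is CPUStream and not is_cuda(stream)
    assert get_device(stream) == device
    with use_device(device), use_stream(stream):  # both are no-ops on CPU
        pass
    wait_stream(current_stream(device), stream)   # no-op as well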
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from threading import Event, Lock, Thread
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import torch
from torch import nn
from torch.distributed import ProcessGroup, rpc
from torch.distributed.distributed_c10d import _get_global_rank
from fairscale.nn.model_parallel.initialize import get_pipeline_parallel_group
from .async_pipe import AsyncPipe
from .types import EVENT_LOOP_QUEUE, PipeMessage, TensorOrTensors
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
PipeModel: AsyncPipe
PipeResult: TensorOrTensors
SizeOrSizes = Union[torch.Size, List[torch.Size]]
DtypeOrDtypes = Union[torch.dtype, List[torch.dtype]]
def set_device_based_on_group(group: ProcessGroup) -> None:
# torch.cuda.set_device(group.rank() % torch.cuda.device_count())
torch.cuda.set_device(torch.distributed.get_rank() % torch.cuda.device_count())
def get_shapes(tensor: TensorOrTensors) -> SizeOrSizes:
if isinstance(tensor, torch.Tensor):
return tensor.shape
else:
return [t.shape for t in tensor]
def get_dtype(tensor: TensorOrTensors) -> DtypeOrDtypes:
if isinstance(tensor, torch.Tensor):
return tensor.dtype
else:
return [t.dtype for t in tensor]
def get_global_ranks_from_group(group: ProcessGroup) -> List[int]:
return [_get_global_rank(group, r) for r in range(group.size())]
class PipeBackRedirect(torch.autograd.Function):
@staticmethod
# type: ignore
def forward(ctx, inputs, dest, event, message, transport, futures):
ctx.dest = dest
ctx.event = event
ctx.message = message
ctx.transport = transport
ctx.futures = futures
return inputs
@staticmethod
# type: ignore
def backward(ctx, *grad):
ctx.message.tensors = tuple(grad)
ctx.transport.send_message(ctx.message, sync=False, skip_header=True)
ctx.event.set()
# torch.futures.wait_all(ctx.futures)
return (None, None, None, None, None, None)
def callback_with_model(callback: Callable[[Any, AsyncPipe], None], ctx: Any) -> None:
try:
group = get_pipeline_parallel_group() # FIXME(tom) handle dynamic group
set_device_based_on_group(group)
with PipeModel.lock:
callback(ctx, PipeModel)
except Exception as e:
print(f"callback_with_model got {e}")
class PipeRPCWrapper(nn.Module):
"""A wrapper for Pipe to control the entire pipeline from a single process.
The typical use case has rank 0 construct the `PipeRPCWrapper` and run the
training loop as normal, while all other ranks call
`torch.distributed.rpc.shutdown()`.
To run code on each worker, e.g. to run the optimizer, use `foreach_worker`.
"""
def __init__(self, *args: Any, **kwargs: Any):
super().__init__()
self.group = cast(ProcessGroup, kwargs.get("group")) or get_pipeline_parallel_group()
assert self.group.rank() == 0
self.lock = Lock()
if True:
assert (
self.group == get_pipeline_parallel_group()
), "Can't pickle groups, so group must be `get_pipeline_parallel_group()`"
kwargs["group"] = None
else:
kwargs["group"] = self.group
kwargs["input_device"] = torch.device("cuda", torch.cuda.current_device())
self.model = AsyncPipe(*args, **kwargs)
self.worker_map = kwargs["worker_map"]
self._foreach_worker(self._register_remote_model, args=(args, kwargs))
self.model.cuda()
def _get_rpc_name(self, rank: int) -> str:
return self.worker_map[_get_global_rank(self.group, rank)]
def _foreach_worker(self, callback: Callable, args: Any = None) -> None:
futures = [rpc.rpc_async(self._get_rpc_name(rank), callback, args=args) for rank in range(1, self.group.size())]
futures = [f.wait() for f in futures]
def foreach_worker(
self, callback: Callable[[Any, AsyncPipe], None], ctx: Any = None, *, include_self: bool = False
) -> None:
"""Call `callback` on each worker with the `ctx` and model local to that
worker. e.g.
def register_optimizer(ctx, model):
args, kwargs = ctx
model.optimizer = torch.optim.SGD(model.parameters(), *args, **kwargs)
pipe_model = PipeRPCWrapper( ... )
pipe_model.foreach_worker(
register_optimizer,
([], {"lr" : 0.01, "momentum" : 0.9})
)
"""
self._foreach_worker(callback_with_model, args=(callback, ctx))
if include_self:
with self.model.lock:
callback(ctx, self.model)
def forward(self, tensor: TensorOrTensors) -> TensorOrTensors: # type: ignore
shape = get_shapes(tensor)
dtype = get_dtype(tensor)
if isinstance(tensor, torch.Tensor):
num_tensors = 1
else:
num_tensors = len(tensor)
futures = [
rpc.rpc_async(self._get_rpc_name(rank), self._model_forward, args=(self.model.training, shape, dtype))
for rank in range(1, self.group.size())
]
if self.model.final_stage:
return self.model(tensor)
else:
event = Event()
t = Thread(target=self._model_forward_first_stage, args=(tensor, event))
t.start()
shape, dtype = futures.pop().wait()
dest_rank = self.group.size() - 1
dest = self._get_rpc_name(dest_rank)
dest_global_rank = _get_global_rank(self.group, dest_rank)
src_global_rank = torch.distributed.get_rank()
queue = EVENT_LOOP_QUEUE
activations = PipeMessage(dest_global_rank, src_global_rank, queue_name=queue, tensor_count=num_tensors)
grads = PipeMessage(src_global_rank, dest_global_rank, queue_name=queue, tensor_count=num_tensors)
back_fut = rpc.rpc_async(
dest, self._send_result_and_do_backwards, args=(self.model.training, activations, grads)
)
futures.append(back_fut)
result = self._recv_result(self.model, shape, dtype, activations)
if isinstance(result, torch.Tensor):
result.requires_grad_()
else:
for r in result:
r.requires_grad_()
assert self.model.pipeline
return PipeBackRedirect.apply(
result, dest_global_rank, event, grads, self.model.pipeline.transport, futures
)
@property
def final_stage(self) -> bool:
return self.model.final_stage
@staticmethod
def _recv_result(
model: AsyncPipe, shapes: SizeOrSizes, dtypes: DtypeOrDtypes, message: PipeMessage
) -> TensorOrTensors:
group = get_pipeline_parallel_group()
set_device_based_on_group(group)
assert model.pipeline
transport = model.pipeline.transport
if isinstance(shapes, torch.Size):
message.tensor_shapes = [cast(torch.Size, shapes)]
message.tensor_dtypes = [cast(torch.dtype, dtypes)]
message = transport.recv_message_tensors(message)
return message.tensors[0]
else:
message.tensor_shapes = cast(List[torch.Size], shapes)
message.tensor_dtypes = cast(List[torch.dtype], dtypes)
message = transport.recv_message_tensors(message)
return message.tensors
@staticmethod
def _send_result_and_do_backwards(training: bool, message: PipeMessage, grads_message: PipeMessage) -> None:
group = get_pipeline_parallel_group()
set_device_based_on_group(group)
result = PipeResult
model = PipeModel
if isinstance(result, torch.Tensor):
result = tuple([result])
message.tensors = tuple(result)
assert model.pipeline
transport = model.pipeline.transport
transport.send_message(message, sync=False, skip_header=True)
if training:
grads_message.tensor_shapes = [r.shape for r in result]
grads_message.tensor_dtypes = [r.dtype for r in result]
grads_message = transport.recv_message_tensors(grads_message)
with model.lock:
torch.autograd.backward(result, grads_message.tensors, retain_graph=True)
@staticmethod
def _register_remote_model(args: List[Any], kwargs: Dict[str, Any]) -> None:
group = get_pipeline_parallel_group() # FIXME(tom) handle dynamic group
set_device_based_on_group(group)
kwargs["group"] = group
kwargs["input_device"] = torch.device("cuda", torch.cuda.current_device())
model = AsyncPipe(*args, **kwargs)
model.cuda()
global PipeModel
PipeModel = model
@staticmethod
def _model_forward(
training: bool, shape: torch.Size, dtype: torch.dtype
) -> Optional[Tuple[SizeOrSizes, DtypeOrDtypes]]:
try:
if isinstance(shape, torch.Size):
tensor = torch.empty(shape, dtype=dtype)
else:
tensor = tuple([torch.empty(s, dtype=d) for s, d in zip(shape, dtype)])
model = PipeModel
assert model.group
set_device_based_on_group(model.group)
model.train(training)
result = model(tensor)
if model.final_stage:
global PipeResult
PipeResult = result
return (get_shapes(result), get_dtype(result))
return None
except Exception as e:
print(f"_model_forward got {e}")
raise e
def _model_forward_first_stage(self, tensor: TensorOrTensors, event: Event) -> None:
try:
assert self.model.group
set_device_based_on_group(self.model.group)
self.model(tensor, event=event)
except Exception as e:
print(f"_model_forward got {e}")
raise e
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from dataclasses import dataclass
from enum import Enum, auto
from threading import Event
from typing import Dict, Iterable, List, Optional, Tuple
import torch
from torch import Tensor, nn
from torch.autograd.profiler import record_function
from torch.distributed import ProcessGroup
from fairscale.nn.model_parallel import get_pipeline_parallel_ranks
from .checkpoint import Checkpointing
from .messages import Transport
from .microbatch import Batch
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .types import EVENT_LOOP_QUEUE, PipeMessage, TensorOrTensors, Tensors
from .worker import Task
def create_task(
checkpoint_stop: int,
chunk_id: int,
part_id: int,
batch: Batch,
partition: nn.Sequential,
skip_trackers: List[SkipTrackerThroughPotals],
) -> Task:
# Determine whether checkpointing or not.
if chunk_id < checkpoint_stop:
def function(
input: TensorOrTensors,
partition: nn.Sequential = partition,
skip_tracker: SkipTrackerThroughPotals = skip_trackers[chunk_id],
chunk_id: int = chunk_id,
part_id: int = part_id,
) -> TensorOrTensors:
with use_skip_tracker(skip_tracker), record_function("chunk%d-part%d" % (chunk_id, part_id)):
ret = partition(input)
# We do a check here because the backtrace from the checkpoint backward code path
# is very hard to make sense of. It is much easier to check earlier, at this point.
assert type(ret) is not list, "Only Tensor or Tuple of Tensor output is supported"
return ret
chk = Checkpointing(function, batch)
task = Task(None, compute=chk.checkpoint, finalize=chk.recompute)
del function, chk # TODO(tom) maybe remove
else:
def compute(
batch: Batch = batch,
partition: nn.Sequential = partition,
skip_tracker: SkipTrackerThroughPotals = skip_trackers[chunk_id],
chunk_id: int = chunk_id,
part_id: int = part_id,
) -> Batch:
with use_skip_tracker(skip_tracker), record_function("chunk%d-part%d" % (chunk_id, part_id)):
return batch.call(partition)
task = Task(None, compute=compute, finalize=None)
del compute # TODO(tom) maybe remove
return task
@dataclass(frozen=True)
class Location:
stage: int
index: int
def __repr__(self) -> str:
return f"{self.stage}@{self.index}"
@dataclass(frozen=True)
class Invocation:
order: int
this: Location
source: Optional[Location]
dest: Optional[Location]
Activations = Dict[int, Dict[int, Dict[int, Batch]]]
Invocations = Dict[int, Invocation]
@dataclass(frozen=True)
class TailBackwardContext:
activations: Activations
invocations: Invocations
count_per_order: Dict[int, int]
expected_gradients: int
class ModuleWrapper:
def __init__(self, module: nn.Sequential, location: Location, invocations: Optional[List[Invocation]] = None):
self.module: nn.Sequential = module
self.location: Location = location
self.invocations: List[Invocation] = invocations or []
def __repr__(self) -> str:
return f"{self.location}:\n" + "\n".join(map(str, self.invocations)) + "\n\t" + str(self.module)
def __len__(self) -> int:
return len(self.module)
def __iter__(self) -> Iterable:
yield from self.module
class AsyncMessageType(Enum):
Activations = auto()
Gradients = auto()
@dataclass(frozen=True)
class AsyncMessageBody:
message_type: AsyncMessageType
microbatch_index: int
source: Location
dest: Location
order: int
class AutogradWithoutActivations(torch.autograd.Function):
"""A helper class to add another edge in the autograd graph which allows us
to delete the potentially large activations and still perform a backward
pass. Returns a phony tensor which is connected to the graph."""
@staticmethod
# type: ignore
def forward(ctx, *x):
return torch.tensor(1.0)
@staticmethod
# type: ignore
def backward(ctx, grad):
assert ctx.grad_from_pipeline is not None
return ctx.grad_from_pipeline
class AsyncRecvOperator(torch.autograd.Function):
"""Receive activations to the previous pipeline stage"""
@staticmethod
# type: ignore
def forward(ctx, phony: Tensor, transport: Transport, message: PipeMessage, queue_name: int) -> Tensors:
ctx.transport = transport
ctx.index = message.args.microbatch_index
ctx.queue_name = queue_name
result = transport.recv_message_tensors(message)
ctx.args = result.args
def maybe_requires_grad(t: Tensor) -> Tensor:
if t.dtype.is_floating_point:
return t.requires_grad_()
return t
return tuple(maybe_requires_grad(r) for r in result.tensors)
@staticmethod
# type: ignore
def backward(
ctx,
*grad: Tensor,
) -> Tuple[Optional[Tensor], ...]:
ranks = get_pipeline_parallel_ranks()
this_rank = torch.distributed.get_rank()
body = AsyncMessageBody(
AsyncMessageType.Gradients, ctx.index, source=ctx.args.dest, dest=ctx.args.source, order=ctx.args.order - 1
)
ctx.transport.send_message(
PipeMessage(
this_rank,
ranks[ctx.args.source.stage],
queue_name=ctx.queue_name,
args=body,
tensors=tuple(grad),
),
sync=True,
)
tail_ctx = getattr(ctx, "tail_ctx", None)
if tail_ctx:
expected_gradients = tail_ctx.expected_gradients
while expected_gradients > 0:
message = ctx.transport.recv_message_header(ctx.queue_name)
args: AsyncMessageBody = message.args
assert args.message_type is AsyncMessageType.Gradients
invocation = tail_ctx.invocations[args.order]
expected_gradients -= tail_ctx.count_per_order[invocation.order]
AsyncEventLoop.perform_backward_for_invocation(ctx.transport, message, tail_ctx.activations, invocation)
return (None, None, None, None, None)
class AsyncEventLoop:
def __init__(
self,
partitions: List[ModuleWrapper],
group: ProcessGroup,
transport: Transport,
training: bool,
checkpoint_stop: int,
):
self.training = training
self.checkpoint_stop = checkpoint_stop
self.transport = transport
self.group = group
self.partitions: List[ModuleWrapper] = partitions
def send_async_message(self, dst_rank: int, result: Batch, invocation: Invocation) -> Batch:
"""Send batch to dst_rank, and use AutogradWithoutActivations to delete
the activations since we no longer need them"""
assert invocation.dest
src_rank = torch.distributed.get_rank()
body = AsyncMessageBody(
AsyncMessageType.Activations, result.index, invocation.this, invocation.dest, invocation.order + 1
)
self.transport.send_message(
PipeMessage(src_rank, dst_rank, queue_name=EVENT_LOOP_QUEUE, args=body, tensors=tuple([*result])),
sync=True,
)
phony = AutogradWithoutActivations.apply(*result)
return Batch(phony, result.index)
def run_invocation(
self,
batch: Batch,
partition: ModuleWrapper,
skip_trackers: List[SkipTrackerThroughPotals],
invocation: Invocation,
) -> Batch:
"""Actually run the forward pass for a given module, and send the result
to the next stage in the pipeline if needed."""
task = create_task(
self.checkpoint_stop,
batch.index,
self.group.rank(),
batch,
partition.module,
skip_trackers,
)
result = task.compute()
task.finalize(result)
if invocation.dest and invocation.dest.stage != invocation.this.stage:
ranks = get_pipeline_parallel_ranks()
dst_rank = ranks[invocation.dest.stage]
result = self.send_async_message(dst_rank, result, invocation)
return result
@staticmethod
def perform_backward_for_invocation(
transport: Transport, message: PipeMessage, activations: Activations, invocation: Invocation
) -> None:
"""Perform the backward pass by looking up the appropriate `Batch` and
then calling `backward` on the tensor"""
recvd_grads = transport.recv_message_tensors(message)
batch: Batch = activations[invocation.this.index][invocation.order][message.args.microbatch_index]
# All batches saved in `activations` are generated by AutogradWithoutActivations,
# so we store the gradients in `grad_from_pipeline` so it will be used
# during the backward pass
batch.tensor.grad_fn.grad_from_pipeline = tuple(recvd_grads.tensors)
batch.tensor.backward(retain_graph=True)
def run_invocations_on_batch(
self,
batch: Batch,
invocations: Invocations,
order: int,
skip_trackers: List[SkipTrackerThroughPotals],
activations: Activations,
) -> Tuple[int, int]:
"""Run invocations on the batch until we hit one that receives its input
from a different stage (i.e. another process)"""
invocations_handled = 0
last_order = 0
for invocation in invocations.values():
if invocation.order < order:
continue
pi = invocation.this.index
partition = self.partitions[pi]
if invocation.order == order:
invocations_handled += 1
last_order = invocation.order
activations[pi][invocation.order][batch.index] = self.run_invocation(
batch, partition, skip_trackers, invocation
)
elif invocation.source and invocation.source.stage == self.group.rank():
invocations_handled += 1
last_order = invocation.order
batch = activations[invocation.source.index][invocation.order - 1][batch.index]
activations[pi][invocation.order][batch.index] = self.run_invocation(
batch, partition, skip_trackers, invocation
)
del activations[invocation.source.index][invocation.order - 1][batch.index]
elif invocation.source and invocation.source.stage != self.group.rank():
break
return (invocations_handled, last_order)
def event_loop_head(
self, batches: List[Batch], skip_trackers: List[SkipTrackerThroughPotals], event: Optional[Event]
) -> None:
"""The event loop for the "head", which first performs the forward pass
on any applicable layers for this stage, and then enters the common
`event_loop_inner`"""
invocations, activations = self.get_invocations_and_activations()
expected_invocations = len(invocations) * len(batches)
actual_invocations = 0
count_per_order = dict()
for batch in batches:
inv_count, last_order = self.run_invocations_on_batch(batch, invocations, 0, skip_trackers, activations)
actual_invocations += inv_count
count_per_order[last_order] = inv_count
if actual_invocations < expected_invocations or self.training:
self.event_loop_inner(
expected_invocations,
skip_trackers,
activations,
invocations,
count_per_order,
already_received=actual_invocations,
event=event,
)
def get_batch_from_message(self, message: PipeMessage) -> Batch:
"""Get the tensor(s) wrapped in a `Batch` from a `PipeMessage`, applying
AsyncRecvOperator so we can intercept the backward pass"""
microbatch_index = message.args.microbatch_index
phony = torch.empty(0, device=self.transport.input_device, requires_grad=True)
result = AsyncRecvOperator.apply(phony, self.transport, message, EVENT_LOOP_QUEUE)
if len(result) == 1:
batch = Batch(result[0], microbatch_index)
else:
batch = Batch(result, microbatch_index)
return batch
def event_loop_tail(self, batches: List[Batch], skip_trackers: List[SkipTrackerThroughPotals]) -> None:
"""The event loop for the "tail", or final stage which only processes
activations and then returns to the caller so that the loss can be
calculated. This also handles the first/only stage for the special
case of a 1-stage pipeline."""
invocations, activations = self.get_invocations_and_activations()
expected_invocations = len(invocations) * len(batches)
actual_invocations = 0
rank = self.group.rank()
count_per_order = dict()
for batch in batches:
if rank == 0:
order = 0
else:
message = self.transport.recv_message_header(EVENT_LOOP_QUEUE)
args: AsyncMessageBody = message.args
batch = self.get_batch_from_message(message)
order = args.order
inv_count, last_order = self.run_invocations_on_batch(batch, invocations, order, skip_trackers, activations)
actual_invocations += inv_count
count_per_order[last_order] = inv_count
if invocations[last_order].dest is None:
self.prepare_tail_backward(
batch, activations, invocations, count_per_order, len(invocations) - inv_count
)
if actual_invocations < expected_invocations:
expected_gradients = 0 # (len(invocations) - 1) * len(batches)
self.event_loop_inner(
expected_invocations,
skip_trackers,
activations,
invocations,
count_per_order,
already_received=actual_invocations,
ignore_gradients=True,
tail=True,
)
_, last_invocation = invocations.popitem()
for index, batch in activations[len(self.partitions) - 1][last_invocation.order].items():
batches[index] = batch
def get_invocations_and_activations(self) -> Tuple[Invocations, Activations]:
activations: Activations = dict()
invocations: Invocations = OrderedDict()
for pi, partition in enumerate(self.partitions):
activations[pi] = dict()
for invocation in partition.invocations:
activations[pi][invocation.order] = dict()
invocations[invocation.order] = invocation
invocations = OrderedDict(sorted(invocations.items(), key=lambda entry: entry[0]))
return (invocations, activations)
def event_loop(self, num_microbatch: int, skip_trackers: List[SkipTrackerThroughPotals]) -> None:
"""The event loop for the "middle", i.e. neither the head nor the tail"""
invocations, activations = self.get_invocations_and_activations()
expected_invocations = len(invocations) * num_microbatch
self.event_loop_inner(expected_invocations, skip_trackers, activations, invocations, dict())
def event_loop_inner(
self,
expected_invocations: int,
skip_trackers: List[SkipTrackerThroughPotals],
activations: Activations,
invocations: Invocations,
count_per_order: Dict[int, int],
*,
already_received: int = 0,
ignore_gradients: bool = False,
event: Optional[Event] = None,
tail: bool = False,
) -> None:
"""The common event loop shared by all stages. This processses
activations for the forward pass, and if `self.training` is true,
processes gradients for the backward pass."""
num_activations = already_received
if self.training and not ignore_gradients:
num_gradients = 0
else:
num_gradients = expected_invocations
while num_activations < expected_invocations or num_gradients < expected_invocations:
if num_activations == expected_invocations and num_gradients == 0 and event is not None:
# We are ready to do the backward pass, but must wait for
# PipeRPCWrapper to signal that it is safe to proceed, otherwise
# deadlock
event.wait()
message = self.transport.recv_message_header(EVENT_LOOP_QUEUE)
args: AsyncMessageBody = message.args
invocation = invocations[args.order]
# FIXME(tom) for combining pipeline with megatron, I currently don't
# control the order of received activations or gradients, so it is
# possible for a reused ColumnParallelLinear for example to receive
# a different order of activations w.r.t. the sending stage, which
# would result in incorrect values being used for the all_gather
if args.message_type is AsyncMessageType.Activations:
batch = self.get_batch_from_message(message)
inv_count, last_order = self.run_invocations_on_batch(
batch, invocations, args.order, skip_trackers, activations
)
count_per_order[last_order] = inv_count
num_activations += inv_count
if tail and invocations[last_order].dest is None:
self.prepare_tail_backward(
batch, activations, invocations, count_per_order, len(invocations) - inv_count
)
assert num_activations <= expected_invocations
elif args.message_type is AsyncMessageType.Gradients:
num_gradients += count_per_order[invocation.order]
self.perform_backward_for_invocation(self.transport, message, activations, invocation)
@staticmethod
def prepare_tail_backward(
batch: Batch,
activations: Activations,
invocations: Invocations,
count_per_order: Dict[int, int],
expected_gradients: int,
) -> None:
if expected_gradients > 0:
grad_fn = next(b.grad_fn for b in batch if b.requires_grad)
assert grad_fn
grad_fn.tail_ctx = TailBackwardContext(activations, invocations, count_per_order, expected_gradients)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The pipeline parallelism of Pipe."""
from queue import Queue
from types import TracebackType
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Type, Union, cast
import torch
from torch import Tensor, nn
from torch.autograd.profiler import record_function
from .checkpoint import Checkpointing
from .copy import Copy, Wait
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers, join_workers
__all__: List[str] = []
Tensors = Tuple[Tensor, ...]
TensorOrTensors = Union[Tensor, Tensors]
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
# Queue is generic only in stubs.
# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime
if TYPE_CHECKING:
InQueue = Queue[Optional["Task"]]
OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]]
else:
InQueue = Queue
OutQueue = Queue
def depend(fork_from: Batch, join_to: Batch) -> None:
fork_from[0], phony = fork(fork_from[0])
join_to[0] = join(join_to[0], phony)
def copy(batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream) -> None:
batch[:] = Copy.apply(prev_stream, next_stream, *batch)
# Gradients are only supported for float Tensors.
batch[:] = tuple([x if x.is_floating_point() else x.detach() for x in batch])
def wait(batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream) -> None:
batch[:] = Wait.apply(prev_stream, next_stream, *batch)
# Gradients are only supported for float Tensors.
batch[:] = tuple([x if x.is_floating_point() else x.detach() for x in batch])
def clock_cycles(m: int, n: int) -> Iterable[List[Tuple[int, int]]]:
"""Generates schedules for each clock cycle."""
# m: number of micro-batches
# n: number of partitions
# i: index of micro-batch
# j: index of partition
# k: clock number
#
# k (i,j) (i,j) (i,j)
# - ----- ----- -----
# 0 (0,0)
# 1 (1,0) (0,1)
# 2 (2,0) (1,1) (0,2)
# 3 (2,1) (1,2)
# 4 (2,2)
for k in range(m + n - 1):
yield [(k - j, j) for j in range(max(1 + k - m, 0), min(1 + k, n))]
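# Illustrative note (not part of the library): with m=3 micro-batches and
# n=3 partitions the generator reproduces the diagonal schedule drawn above:
#
#     >>> list(clock_cycles(3, 3))
#     [[(0, 0)],
#      [(1, 0), (0, 1)],
#      [(2, 0), (1, 1), (0, 2)],
#      [(2, 1), (1, 2)],
#      [(2, 2)]]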
class Pipeline:
"""The pipeline parallelism for Pipe."""
def __init__(
self,
partitions: List[nn.Sequential],
devices: List[torch.device],
copy_streams: List[List[AbstractStream]],
skip_layout: SkipLayout,
checkpoint_stop: int,
) -> None:
self.partitions = partitions
self.devices = devices
self.copy_streams = copy_streams
self.skip_layout = skip_layout
self.checkpoint_stop = checkpoint_stop
(self.in_queues, self.out_queues) = create_workers(devices)
def __del__(self) -> None:
join_workers(self.in_queues, self.out_queues)
def run(self, batches: List[Batch]) -> None:
"""Runs pipeline parallelism.
It modifies the given batches in place.
"""
partitions = self.partitions
devices = self.devices
skip_layout = self.skip_layout
m = len(batches)
n = len(partitions)
skip_trackers = [SkipTrackerThroughPotals(skip_layout, i) for i in range(m)]
for schedule in clock_cycles(m, n):
self.fence(batches, schedule, skip_trackers)
self.compute(batches, schedule, skip_trackers)
def fence(
self,
batches: List[Batch],
schedule: List[Tuple[int, int]],
skip_trackers: List[SkipTrackerThroughPotals],
) -> None:
"""Copies micro-batches after computation for the previous
micro-batches.
"""
copy_streams = self.copy_streams
skip_layout = self.skip_layout
for i, j in schedule:
# Ensure that batches[i-1] is executed after batches[i] in
# backpropagation by an explicit dependency.
if i != 0 and j != 0:
depend(batches[i - 1], batches[i])
next_stream = copy_streams[j][i]
for prev_j, ns, name in skip_layout.copy_policy(j):
prev_stream = copy_streams[prev_j][i]
skip_trackers[i].copy(batches[i], prev_stream, next_stream, ns, name)
if j != 0:
prev_stream = copy_streams[j - 1][i]
copy(batches[i], prev_stream, next_stream)
def compute(
self,
batches: List[Batch],
schedule: List[Tuple[int, int]],
skip_trackers: List[SkipTrackerThroughPotals],
) -> None:
"""Runs tasks with synchronization to copy streams."""
partitions = self.partitions
devices = self.devices
copy_streams = self.copy_streams
checkpoint_stop = self.checkpoint_stop
# Disable checkpointing if in eval mode.
if not self.partitions[0].training:
checkpoint_stop = 0
n = len(partitions)
streams = [current_stream(d) for d in devices]
exc_info: Optional[ExcInfo] = None
# With checkpointing, the autograd graph looks like this diagram:
# ┌─────┸──────┐
# │ Copy │
# └─────┰──────┘ (fence)
# ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─
# ┃ (compute)
# ┌─────┸──────┐
# │ Wait │ [1] Synchronize the current stream with the copy stream.
# └─────┰──────┘
# ┌─────┸──────┐
# │ Checkpoint │ [2] Compute a partition within checkpointing.
# └─────┰──────┘
# ┌─────┸──────┐
# │ Wait │ [3] Synchronize the copy stream with the current stream.
# └─────┰──────┘
# ┠ ─ ─ ─ ┐
# ┃ ┌─────┴─────┐
# ┃ │ Recompute │ [4] Schedule the recomputation at backpropagation.
# ┃ └─────┬─────┘
# ┠ ─ ─ ─ ┘
# ┃
# ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─
# ┌─────┸──────┐ (fence)
# │ Copy │
# └─────┰──────┘
for i, j in schedule:
batch = batches[i]
partition = partitions[j]
# Synchronize with the copied input. ([1] in the diagram)
if j != 0:
wait(batch, copy_streams[j][i], streams[j])
# Determine whether checkpointing or not.
checkpoint = i < checkpoint_stop
if checkpoint:
def function(
input: TensorOrTensors,
partition: nn.Sequential = partition,
skip_tracker: SkipTrackerThroughPotals = skip_trackers[i],
chunk_id: int = i,
part_id: int = j,
) -> TensorOrTensors:
with use_skip_tracker(skip_tracker), record_function("chunk%d-part%d" % (chunk_id, part_id)):
return partition(input)
chk = Checkpointing(function, batch)
task = Task(streams[j], compute=chk.checkpoint, finalize=chk.recompute)
del function, chk
else:
def compute(
batch: Batch = batch,
partition: nn.Sequential = partition,
skip_tracker: SkipTrackerThroughPotals = skip_trackers[i],
chunk_id: int = i,
part_id: int = j,
) -> Batch:
with use_skip_tracker(skip_tracker), record_function("chunk%d-part%d" % (chunk_id, part_id)):
return batch.call(partition)
task = Task(streams[j], compute=compute, finalize=None)
del compute
# Compute tasks in parallel. ([2] in the diagram)
self.in_queues[j].put(task)
for i, j in schedule:
ok, payload = self.out_queues[j].get()
# Hold the first exception.
if exc_info is not None:
continue
elif not ok:
exc_info = cast(ExcInfo, payload)
continue
task, batch = cast(Tuple[Task, Batch], payload)
# The copy stream synchronizes to copy the output. ([3] in the
# diagram)
if j != n - 1:
wait(batch, streams[j], copy_streams[j][i])
# Finalize tasks. If checkpointing is enabled, here the
# recomputation is scheduled at backpropagation. ([4] in the
# diagram)
with use_device(devices[j]):
task.finalize(batch)
batches[i] = batch
# Fail at the first exception.
if exc_info is not None:
raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manipulation of micro-batches."""
import typing
from typing import Callable, Iterable, Iterator, List, Tuple, Union, cast
import torch
from torch import Tensor
import torch.cuda.comm
__all__: List[str] = []
Tensors = Tuple[Tensor, ...]
TensorOrTensors = Union[Tensor, Tensors]
Function = Callable[[TensorOrTensors], TensorOrTensors]
class Batch:
"""An abstraction of an atomic tensor or a tuple of tensors. This
eliminates every boilerplate code to classify an atomic tensor or a tuple
of tensors.
::
x = generate_tensor_or_tensors()
x = Batch(x)
# in-place update
x[0] = F.apply(x[0])
x[:] = F.apply(*x)
# f(x) if x is a tensor.
# f(*x) if x is a tuple of tensors.
# y is also a batch.
y = x.call(f)
"""
def __init__(self, value: TensorOrTensors, index: int) -> None:
self.value = value
self.atomic = torch.is_tensor(value)
self.__index = index
@property
def index(self) -> int:
return self.__index
@property
def tensor(self) -> Tensor:
"""Retrieves the underlying tensor."""
if not self.atomic:
raise AttributeError("not atomic batch")
return cast(Tensor, self.value)
@property
def tensors(self) -> Tensors:
"""Retrieves the underlying tensors."""
if self.atomic:
raise AttributeError("batch is atomic")
return cast(Tensors, self.value)
@property
def tensor_or_tensors(self) -> TensorOrTensors:
"""Retrieves the underlying tensor or tensors regardless of type."""
return self.value
def call(self, function: Function) -> "Batch":
"""Calls a function by the underlying tensor or tensors. It also wraps
the output with :class:`Batch`.
"""
return Batch(function(self.value), self.index)
def __repr__(self) -> str:
return f"Batch[atomic={self.atomic!r}]({self.value!r})"
def __iter__(self) -> Iterator[Tensor]:
if self.atomic:
yield self.tensor
else:
yield from self.tensors
def __len__(self) -> int:
return 1 if self.atomic else len(self.tensors)
def __getitem__(self, index: int) -> Tensor:
if not self.atomic:
return self.tensors[index]
if index != 0:
raise IndexError("atomic batch allows index 0 only")
return self.tensor
# NOTE(sublee): pyflakes can't detect "overload" instead of "typing.overload".
@typing.overload
def __setitem__(self, index: int, value: Tensor) -> None:
...
@typing.overload
def __setitem__(self, index: slice, value: Tensors) -> None:
...
def __setitem__(self, index: Union[int, slice], value: TensorOrTensors) -> None:
if isinstance(index, int):
value = cast(Tensor, value)
self._setitem_by_index(index, value)
else:
value = cast(Tensors, value)
self._setitem_by_slice(index, value)
def _setitem_by_index(self, index: int, value: Tensor) -> None:
if not self.atomic:
i = index
self.value = self.value[:i] + (value,) + self.value[i + 1 :]
return
if index != 0:
raise IndexError("atomic batch allows index 0 only")
self.value = value
def _setitem_by_slice(self, index: slice, value: Tensors) -> None:
if not (index.start is index.stop is index.step is None):
raise NotImplementedError("only slice [:] supported")
if not self.atomic:
self.value = value
return
if len(value) != 1:
raise IndexError("atomic batch cannot be replaced with multiple tensors")
self.value = value[0]
def check(input: TensorOrTensors) -> None:
"""Checks whether the input is a tensor or tensors.
Raises:
TypeError: input is not a tensor or tensors.
"""
if isinstance(input, tuple):
for x in input:
check(x)
return
if not isinstance(input, Tensor):
raise TypeError(f"expected Tensor, but got {input.__class__.__name__}")
def scatter(input: TensorOrTensors, chunks: int) -> List[Batch]:
"""Splits an input mini-batch into multiple micro-batches."""
inputs: Iterable[TensorOrTensors]
if isinstance(input, Tensor):
inputs = input.chunk(chunks)
else:
rotated: List[Tensors] = []
for tensor in input:
tensors = tensor.chunk(chunks)
rotated.append(cast(Tensors, tensors))
inputs = zip(*rotated)
return [Batch(x, i) for i, x in enumerate(inputs)]
def gather(outputs: List[Batch]) -> TensorOrTensors:
"""Concatenates output micro-batches into a mini-batch."""
output: TensorOrTensors
if outputs[0].atomic:
tensors = tuple(b.tensor for b in outputs)
output = torch.cat(tensors)
else:
rotated = [b.tensors for b in outputs]
output_buf = []
for tensors in zip(*rotated):
output_buf.append(torch.cat(tensors))
output = tuple(output_buf)
return output
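# Illustrative usage sketch (an addition for exposition, not part of the
# library): scatter splits a mini-batch into `chunks` Batch objects and
# gather concatenates the outputs back. Shapes and chunk count are arbitrary.
if __name__ == "__main__":
    mini_batch = torch.arange(8.0).reshape(4, 2)
    micro_batches = scatter(mini_batch, chunks=2)  # two Batch objects of shape (2, 2)
    outputs = [b.call(lambda t: t * 2) for b in micro_batches]
    restored = gather(outputs)
    assert torch.equal(restored, mini_batch * 2)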
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Arbitrary dependency between two autograd lanes."""
from typing import List, Tuple
import torch
from torch import Tensor
from .phony import get_phony
__all__: List[str] = []
def fork(input: Tensor) -> Tuple[Tensor, Tensor]:
"""Branches out from an autograd lane of the given tensor."""
if torch.is_grad_enabled() and input.requires_grad:
input, phony = Fork.apply(input)
else:
phony = get_phony(input.device, requires_grad=False)
return input, phony
class Fork(torch.autograd.Function):
@staticmethod
def forward(ctx: "Fork", input: Tensor) -> Tuple[Tensor, Tensor]: # type: ignore
phony = get_phony(input.device, requires_grad=False)
return input.detach(), phony.detach()
@staticmethod
def backward(ctx: "Fork", grad_input: Tensor, grad_grad: Tensor) -> Tensor: # type: ignore
return grad_input
def join(input: Tensor, phony: Tensor) -> Tensor:
"""Merges two autograd lanes."""
if torch.is_grad_enabled() and (input.requires_grad or phony.requires_grad):
input = Join.apply(input, phony)
return input
class Join(torch.autograd.Function):
@staticmethod
def forward(ctx: "Join", input: Tensor, phony: Tensor) -> Tensor: # type: ignore
return input.detach()
@staticmethod
def backward(ctx: "Join", grad_input: Tensor) -> Tuple[Tensor, None]: # type: ignore
return grad_input, None
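# Illustrative sketch (not part of the library): fork/join insert an
# artificial edge between two otherwise independent autograd lanes, so that
# in backward the lane of `b` depends on the lane branched off `a`:
#
#     a = torch.ones(3, requires_grad=True)
#     b = torch.ones(3, requires_grad=True)
#     a_branch, phony = fork(a * 3)   # branch a phony tensor off a's lane
#     b_joined = join(b * 2, phony)   # b's lane now depends on a's lane in backward
#     (a_branch.sum() + b_joined.sum()).backward()
#     # a.grad == tensor([3., 3., 3.]), b.grad == tensor([2., 2., 2.])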
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC
from dataclasses import dataclass
from queue import Empty as QueueEmpty
from queue import Queue
from typing import Dict, List, Optional
import torch
from fairscale.internal.object import pyobject_to_tensor, tensor_to_pyobject
from fairscale.nn.model_parallel import get_pipeline_parallel_group
from .types import MESSAGE_GENERATION_START, InputDevice, PipeMessage, Tensors
MESSAGE_TENSOR_SIZE = 1024
MessageQueues: List[Queue] = [Queue() for _ in range(MESSAGE_GENERATION_START)]
def to_input_device(tensors: Tensors, input_device: InputDevice) -> Tensors:
if input_device is None:
return tensors
else:
return tuple(t.to(input_device) for t in tensors)
def rpc_push_queue(message: PipeMessage) -> None:
globals()["MessageQueues"][message.queue_name].put(message)
@dataclass(frozen=True)
class Transport(ABC):
worker_map: Optional[Dict[int, str]]
input_device: InputDevice
def recv_message(self, queue_name: int, *, nowait: bool = False) -> PipeMessage:
message = self.recv_message_header(queue_name, nowait)
return self.recv_message_tensors(message)
def recv_message_header(self, queue_name: int, nowait: bool = False) -> PipeMessage:
...
def recv_message_tensors(self, message: PipeMessage) -> PipeMessage:
...
def send_message(self, message: PipeMessage, sync: bool = False, skip_header: bool = False) -> None:
...
def get_out_of_order(self, queue_name: int, index: int) -> Tensors:
...
def MakeTransport(use_rpc: bool, worker_map: Optional[Dict[int, str]], input_device: InputDevice) -> Transport:
if use_rpc:
if worker_map is None:
raise ValueError("'RpcTransport' requires 'worker_map' to be set")
return RpcTransport(worker_map, input_device)
else:
return SendRecvTransport(worker_map, input_device)
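# Illustrative sketch (queue name and device below are assumptions):
# RpcTransport needs a {global_rank: rpc_worker_name} map, while
# SendRecvTransport moves tensors with torch.distributed.send/recv on the
# pipeline process group.
#
#     transport = MakeTransport(
#         use_rpc=True,
#         worker_map={0: "worker0", 1: "worker1"},
#         input_device=torch.device("cuda", torch.cuda.current_device()),
#     )
#     message = transport.recv_message(queue_name=0)  # blocks until a message arrives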
class RpcTransport(Transport):
def send_message(self, message: PipeMessage, sync: bool = False, skip_header: bool = False) -> None:
message.tensors = tuple(t.cpu() for t in message.tensors)
assert self.worker_map
name = self.worker_map[message.dest]
if sync:
torch.distributed.rpc.rpc_sync(name, rpc_push_queue, args=(message,))
else:
torch.distributed.rpc.rpc_async(name, rpc_push_queue, args=(message,))
def recv_message_header(self, queue_name: int, nowait: bool = False) -> PipeMessage:
queue = MessageQueues[queue_name]
if nowait:
result = queue.get_nowait()
else:
result = queue.get()
result.tensors = to_input_device(result.tensors, self.input_device)
return result
def recv_message_tensors(self, message: PipeMessage) -> PipeMessage:
# Tensors already contained within message
message.tensors = to_input_device(message.tensors, self.input_device)
return message
def get_out_of_order(self, queue_name: int, index: int) -> Tensors:
"""Receive a message with a known microbatch index, and handle out-of-order
messages by placing them back on the queue"""
queue = globals()["MessageQueues"][queue_name]
out_of_order: List[PipeMessage] = []
while True:
message = self.recv_message(queue_name)
got_index = message.args
value = message.tensors
if got_index == index:
for b in out_of_order:
queue.put(b)
return value
else:
out_of_order.append(message)
class SendRecvTransport(Transport):
def send_message(self, message: PipeMessage, sync: bool = False, skip_header: bool = False) -> None:
tensors = message.tensors
message.tensors = tuple()
torch.cuda.current_stream().synchronize()
if not skip_header:
message.tensor_shapes = [t.size() for t in tensors]
message.tensor_dtypes = [t.dtype for t in tensors]
torch.distributed.send(
pyobject_to_tensor(message, MESSAGE_TENSOR_SIZE).cuda(),
message.dest,
tag=message.queue_name,
group=get_pipeline_parallel_group(),
)
for index, t in enumerate(tensors):
if t.device.type == "cpu":
t = t.cuda()
torch.distributed.send(
t.contiguous(), message.dest, tag=message.tag + index, group=get_pipeline_parallel_group()
)
def recv_message_header(self, queue_name: int, nowait: bool = False) -> PipeMessage:
# FIXME(handle nowait)
if nowait:
raise QueueEmpty
tensor = torch.empty(MESSAGE_TENSOR_SIZE, dtype=torch.uint8, device=self.input_device)
torch.cuda.current_stream().synchronize()
torch.distributed.recv(tensor, src=None, tag=queue_name, group=get_pipeline_parallel_group())
torch.cuda.current_stream().synchronize()
return tensor_to_pyobject(tensor)
def recv_message_tensors(self, message: PipeMessage) -> PipeMessage:
torch.cuda.current_stream().synchronize()
message_tensors = []
for index, (shape, dtype) in enumerate(zip(message.tensor_shapes, message.tensor_dtypes)):
t = torch.empty(*shape, dtype=dtype, device=self.input_device)
torch.distributed.recv(t, message.src, tag=message.tag + index, group=get_pipeline_parallel_group())
message_tensors.append(t)
message.tensors = tuple(message_tensors)
torch.cuda.current_stream().synchronize()
return message
def get_out_of_order(self, queue_name: int, index: int) -> Tensors:
"""Receive a message with a known microbatch index, and handle out-of-order
messages by placing them back on the queue"""
message = self.recv_message(queue_name)
assert message.args == index
return message.tensors
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from dataclasses import dataclass, field
import itertools
import threading
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union
import warnings
import torch
from torch import Tensor, nn
from fairscale.nn.model_parallel import get_pipeline_parallel_group
from . import microbatch
from .async_pipeline import AsyncPipeline
from .async_schedule import Invocation, Location, ModuleWrapper
from .batchnorm import DeferredBatchNorm
from .skip.layout import SkipLayout
from .skip.skippable import Skippable
from .types import LazyModule
if TYPE_CHECKING:
Module = nn.Module[TensorOrTensors]
NamedModules = OrderedDict[str, Module]
else:
Module = nn.Module
NamedModules = OrderedDict
Tensors = Tuple[Tensor, ...]
TensorOrTensors = Union[Tensor, Tensors]
@dataclass
class PartitionInfo:
location: Location
modules: "OrderedDict[str, nn.Module]"
invocations: List[Invocation] = field(default_factory=list)
def __len__(self) -> int:
return len(self.modules)
def verify_module(module: Union[nn.Sequential, List[LazyModule]]) -> None:
if len(set(map(id, module))) != len(module):
raise ValueError("module with duplicate children is not supported")
def check_balance(module: Union[nn.Sequential, List[LazyModule]], balance: List[int]) -> None:
if len(module) != sum(balance):
raise ValueError(
f"module and sum of balance have different length (module: {len(module)}, sum of balance: {sum(balance)})"
)
if any(x <= 0 for x in balance):
raise ValueError(f"all balance numbers must be positive integer (balance: {balance})")
MOVING_DENIED = TypeError("denied to move parameters and buffers, because Pipe should manage device placement")
class AsyncPipe(Module):
"""Wraps an arbitrary :class:`nn.Sequential <torch.nn.Sequential>` module
to train on Pipe_. If the module requires lots of memory, Pipe will be
very efficient.
Pipe combines pipeline parallelism with checkpointing to reduce peak
memory required to train while minimizing device under-utilization.
You should determine the balance when defining an :class:`AsyncPipe` module, as
balancing will not be done automatically. The module will be partitioned
into multiple devices according to the given balance. You may rely on
heuristics to find your own optimal configuration.
Args:
module (torch.nn.Sequential):
sequential module to be parallelized
balance (ints):
list of number of layers in each partition
Keyword Args:
group (ProcessGroup):
the process group that all
pipeline stages are a member of. Defaults to
`get_pipeline_parallel_group()`
worker_map (Dict[int, str]):
a map from global rank (i.e. `torch.distributed.get_rank()`) to worker
name (the first argument to `torch.distributed.rpc.init_rpc`), needed
in order for pipeline stages to communicate with each other
input_device (device):
the device on which tensors should be located before being passed to
the first module in a given pipeline stage
chunks (int):
number of micro-batches (default: ``1``)
checkpoint (str):
when to enable checkpointing, one of ``'always'``,
``'except_last'``, or ``'never'`` (default: ``'except_last'``)
deferred_batch_norm (bool):
whether to use deferred BatchNorm moving statistics (default:
:data:`False`, see :class:`DeferredBatchNorm` for more
details)
Raises:
TypeError:
the module is not a :class:`nn.Sequential <torch.nn.Sequential>`.
ValueError:
invalid arguments, or wrong balance
IndexError:
the number of devices is fewer than the number of partitions.
"""
#: The number of layers in each partition.
balance: List[int] = []
# ^^
# The default value [] required for Sphinx's autoattribute.
#: The devices mapped to each partition.
#:
#: ``devices[-1]`` refers to the device of the last partition, which means
#: it is the output device. You will probably need it to move the target to
#: the output device so that the loss can be computed without a device
#: mismatch :exc:`RuntimeError`. For example::
#:
#: out_device = pipe.devices[-1]
#:
#: for input, target in loader:
#: target = target.to(out_device, non_blocking=True)
#: output = pipe(input)
#: loss = F.cross_entropy(output, target)
#:
#: The number of micro-batches.
chunks: int = 1
#: The checkpoint mode to determine when to enable checkpointing. It is one
#: of ``'always'``, ``'except_last'``, or ``'never'``.
checkpoint: str = "except_last"
def __init__(
self,
module: Union[nn.Sequential, List[LazyModule]],
balance: Iterable[int],
*,
group: Optional[torch.distributed.ProcessGroup] = None,
worker_map: Optional[Dict[int, str]] = None,
input_device: Union[None, int, str, torch.device] = None,
chunks: int = chunks,
checkpoint: str = checkpoint,
deferred_batch_norm: bool = False,
) -> None:
super().__init__()
if chunks <= 0:
raise ValueError("number of chunks must be positive integer")
if checkpoint not in ["always", "except_last", "never"]:
raise ValueError("checkpoint is not one of 'always', 'except_last', or 'never'")
self.balance = list(balance)
verify_module(module)
check_balance(module, self.balance)
self.chunks = chunks
self.checkpoint = checkpoint
self.pipeline: Optional[AsyncPipeline]
self.lock = threading.Lock()
self.worker_map = worker_map
self.input_device = input_device
self.group: torch.distributed.ProcessGroup
if group is None:
self.group = get_pipeline_parallel_group()
else:
self.group = group
if self.group.size() < len(self.balance):
raise IndexError(
f"too few ranks to hold given partitions (ranks: {self.group.size()}, partitions:"
f" {len(self.balance)})"
)
self._skip_layout = SkipLayout(len(module), {}) # FIXME(tom)
rank = self.group.rank()
self.final_stage = rank == len(self.balance) - 1
if rank >= len(self.balance):
warnings.warn("More ranks than partitions, some ranks unused")
self.partitions: List[ModuleWrapper] = []
self.pipeline = None
# TODO(msb) remove this hack
self.partition = None
else:
self.partitions = self.instantiate_partition(module, self.balance, self.group)
if deferred_batch_norm:
for part in self.partitions:
part.module = DeferredBatchNorm.convert_deferred_batch_norm(part.module, chunks)
for name, part in enumerate(self.partitions):
self.add_module(str(name), part.module)
self.create_pipeline()
# TODO(msb) remove this hack
self.partition = self.partitions[0].module
del module
def create_pipeline(self) -> None:
# The micro-batch index where the checkpointing stops.
checkpoint_stop = {"always": self.chunks, "except_last": self.chunks - 1, "never": 0}[self.checkpoint]
self.pipeline = AsyncPipeline(
self.partitions,
self._skip_layout,
checkpoint_stop,
group=self.group,
worker_map=self.worker_map,
input_device=self.input_device,
final_stage=self.final_stage,
)
def instantiate_partition(
self,
module: Union[nn.Sequential, List[LazyModule]],
balance: List[int],
group: torch.distributed.ProcessGroup,
) -> List[ModuleWrapper]:
layers: NamedModules = OrderedDict()
def maybe_realize(layer: Any) -> nn.Module:
if isinstance(layer, nn.Module):
return layer
elif callable(layer):
return layer()
else:
raise TypeError(f"layer must be nn.Module or callable, is {type(layer)}")
def iterate_module(module: Union[nn.Sequential, list]) -> Iterable[Tuple[Any, nn.Module]]:
if isinstance(module, nn.Sequential):
yield from module.named_children()
else:
yield from ((str(k), v) for k, v in enumerate(module))
module_ids = list(map(id, module))
index_of_first_use = [module_ids.index(x) for x in module_ids]
locations: List[Location] = []
module_iter = enumerate(iterate_module(module))
partitions: List[List[PartitionInfo]] = []
for bi, b in enumerate(balance):
modules_for_rank: List[PartitionInfo] = []
current_module: OrderedDict[str, nn.Module] = OrderedDict()
def current_location() -> Location:
return Location(bi, len(modules_for_rank))
def append_module(mod: "OrderedDict[str, nn.Module]") -> None:
modules_for_rank.append(PartitionInfo(current_location(), mod))
while sum(map(len, modules_for_rank)) + len(current_module) < b:
module_index, (name, layer) = next(module_iter)
if index_of_first_use[module_index] != module_index:
# Subsequent reuse of a module
locations.append(locations[index_of_first_use[module_index]])
continue
is_reused = index_of_first_use.count(index_of_first_use[module_index]) > 1
if is_reused and len(current_module) > 0:
append_module(current_module)
current_module = OrderedDict()
current_module[str(name)] = layer
locations.append(current_location())
if is_reused:
append_module(current_module)
current_module = OrderedDict()
if len(current_module) > 0:
append_module(current_module)
partitions.append(modules_for_rank)
filtered_locations: List[Optional[Location]] = [loc for loc, _ in itertools.groupby(locations)]
filtered_locations.append(None)
for i in range(len(filtered_locations) - 1):
loc = filtered_locations[i]
assert loc
if i == 0:
inv = Invocation(i, loc, None, filtered_locations[i + 1])
else:
inv = Invocation(i, loc, filtered_locations[i - 1], filtered_locations[i + 1])
partitions[loc.stage][loc.index].invocations.append(inv)
invocations = enumerate(iterate_module(module))
partition = partitions[group.rank()]
result: List[ModuleWrapper] = []
for partition_info in partition:
wrapper = ModuleWrapper(
nn.Sequential(OrderedDict((k, maybe_realize(m)) for k, m in partition_info.modules.items())),
partition_info.location,
partition_info.invocations,
)
if not isinstance(module, nn.Sequential):
for layer in wrapper.module:
if isinstance(layer, Skippable):
raise ValueError("Can't use Skippable layers with multi-process pipe and lazy construction")
result.append(wrapper)
return result
def __len__(self) -> int:
"""Counts the length of the underlying sequential module."""
return sum(len(p) for p in self.partitions)
def __getitem__(self, index: int) -> nn.Module:
"""Gets a layer in the underlying sequential module."""
partitions: List[Any]
partitions = self.partitions
if index < 0:
partitions = partitions[::-1]
for partition in partitions:
try:
if isinstance(partition, ModuleWrapper):
return partition.module[index]
else:
return partition[index]
except IndexError:
pass
shift = len(partition)
if index < 0:
index += shift
else:
index -= shift
raise IndexError
def __iter__(self) -> Iterable[nn.Module]:
"""Iterates over children of the underlying sequential module."""
for partition in self.partitions:
yield from partition.module
def forward(self, input: TensorOrTensors, *, event=None) -> TensorOrTensors: # type: ignore
""":class:`AsyncPipe` is a fairly transparent module wrapper. It doesn't
modify the input and output signature of the underlying module. But there
is a type restriction: the input and output have to be a
:class:`~torch.Tensor` or a tuple of tensors. This restriction is
applied at partition boundaries too.
Args:
input (torch.Tensor or tensors): input mini-batch
Returns:
tensor or tensors: output mini-batch
Raises:
TypeError: input is not a tensor or tensors.
"""
microbatch.check(input)
if not self.pipeline:
# Having no pipeline is legal: there are more ranks than partitions
return input
# Divide a mini-batch into micro-batches.
batches = microbatch.scatter(input, self.chunks)
# Run pipeline parallelism.
with self.lock:
self.pipeline.run(self.training, batches, event)
if self.final_stage:
output = microbatch.gather(batches)
else:
# Don't merge micro-batches to avoid unnecessary edges in autograd
# graph
# FIXME(tom) should figure out a proper type here
output = batches # type: ignore
return output
def back_helper(self, output: List[microbatch.Batch]) -> None:
if self.final_stage:
raise ValueError("back_helper should only be called on non-final stages")
if self.pipeline:
self.pipeline.back_helper(output)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Pipe interface."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union, cast
import warnings
import torch
from torch import Tensor, nn
import torch.autograd
import torch.cuda
from fairscale.internal import torch_version
from . import microbatch
from .batchnorm import DeferredBatchNorm
from .pipeline import Pipeline
from .skip.layout import inspect_skip_layout
from .skip.skippable import verify_skippables
from .stream import AbstractStream, new_stream
__all__ = ["Pipe"]
Device = Union[torch.device, int, str]
Devices = Union[Iterable[Device], List[Device]]
Tensors = Tuple[Tensor, ...]
TensorOrTensors = Union[Tensor, Tensors]
if TYPE_CHECKING:
Module = nn.Module[TensorOrTensors]
NamedModules = OrderedDict[str, Module]
else:
Module = nn.Module
NamedModules = OrderedDict
def recommend_auto_balance(message: str) -> str:
"""Expands a message with recommendation to :mod:`torchpipe.balance`."""
return f"""{message}
If your model is still under development, its optimal balance would change
frequently. In this case, we highly recommend 'fairscale.nn.pipe.balance' for
naive automatic balancing:
from fairscale.nn import Pipe
from fairscale.nn.pipe.balance import balance_by_time
partitions = torch.cuda.device_count()
sample = torch.empty(...)
balance = balance_by_time(partitions, model, sample)
model = Pipe(model, balance, ...)
"""
def verify_module(module: nn.Sequential) -> None:
if not isinstance(module, nn.Sequential):
raise TypeError("module must be nn.Sequential to be partitioned")
named_children = list(module.named_children())
if len(named_children) != len(module):
raise ValueError("module with duplicate children is not supported")
def verify_splitting(
module: nn.Sequential, partitions: List[nn.Sequential], balance: Iterable[int], devices: List[torch.device]
) -> None:
num_parameters = len(list(module.parameters()))
num_child_parameters = sum(len(list(child.parameters())) for child in module.children())
if num_parameters == num_child_parameters:
return
for i in range(len(partitions)):
for j in range(i + 1, len(partitions)):
parti = partitions[i]
partj = partitions[j]
if devices[i] == devices[j]:
continue
for p in parti.parameters():
for q in partj.parameters():
if p is q:
raise ValueError("module with duplicate parameters on distinct devices is not supported")
class BalanceError(ValueError):
pass
def split_module(
module: nn.Sequential,
balance: Iterable[int],
devices: List[torch.device],
) -> Tuple[List[nn.Sequential], List[int], List[torch.device]]:
"""Splits a module into multiple partitions.
Returns:
A tuple of (partitions, balance, devices).
        Partitions are represented as a :class:`~torch.nn.ModuleList` whose
        items are the partitions. All layers in a partition are placed on the
        same device.
Raises:
BalanceError:
wrong balance
IndexError:
the number of devices is fewer than the number of partitions.
"""
balance = list(balance)
if len(module) != sum(balance):
raise BalanceError(
"module and sum of balance have different length "
f"(module: {len(module)}, sum of balance: {sum(balance)})"
)
if any(x <= 0 for x in balance):
raise BalanceError(f"all balance numbers must be positive integer (balance: {balance})")
if len(balance) > len(devices):
raise IndexError(
"too few devices to hold given partitions " f"(devices: {len(devices)}, partitions: {len(balance)})"
)
j = 0
partitions = []
layers: NamedModules = OrderedDict()
for name, layer in module.named_children():
layers[name] = layer
if len(layers) == balance[j]:
# Group buffered layers as a partition.
partition = nn.Sequential(layers)
device = devices[j]
partition.to(device)
partitions.append(partition)
# Prepare for the next partition.
layers.clear()
j += 1
partitions = cast(List[nn.Sequential], nn.ModuleList(partitions))
del devices[j:]
return partitions, balance, devices
MOVING_DENIED = TypeError("denied to move parameters and buffers, " "because Pipe should manage device placement")
class Pipe(Module):
"""Wraps an arbitrary :class:`nn.Sequential <torch.nn.Sequential>` module
to train on Pipe_. If the module requires lots of memory, Pipe will be
very efficient.
::
model = nn.Sequential(a, b, c, d)
model = Pipe(model, balance=[1, 1, 1, 1], chunks=8)
output = model(input)
.. _Pipe: https://arxiv.org/abs/1811.06965
Pipe combines pipeline parallelism with checkpointing to reduce peak
memory required to train while minimizing device under-utilization.
You should determine the balance when defining a :class:`Pipe` module, as
balancing will not be done automatically. The module will be partitioned
    across multiple devices according to the given balance. You may rely on
heuristics to find your own optimal configuration.
Args:
module (torch.nn.Sequential):
sequential module to be parallelized
balance (ints):
list of number of layers in each partition
Keyword Args:
devices (iterable of devices):
devices to use (default: all CUDA devices)
chunks (int):
number of micro-batches (default: ``1``)
checkpoint (str):
when to enable checkpointing, one of ``'always'``,
``'except_last'``, or ``'never'`` (default: ``'except_last'``)
deferred_batch_norm (bool):
whether to use deferred BatchNorm moving statistics (default:
:data:`False`, see :class:`Deferred Batch Normalization <DeferredBatchNorm>` for more
details)
Raises:
TypeError:
the module is not a :class:`nn.Sequential <torch.nn.Sequential>`.
ValueError:
invalid arguments, or wrong balance
IndexError:
the number of devices is fewer than the number of partitions.
"""
#: The number of layers in each partition.
balance: List[int] = []
# ^^
    # The default value [] is required for Sphinx's autoattribute.
#: The devices mapped to each partition.
#:
#: ``devices[-1]`` refers to the device of the last partition, which means
    #: it is the output device. You will probably need it to move the target
    #: to that device so the loss can be computed without a device-mismatch
#: :exc:`RuntimeError`. For example::
#:
#: out_device = pipe.devices[-1]
#:
#: for input, target in loader:
#: target = target.to(out_device, non_blocking=True)
#: output = pipe(input)
#: loss = F.cross_entropy(output, target)
#:
devices: List[torch.device] = []
#: The number of micro-batches.
chunks: int = 1
#: The checkpoint mode to determine when to enable checkpointing. It is one
#: of ``'always'``, ``'except_last'``, or ``'never'``.
checkpoint: str = "except_last"
def __init__(
self,
module: nn.Sequential,
balance: Optional[Iterable[int]] = None,
*,
devices: Optional[Devices] = None,
chunks: int = chunks,
checkpoint: str = checkpoint,
deferred_batch_norm: bool = False,
) -> None:
super().__init__()
if torch_version()[:2] >= (1, 8):
warnings.warn(
"fairscale.nn.Pipe has been upstreamed to PyTorch as torch.distributed.pipeline.sync.Pipe. "
"It is now deprecated and will be removed in a future version of fairscale. "
"The PyTorch API has minor changes. Please see https://pytorch.org/docs/stable/pipeline.html for details.",
DeprecationWarning,
)
chunks = int(chunks)
checkpoint = str(checkpoint)
if balance is None:
raise ValueError(recommend_auto_balance("balance is required"))
if chunks <= 0:
raise ValueError("number of chunks must be positive integer")
if checkpoint not in ["always", "except_last", "never"]:
raise ValueError("checkpoint is not one of 'always', 'except_last', or 'never'")
verify_module(module)
# Verify if the underlying skippable modules satisfy integrity. The
# integrity can be verified before forward() because it is static.
verify_skippables(module)
self.chunks = chunks
self.checkpoint = checkpoint
if deferred_batch_norm:
module = DeferredBatchNorm.convert_deferred_batch_norm(module, chunks)
if devices is None:
devices = range(torch.cuda.device_count())
devices = [torch.device(d) for d in devices]
devices = cast(List[torch.device], devices)
try:
self.partitions, self.balance, self.devices = split_module(module, balance, devices)
except BalanceError as exc:
raise ValueError(recommend_auto_balance(str(exc)))
verify_splitting(module, self.partitions, self.balance, self.devices)
self._copy_streams: List[List[AbstractStream]] = []
self._skip_layout = inspect_skip_layout(self.partitions)
# Separate CUDA streams for copy.
copy_streams = self._ensure_copy_streams()
# The micro-batch index where the checkpointing stops.
checkpoint_stop = {"always": self.chunks, "except_last": self.chunks - 1, "never": 0}[self.checkpoint]
self.pipeline = Pipeline(self.partitions, self.devices, copy_streams, self._skip_layout, checkpoint_stop)
def __len__(self) -> int:
"""Counts the length of the underlying sequential module."""
return sum(len(p) for p in self.partitions)
def __getitem__(self, index: int) -> nn.Module:
"""Gets a layer in the underlying sequential module."""
partitions = self.partitions
if index < 0:
partitions = partitions[::-1]
for partition in partitions:
try:
return partition[index]
except IndexError:
pass
shift = len(partition)
if index < 0:
index += shift
else:
index -= shift
raise IndexError
def __iter__(self) -> Iterable[nn.Module]:
"""Iterates over children of the underlying sequential module."""
for partition in self.partitions:
yield from partition
# Pipe should manage the device of each partition.
# Deny cuda(), cpu(), and to() with device, by TypeError.
def cuda(self, device: Optional[Device] = None) -> "Pipe":
raise MOVING_DENIED
def cpu(self) -> "Pipe":
raise MOVING_DENIED
def to(self, *args: Any, **kwargs: Any) -> "Pipe":
"""Deny these usages:
- to(device[, dtype, non_blocking])
- to(tensor[, non_blocking])
But allow this:
- to(dtype[, non_blocking])"""
if "device" in kwargs or "tensor" in kwargs:
raise MOVING_DENIED
if args:
if isinstance(args[0], (torch.device, int, str)):
raise MOVING_DENIED
if torch.is_tensor(args[0]):
raise MOVING_DENIED
return super().to(*args, **kwargs)
def _ensure_copy_streams(self) -> List[List[AbstractStream]]:
"""Ensures that :class:`Pipe` caches CUDA streams for copy.
        Caching CUDA streams is worthwhile even though PyTorch already manages
        a pool of pre-allocated CUDA streams, because it may reduce GPU memory
        fragmentation when the number of micro-batches is small.
"""
if not self._copy_streams:
for device in self.devices:
self._copy_streams.append([new_stream(device) for _ in range(self.chunks)])
return self._copy_streams
def forward(self, input: TensorOrTensors) -> TensorOrTensors: # type: ignore
""":class:`Pipe` is a fairly transparent module wrapper. It doesn't
        modify the input and output signature of the underlying module, but
        there is a type restriction: input and output have to be a
        :class:`~torch.Tensor` or a tuple of tensors. This restriction is
applied at partition boundaries too.
Args:
input (torch.Tensor or tensors): input mini-batch
Returns:
tensor or tensors: output mini-batch
Raises:
TypeError: input is not a tensor or tensors.
"""
microbatch.check(input)
if not self.devices:
            # An empty sequential module is legal, so just pass the input through.
return input
# Divide a mini-batch into micro-batches.
batches = microbatch.scatter(input, self.chunks)
# Run pipeline parallelism.
self.pipeline.run(batches)
# Merge the micro-batches into one mini-batch.
output = microbatch.gather(batches)
return output
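# Illustrative usage sketch (not part of the library API). It assumes at
# least two visible CUDA devices; the layer sizes, balance, learning rate and
# batch shape are placeholders chosen only for demonstration.
def _example_pipe_training() -> None:
    import torch.nn.functional as F
    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
    pipe = Pipe(model, balance=[2, 1], chunks=4)
    # The last partition produces the output, so move the target there to
    # avoid a device-mismatch RuntimeError when computing the loss.
    in_device = pipe.devices[0]
    out_device = pipe.devices[-1]
    optimizer = torch.optim.SGD(pipe.parameters(), lr=0.1)
    input = torch.randn(8, 16, device=in_device)
    target = torch.randint(0, 4, (8,), device=out_device)
    output = pipe(input)
    loss = F.cross_entropy(output, target)
    loss.backward()
    optimizer.step()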
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from threading import Event
from typing import Dict, List, Optional, Union
import torch
from .async_schedule import AsyncEventLoop, ModuleWrapper
from .messages import MakeTransport
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals
class AsyncPipeline:
"""The async pipeline parallelism for Pipe."""
def __init__(
self,
partitions: List[ModuleWrapper],
skip_layout: SkipLayout,
checkpoint_stop: int,
group: torch.distributed.ProcessGroup,
*,
worker_map: Optional[Dict[int, str]] = None,
input_device: Union[None, int, str, torch.device] = None,
final_stage: bool = False,
) -> None:
self.partitions = partitions
self.skip_layout = skip_layout
self.__checkpoint_stop = checkpoint_stop
self.group = group
self.training: bool
self.transport = MakeTransport(
use_rpc=("OMPI_COMM_WORLD_RANK" not in os.environ) or ("FORCE_RPC" in os.environ),
worker_map=worker_map,
input_device=input_device,
)
self.input_device = input_device
self.final_stage = final_stage
@property
def checkpoint_stop(self) -> int:
# Disable checkpointing if in eval mode.
training = self.partitions[0].module.training
if not training:
return 0
return self.__checkpoint_stop
def run(self, training: bool, batches: List[Batch], event: Optional[Event]) -> None:
"""Runs pipeline parallelism.
It modifies the given batches in place.
"""
self.training = training
skip_trackers = [SkipTrackerThroughPotals(self.skip_layout, i) for i in range(len(batches))]
rank = self.group.rank()
event_loop = AsyncEventLoop(
self.partitions,
self.group,
self.transport,
self.training,
self.checkpoint_stop,
)
if rank == 0 and not self.final_stage:
logging.debug(f"{torch.distributed.get_rank()}: entered event head")
event_loop.event_loop_head(batches, skip_trackers, event)
logging.debug(f"{torch.distributed.get_rank()}: exited event head")
elif self.final_stage:
logging.debug(f"{torch.distributed.get_rank()}: entered event tail")
event_loop.event_loop_tail(batches, skip_trackers)
logging.debug(f"{torch.distributed.get_rank()}: exited event tail")
else:
logging.debug(f"{torch.distributed.get_rank()}: entered event loop")
event_loop.event_loop(len(batches), skip_trackers)
logging.debug(f"{torch.distributed.get_rank()}: exited event loop")
def back_helper(self, output: List[Batch]) -> None:
pass
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Per-layer profilers."""
import copy
import time
from typing import Generator, List, Tuple, Union
import torch
from torch import Tensor
import torch.nn as nn
from ..microbatch import Batch
__all__: List[str] = []
Device = Union[torch.device, int, str]
Tensors = Tuple[Tensor, ...]
TensorOrTensors = Union[Tensor, Tensors]
def layerwise_sandbox(
module: nn.Sequential,
device: torch.device,
) -> Generator[nn.Module, None, None]:
"""Copies layers for ease to profile. It doesn't modify the given
module.
"""
for layer in module:
layer_copy = copy.deepcopy(layer)
layer_copy.to(device)
layer_copy.train()
yield layer_copy
def detach(batch: Batch) -> None:
"""Detaches from autograd graph."""
for i, x in enumerate(batch):
batch[i] = x.detach().requires_grad_(x.requires_grad)
def profile_times(
module: nn.Sequential,
sample: TensorOrTensors,
timeout: float,
device: torch.device,
) -> List[int]:
"""Profiles elapsed times per layer."""
if any(p.grad is not None for p in module.parameters()):
raise ValueError("some parameter already has gradient")
_batch = Batch(sample, 0)
for i, x in enumerate(_batch):
_batch[i] = x.detach().to(device).requires_grad_(x.requires_grad)
time_bufs: List[List[float]] = [[] for _ in module]
begun_at = time.time()
while time.time() - begun_at < timeout:
batch = _batch
for i, layer in enumerate(layerwise_sandbox(module, device)):
detach(batch)
if device.type == "cuda":
torch.cuda.synchronize(device)
tick = time.time()
# Forward
batch = batch.call(layer)
# Backward
backward_tensors = tuple(y for y in batch if y.requires_grad)
if backward_tensors:
torch.autograd.backward(backward_tensors, backward_tensors)
if device.type == "cuda":
torch.cuda.synchronize(device)
tock = time.time()
time_bufs[i].append(tock - tick)
us = 1_000_000
return [sum(int(t * us) for t in buf) for buf in time_bufs]
def profile_sizes(
module: nn.Sequential,
input: TensorOrTensors,
chunks: int,
param_scale: float,
device: torch.device,
) -> List[int]:
"""Profiles CUDA memory usage per layer."""
if device.type != "cuda":
raise ValueError("size profiler supports only CUDA device")
batch = Batch(input, 0)
sizes: List[int] = []
latent_scale = batch[0].size(0) / chunks
for i, x in enumerate(batch):
batch[i] = x[:1].detach().to(device).requires_grad_(x.requires_grad)
for layer in layerwise_sandbox(module, device):
detach(batch)
# Detect memory usage at forward.
memory_before = torch.cuda.memory_allocated(device)
batch = batch.call(layer)
memory_after = torch.cuda.memory_allocated(device)
latent_size = memory_after - memory_before
# Analyze size of parameters.
param_size = sum(p.storage().size() * p.storage().element_size() for p in layer.parameters())
        # Combine the sizes of parameters and activations with normalized scales.
size = latent_size * latent_scale + param_size * param_scale
sizes.append(int(size))
return sizes
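# Illustrative sketch (not part of the library API): profiling a tiny model
# on CPU. The layer sizes and the 0.1-second timeout are placeholders; real
# balancing normally goes through fairscale.nn.pipe.balance instead of
# calling these helpers directly.
def _example_profile_times() -> List[int]:
    model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 8))
    sample = torch.randn(4, 8)
    # Returns the accumulated elapsed time per layer, in microseconds.
    return profile_times(model, sample, timeout=0.1, device=torch.device("cpu"))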
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A helper to roughly balance a sequential module.
Usage::
import torch
from fairscale.nn import Pipe
from fairscale.nn.pipe.balance import balance_by_time
sample = torch.empty(128, 3, 224, 224)
balance = balance_by_time(torch.cuda.device_count(), model, sample)
pipe = Pipe(model, balance, chunks=8)
.. note::
    balance_by_time does not work with in-place ReLU because it exhaustively
    searches every partition boundary, which could hit an in-place ReLU.
.. note::
    If the model does not fit in the memory of a single CUDA device, pass
    device="cpu" to balance_by_time.
"""
from typing import List, Tuple, Union
import torch
from torch import Tensor
import torch.nn as nn
from . import blockpartition
from .profile import profile_sizes, profile_times
__all__ = ["balance_by_time", "balance_by_size"]
Device = Union[torch.device, int, str]
Tensors = Tuple[Tensor, ...]
TensorOrTensors = Union[Tensor, Tensors]
def _balance_cost(cost: List[int], partitions: int) -> List[int]:
partitioned = blockpartition.solve(cost, partitions)
return [len(p) for p in partitioned]
def balance_by_time(
partitions: int,
module: nn.Sequential,
sample: TensorOrTensors,
*,
timeout: float = 1.0,
device: Device = torch.device("cuda"),
) -> List[int]:
"""Naive automatic balancing by elapsed time per layer.
::
sample = torch.empty(128, 3, 224, 224)
balance = balance_by_time(torch.cuda.device_count(), model, sample)
pipe = Pipe(model, balance, chunks=8)
Args:
partitions (int):
intended number of partitions
module (torch.nn.Sequential):
sequential module to be partitioned
sample (torch.Tensor):
example input with arbitrary batch size
Keyword Args:
timeout (float):
            profiling iterates again if the timeout (in seconds) is not exceeded
(default: ``1.0``)
device ('cpu' or 'cuda' device):
CPU or CUDA device where each layer is profiled (default: the
current CUDA device)
Returns:
        A list of the number of layers in each partition. Use it for the `balance`
parameter of :class:`~fairscale.nn.Pipe`.
.. note::
`module` and `sample` must be placed on the same device.
"""
times = profile_times(module, sample, timeout, torch.device(device))
return _balance_cost(times, partitions)
def balance_by_size(
partitions: int,
module: nn.Sequential,
input: TensorOrTensors,
*,
chunks: int = 1,
param_scale: float = 2.0,
device: Device = torch.device("cuda"),
) -> List[int]:
"""Naive automatic balancing by CUDA memory usage per layer.
During training, required memory for parameters depends on which optimizer
is used. Optimizers may use buffers for each parameter to track
optimization statistics internally, such as momentum buffer in SGD.
    To get a more reliable size-based balance, you should specify `param_scale`
    according to your optimizer. The default `param_scale` is 2 instead of 1
    because every optimizer also needs memory for gradients in addition to the parameters.
Follow this guide to choose correct `param_scale` for typical optimizers:
========= ============= =========================================
Optimizer `param_scale` Internal State
========= ============= =========================================
SGD 2--3 (momentum_buffer)
Adam 4--5 exp_avg, exp_avg_sq, (max_exp_avg_sq)
Adadelta 4 square_avg, acc_delta
Adagrad 3 sum
RMSprop 3--5 square_avg, (momentum_buffer), (grad_avg)
========= ============= =========================================
Here's a simple example with the Adam optimizer::
balance = balance_by_size(
torch.cuda.device_count(),
model,
# Same size with mini-batch to train
torch.empty(1024, 3, 224, 224),
# Number of micro-batches to train with Pipe
chunks=8,
# 4 for Adam
param_scale=4.0,
)
pipe = Pipe(model, balance, chunks=8)
adam = Adam(pipe.parameters())
Args:
partitions (int):
intended number of partitions
module (torch.nn.Sequential):
sequential module to be partitioned
input (torch.Tensor):
            example mini-batch with the same size as used for training
Keyword Args:
chunks (int):
            number of micro-batches that will be used to train (default: ``1``)
param_scale (float):
            how many copies of the parameters are allocated for training. It
            depends on the optimizer. See the guide above. (default: ``2.0``)
device ('cuda' device):
CUDA device where each layer is profiled (default: the current CUDA
device)
Returns:
        A list of the number of layers in each partition. Use it for the `balance`
parameter of :class:`~fairscale.nn.Pipe`.
.. note::
`module` and `input` must be placed on the same CUDA device.
"""
sizes = profile_sizes(module, input, chunks, param_scale, torch.device(device))
return _balance_cost(sizes, partitions)
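# Illustrative sketch (not part of the library API). The model and sample
# here are small placeholders, and profiling runs on CPU so the example does
# not need a GPU; with real models you would typically profile on "cuda".
def _example_balance_by_time() -> List[int]:
    model = nn.Sequential(
        nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4)
    )
    sample = torch.randn(32, 8)
    # Split the five layers into two partitions with roughly equal run time,
    # e.g. [3, 2]; the exact result depends on the measured times.
    return balance_by_time(2, model, sample, timeout=0.5, device="cpu")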
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements "Block Partitions of Sequences" by Imre Bárány et al.
Paper: https://arxiv.org/pdf/1308.2452.pdf
"""
from typing import Iterator, List, Tuple
__all__ = ["solve"]
def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]:
"""Splits a sequence into several partitions to minimize variance for each
partition.
The result might not be optimal. However, it can be done only in O(kn³),
where k is the number of partitions and n is the length of the sequence.
"""
if partitions < 1:
raise ValueError(f"partitions must be a positive integer ({partitions} < 1)")
n = len(sequence)
if n < partitions:
raise ValueError(f"sequence is shorter than intended partitions ({n} < {partitions})")
# Normalize the sequence in [0, 1].
minimum = min(sequence)
maximum = max(sequence) - minimum
normal_sequence: List[float]
if maximum == 0:
normal_sequence = [0 for _ in sequence]
else:
normal_sequence = [(x - minimum) / maximum for x in sequence]
splits = [n // partitions * (x + 1) for x in range(partitions - 1)] + [n]
def block_size(i: int) -> float:
start = splits[i - 1] if i > 0 else 0
stop = splits[i]
return sum(normal_sequence[start:stop])
def leaderboard() -> Iterator[Tuple[float, int]]:
return ((block_size(i), i) for i in range(partitions))
while True:
"""
(1) Fix p ∈ [k] with M(P) = bp. So Bp is a maximal block of P.
"""
# max_size: M(P)
max_size, p = max(leaderboard())
while True:
"""
(2) If M(P) ≤ m(P) + 1, then stop.
"""
# min_size: m(P)
min_size, q = min(leaderboard())
if max_size <= min_size + 1:
return [sequence[i:j] for i, j in zip([0] + splits[:-1], splits)]
"""
(3) If M(P) > m(P) + 1, then let m(P) = bq for the q ∈ [k] which is
closest to p (ties broken arbitrarily). Thus Bq is a minimal block
of P. Let Bh be the block next to Bq between Bp and Bq. (Note that
Bh is a non-empty block: if it were, then m(P) = 0 and we should
have chosen Bh instead of Bq.)
"""
if p < q:
"""
So either p < q and then h = q−1 and we define P ∗ by moving
the last element from Bh = Bq−1 to Bq,
"""
h = q - 1
splits[h] -= 1
else:
"""
or q < p, and then h = q + 1 and P ∗ is obtained by moving the
first element of Bh = Bq+1 to Bq.
"""
h = q + 1
splits[q] += 1
"""
Set P = P ∗ . If p = h, then go to (1), else go to (2).
"""
if p == h:
break
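# Illustrative sketch (not part of the library API): splitting a small cost
# sequence into two partitions. Each returned sublist is a contiguous block
# of the input; the exact grouping depends on the iterative refinement above.
def _example_solve() -> List[List[int]]:
    costs = [1, 2, 3, 4, 5, 6]
    return solve(costs, partitions=2)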
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Portal keeps a tensor in the pocket plane. The tensor becomes hidden to the
autograd engine. The shared context of three functions (:class:`PortalBlue`,
:class:`PortalOrange`, and :class:`PortalCopy`) out of the computation graph is
one of the most important feature of :mod:`torchpipe.skip`.
The metaphor is inspired by Portal™ from Valve.
"""
from typing import Any, List, Optional, Tuple
import torch
from torch import Tensor
from . import Namespace
from ..copy import Context as CopyContext
from ..copy import Copy
from ..phony import get_phony
from ..stream import AbstractStream, get_device
__all__: List[str] = []
class Portal:
"""A portal for a tensor."""
def __init__(self, tensor: Optional[Tensor], tensor_life: int, index: int) -> None:
self.put_tensor(tensor, tensor_life)
self.grad: Optional[Tensor] = None
self.__index = index
self.ns_name: Optional[Tuple[Namespace, str]]
self.pipeline: Any
@property
def index(self) -> int:
return self.__index
def blue(self) -> Tensor:
"""Creates a :class:`PortalBlue` which hides the underlying tensor from
the autograd engine.
        Join the returned phony to the main lane of the autograd graph to
        ensure correct backpropagation::
PortalBlue --+
|
---------- Join --
"""
tensor = self.use_tensor()
if tensor is None:
return get_phony(torch.device("cpu"), requires_grad=False)
return PortalBlue.apply(self, tensor)
def orange(self, phony: Tensor) -> Optional[Tensor]:
"""Creates a :class:`PortalOrange` which retrieves the hidden tensor
        without losing the ability to backpropagate.
Give a phony forked from the main lane of an autograd graph::
+-- PortalOrange --+
| |
-- Fork --------- f(a, b) --
"""
self.check_tensor_life()
if self.tensor is None:
return self.use_tensor()
return PortalOrange.apply(self, phony)
def copy(
self,
prev_stream: AbstractStream,
next_stream: AbstractStream,
phony: Tensor,
) -> Tensor:
"""Copies the hidden tensor by a :class:`PortalCopy`.
        Give a phony and use the returned phony to keep backpropagation::
+-- PortalCopy --+
| |
-- Fork ---------- Join --
"""
if self.tensor is None:
return get_phony(torch.device("cpu"), requires_grad=False)
return PortalCopy.apply(self, prev_stream, next_stream, phony)
def check_tensor_life(self) -> None:
if self.tensor_life <= 0:
raise RuntimeError("tensor in portal has been removed")
def put_tensor(self, tensor: Optional[Tensor], tensor_life: int) -> None:
"""Stores a tensor into this portal."""
# [Life of Tensor through Portal]
#
# The tensor can be retrieved by use_tensor() up to 'tensor_life'
# times. When the life becomes 0, the tensor will be deleted for
# deallocation in CUDA memory.
#
        # The events below participate in the life of a tensor through a portal.
# Note that [x] denotes the events which call use_tensor():
#
# 1. [x] blue()
# 2. [ ] PortalBlue.forward
# 3. [ ] copy()
# 4. [ ] PortalCopy.forward
# 5. [ ] orange()
# 6. [x] PortalOrange.forward
# - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 7. [ ] orange() (recomputed)
# 8. [x] PortalOrange.forward (recomputed)
# 9. [ ] PortalOrange.backward
# 10. [ ] PortalCopy.backward
# 11. [x] blue() (recomputed)
# 12. [ ] PortalBlue.forward (recomputed)
# 13. [ ] PortalBlue.backward
#
self.tensor_life = tensor_life
if tensor_life > 0:
self.tensor = tensor
else:
self.tensor = None
def use_tensor(self) -> Optional[Tensor]:
"""Retrieves the underlying tensor and decreases the tensor life. When
        the life becomes 0, the tensor will be removed.
"""
self.check_tensor_life()
tensor = self.tensor
self.tensor_life -= 1
if self.tensor_life <= 0:
self.tensor = None
return tensor
def put_grad(self, grad: Tensor) -> None:
"""Stores a gradient into this portal."""
if hasattr(self, "pipeline"):
self.pipeline.send_portal_grad(self.ns_name, self.index, grad)
self.grad = grad
def use_grad(self) -> Tensor:
"""Retrieves and removes the underlying gradient. The gradient is
always ephemeral.
"""
if self.grad is None and hasattr(self, "pipeline"):
self.grad = self.pipeline.recv_portal_grad(self.ns_name, self.index)
if self.grad is None:
raise RuntimeError("grad in portal has been removed or never set")
grad = self.grad
self.grad = None
return grad
# Common interface between :class:`PortalBlue`, :class:`PortalOrange`, and
# :class:`PortalCopy`.
class Context(CopyContext):
portal: Portal
class PortalBlue(torch.autograd.Function):
"""Hides a tensor from the autograd engine by a :class:`Portal`."""
@staticmethod
# type: ignore
def forward(
ctx: Context,
portal: Portal,
# This tensor must be retrieved by portal.use_tensor().
tensor: Tensor,
) -> Tensor:
ctx.portal = portal
phony = get_phony(tensor.device, requires_grad=False)
return phony.detach()
@staticmethod
# type: ignore
def backward(
ctx: Context,
grad_phony: Tensor,
) -> Tuple[None, Tensor]:
# The paired PortalOrange should keep the gradient.
grad = ctx.portal.use_grad()
return None, grad
class PortalOrange(torch.autograd.Function):
"""Retrieves the hidden tensor from a :class:`Portal`."""
@staticmethod
# type: ignore
def forward(ctx: Context, portal: Portal, phony: Tensor) -> Tensor:
ctx.portal = portal
tensor = portal.use_tensor()
assert tensor is not None
return tensor.detach()
@staticmethod
def backward(ctx: Context, grad: Tensor) -> Tuple[None, None]: # type: ignore
# The paired PortalBlue will use the gradient.
ctx.portal.put_grad(grad)
return None, None
class PortalCopy(torch.autograd.Function):
"""Copies the hidden tensor in a :class:`Portal`. It replaces the hidden
    tensor with the copied one.
"""
@staticmethod
# type: ignore
def forward(
ctx: Context,
portal: Portal,
prev_stream: AbstractStream,
next_stream: AbstractStream,
phony: Tensor,
) -> Tensor:
ctx.portal = portal
assert portal.tensor is not None
(portal.tensor,) = Copy.forward(ctx, prev_stream, next_stream, portal.tensor)
phony = get_phony(get_device(next_stream), requires_grad=False)
return phony.detach()
@staticmethod
# type: ignore
def backward(
ctx: Context,
grad_phony: Tensor,
) -> Tuple[None, None, None, None]:
portal = ctx.portal
assert portal.grad is not None
_, _, portal.grad = Copy.backward(ctx, portal.grad)
return None, None, None, None
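# Illustrative sketch (not part of the library API) of the tensor-life
# mechanism documented in Portal.put_tensor(): with a life of 1, the first
# use_tensor() releases the tensor and any further access raises.
def _example_portal_life() -> None:
    portal = Portal(torch.ones(1), tensor_life=1, index=0)
    assert portal.use_tensor() is not None  # life: 1 -> 0, tensor released
    try:
        portal.use_tensor()
    except RuntimeError:
        pass  # "tensor in portal has been removed"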
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Static skip connection layout of ``@skippable`` modules."""
from typing import Dict, Iterable, List, Tuple
from torch import nn
from .namespace import Namespace
__all__: List[str] = []
class SkipLayout:
"""Represents a skip connection layout across partitions."""
# Skip routes indexed by 'ns, name': {(ns, name): (prev_j, next_j), ...}
by_ns_name: Dict[Tuple[Namespace, str], Tuple[int, int]]
# Skip routes indexed by partition number 'j': [[next_j]: [(prev_j, ns, name), ...], ...]
by_partition: List[List[Tuple[int, Namespace, str]]]
    # Skip routes indexed by source partition number 'j': [[prev_j]: [(next_j, ns, name), ...], ...]
by_src_partition: List[List[Tuple[int, Namespace, str]]]
def __init__(
self,
num_partitions: int,
skip_routes: Dict[Tuple[Namespace, str], Tuple[int, int]],
) -> None:
# The skip routes are already indexed by 'ns, name'.
self.by_ns_name = skip_routes
# Index skip routes by partition number 'j'.
self.by_partition = [[] for _ in range(num_partitions)]
self.by_src_partition = [[] for _ in range(num_partitions)]
for (ns, name), (prev_j, next_j) in skip_routes.items():
self.by_partition[next_j].append((prev_j, ns, name))
self.by_src_partition[prev_j].append((next_j, ns, name))
for p in self.by_partition:
p.sort()
def copy_policy_by_src(self, prev_j: int) -> Iterable[Tuple[int, Namespace, str]]:
"""Generates skip routes for the given destination partition number.
The skip routes are sorted by source partition number in ascending
order.
Yields:
Each tuple of (source partition number, namespace, name).
"""
for next_j, ns, name in self.by_src_partition[prev_j]:
if prev_j == next_j:
# This skip tensor will be popped at the same partition where
# it is stashed. In this case, copy is not required.
continue
yield (next_j, ns, name)
def copy_policy(self, next_j: int) -> Iterable[Tuple[int, Namespace, str]]:
"""Generates skip routes for the given destination partition number.
The skip routes are sorted by source partition number in ascending
order.
Yields:
Each tuple of (source partition number, namespace, name).
"""
for prev_j, ns, name in self.by_partition[next_j]:
if prev_j == next_j:
# This skip tensor will be popped at the same partition where
# it is stashed. In this case, copy is not required.
continue
yield (prev_j, ns, name)
def requires_copy(self, ns: Namespace, name: str) -> bool:
"""Whether the given namespace and name requires partition-to-partition
copy or not.
"""
prev_j, next_j = self.by_ns_name.get((ns, name), (-1, -1))
return prev_j != next_j
def inspect_skip_layout(partitions: List[nn.Sequential]) -> SkipLayout:
"""Inspects the skip connection layout in the given partitions."""
# NOTE(sublee): Hide circular import inside this subroutine. Circular
# import is not ideal but placing this logic near to SkipLayout may
# increase cohesion of code.
from .skippable import Skippable
skip_routes: Dict[Tuple[Namespace, str], Tuple[int, int]] = {}
stashed_at: Dict[Tuple[Namespace, str], int] = {}
for j, partition in enumerate(partitions):
for layer in partition:
if not isinstance(layer, Skippable):
continue
for ns, name in layer.stashable():
stashed_at[(ns, name)] = j
for ns, name in layer.poppable():
prev_j = stashed_at.pop((ns, name))
skip_routes[(ns, name)] = (prev_j, j)
return SkipLayout(len(partitions), skip_routes)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supports efficiency with skip connections."""
from .namespace import Namespace
from .skippable import pop, skippable, stash, verify_skippables
__all__ = ["skippable", "stash", "pop", "verify_skippables", "Namespace"]
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tracks skip tensors on a thread."""
from contextlib import contextmanager
import threading
from typing import Dict, Generator, List, Optional, Tuple
from torch import Tensor
from ..checkpoint import is_checkpointing
from ..dependency import fork, join
from ..microbatch import Batch
from ..stream import AbstractStream
from .layout import SkipLayout
from .namespace import Namespace
from .portal import Portal
__all__: List[str] = []
class SkipTracker:
"""Tracks saved skip tensors.
It will update the given micro-batch in place. This is because when it
manipulates the underlying skip tensors, the current micro-batch also has
to be connected with the skip tensors.
One thread has one skip tracker. Call :func:`current_skip_tracker` to get
the skip tracker on the current thread.
"""
def __init__(self) -> None:
self.tensors: Dict[Tuple[Namespace, str], Optional[Tensor]] = {}
def save(self, batch: Batch, ns: Namespace, name: str, tensor: Optional[Tensor]) -> None:
self.tensors[(ns, name)] = tensor
def load(self, batch: Batch, ns: Namespace, name: str) -> Optional[Tensor]:
return self.tensors.pop((ns, name))
def copy(
self,
batch: Batch,
prev_stream: AbstractStream,
next_stream: AbstractStream,
ns: Namespace,
name: str,
) -> None:
raise TypeError("copy is not supported for non-portal skip tensors")
@property
def index(self) -> int:
return 0
class SkipTrackerThroughPotals(SkipTracker):
"""Tracks saved skip tensors through portals. The skip tensors will be
hidden in portals so that the autograd engine does not need to track them.
This tracker is only used when the training or evaluating module is wrapped
with :class:`torchpipe.Pipe`.
"""
def __init__(self, skip_layout: SkipLayout, index: int) -> None:
super().__init__()
self.skip_layout = skip_layout
self.portals: Dict[Tuple[Namespace, str], Portal] = {}
self.__index = index
@property
def index(self) -> int:
return self.__index
def save(self, batch: Batch, ns: Namespace, name: str, tensor: Optional[Tensor]) -> None:
"""Saves the stashed skip tensor in a portal. The portal is then
connected to the given micro-batch with :class:`Join`.
"""
if not self.skip_layout.requires_copy(ns, name):
super().save(batch, ns, name, tensor)
return
        # See [Life of Tensor through Portal] at Portal.put_tensor() to understand the
# below tensor_life values. Here are the selected events which retrieve
# the tensor in portal:
#
# 1. [x] blue()
# ...
# 6. [x] PortalOrange.forward
# ...
# 8. [x] PortalOrange.forward (recomputed)
# ...
# 11. [x] blue() (recomputed)
#
if (ns, name) not in self.portals:
if is_checkpointing():
# Under checkpointing, the tensor used by the first
# PortalOrange should be alive in the portal. This tensor will
# be used again by the second PortalOrange during the
# recomputation.
tensor_life = 3 # Delete at [8. PortalOrange.forward (recomputed)]
else:
tensor_life = 2 # Delete at [6. PortalOrange.forward]
assert batch.index == self.index
portal = Portal(tensor, tensor_life, batch.index)
portal.ns_name = (ns, name)
self.portals[(ns, name)] = portal
else:
# Under recomputation, the portal already exists.
portal = self.portals[(ns, name)]
# The existing tensor life already became 0. It should be reset as
# 1 to delete the tensor after the second PortalBlue immediately.
tensor_life = 1 # Delete at [11. blue() (recomputed)]
portal.put_tensor(tensor, tensor_life)
phony = portal.blue()
batch[0] = join(batch[0], phony)
def load(self, batch: Batch, ns: Namespace, name: str) -> Optional[Tensor]:
"""Loads a skip tensor from the corresponding portal to pop. The given
micro-batch is connected to the portal with :class:`Fork`.
"""
if not self.skip_layout.requires_copy(ns, name):
tensor = super().load(batch, ns, name)
return tensor
portal = self.portals[(ns, name)]
batch[0], phony = fork(batch[0])
tensor = portal.orange(phony)
return tensor
def copy(
self,
batch: Batch,
prev_stream: AbstractStream,
next_stream: AbstractStream,
ns: Namespace,
name: str,
) -> None:
"""Copies the skip tensor in the corresponding portal. The given
micro-batch and the portal will be tied with :class:`Fork` and
:class:`Join`.
"""
assert self.skip_layout.requires_copy(ns, name)
batch[0], phony = fork(batch[0])
portal = self.portals[(ns, name)]
phony = portal.copy(prev_stream, next_stream, phony)
batch[0] = join(batch[0], phony)
class ThreadLocal(threading.local):
def __init__(self) -> None:
self.skip_tracker: Optional[SkipTracker] = None
thread_local = ThreadLocal()
@contextmanager
def use_skip_tracker(skip_tracker: SkipTracker) -> Generator[None, None, None]:
"""Registers the given skip tracker on the current thread within a
context::
with use_skip_tracker(my_skip_tracker):
...
"""
orig = thread_local.skip_tracker
thread_local.skip_tracker = skip_tracker
try:
yield
finally:
thread_local.skip_tracker = orig
def current_skip_tracker() -> SkipTracker:
"""Gets the skip tracker on the current thread."""
skip_tracker = thread_local.skip_tracker
if skip_tracker is None:
skip_tracker = SkipTracker()
thread_local.skip_tracker = skip_tracker
return skip_tracker
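# Illustrative sketch (not part of the library API): use_skip_tracker swaps
# the thread-local tracker inside the context and restores the previous one
# on exit.
def _example_use_skip_tracker() -> None:
    tracker = SkipTracker()
    with use_skip_tracker(tracker):
        assert current_skip_tracker() is tracker
    assert current_skip_tracker() is not tracker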
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides isolated namespace of skip tensors."""
import abc
from functools import total_ordering
from typing import Any
import uuid
__all__ = ["Namespace"]
@total_ordering
class Namespace(metaclass=abc.ABCMeta):
"""Namespace for isolating skip tensors used by :meth:`isolate()
<torchpipe.skip.skippable.Skippable.isolate>`.
"""
__slots__ = ("id",)
def __init__(self) -> None:
self.id = uuid.uuid4()
def __repr__(self) -> str:
return f"<Namespace '{self.id}'>"
def __hash__(self) -> int:
return hash(self.id)
# Namespaces should support ordering, since SkipLayout will sort tuples
    # including a namespace. But the actual order between namespaces is not
    # important, so they are ordered by their version 4 UUIDs, which are
    # effectively random.
def __lt__(self, other: Any) -> bool:
if isinstance(other, Namespace):
return self.id < other.id
return False
def __eq__(self, other: Any) -> bool:
if isinstance(other, Namespace):
return self.id == other.id
return False
# 'None' is the default namespace,
# which means that 'isinstance(None, Namespace)' is 'True'.
Namespace.register(type(None))
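# Illustrative sketch (not part of the library API): namespaces compare by
# their random UUIDs, so the order between them is arbitrary but consistent,
# and 'None' acts as the default namespace.
def _example_namespace() -> None:
    ns1, ns2 = Namespace(), Namespace()
    assert ns1 != ns2
    assert isinstance(None, Namespace)      # NoneType is registered above.
    sorted([(ns2, "skip"), (ns1, "skip")])  # Tuples containing namespaces sort fine.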
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The user interface to define skip connections."""
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Dict,
FrozenSet,
Generator,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from torch import Tensor, nn
from ..microbatch import Batch
from .namespace import Namespace
from .tracker import current_skip_tracker
__all__ = ["skippable", "stash", "pop", "verify_skippables"]
Tensors = Tuple[Tensor, ...]
TensorOrTensors = Union[Tensor, Tensors]
StashPop = Union["stash", "pop"]
StashPopGenerator = Generator[StashPop, Optional[Tensor], TensorOrTensors]
if TYPE_CHECKING:
SkippableModule = nn.Module[Union[StashPopGenerator, TensorOrTensors]]
else:
SkippableModule = nn.Module
T = TypeVar("T", bound="Skippable")
class Skippable(nn.Module):
"""The base class for skippable modules.
Do not use this class directly. Define a subclass by :func:`skippable`
instead.
"""
module_cls: ClassVar[Type[SkippableModule]]
stashable_names: ClassVar[FrozenSet[str]]
poppable_names: ClassVar[FrozenSet[str]]
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__()
self.module = self.module_cls(*args, **kwargs) # type: ignore
self.namespaces: Dict[str, Namespace] = {}
def __repr__(self) -> str:
return f"@skippable({self.module})"
def namespaced(self, name: str) -> Tuple[Namespace, str]:
"""Prepends namespace for the given skip name."""
ns = self.namespaces.get(name)
ns = cast(Namespace, ns)
return (ns, name)
def stashable(self) -> Iterable[Tuple[Namespace, str]]:
"""Iterates over namespaced skip names to be stashed."""
for name in self.stashable_names:
yield self.namespaced(name)
def poppable(self) -> Iterable[Tuple[Namespace, str]]:
"""Iterates over namespaced skip names to be popped."""
for name in self.poppable_names:
yield self.namespaced(name)
def isolate(self: T, ns: Namespace, *, only: Optional[Iterable[str]] = None) -> T:
r"""Isolates a specified subset or the whole set of skip tensors into a
namespace. In a single sequential module, skip tensors with the same
name are not allowed unless they are isolated by different namespaces.
Here's an example using the same name for skip tensors twice. Each pair
        of ``Layer1`` and ``Layer3`` is isolated with its own namespace ``ns1``
and ``ns2``. There is no conflict anymore::
ns1 = Namespace()
ns2 = Namespace()
model = nn.Sequential(
Layer1().isolate(ns1),
Layer1().isolate(ns2),
Layer2(),
Layer3().isolate(ns2),
Layer3().isolate(ns1),
)
When `only` parameter is omitted, all skip tensors are isolated. You
can isolate a subset of skip tensors by passing `only` parameter::
ns_alice = Namespace()
ns_bob = Namespace()
model = nn.Sequential(
...
StashStashPop().isolate(ns_alice, only=['alice']) \
.isolate(ns_bob, only=['bob']),
...
)
Args:
ns (Namespace):
namespace for isolation
Keyword Args:
only (iterable of strs):
names of specific skip tensors to be isolated (omit this option
to isolate all skip tensors declared in this module)
Returns:
this module itself
"""
names: Iterable[str]
if only is None:
names = self.stashable_names | self.poppable_names
else:
names = set(only)
for name in names:
self.namespaces[name] = ns
return self
def dispatch(
self,
input: TensorOrTensors,
handle_stash: Callable[[str, Optional[Tensor]], None],
handle_pop: Callable[[str], Optional[Tensor]],
) -> TensorOrTensors:
"""Dispatches :class:`stash` or :class:`pop` commands generated by the
module's ``forward()``.
"""
generator = self.module(input)
if not isinstance(generator, Generator):
# The underlying module returned output without any yield.
output = generator
return output
try:
op = next(generator)
while True:
if isinstance(op, stash):
handle_stash(op.name, op.tensor)
op = next(generator)
continue
if isinstance(op, pop):
tensor = handle_pop(op.name)
op = generator.send(tensor)
continue
raise TypeError("%r is not a command from @skippable" % op)
except StopIteration as stop:
output = stop.args[0]
return output
def forward(self, input: TensorOrTensors) -> TensorOrTensors: # type: ignore
"""Performs the forward propagation. :class:`stash` or :class:`pop`
commands will be handled by portals silently. The portals won't be
exposed to users.
Raises:
RuntimeError:
illegal 'stash' or 'pop' is found.
"""
skip_tracker = current_skip_tracker()
stashed_tensors: Dict[str, Optional[Tensor]] = {}
# Load skip tensors that might be popped.
poppable_tensors = {}
batch = Batch(input, skip_tracker.index)
for ns, name in self.poppable():
try:
poppable_tensors[name] = skip_tracker.load(batch, ns, name)
except KeyError:
raise RuntimeError(f"'{name}' has not been stashed")
input = batch.tensor_or_tensors
# Handle skip commands.
def handle_stash(name: str, tensor: Optional[Tensor]) -> None:
if name not in self.stashable_names:
raise RuntimeError(f"'{name}' has not been declared as stashable")
stashed_tensors[name] = tensor
def handle_pop(name: str) -> Optional[Tensor]:
if name not in self.poppable_names:
raise RuntimeError(f"'{name}' has not been declared as poppable")
return poppable_tensors.pop(name)
output = self.dispatch(input, handle_stash, handle_pop)
# All declared skips must be stashed or popped.
not_stashed = self.stashable_names - stashed_tensors.keys()
if not_stashed:
comma_names = ", ".join("'%s'" % n for n in not_stashed)
raise RuntimeError(f"{comma_names} must be stashed but have not")
not_popped = poppable_tensors.keys()
if not_popped:
comma_names = ", ".join("'%s'" % n for n in not_popped)
raise RuntimeError(f"{comma_names} must be popped but have not")
# Save stashed skip tensors.
batch = Batch(output, skip_tracker.index)
for ns, name in self.stashable():
tensor = stashed_tensors[name]
skip_tracker.save(batch, ns, name, tensor)
output = batch.tensor_or_tensors
return output
# TODO(sublee): Move to above of Skippable class for better read flow.
def skippable(
stash: Iterable[str] = (),
pop: Iterable[str] = (),
) -> Callable[[Type[SkippableModule]], Type[Skippable]]:
"""The decorator to define a :class:`nn.Module <torch.nn.Module>` with skip
connections. Decorated modules are called "skippable". This functionality
works perfectly fine even when the module is not wrapped by
:class:`~torchpipe.Pipe`.
Each skip tensor is managed by its name. Before manipulating skip tensors,
a skippable module must statically declare the names for skip tensors by
    `stash` and/or `pop` parameters. Skip tensors with a pre-declared name can be
stashed by ``yield stash(name, tensor)`` or popped by ``tensor = yield
pop(name)``.
Here is an example with three layers. A skip tensor named "1to3" is stashed
and popped at the first and last layer, respectively::
@skippable(stash=['1to3'])
class Layer1(nn.Module):
def forward(self, input):
yield stash('1to3', input)
return f1(input)
class Layer2(nn.Module):
def forward(self, input):
return f2(input)
@skippable(pop=['1to3'])
class Layer3(nn.Module):
def forward(self, input):
skip_1to3 = yield pop('1to3')
return f3(input) + skip_1to3
model = nn.Sequential(Layer1(), Layer2(), Layer3())
One skippable module can stash or pop multiple skip tensors::
@skippable(stash=['alice', 'bob'], pop=['carol'])
class StashStashPop(nn.Module):
def forward(self, input):
yield stash('alice', f_alice(input))
yield stash('bob', f_bob(input))
carol = yield pop('carol')
return input + carol
Every skip tensor must be associated with exactly one pair of `stash` and
`pop`. :class:`~torchpipe.Pipe` checks this restriction automatically
when wrapping a module. You can also check the restriction by
:func:`~torchpipe.skip.verify_skippables` without
:class:`~torchpipe.Pipe`.
.. note::
:func:`@skippable <skippable>` changes the type of the wrapped class.
        But currently (mypy v0.740), mypy cannot understand class decorators
yet (`#3135 <https://github.com/python/mypy/issues/3135>`_).
There are two workarounds:
1. Naively ignore type errors by ``# type: ignore``.
2. Use ``skippable()()`` as a function instead of a decorator.
.. seealso:: :ref:`Long Skip Connections`
"""
stashable_names = frozenset(stash)
poppable_names = frozenset(pop)
def extend_skippable(module_cls: Type[SkippableModule]) -> Type[Skippable]:
name = module_cls.__name__
bases = (Skippable,)
attrs = {"module_cls": module_cls, "stashable_names": stashable_names, "poppable_names": poppable_names}
return type(name, bases, attrs)
return extend_skippable
class stash:
"""The command to stash a skip tensor.
::
def forward(self, input):
yield stash('name', input)
return f(input)
Args:
name (str): name of skip tensor
input (torch.Tensor or None): tensor to pass to the skip connection
"""
__slots__ = ("name", "tensor")
def __init__(self, name: str, tensor: Optional[Tensor]) -> None:
self.name = name
self.tensor = tensor
class pop:
"""The command to pop a skip tensor.
::
def forward(self, input):
skip = yield pop('name')
return f(input) + skip
Args:
name (str): name of skip tensor
Returns:
the skip tensor previously stashed by another layer under the same name
"""
__slots__ = ("name",)
def __init__(self, name: str) -> None:
self.name = name
def verify_skippables(module: nn.Sequential) -> None:
"""Verifies if the underlying skippable modules satisfy integrity.
Every skip tensor must have only one pair of `stash` and `pop`. If there
are one or more unmatched pairs, it will raise :exc:`TypeError` with the
detailed messages.
Here are a few failure cases. :func:`verify_skippables` will report failure
for these cases::
# Layer1 stashes "1to3".
# Layer3 pops "1to3".
nn.Sequential(Layer1(), Layer2())
# └──── ?
nn.Sequential(Layer2(), Layer3())
# ? ────┘
nn.Sequential(Layer1(), Layer2(), Layer3(), Layer3())
# └───────────────────┘ ^^^^^^
nn.Sequential(Layer1(), Layer1(), Layer2(), Layer3())
# ^^^^^^ └───────────────────┘
To use the same name for multiple skip tensors, they must be isolated by
different namespaces. See :meth:`isolate()
<torchpipe.skip.skippable.Skippable.isolate>`.
Raises:
TypeError:
one or more pairs of `stash` and `pop` are not matched.
"""
stashed: Set[Tuple[Namespace, str]] = set()
popped: Set[Tuple[Namespace, str]] = set()
msgs: List[str] = []
for layer_name, layer in module.named_children():
if not isinstance(layer, Skippable):
continue
for name in layer.stashable_names & layer.poppable_names:
msg = f"'{layer_name}' declared '{name}' both as stashable and as poppable"
msgs.append(msg)
for ns, name in layer.stashable():
if name in layer.poppable_names:
continue
if (ns, name) in stashed:
msg = f"'{layer_name}' redeclared '{name}' as stashable " "but not isolated by namespace"
msgs.append(msg)
continue
stashed.add((ns, name))
for ns, name in layer.poppable():
if name in layer.stashable_names:
continue
if (ns, name) in popped:
msg = f"'{layer_name}' redeclared '{name}' as poppable " "but not isolated by namespace"
msgs.append(msg)
continue
if (ns, name) not in stashed:
msg = f"'{layer_name}' declared '{name}' as poppable but it was not stashed"
msgs.append(msg)
continue
popped.add((ns, name))
for (_, name) in stashed - popped:
msg = f"no module declared '{name}' as poppable but stashed"
msgs.append(msg)
if msgs:
raise TypeError(
"one or more pairs of stash and pop do not match:\n\n%s" "" % "\n".join("* %s" % x for x in msgs)
)
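# Illustrative sketch (not part of the library API), adapted from the
# docstrings above: a skip tensor named "1to3" is stashed at the first layer
# and popped at the last one; the layer bodies are placeholders. It also
# shows that skippable modules work without being wrapped by Pipe.
@skippable(stash=["1to3"])
class _ExampleLayer1(nn.Module):
    def forward(self, input):
        yield stash("1to3", input)
        return input + 1
class _ExampleLayer2(nn.Module):
    def forward(self, input):
        return input * 2
@skippable(pop=["1to3"])
class _ExampleLayer3(nn.Module):
    def forward(self, input):
        skip_1to3 = yield pop("1to3")
        return input + skip_1to3
def _example_skippable() -> None:
    import torch  # local import: this module only imports names from torch
    model = nn.Sequential(_ExampleLayer1(), _ExampleLayer2(), _ExampleLayer3())
    verify_skippables(model)  # Raises TypeError if stash/pop pairs do not match.
    output = model(torch.zeros(2, 2))
    assert output.shape == (2, 2)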
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import contextmanager
from dataclasses import dataclass
import functools
import threading
from typing import Any, Dict, Generator, Optional, Tuple
import weakref
import torch
from torch import Tensor
import torch.nn as nn
import torch.utils.checkpoint as torch_checkpoint
from fairscale.internal.containers import pack_kwargs, split_non_tensors, unpack_kwargs, unpack_non_tensors
from .checkpoint_utils import patch_batchnorm
# https://docs.python.org/3/library/threading.html#thread-local-data
# Manage the checkpoint context with thread-local data.
@dataclass
class ThreadLocalCheckpointingState(threading.local):
is_checkpointing: bool = False
is_recomputing: bool = False
is_checkpointing_disabled: bool = False
thread_local = ThreadLocalCheckpointingState()
@contextmanager
def disable_checkpointing() -> Generator[None, None, None]:
"""Makes :func:`is_checkpointing_disabled` return :data:`True` within a context."""
orig = thread_local.is_checkpointing_disabled
thread_local.is_checkpointing_disabled = True
try:
yield
finally:
thread_local.is_checkpointing_disabled = orig
@contextmanager
def enable_checkpointing() -> Generator[None, None, None]:
"""Makes :func:`is_checkpointing` return :data:`True` within a context."""
orig = thread_local.is_checkpointing
thread_local.is_checkpointing = True
try:
yield
finally:
thread_local.is_checkpointing = orig
@contextmanager
def enable_recomputing() -> Generator[None, None, None]:
"""Makes :func:`is_recomputing` return :data:`True` within a context."""
orig = thread_local.is_recomputing
thread_local.is_recomputing = True
try:
yield
finally:
thread_local.is_recomputing = orig
def is_checkpointing() -> bool:
"""Whether the current forward propagation is under checkpointing.
Returns:
bool: :data:`True` if it's under checkpointing.
"""
return thread_local.is_checkpointing
def is_recomputing() -> bool:
"""Whether the current forward propagation is under checkpoint
recomputation. Use this to prevent duplicated side-effects at forward
propagation::
class Counter(nn.Module):
def __init__(self):
super().__init__()
self.counter = 0
def forward(self, input):
if not is_recomputing():
self.counter += 1
return input
Returns:
bool: :data:`True` if it's under checkpoint recomputation.
"""
return thread_local.is_recomputing
def checkpoint_wrapper(
module: nn.Module,
offload_to_cpu: bool = False,
) -> nn.Module:
"""
A friendlier wrapper for performing activation checkpointing.
Compared to the PyTorch version, this version:
- wraps an nn.Module, so that all subsequent calls will use checkpointing
- handles keyword arguments in the forward
- handles non-Tensor outputs from the forward
- supports offloading activations to CPU
Usage::
checkpointed_module = checkpoint_wrapper(my_module, offload_to_cpu=True)
a, b = checkpointed_module(x, y=3, z=torch.Tensor([1]))
To understand the benefits of checkpointing and the `offload_to_cpu` flag,
let's divide activations into 2 types: inner activations and outer
activations w.r.t. the checkpointed modules. The inner ones are saved
by activation checkpointing, the outer ones are saved by offload_to_cpu.
In terms of GPU memory savings:
- When inner ones are large in size and outer ones are small,
checkpointing helps a lot, offload_to_cpu may help a little.
- When inner ones are small and outer ones are large,
checkpointing helps little, offload_to_cpu helps a lot.
- When both inner and outer are large, both help and the
benefit is additive.
    .. note::
The first and last layers are not likely to benefit from the `offload_to_cpu` flag
because (1) there are typically other references to the first layer's input, so
the GPU memory won't be freed; (2) the input to the last layer is immediately
used by the backward pass and won't result in memory savings.
Args:
module (nn.Module):
The module to be wrapped
offload_to_cpu (bool):
Whether to offload activations to CPU.
Returns:
(nn.Module):
Wrapped module
"""
# Patch the batchnorm layers in case there are any in this module.
patch_batchnorm(module)
# The use of weakref here is to prevent creating a ref cycle: m -> m.forward -> m.
# When such cycle exists, gc won't collect the module when the module is freed.
# That causes GPU memory to be leaked. See the unit test for how we catch that.
#
# We prefer this over a class wrapper since the class wrapper would have to
# proxy a lot of fields and methods.
module.forward = functools.partial( # type: ignore
_checkpointed_forward, type(module).forward, weakref.ref(module), offload_to_cpu
)
return module
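# Illustrative sketch (not part of the library; module sizes are arbitrary examples):
# wrapping a small model with ``checkpoint_wrapper`` and running a forward/backward
# pass. The forward is recomputed during backward instead of storing activations.
def _demo_checkpoint_wrapper() -> None:
    model = checkpoint_wrapper(nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4)))
    x = torch.randn(2, 4, requires_grad=True)
    loss = model(x).sum()
    loss.backward()  # triggers recomputation of the checkpointed forward
    assert x.grad is not None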
def _checkpointed_forward(
original_forward: Any, weak_self: Any, offload_to_cpu: bool, *args: Any, **kwargs: Any
) -> Any:
module = weak_self()
# If gradients are disabled, just use original `.forward()` method directly.
if not torch.is_grad_enabled() or thread_local.is_checkpointing_disabled:
return original_forward(module, *args, **kwargs)
# Autograd Functions in PyTorch work best with positional args, since
# the backward must return gradients (or None) for every input argument.
# We can flatten keyword arguments to make this easier.
args = (module,) + args
kwarg_keys, flat_args = pack_kwargs(*args, **kwargs)
parent_ctx_dict: Dict[str, Any] = {
"offload": offload_to_cpu,
}
    # A dummy tensor with requires_grad=True is used to ensure the backward pass is
    # called. This is needed when original_forward's inputs are non-tensors (e.g. a
    # tuple). Using this dummy tensor avoids requiring users to set the requires_grad
    # flag on their input tensors; for tuple-type inputs, setting the flag would not
    # even trigger the backward pass.
    #
    # One implication is that, since we always feed in a dummy tensor that requires
    # grad, the output will always require grad, even if it originally wouldn't, such
    # as when neither the module nor the original input requires grad.
# We get around this by saving the desired requires_grad value in output and
# detaching the output if needed.
output = CheckpointFunction.apply(
torch.tensor([], requires_grad=True), original_forward, parent_ctx_dict, kwarg_keys, *flat_args
)
output_requires_grad = parent_ctx_dict["output_requires_grad"]
if not isinstance(output, torch.Tensor):
# If output should not require grad, then detach it, since otherwise it will
# always have requires_grad = True due to our dummy tensor input above that
# requires_grad
output = [x.detach() if not output_requires_grad else x for x in output]
packed_non_tensor_outputs = parent_ctx_dict["packed_non_tensor_outputs"]
if packed_non_tensor_outputs:
output = unpack_non_tensors(output, packed_non_tensor_outputs)
else:
# If output should not require grad, then detach it, since otherwise it will
# always have requires_grad = True due to our dummy tensor input above that
# requires_grad
if not output_requires_grad:
output = output.detach()
return output
def get_rng_state() -> Dict[str, Any]:
state = {"torch_rng_state": torch.get_rng_state()}
if torch.cuda.is_available():
state["cuda_rng_state"] = torch.cuda.get_rng_state()
return state
def set_rng_state(state: Dict[str, Any]) -> None:
torch.set_rng_state(state["torch_rng_state"])
if torch.cuda.is_available():
torch.cuda.set_rng_state(state["cuda_rng_state"])
def is_autocast_enabled() -> bool:
"""Similar to torch.is_autocast_enabled, but compatible with torch 1.5.1"""
if hasattr(torch, "is_autocast_enabled"):
return torch.is_autocast_enabled()
return False
@contextmanager
def autocast(enabled: bool) -> Generator:
"""Similar to torch.cuda.amp.autocast, but compatible with torch 1.5.1"""
if enabled:
with torch.cuda.amp.autocast(enabled):
yield
else:
yield
class CheckpointFunction(torch.autograd.Function):
"""Similar to the torch version, but support non-Tensor outputs.
The caller is expected to provide a dict (*parent_ctx_dict*) that will hold
the non-Tensor outputs. These should be combined with the Tensor *outputs*
by calling :func:`unpack_non_tensors`.
"""
@staticmethod
def forward( # type: ignore
ctx: Any,
dummy_tensor_requires_grad: torch.Tensor,
run_function: Any,
parent_ctx_dict: Dict[str, Any],
kwarg_keys: Tuple[str, ...],
*args: Any,
**kwargs: Any
) -> Any:
torch_checkpoint.check_backward_validity(args)
ctx.run_function = run_function
ctx.kwarg_keys = kwarg_keys
ctx.fwd_rng_state = get_rng_state()
ctx.had_autocast_in_fwd = is_autocast_enabled()
tensor_inputs, packed_non_tensor_inputs = split_non_tensors(args)
if parent_ctx_dict["offload"]:
ctx.fwd_device = tuple(x.device for x in tensor_inputs)
ctx.grad_requirements = tuple(x.requires_grad for x in tensor_inputs)
tensor_inputs = tuple(x.to("cpu", non_blocking=True) for x in tensor_inputs)
else:
ctx.fwd_device, ctx.grad_requirements = None, None
ctx.save_for_backward(*tensor_inputs)
ctx.packed_non_tensor_inputs = packed_non_tensor_inputs
with torch.no_grad(), enable_checkpointing():
unpacked_args, unpacked_kwargs = unpack_kwargs(kwarg_keys, args)
outputs = run_function(*unpacked_args, **unpacked_kwargs)
the_module = unpacked_args[0]
# Because we run with torch.no_grad(), we can't actually access
# outputs.requires_grad. Instead, we manually compute it by
# checking if either the input or the module needs grads
parameters = list(the_module.parameters())
# If the module is wrapped by FlattenParamsWrapper, then the
# parameters would have been deleted. If so, we need to access
# the views into the flattened parameters.
if hasattr(the_module, "_unflattened_param_views"):
parameters += the_module._unflattened_param_views
output_requires_grad = any(param.requires_grad for param in parameters) or any(
x.requires_grad for x in tensor_inputs
)
parent_ctx_dict["output_requires_grad"] = output_requires_grad
if not isinstance(outputs, torch.Tensor):
# Autograd Functions don't like non-Tensor outputs. We can split the
# non-Tensor and Tensor outputs, returning the former by reference
# through *parent_ctx_dict* and returning the latter directly.
outputs, packed_non_tensor_outputs = split_non_tensors(outputs)
parent_ctx_dict["packed_non_tensor_outputs"] = packed_non_tensor_outputs
return outputs
@staticmethod
def backward(ctx: Any, *args: Any) -> Tuple[Optional[Tensor], ...]:
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError("Checkpointing is not compatible with .grad(), please use .backward() if possible")
tensor_inputs: Tuple = ctx.saved_tensors
tensor_inputs = torch_checkpoint.detach_variable(tensor_inputs)
if ctx.fwd_device is not None:
tensor_inputs = tuple(t.to(ctx.fwd_device[i], non_blocking=True) for i, t in enumerate(tensor_inputs))
for i, need_grad in enumerate(ctx.grad_requirements):
tensor_inputs[i].requires_grad = need_grad
inputs = unpack_non_tensors(tensor_inputs, ctx.packed_non_tensor_inputs)
# Store the current states.
bwd_rng_state = get_rng_state()
# Set the states to what it used to be before the forward pass.
set_rng_state(ctx.fwd_rng_state)
with torch.enable_grad(), enable_recomputing(), autocast(ctx.had_autocast_in_fwd):
unpacked_args, unpacked_kwargs = unpack_kwargs(ctx.kwarg_keys, inputs)
outputs = ctx.run_function(*unpacked_args, **unpacked_kwargs)
tensor_outputs, _ = split_non_tensors(outputs)
# Set the states back to what it was at the start of this function.
set_rng_state(bwd_rng_state)
# Run backward() with only Tensors that require grad
outputs_with_grad = []
args_with_grad = []
for i in range(len(tensor_outputs)):
if tensor_outputs[i].requires_grad:
outputs_with_grad.append(tensor_outputs[i])
args_with_grad.append(args[i])
if len(outputs_with_grad) == 0:
raise RuntimeError("None of the outputs have requires_grad=True, " "this checkpoint() is not necessary")
torch.autograd.backward(outputs_with_grad, args_with_grad)
grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else None for inp in inputs)
return (None, None, None, None) + grads
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from .checkpoint_activations import checkpoint_wrapper, is_checkpointing, is_recomputing
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
from torch import Tensor, nn
from torch.nn.modules.batchnorm import _BatchNorm
def patch_batchnorm(module: nn.Module) -> List:
"""Patch all batchnorm instances (1d, 2d, 3d, sync_bn, etc.) of a module
so that they don't track running stats when torch.no_grad() is enabled.
This is important in activation checkpointing to ensure stats are tracked
correctly as if there were no activation checkpointing. The reason is
    that activation checkpointing runs the forward function twice, first
    with torch.no_grad(), then with torch.enable_grad().
Args:
module (nn.Module):
The module to be patched in-place.
Returns:
(list):
            A list of hook handles that can later be removed.
"""
def pre_forward(module: _BatchNorm, input: Tensor) -> None:
if torch.is_grad_enabled():
return
module._track_running_stats_backup = module.track_running_stats
module.track_running_stats = False
def post_forward(module: _BatchNorm, input: Tensor, result: Tensor) -> None:
if torch.is_grad_enabled():
return
module.track_running_stats = module._track_running_stats_backup
hooks = []
for name, child in module.named_modules():
# _BatchNorm is base for bn1d, bn2d, bn3d and sync_bn, apex_sync_bn, etc.
if isinstance(child, _BatchNorm) and not hasattr(child, "disable_patch_batchnorm"):
# Register the pre/post hooks.
pre_handle = child.register_forward_pre_hook(pre_forward)
post_handle = child.register_forward_hook(post_forward)
hooks += [pre_handle, post_handle]
return hooks
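# Illustrative sketch (not part of the library; shapes are arbitrary examples): after
# patching, a BatchNorm layer leaves its running stats untouched when run under
# torch.no_grad(), which is exactly the no-grad forward used by activation checkpointing.
def _demo_patch_batchnorm() -> None:
    bn = nn.BatchNorm1d(3)
    handles = patch_batchnorm(bn)
    running_mean_before = bn.running_mean.clone()
    with torch.no_grad():
        bn(torch.randn(8, 3))  # stats are not updated here
    assert torch.equal(bn.running_mean, running_mean_before)
    bn(torch.randn(8, 3))  # a normal, grad-enabled forward still updates the stats
    for handle in handles:
        handle.remove()  # the returned handles can be used to undo the patching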
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .initialize import get_model_parallel_group, get_model_parallel_rank, get_model_parallel_world_size
from .utils import VocabUtility
class _VocabParallelCrossEntropy(torch.autograd.Function):
@staticmethod
def forward(ctx, vocab_parallel_logits, target): # type: ignore
# Maximum value along vocab dimension across all GPUs.
logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
torch.distributed.all_reduce(logits_max, op=torch.distributed.ReduceOp.MAX, group=get_model_parallel_group())
# Subtract the maximum value.
vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1))
        # Get the partition's vocab indices
get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
partition_vocab_size = vocab_parallel_logits.size()[-1]
rank = get_model_parallel_rank()
world_size = get_model_parallel_world_size()
vocab_start_index, vocab_end_index = get_vocab_range(partition_vocab_size, rank, world_size)
        # Create a mask for vocab ids that fall outside this partition (True means masked).
target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
masked_target = target.clone() - vocab_start_index
masked_target[target_mask] = 0
# Get predicted-logits = logits[target].
        # For simplicity, we convert logits to a 2-D tensor with size
# [*, partition-vocab-size] and target to a 1-D tensor of size [*].
logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size)
masked_target_1d = masked_target.view(-1)
arange_1d = torch.arange(start=0, end=logits_2d.size()[0], device=logits_2d.device)
predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
predicted_logits_1d = predicted_logits_1d.clone().contiguous()
predicted_logits = predicted_logits_1d.view_as(target)
predicted_logits[target_mask] = 0.0
# All reduce is needed to get the chunks from other GPUs.
torch.distributed.all_reduce(
predicted_logits, op=torch.distributed.ReduceOp.SUM, group=get_model_parallel_group()
)
# Sum of exponential of logits along vocab dimension across all GPUs.
exp_logits = vocab_parallel_logits.exp()
sum_exp_logits = exp_logits.sum(dim=-1)
torch.distributed.all_reduce(
sum_exp_logits, op=torch.distributed.ReduceOp.SUM, group=get_model_parallel_group()
)
# Loss = log(sum(exp(logits))) - predicted-logit.
loss = torch.log(sum_exp_logits) - predicted_logits
# Store softmax, target-mask and masked-target for backward pass.
exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
return loss
@staticmethod
def backward(ctx, grad_output): # type: ignore
        # Retrieve tensors from the forward path.
softmax, target_mask, masked_target_1d = ctx.saved_tensors
        # All the inputs have softmax as their gradient.
grad_input = softmax
# For simplicity, work with the 2D gradient.
partition_vocab_size = softmax.size()[-1]
grad_2d = grad_input.view(-1, partition_vocab_size)
# Add the gradient from matching classes.
arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device)
grad_2d[arange_1d, masked_target_1d] -= 1.0 - target_mask.view(-1).float()
# Finally elementwise multiplication with the output gradients.
grad_input.mul_(grad_output.unsqueeze(dim=-1))
return grad_input, None
def vocab_parallel_cross_entropy(vocab_parallel_logits: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""Helper function for the cross entropy."""
return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target)
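# Illustrative sketch (assumes torch.distributed and the model parallel group have
# already been initialized, e.g. via ``initialize_model_parallel``; the names and
# shapes here are arbitrary examples): each rank passes only its shard of the
# vocabulary dimension, while targets hold indices into the *global* vocabulary.
def _demo_vocab_parallel_cross_entropy(
    vocab_parallel_logits: torch.Tensor,  # [batch, seq, vocab_size // model_parallel_world_size]
    target: torch.Tensor,  # [batch, seq], global vocabulary indices
) -> torch.Tensor:
    loss = vocab_parallel_cross_entropy(vocab_parallel_logits, target)  # [batch, seq]
    return loss.mean()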
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model and data parallel groups."""
from typing import List, Optional
import torch
from .utils import ensure_divisibility
# Model parallel group that the current rank belongs to.
_MODEL_PARALLEL_GROUP = None
# Data parallel group that the current rank belongs to.
_DATA_PARALLEL_GROUP = None
# Pipeline parallel group that the current rank belongs to.
_PIPELINE_PARALLEL_GROUP = None
_PIPELINE_PARALLEL_RANKS = None
def initialize_model_parallel(
model_parallel_size_: int,
pipeline_length: int = 1,
*,
model_parallel_backend: Optional[str] = None,
pipeline_backend: Optional[str] = None,
ddp_backend: Optional[str] = None
) -> None:
"""
Initialize model data parallel groups.
Arguments:
model_parallel_size: number of GPUs used to parallelize model.
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model. The present function will
create 4 model parallel groups and 2 data parallel groups as:
4 model parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 data parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
world_size = torch.distributed.get_world_size()
model_parallel_size = int(min(model_parallel_size_, world_size))
ensure_divisibility(world_size, model_parallel_size)
ensure_divisibility(world_size, model_parallel_size * pipeline_length)
rank = torch.distributed.get_rank()
data_parallel_size = int(world_size / (model_parallel_size * pipeline_length))
if torch.distributed.get_rank() == 0:
print("> initializing model parallel with size {}".format(model_parallel_size_))
print("> initializing ddp with size {}".format(data_parallel_size))
print("> initializing pipeline with size {}".format(pipeline_length))
groups = torch.LongTensor(range(world_size)).reshape(data_parallel_size, pipeline_length, model_parallel_size)
found = torch.where(groups == rank)
assert all(len(x) == 1 for x in found)
found = [x[0] for x in found]
# Build the data parallel groups.
global _DATA_PARALLEL_GROUP
assert _DATA_PARALLEL_GROUP is None, "data parallel group is already initialized"
for j in range(pipeline_length):
for k in range(model_parallel_size):
group = torch.distributed.new_group(groups[:, j, k].tolist(), backend=ddp_backend)
if j == found[1] and k == found[2]:
_DATA_PARALLEL_GROUP = group
# Build the model parallel groups.
global _MODEL_PARALLEL_GROUP
assert _MODEL_PARALLEL_GROUP is None, "model parallel group is already initialized"
for i in range(data_parallel_size):
for j in range(pipeline_length):
group = torch.distributed.new_group(groups[i, j, :].tolist(), backend=model_parallel_backend)
if i == found[0] and j == found[1]:
_MODEL_PARALLEL_GROUP = group
global _PIPELINE_PARALLEL_GROUP
    assert _PIPELINE_PARALLEL_GROUP is None, "pipeline parallel group is already initialized"
global _PIPELINE_PARALLEL_RANKS
    assert _PIPELINE_PARALLEL_RANKS is None, "pipeline parallel ranks are already initialized"
for i in range(data_parallel_size):
for k in range(model_parallel_size):
ranks = groups[i, :, k].tolist()
group = torch.distributed.new_group(ranks, backend=pipeline_backend)
if i == found[0] and k == found[2]:
_PIPELINE_PARALLEL_GROUP = group
_PIPELINE_PARALLEL_RANKS = ranks
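# Illustrative sketch (not part of the library): reproduce the 8-GPU example from the
# docstring above (model_parallel_size=2, pipeline_length=1) using the same reshape,
# without creating any process groups.
def _demo_group_layout() -> None:
    world_size, model_parallel_size, pipeline_length = 8, 2, 1
    data_parallel_size = world_size // (model_parallel_size * pipeline_length)
    groups = torch.LongTensor(range(world_size)).reshape(data_parallel_size, pipeline_length, model_parallel_size)
    # 4 model parallel groups: [0, 1], [2, 3], [4, 5], [6, 7]
    assert [groups[i, 0, :].tolist() for i in range(data_parallel_size)] == [[0, 1], [2, 3], [4, 5], [6, 7]]
    # 2 data parallel groups: [0, 2, 4, 6] and [1, 3, 5, 7]
    assert [groups[:, 0, k].tolist() for k in range(model_parallel_size)] == [[0, 2, 4, 6], [1, 3, 5, 7]]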
def model_parallel_is_initialized() -> bool:
"""Check if model and data parallel groups are initialized."""
if _MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None or _PIPELINE_PARALLEL_GROUP is None:
return False
return True
def get_model_parallel_group() -> torch.distributed.ProcessGroup:
"""Get the model parallel group the caller rank belongs to."""
assert _MODEL_PARALLEL_GROUP is not None, "model parallel group is not initialized"
return _MODEL_PARALLEL_GROUP
def get_data_parallel_group() -> torch.distributed.ProcessGroup:
"""Get the data parallel group the caller rank belongs to."""
assert _DATA_PARALLEL_GROUP is not None, "data parallel group is not initialized"
return _DATA_PARALLEL_GROUP
def get_pipeline_parallel_group() -> torch.distributed.ProcessGroup:
"""Get the pipeline parallel group the caller rank belongs to."""
assert _PIPELINE_PARALLEL_GROUP is not None, "pipeline parallel group is not initialized"
return _PIPELINE_PARALLEL_GROUP
def get_pipeline_parallel_ranks() -> List[int]:
"""Get the pipeline parallel group the caller rank belongs to."""
assert _PIPELINE_PARALLEL_RANKS is not None, "pipeline parallel group is not initialized"
return _PIPELINE_PARALLEL_RANKS
def get_model_parallel_world_size() -> int:
"""Return world size for the model parallel group."""
return torch.distributed.get_world_size(group=get_model_parallel_group())
def get_model_parallel_rank() -> int:
"""Return my rank for the model parallel group."""
return torch.distributed.get_rank(group=get_model_parallel_group())
def get_model_parallel_src_rank() -> int:
"""Calculate the global rank corresponding to a local rank zero
in the model parallel group."""
global_rank = torch.distributed.get_rank()
local_world_size = get_model_parallel_world_size()
return (global_rank // local_world_size) * local_world_size
def get_data_parallel_world_size() -> int:
"""Return world size for the data parallel group."""
return torch.distributed.get_world_size(group=get_data_parallel_group())
def get_data_parallel_rank() -> int:
"""Return my rank for the data parallel group."""
return torch.distributed.get_rank(group=get_data_parallel_group())
def destroy_model_parallel() -> None:
"""Set the groups to none."""
global _MODEL_PARALLEL_GROUP
_MODEL_PARALLEL_GROUP = None
global _DATA_PARALLEL_GROUP
_DATA_PARALLEL_GROUP = None
global _PIPELINE_PARALLEL_GROUP
_PIPELINE_PARALLEL_GROUP = None
global _PIPELINE_PARALLEL_RANKS
_PIPELINE_PARALLEL_RANKS = None
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from .cross_entropy import vocab_parallel_cross_entropy
from .initialize import (
destroy_model_parallel,
get_data_parallel_group,
get_data_parallel_rank,
get_data_parallel_world_size,
get_model_parallel_group,
get_model_parallel_rank,
get_model_parallel_src_rank,
get_model_parallel_world_size,
get_pipeline_parallel_group,
get_pipeline_parallel_ranks,
initialize_model_parallel,
)
from .layers import ColumnParallelLinear, RowParallelLinear, VocabParallelEmbedding
from .mappings import copy_to_model_parallel_region, gather_from_model_parallel_region
from .random import get_cuda_rng_tracker, model_parallel_cuda_manual_seed
__all__: List[str] = []
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import contextlib
from typing import Dict, Iterator, Set, Union
import torch
from torch.cuda import _lazy_call
from torch.utils.checkpoint import detach_variable
from .initialize import get_data_parallel_rank, get_model_parallel_rank
# Default name for the model parallel rng tracker.
_MODEL_PARALLEL_RNG_TRACKER_NAME = "model-parallel-rng"
def _set_cuda_rng_state(new_state: torch.ByteTensor, device: Union[int, str, torch.device] = -1) -> None:
"""Sets the random number generator state of the current GPU.
Arguments:
new_state (torch.ByteTensor): The desired state
This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
with a single change: the input state is not cloned. Cloning caused
    major performance issues for 4+ GPU cases.
"""
if device == -1:
device = torch.device("cuda")
elif isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device("cuda", device)
def cb() -> None:
idx = device.index # type: ignore
if idx is None:
idx = torch.cuda.current_device()
default_generator = torch.cuda.default_generators[idx] # type: ignore
default_generator.set_state(new_state)
_lazy_call(cb)
class CudaRNGStatesTracker:
"""Tracker for the cuda RNG states.
Using the `add` method, a cuda rng state is initialized based on
the input `seed` and is assigned to `name`. Later, by forking the
rng state, we can perform operations and return to our starting
cuda state.
"""
def __init__(self) -> None:
# Map from a string name to the cuda rng state.
self.states_: Dict[str, torch.ByteTensor] = {}
        # Seeds are just for bookkeeping and to ensure no seed is set twice.
self.seeds_: Set[int] = set()
def reset(self) -> None:
"""Set to the initial state (no tracker)."""
self.states_ = {}
self.seeds_ = set()
def get_states(self) -> Dict[str, torch.ByteTensor]:
"""Get rng states. Copy the dictionary so we have direct
pointers to the states, not just a pointer to the dictionary."""
states = {}
for name in self.states_:
states[name] = self.states_[name]
return states
def set_states(self, states: Dict[str, torch.ByteTensor]) -> None:
"""Set the rng states. For efficiency purposes, we do not check
the size of seed for compatibility."""
self.states_ = states
def add(self, name: str, seed: int) -> None:
"""Track the rng state.
Arguments:
name (str): The name of the seed
seed (int): The seed value
"""
# Check seed is not already used.
if seed in self.seeds_:
raise Exception("seed {} already exists".format(seed))
self.seeds_.add(seed)
# Check that state is not already defined.
if name in self.states_:
raise Exception("cuda rng state {} already exists".format(name))
# Get the current rng state.
orig_rng_state = torch.cuda.get_rng_state()
# Set the new state and store it.
torch.cuda.manual_seed(seed)
self.states_[name] = torch.cuda.get_rng_state()
# Reset rng state to what it was.
_set_cuda_rng_state(orig_rng_state)
@contextlib.contextmanager
def fork(self, name: str = _MODEL_PARALLEL_RNG_TRACKER_NAME) -> Iterator[None]:
"""Fork the cuda rng state, perform operations, and exit with
the original state."""
# Check if we have added the state
if name not in self.states_:
raise Exception("cuda rng state {} is not added".format(name))
# Store current rng state.
orig_cuda_rng_state = torch.cuda.get_rng_state()
# Set rng state to the desired one
_set_cuda_rng_state(self.states_[name])
# Do the stuff we wanted to do.
try:
yield
finally:
# Update the current rng state for later use.
self.states_[name] = torch.cuda.get_rng_state()
# And set the state to the original state we started with.
_set_cuda_rng_state(orig_cuda_rng_state)
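# Illustrative sketch (not part of the library; requires a CUDA device, and the state
# name/seed are arbitrary examples): fork into a tracked RNG state, draw random numbers
# from it, and come back to an untouched default CUDA RNG state.
def _demo_rng_tracker() -> None:
    tracker = CudaRNGStatesTracker()
    tracker.add("demo-state", seed=1234)
    default_state_before = torch.cuda.get_rng_state()
    with tracker.fork("demo-state"):
        torch.randn(2, 2, device="cuda")  # drawn from the tracked "demo-state" stream
    # The default CUDA RNG state was saved and restored around the fork.
    assert torch.equal(default_state_before, torch.cuda.get_rng_state())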
# RNG tracker object.
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
def get_cuda_rng_tracker() -> CudaRNGStatesTracker:
"""Get cuda rng tracker."""
return _CUDA_RNG_STATE_TRACKER
def model_parallel_cuda_manual_seed(seed: int) -> None:
"""Initialize model parallel cuda seed.
This function should be called after the model parallel is
initialized. Also, no torch.cuda.manual_seed should be called
    after this function. Basically, this is a replacement for that
    function.
    Two sets of RNG states are tracked:
default state: This is for data parallelism and is the same among a
set of model parallel GPUs but different across
                       different model parallel groups. This is used for
example for dropout in the non-model-parallel regions.
model-parallel state: This state is different among a set of model
parallel GPUs, but the same across data parallel
groups. This is used for example for dropout in
model parallel regions.
"""
# 2718 is just for fun and any POSITIVE value will work.
offset = seed + 2718
model_parallel_seed = offset + get_model_parallel_rank()
    # Data parallel gets the original seed.
data_parallel_seed = seed
if torch.distributed.get_rank() == 0:
print(
"> initializing model parallel cuda seeds on global rank {}, "
"model parallel rank {}, and data parallel rank {} with "
"model parallel seed: {} and data parallel seed: {}".format(
torch.distributed.get_rank(),
get_model_parallel_rank(),
get_data_parallel_rank(),
model_parallel_seed,
data_parallel_seed,
),
flush=True,
)
if torch.cuda.is_available():
_CUDA_RNG_STATE_TRACKER.reset()
# Set the default state.
torch.cuda.manual_seed(data_parallel_seed)
# and model parallel state.
_CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, model_parallel_seed)
class CheckpointFunction(torch.autograd.Function):
"""This function is adapted from torch.utils.checkpoint with
two main changes:
1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
2) the states in the model parallel tracker are also properly
tracked/set/reset.
"""
@staticmethod
def forward(ctx, run_function, *args): # type: ignore
ctx.run_function = run_function
# Copy the rng states.
ctx.fwd_cpu_rng_state = torch.get_rng_state()
ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state()
ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
ctx.save_for_backward(*args)
with torch.no_grad():
outputs = run_function(*args)
return outputs
@staticmethod
def backward(ctx, *args): # type: ignore
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError("Checkpointing is not compatible with .grad(), please use .backward() if possible")
inputs = ctx.saved_tensors
# Store the current states.
bwd_cpu_rng_state = torch.get_rng_state()
bwd_cuda_rng_state = torch.cuda.get_rng_state()
bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
# Set the states to what it used to be before the forward pass.
torch.set_rng_state(ctx.fwd_cpu_rng_state)
_set_cuda_rng_state(ctx.fwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
# Compute the forward pass.
detached_inputs = detach_variable(inputs)
with torch.enable_grad():
outputs = ctx.run_function(*detached_inputs)
# Set the states back to what it was at the start of this function.
torch.set_rng_state(bwd_cpu_rng_state)
_set_cuda_rng_state(bwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
if isinstance(outputs, torch.Tensor):
outputs = (outputs,)
torch.autograd.backward(outputs, args)
return (None,) + tuple(inp.grad for inp in detached_inputs)
def checkpoint(function, *args): # type: ignore
"""Checkpoint a model or part of the model.
This has been directly copied from torch.utils.checkpoint."""
return CheckpointFunction.apply(function, *args)
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
def ensure_divisibility(numerator: int, denominator: int) -> None:
"""Ensure that numerator is divisible by the denominator."""
assert numerator % denominator == 0, "{} is not divisible by {}".format(numerator, denominator)
def divide_and_check_no_remainder(numerator: int, denominator: int) -> int:
"""Ensure that numerator is divisible by the denominator and return
the division value."""
ensure_divisibility(numerator, denominator)
return numerator // denominator
def split_tensor_along_last_dim(
tensor: torch.Tensor, num_partitions: int, contiguous_split_chunks: bool = False
) -> Tuple[torch.Tensor, ...]:
"""Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
last_dim_size = divide_and_check_no_remainder(tensor.size()[last_dim], num_partitions)
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
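# Illustrative sketch (not part of the library; sizes are arbitrary examples): splitting
# a [2, 6] tensor into 3 partitions yields three [2, 2] chunks of the last dimension.
def _demo_split_tensor_along_last_dim() -> None:
    tensor = torch.arange(12).reshape(2, 6)
    chunks = split_tensor_along_last_dim(tensor, num_partitions=3, contiguous_split_chunks=True)
    assert len(chunks) == 3
    assert all(chunk.shape == (2, 2) for chunk in chunks)
    assert chunks[1].tolist() == [[2, 3], [8, 9]]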
class VocabUtility:
"""Split the vocabulary into `world_size` chunks amd return the
first and last index of the vocabulary belonging to the `rank`
partition: Note that indices in [first, last)"""
@staticmethod
def vocab_range_from_per_partition_vocab_size(
per_partition_vocab_size: int, rank: int, world_size: int
) -> Tuple[int, int]:
index_f = rank * per_partition_vocab_size
index_l = index_f + per_partition_vocab_size
return index_f, index_l
@staticmethod
def vocab_range_from_global_vocab_size(global_vocab_size: int, rank: int, world_size: int) -> Tuple[int, int]:
per_partition_vocab_size = divide_and_check_no_remainder(global_vocab_size, world_size)
return VocabUtility.vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank, world_size)
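# Illustrative sketch (not part of the library; numbers are arbitrary examples): with a
# global vocabulary of 100 tokens sharded across a world size of 4, rank 1 owns the
# half-open index range [25, 50).
def _demo_vocab_range() -> None:
    first, last = VocabUtility.vocab_range_from_global_vocab_size(100, rank=1, world_size=4)
    assert (first, last) == (25, 50)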
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
from typing import Callable, Optional
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from .initialize import get_model_parallel_rank, get_model_parallel_world_size
from .mappings import (
copy_to_model_parallel_region,
gather_from_model_parallel_region,
reduce_from_model_parallel_region,
scatter_to_model_parallel_region,
)
from .utils import VocabUtility, divide_and_check_no_remainder
def _initialize_affine_weight(
weight: torch.Tensor,
out_features: int,
in_features: int,
per_partition_size: int,
partition_dim: int,
init_method: Callable[[torch.Tensor], torch.Tensor],
stride: int = 1,
return_master_weight: bool = False,
) -> Optional[torch.Tensor]:
"""Initialize affine weight for model parallel.
Build the master weight on all processes and scatter
the relevant chunk."""
# If we only use 1 process for model parallelism, bypass scatter.
world_size = get_model_parallel_world_size()
if world_size == 1:
init_method(weight)
if return_master_weight:
return weight
return None
# Initialize master weight
master_weight = torch.empty(out_features, in_features, dtype=weight.dtype, requires_grad=False)
init_method(master_weight)
# Split and copy
per_partition_per_stride_size = divide_and_check_no_remainder(per_partition_size, stride)
weight_list = torch.split(master_weight, per_partition_per_stride_size, dim=partition_dim)
rank = get_model_parallel_rank()
my_weight_list = weight_list[rank::world_size]
with torch.no_grad():
torch.cat(my_weight_list, dim=partition_dim, out=weight)
if return_master_weight:
return master_weight
return None
class VocabParallelEmbedding(torch.nn.Module):
"""Embedding parallelized in the vocabulary dimension.
This is mainly adapted from torch.nn.Embedding and all the default
values are kept.
Arguments:
num_embeddings: vocabulary size.
embedding_dim: size of hidden state.
init_method: method to initialize weights.
"""
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
init_method: Callable[[torch.Tensor], torch.Tensor] = init.xavier_normal_,
) -> None:
super(VocabParallelEmbedding, self).__init__()
# Keep the input dimensions.
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self._weight = None
        # Divide the weight matrix along the vocabulary dimension.
self.vocab_start_index, self.vocab_end_index = VocabUtility.vocab_range_from_global_vocab_size(
self.num_embeddings, get_model_parallel_rank(), get_model_parallel_world_size()
)
self.num_embeddings_per_partition = self.vocab_end_index - self.vocab_start_index
# Allocate weights.
self.weight = Parameter(torch.Tensor(self.num_embeddings_per_partition, self.embedding_dim))
# And initialize.
_initialize_affine_weight(
self.weight, self.num_embeddings, self.embedding_dim, self.num_embeddings_per_partition, 0, init_method
)
def forward(self, input_: torch.Tensor) -> torch.Tensor: # type: ignore
# Build the mask.
input_mask = (input_ < self.vocab_start_index) | (input_ >= self.vocab_end_index)
# Mask the input.
masked_input = input_.clone() - self.vocab_start_index
masked_input[input_mask] = 0
# Get the embeddings.
output_parallel = F.embedding(
masked_input,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
# Mask the output embedding.
output_parallel[input_mask, :] = 0.0
# Reduce across all the model parallel GPUs.
output = reduce_from_model_parallel_region(output_parallel)
return output
class ParallelEmbedding(torch.nn.Module):
"""Embedding parallelized in the embedding dimension.
This is mainly adapted from torch.nn.Embedding and all the default
values are kept.
Arguments:
num_embeddings: vocabulary size.
embedding_dim: size of hidden state.
init_method: method to initialize weights.
"""
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
init_method: Callable[[torch.Tensor], torch.Tensor] = init.xavier_normal_,
keep_master_weight_for_test: bool = False,
) -> None:
super(ParallelEmbedding, self).__init__()
# Keep the input dimensions.
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.max_norm = max_norm
        self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self._weight = None
# Divide the weight matrix along the embedding dimension.
world_size = get_model_parallel_world_size()
self.embedding_dim_per_partition = divide_and_check_no_remainder(self.embedding_dim, world_size)
# Allocate weights.
self.weight = Parameter(torch.Tensor(self.num_embeddings, self.embedding_dim_per_partition))
# And initialize.
_initialize_affine_weight(
self.weight,
self.num_embeddings,
self.embedding_dim,
self.embedding_dim_per_partition,
1,
init_method,
stride=1,
return_master_weight=False,
)
def forward(self, input_: torch.Tensor) -> torch.Tensor: # type: ignore
input_parallel = copy_to_model_parallel_region(input_)
output_parallel = F.embedding(
input_parallel,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
output = gather_from_model_parallel_region(output_parallel)
return output
class ColumnParallelLinear(torch.nn.Module):
"""Linear layer with column parallelism.
The linear layer is defined as Y = XA + b. A is parallelized along
its second dimension as A = [A_1, ..., A_p].
Arguments:
in_features: first dimension of matrix A.
out_features: second dimension of matrix A.
bias: If true, add bias
        gather_output: If true, call all-gather on output and make Y available
to all GPUs, otherwise, every GPU will have its output
which is Y_i = XA_i
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
gather_output: bool = True,
init_method: Callable[[torch.Tensor], torch.Tensor] = init.xavier_normal_,
stride: int = 1,
keep_master_weight_for_test: bool = False,
) -> None:
super(ColumnParallelLinear, self).__init__()
# Keep input parameters
self.in_features = in_features
self.out_features = out_features
self.gather_output = gather_output
# Divide the weight matrix along the last dimension.
world_size = get_model_parallel_world_size()
self.output_size_per_partition = divide_and_check_no_remainder(out_features, world_size)
# Parameters.
# Note: torch.nn.functional.linear performs XA^T + b and as a result
# we allocate the transpose.
self.weight = Parameter(torch.Tensor(self.output_size_per_partition, self.in_features))
if bias:
self.bias = Parameter(torch.Tensor(self.output_size_per_partition))
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter("bias", None)
# Initialize weight.
self.master_weight = _initialize_affine_weight(
self.weight,
self.out_features,
self.in_features,
self.output_size_per_partition,
0,
init_method,
stride=stride,
return_master_weight=keep_master_weight_for_test,
)
def get_master_weight(self) -> torch.Tensor:
return gather_from_model_parallel_region(self.weight.data.transpose(0, 1)).transpose_(0, 1)
def forward(self, input_: torch.Tensor) -> torch.Tensor: # type: ignore
# Set up backprop all-reduce.
input_parallel = copy_to_model_parallel_region(input_)
# Matrix multiply.
output_parallel = F.linear(input_parallel, self.weight, self.bias)
if self.gather_output:
# All-gather across the partitions.
output = gather_from_model_parallel_region(output_parallel)
else:
output = output_parallel
return output
class RowParallelLinear(torch.nn.Module):
"""Linear layer with row parallelism.
The linear layer is defined as Y = XA + b. A is parallelized along
its first dimension and X along its second dimension as:
- -
| A_1 |
| . |
A = | . | X = [X_1, ..., X_p]
| . |
| A_p |
- -
Arguments:
in_features: first dimension of matrix A.
out_features: second dimension of matrix A.
bias: If true, add bias. Note that bias is not parallelized.
input_is_parallel: If true, we assume that the input is already
split across the GPUs and we do not split
again.
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
input_is_parallel: bool = False,
init_method: Callable[[torch.Tensor], torch.Tensor] = init.xavier_normal_,
stride: int = 1,
keep_master_weight_for_test: bool = False,
):
super(RowParallelLinear, self).__init__()
# Keep input parameters
self.in_features = in_features
self.out_features = out_features
self.input_is_parallel = input_is_parallel
# Divide the weight matrix along the last dimension.
world_size = get_model_parallel_world_size()
self.input_size_per_partition = divide_and_check_no_remainder(in_features, world_size)
# Parameters.
# Note: torch.nn.functional.linear performs XA^T + b and as a result
# we allocate the transpose.
self.weight = Parameter(torch.Tensor(self.out_features, self.input_size_per_partition))
if bias:
self.bias = Parameter(torch.Tensor(self.out_features))
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter("bias", None)
# Initialize weight.
self.master_weight = _initialize_affine_weight(
self.weight,
self.out_features,
self.in_features,
self.input_size_per_partition,
1,
init_method,
stride=stride,
return_master_weight=keep_master_weight_for_test,
)
def get_master_weight(self) -> torch.Tensor:
return gather_from_model_parallel_region(self.weight.data)
def forward(self, input_: torch.Tensor) -> torch.Tensor: # type:ignore
# Set up backprop all-reduce.
if self.input_is_parallel:
input_parallel = input_
else:
input_parallel = scatter_to_model_parallel_region(input_)
# Matrix multiply.
output_parallel = F.linear(input_parallel, self.weight)
# All-reduce across all the partitions.
output_ = reduce_from_model_parallel_region(output_parallel)
if self.bias is not None:
output = output_ + self.bias
else:
output = output_
return output
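# Illustrative sketch (assumes torch.distributed and ``initialize_model_parallel`` have
# already been called, and that the hidden size 64 is divisible by the model parallel
# world size; layer sizes are arbitrary examples): the usual pairing of a column-parallel
# layer feeding a row-parallel layer, so that only one all-reduce is needed per forward.
def _demo_parallel_mlp(input_: torch.Tensor) -> torch.Tensor:
    fc1 = ColumnParallelLinear(16, 64, gather_output=False)  # each rank holds a column shard
    fc2 = RowParallelLinear(64, 16, input_is_parallel=True)  # consumes the sharded activations
    return fc2(F.relu(fc1(input_)))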
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from .initialize import get_model_parallel_group
from .utils import split_tensor_along_last_dim
def _reduce(ctx: Any, input_: torch.Tensor) -> torch.Tensor:
"""All-reduce the the input tensor across model parallel group."""
group = get_model_parallel_group()
if ctx:
ctx.mark_dirty(input_)
# Bypass the function if we are using only 1 GPU.
if torch.distributed.get_world_size(group=group) == 1:
return input_
# All-reduce.
torch.distributed.all_reduce(input_, group=group)
return input_
def _split(input_: torch.Tensor) -> torch.Tensor:
"""Split the tensor along its last dimension and keep the
corresponding slice."""
group = get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if torch.distributed.get_world_size(group=group) == 1:
return input_
# Split along last dimension.
world_size = torch.distributed.get_world_size(group=group)
input_list = split_tensor_along_last_dim(input_, world_size)
# Note: torch.split does not create contiguous tensors by default.
rank = torch.distributed.get_rank(group=group)
output = input_list[rank].contiguous()
return output
def _gather(input_: torch.Tensor) -> torch.Tensor:
"""Gather tensors and concatinate along the last dimension."""
group = get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if torch.distributed.get_world_size(group=group) == 1:
return input_
# Size and dimension.
last_dim = input_.dim() - 1
rank = torch.distributed.get_rank(group=group)
world_size = torch.distributed.get_world_size(group=group)
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank] = input_
torch.distributed.all_gather(tensor_list, input_, group=group)
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=last_dim).contiguous()
return output
class _CopyToModelParallelRegion(torch.autograd.Function):
"""Pass the input to the model parallel region."""
@staticmethod
def forward(ctx, input_): # type: ignore
return input_
@staticmethod
def backward(ctx, grad_output): # type: ignore
return _reduce(None, grad_output)
class _ReduceFromModelParallelRegion(torch.autograd.Function):
"""All-redcue the input from the model parallel region."""
@staticmethod
def forward(ctx, input_): # type: ignore
return _reduce(ctx, input_)
@staticmethod
def backward(ctx, grad_output): # type: ignore
return grad_output
class _ScatterToModelParallelRegion(torch.autograd.Function):
"""Split the input and keep only the corresponding chuck to the rank."""
@staticmethod
def forward(ctx, input_): # type: ignore
return _split(input_)
@staticmethod
def backward(ctx, grad_output): # type: ignore
return _gather(grad_output)
class _GatherFromModelParallelRegion(torch.autograd.Function):
"""Gather the input from model parallel region and concatinate."""
@staticmethod
def forward(ctx, input_): # type: ignore
return _gather(input_)
@staticmethod
def backward(ctx, grad_output): # type: ignore
return _split(grad_output)
# -----------------
# Helper functions.
# -----------------
def copy_to_model_parallel_region(input_: torch.Tensor) -> torch.Tensor:
return _CopyToModelParallelRegion.apply(input_)
def reduce_from_model_parallel_region(input_: torch.Tensor) -> torch.Tensor:
return _ReduceFromModelParallelRegion.apply(input_)
def scatter_to_model_parallel_region(input_: torch.Tensor) -> torch.Tensor:
return _ScatterToModelParallelRegion.apply(input_)
def gather_from_model_parallel_region(input_: torch.Tensor) -> torch.Tensor:
return _GatherFromModelParallelRegion.apply(input_)
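# Note (illustrative, assumes an initialized model parallel group): these helpers come in
# forward/backward pairs. ``copy_to_model_parallel_region`` is an identity in the forward
# pass and an all-reduce in the backward pass; ``reduce_from_model_parallel_region`` is
# the reverse. Likewise, ``scatter_to_model_parallel_region`` and
# ``gather_from_model_parallel_region`` are each other's backward, which is what keeps
# the column/row parallel linear layers in layers.py differentiable end to end.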
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from typing import Any, Callable, Dict, Generator, Optional, Set, Tuple, Type, cast
import torch.nn as nn
def default_auto_wrap_policy(
module: nn.Module,
recurse: bool,
unwrapped_params: int,
module_is_root: bool,
# These are customizable for this default policy function.
min_num_params: int = int(1e8),
force_leaf_modules: Optional[Set[Type[nn.Module]]] = None,
exclude_wrap_modules: Optional[Set[Type[nn.Module]]] = None,
skip_params_check_for_root: bool = False,
) -> bool:
"""Default policy function for :func:`auto_wrap`.
    Return whether a module should be wrapped during :func:`auto_wrap`.
The first four parameters are used by :func:`auto_wrap`. If
you write a custom version of this policy function, your version
    needs to at least accept the first four parameters and is free
    to do whatever you want beyond that.
Args:
module (nn.Module):
The module to be considered in this decision.
recurse (bool):
Indicate if this is called to make a decision on whether we
should recurse down a subgraph of the module structure.
If False, it means this function is called to make a decision
on whether we should wrap the said module.
unwrapped_params (int):
The number of parameters yet to be wrapped in this module.
module_is_root (bool):
Indicates if current module is the root.
min_num_params (int):
Customizable policy input. It controls the size threshold
            on how big a module must be to be considered for wrapping.
force_leaf_modules (Set[Type[nn.Module]]): set of module types to
keep as leaves, i.e., their children will never be wrapped.
exclude_wrap_modules (Set[Type[nn.Module]]):
Customizable set of module types to be excluded in wrapping.
skip_params_check_for_root (bool):
If module_is_root is True, then this includes the root in
            wrapping regardless of its number of unwrapped params.
"""
force_leaf_modules = (
default_auto_wrap_policy.FORCE_LEAF_MODULES # type: ignore
if force_leaf_modules is None
else force_leaf_modules
)
exclude_wrap_modules = (
default_auto_wrap_policy.EXCLUDE_WRAP_MODULES # type: ignore
if exclude_wrap_modules is None
else exclude_wrap_modules
)
is_large = unwrapped_params >= min_num_params
if recurse:
# We should recurse if the module is big enough but not in force_leaf_modules list.
return is_large and not isinstance(module, tuple(force_leaf_modules))
else:
# If we are not recursing, determine if we should wrap.
return ((module_is_root and skip_params_check_for_root) or is_large) and not isinstance(
module, tuple(exclude_wrap_modules)
)
# Set those defaults to the default_auto_wrap_policy function. Make them easy to be imported.
default_auto_wrap_policy.EXCLUDE_WRAP_MODULES = {nn.ModuleList, nn.ModuleDict} # type: ignore
default_auto_wrap_policy.FORCE_LEAF_MODULES = {nn.MultiheadAttention} # type: ignore
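# Illustrative sketch (not part of the library; the parameter counts are arbitrary
# examples): querying the default policy directly. A module carrying 10M unwrapped
# parameters is below the default 100M threshold, but qualifies once ``min_num_params``
# is lowered.
def _demo_default_auto_wrap_policy() -> None:
    module = nn.Linear(10, 10)
    assert not default_auto_wrap_policy(module, recurse=False, unwrapped_params=int(1e7), module_is_root=False)
    assert default_auto_wrap_policy(
        module, recurse=False, unwrapped_params=int(1e7), module_is_root=False, min_num_params=int(1e6)
    )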
def config_auto_wrap_policy(
module: nn.Module,
recurse: bool,
unwrapped_params: int,
module_is_root: bool,
) -> bool:
"""Config based policy function for :func:`auto_wrap`.
Return true for a module to be wrapped if it is already tagged with
a ``wrapper_config`` attribute.
Args:
module (nn.Module):
The module to be considered in this decision.
recurse (bool):
Indicate if this is called to make a decision on whether we
should recurse down a subgraph of the module structure.
If False, it means this function is called to make a decision
on whether we should wrap the said module.
unwrapped_params (int):
The number of parameters yet to be wrapped in this module.
Unused by this function.
module_is_root (bool):
Indicates if current module is the root.
Unused by this function.
"""
if recurse:
# We should always recurse.
return True
else:
# If we are not recursing, determine if we should wrap.
return hasattr(module, "wrapper_config")
@contextlib.contextmanager
def enable_wrap(auto_wrap_policy: Optional[Callable] = None, **wrapper_kwargs: Any) -> Generator[None, None, None]:
"""
Context manager to wrap modules using a wrapper.
Useful for when you'd like to apply the same parameters to all child modules
that you wrap. A particularly important use case is wrapping large layers so
that they get sharded (in-place) during initialization, to avoid running out of
system memory. Large layers can indicate that they should be sharded via
the ``wrap`` annotation and this context manager can provide the
exact configuration for these nested instances.
Usage::
with enable_wrap(**params):
# Wraps layer in FSDP by default if within context
self.l1 = wrap(torch.nn.Linear(5, 5))
self.l2 = auto_wrap(
TransformerBlock(),
# Wraps children modules based on a different min_num_params
auto_wrap_policy=functools.partial(default_auto_wrap_policy, min_num_params=1e7)
)
Args:
auto_wrap_policy (Callable, Optional):
Custom function to control how to do :func:`auto_wrap`. This is
useful to exclude unsupported modules or wrap based on sizes when
wrapping recursively. Note: modules annotated with :func:`wrap`
ignore this policy and will always be wrapped.
(default: :func:`default_auto_wrap_policy`)
**wrapper_kwargs:
Configuration settings that will be passed to all ``wrap``
instances inside the context
"""
with ConfigAutoWrap(auto_wrap_policy, **wrapper_kwargs):
yield
def wrap(module: nn.Module, **wrap_overrides: Any) -> nn.Module:
"""
Annotate that a module should be wrapped. Annotated modules will only be
wrapped if inside of an :func:`enable_wrap` context manager. This allows
a module to be initialized both with and without a wrapper without code
change.
Both wrapper_cls and wrapper_config can be taken from 3 sources with
increasing priority:
1. ConfigAutoWrap's context
2. module.wrapper_config
3. wrap_overrides argument of this function
Usage::
with enable_wrap(wrapper_cls=FSDP, **fsdp_config):
# Wraps layer in FSDP by default if within context
self.l1 = wrap(torch.nn.Linear(5, 5))
Args:
module (nn.Module): module to wrap (if in :func:`enable_wrap` context)
**wrap_overrides: configuration overrides that will take priority over
the values provided by the :func:`enable_wrap` context
"""
if ConfigAutoWrap.in_autowrap_context:
module_overrides = {}
if hasattr(module, "wrapper_config"):
module_overrides = module.wrapper_config
assert isinstance(module_overrides, dict)
wrap_overrides = {**ConfigAutoWrap.kwargs, **module_overrides, **wrap_overrides}
assert ConfigAutoWrap.wrapper_cls is not None
if ConfigAutoWrap.move_module_cuda_half:
module = module.cuda().half()
return ConfigAutoWrap.wrapper_cls(module, **wrap_overrides)
return module
def auto_wrap(module: nn.Module, auto_wrap_policy: Optional[Callable] = None, **kwargs: Any) -> nn.Module:
"""
Annotate that a module should be wrapped with the *wrapper_cls* from the
:func:`enable_wrap` context (if the context exists) and recursively wrap
children modules that meet the criteria given by :func:`auto_wrap_policy`. This
is useful for wrapping large complex layers.
.. note:: auto_wrap can only be applied to a module once because it
assumes none of the sub-modules is already wrapped and uses that
assumption to compute the wrapped vs. unwrapped parameters.
To get around this limitation, users can pre-assign ``wrapper_config``
attributes to the sub-modules they want to wrap (in multiple passes)
        and then use the ``config_auto_wrap_policy``.
.. warning:: It is not recommended to use :func:`auto_wrap` with
:class:`FullyShardedDataParallel` on modules that have shared
parameters, as the parameter sharing may be broken (i.e. end up not
shared) if the shared parameters are not (auto-)wrapped under the same
FSDP wrapper instance.
Usage::
with enable_wrap(**params):
# Wraps children modules.
self.l1 = auto_wrap(TransformerBlock())
Args:
module (nn.Module):
module to wrap (if in :func:`enable_wrap` context)
auto_wrap_policy (Callable):
            a function to determine whether a module should be wrapped.
(default: wrap if > 100M parameters)
"""
if ConfigAutoWrap.in_autowrap_context:
wrapped_module, remainder = ConfigAutoWrap.recursive_wrap(
module, auto_wrap_policy=auto_wrap_policy, module_is_root=True, **kwargs
)
return wrapped_module
return module
class ConfigAutoWrap:
"""
Helper class to wrap modules based on default config args via a context manager.
See :func:`enable_wrap` for more information.
"""
in_autowrap_context: bool = False # Context flag
move_module_cuda_half: bool = False # A flag to control the wrap() function.
wrapper_cls: Optional[Callable] = None # The wrapper class
kwargs: Dict[str, Any] = {} # Wrapper's args
auto_wrap_policy: Optional[Callable] = None # Used only in auto_wrap
def __init__(self, auto_wrap_policy: Optional[Callable] = None, **kwargs: Dict[str, Any]):
self.auto_wrap_policy = auto_wrap_policy
self.kwargs = kwargs
@staticmethod
def enable_autowrap_context(auto_wrap_policy: Optional[Callable], kwargs: Any) -> None:
if ConfigAutoWrap.in_autowrap_context:
raise NotImplementedError(
"You are already within an autowrap context and we currently do not supported nested autowrap."
)
ConfigAutoWrap.in_autowrap_context = True
# Get and save the wrapper cls for the context.
if "move_module_cuda_half" in kwargs.keys():
ConfigAutoWrap.move_module_cuda_half = cast(bool, kwargs["move_module_cuda_half"])
del kwargs["move_module_cuda_half"]
assert "wrapper_cls" in kwargs.keys()
ConfigAutoWrap.wrapper_cls = cast(Callable, kwargs["wrapper_cls"])
del kwargs["wrapper_cls"]
# Save the rest.
ConfigAutoWrap.auto_wrap_policy = default_auto_wrap_policy if auto_wrap_policy is None else auto_wrap_policy
ConfigAutoWrap.kwargs = kwargs
@staticmethod
def disable_autowrap_context() -> None:
ConfigAutoWrap.in_autowrap_context = False
ConfigAutoWrap.move_module_cuda_half = False
ConfigAutoWrap.wrapper_cls = None
ConfigAutoWrap.kwargs = {}
ConfigAutoWrap.auto_wrap_policy = None
def __enter__(self) -> None:
self.enable_autowrap_context(self.auto_wrap_policy, self.kwargs)
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.disable_autowrap_context()
@staticmethod
def recursive_wrap(
module: nn.Module, auto_wrap_policy: Optional[Callable], module_is_root: bool, **kwargs: Any
) -> Tuple[nn.Module, int]:
"""
Automatically wrap child modules of *module* that meet the given
criteria with :func:`auto_wrap`.
Args:
module (nn.Module):
module to recursively wrap
auto_wrap_policy (Callable, Optional):
optionally, override the :func:`auto_wrap_policy` from the context.
Returns:
(nn.Module, int):
                Wrapped module and the number of parameters wrapped recursively.
"""
if auto_wrap_policy is None:
auto_wrap_policy = ConfigAutoWrap.auto_wrap_policy
        # Make sure no child is already wrapped.
for _, child in module.named_modules():
assert not isinstance(child, cast(type, ConfigAutoWrap.wrapper_cls))
# We count all params, assuming none of them is already wrapped.
num_params = sum([p.numel() for p in module.parameters()])
assert auto_wrap_policy is not None
if auto_wrap_policy(module=module, recurse=True, unwrapped_params=num_params, module_is_root=module_is_root):
total_wrapped_params = 0
# Iterate through the children, recursively wrap if necessary
for name, child in module.named_children():
wrapped_child, num_wrapped_params = ConfigAutoWrap.recursive_wrap(
module=child, auto_wrap_policy=auto_wrap_policy, module_is_root=False, **kwargs
)
setattr(module, name, wrapped_child)
# Keep track of how many parameters have been wrapped
total_wrapped_params += num_wrapped_params
            # decide if we need to wrap the current module,
            # i.e. if the leftover parameters exceed the size threshold
remainder = num_params - total_wrapped_params
if auto_wrap_policy(
module=module, recurse=False, unwrapped_params=remainder, module_is_root=module_is_root
):
# Leaf node or final wrapping of the remainder both happen here.
return wrap(module, **kwargs), num_params
else:
return module, total_wrapped_params
return module, 0
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from .auto_wrap import auto_wrap, config_auto_wrap_policy, default_auto_wrap_policy, enable_wrap, wrap
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from .moe_layer import MOELayer
from .top2gate import Top2Gate
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf
# Code is inspired by Top2GatingOnLogits from lingvo:
# https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477
from typing import Callable, Dict, Tuple
import torch
from torch import Tensor
import torch.nn.functional as F
gumbel_map: Dict[torch.device, Callable] = {}
def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:
gumbel = gumbel_map.get(device)
if gumbel is None:
one = torch.tensor(1.0, device=device)
zero = torch.tensor(0.0, device=device)
gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore
gumbel_map[device] = gumbel
return gumbel(shape)
def one_hot(tensor: torch.Tensor, num_classes: int) -> Tensor:
"""Workaround for https://github.com/pytorch/pytorch/issues/55579"""
assert num_classes > 0, "num_classes must be a positive integer"
ret = torch.zeros(tensor.shape + (num_classes,), device=tensor.device, dtype=tensor.dtype)
ret.scatter_(-1, tensor.unsqueeze(-1), 1)
return ret
def top2gating(logits: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]:
"""Implements Top2Gating on logits."""
# NOTE(msb) softmax requires FP32: https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/
gates = F.softmax(logits, dim=1, dtype=torch.float)
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
# capacity = 2S/E
capacity = 2 * num_tokens // num_experts
assert num_tokens % num_experts == 0
    # Create a mask for the 1st expert per token
indices1_s = torch.argmax(gates, dim=1)
mask1 = one_hot(indices1_s, num_classes=num_experts)
    # Create a mask for the 2nd expert per token using the Gumbel-max trick
# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
# Replace top-expert with min value
logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf"))
indices2_s = torch.argmax(logits_except1, dim=1)
mask2 = one_hot(indices2_s, num_classes=num_experts)
# Compute locations in capacity buffer
locations1 = torch.cumsum(mask1, dim=0) - 1
locations2 = torch.cumsum(mask2, dim=0) - 1
# Update 2nd's location by accounting for locations of 1st
locations2 += torch.sum(mask1, dim=0, keepdim=True)
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.float(), dim=0)
l_aux = torch.mean(me * ce)
# Remove locations outside capacity from mask
mask1 *= torch.lt(locations1, capacity)
mask2 *= torch.lt(locations2, capacity)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
locations2_s = torch.sum(locations2 * mask2, dim=1)
# Normalize gate probabilities
gates1_s = (gates * mask1).sum(dim=1) # einsum("se,se->s")
gates2_s = (gates * mask2).sum(dim=1) # einsum("se,se->s")
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s /= denom_s
gates2_s /= denom_s
# Calculate combine_weights and dispatch_mask
gates1 = gates1_s.unsqueeze(-1) * mask1 # einsum("s,se->se")
gates2 = gates2_s.unsqueeze(-1) * mask2 # einsum("s,se->se")
locations1_sc = one_hot(locations1_s, num_classes=capacity)
locations2_sc = one_hot(locations2_s, num_classes=capacity)
combine1_sec = gates1.unsqueeze(2) * locations1_sc.unsqueeze(1) # einsum("se,sc->sec")
combine2_sec = gates2.unsqueeze(2) * locations2_sc.unsqueeze(1) # einsum("se,sc->sec")
combine_weights = combine1_sec + combine2_sec
dispatch_mask = combine_weights.bool()
return l_aux.to(logits.dtype), combine_weights.to(logits.dtype), dispatch_mask
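# Illustrative sketch, not part of the original module: the shapes returned by top2gating
# for a small batch. num_tokens must be divisible by num_experts (asserted above).
def _example_top2gating_shapes() -> None:
    num_tokens, num_experts = 8, 4
    l_aux, combine_weights, dispatch_mask = top2gating(torch.randn(num_tokens, num_experts))
    capacity = 2 * num_tokens // num_experts
    assert l_aux.numel() == 1  # scalar load-balancing loss
    assert combine_weights.shape == (num_tokens, num_experts, capacity)
    assert dispatch_mask.shape == combine_weights.shape and dispatch_mask.dtype == torch.bool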
class Top2Gate(torch.nn.Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
l_aux, combine_weights, dispatch_mask = gate(input)
    .. _Gshard: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
        num_experts (int):
number of experts in model
"""
wg: torch.nn.Linear
def __init__(
self,
model_dim: int,
num_experts: int,
) -> None:
super().__init__()
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)
def forward(self, input: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore
logits = self.wg(input)
return top2gating(logits)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import TYPE_CHECKING, Any, Optional, Tuple, Union, cast
import torch
from torch import Tensor
import torch.distributed as dist
from torch.nn import Module, ModuleList
if TYPE_CHECKING:
Base = Module[Tensor]
else:
Base = Module
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
# See https://arxiv.org/pdf/2006.16668.pdf for details.
# Based on https://github.com/pytorch/pytorch/pull/40762
class _AllToAll(torch.autograd.Function):
@staticmethod
def forward(ctx: Any, group: dist.ProcessGroup, input: Tensor) -> Tensor: # type: ignore
ctx.group = group
input = input.contiguous()
output = torch.empty_like(input)
dist.all_to_all_single(output, input, group=group)
return output
@staticmethod
def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]:
return (None, _AllToAll.apply(ctx.group, *grad_output))
class MOELayer(Base):
"""MOELayer module which implements MixtureOfExperts as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
moe = MOELayer(gate, expert)
output = moe(input)
l_aux = moe.l_aux
.. _Gshard: https://arxiv.org/pdf/2006.16668.pdf
Args:
gate: gate network
        experts: expert network or a ModuleList of expert networks
group: group to use for all-to-all communication
"""
def __init__(self, gate: Module, experts: Union[Module, ModuleList], group: Optional[Any] = None) -> None:
super().__init__()
self.gate = gate
if type(experts) == ModuleList:
self.experts = cast(ModuleList, experts)
else:
self.experts = ModuleList([experts])
self.group = group if group is not None else dist.group.WORLD
for expert in self.experts:
            for p in expert.parameters():
p.expert = True # type: ignore
self.world_size = dist.get_world_size(self.group)
self.num_local_experts = len(self.experts)
def forward(self, *input: Tensor, **kwargs: Any) -> Tensor:
assert len(input) == 1, "only single input Tensor supported"
assert len(input[0].shape) == 3, "input Tensor must have dimensions: (s)equence, (t)oken, (m)odel"
        assert input[0].shape[0] % len(self.experts) == 0, "num tokens must be divisible by the number of local experts"
# Implement Algorithm 2 from GShard paper.
d_model = input[0].shape[2]
# Reshape into S tokens by dropping sequence dimension.
reshaped_input = input[0].reshape(-1, d_model)
self.l_aux, combine_weights, dispatch_mask = self.gate(reshaped_input)
dispatched_input = torch.einsum("sec,sm->ecm", dispatch_mask.float(), reshaped_input)
dispatched_input = _AllToAll.apply(self.group, dispatched_input)
# Re-shape after all-to-all: ecm -> gecm
dispatched_input = dispatched_input.reshape(self.world_size, self.num_local_experts, -1, d_model)
chunks = dispatched_input.chunk(self.num_local_experts, dim=1)
expert_outputs = []
for chunk, expert in zip(chunks, self.experts):
expert_outputs += [expert(chunk)]
expert_output = torch.cat(expert_outputs, dim=1)
expert_output = _AllToAll.apply(self.group, expert_output)
# Re-shape back: gecm -> ecm
expert_output = expert_output.reshape(self.world_size * self.num_local_experts, -1, d_model)
combined_output = torch.einsum("sec,ecm->sm", combine_weights, expert_output)
return combined_output.reshape(input[0].shape)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
import os
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple
import torch
from torch import Tensor
import torch.distributed as dist
if TYPE_CHECKING:
from torch.distributed import ProcessGroup
# TODO: Remove the toggle-enable_nccl_base_collectives when github open issue #801 is resolved.
if os.getenv("ENABLE_NCCL_BASE_COLLECTIVES", "1") == "0":
enable_nccl_base_collectives = False
else:
enable_nccl_base_collectives = True
class Bucket:
def __init__(self, data: Tensor, group: "ProcessGroup"):
self.data = data
self.group = group
self.offset = 0
self.callbacks: List[Callable] = []
self.output_shard = torch.zeros_like(data[0])
def flush(self) -> None:
"""Flush content of the bucket."""
if self.offset == 0:
assert len(self.callbacks) == 0
return
# reduce-scatter bucket
if hasattr(dist, "_reduce_scatter_base") and enable_nccl_base_collectives:
dist._reduce_scatter_base(
self.output_shard[: self.offset], self.data[:, : self.offset].contiguous(), group=self.group
)
else:
dist.reduce_scatter(
self.output_shard[: self.offset], list(self.data[:, : self.offset].unbind(0)), group=self.group
)
# execute post-reduction callbacks
for callback_fn in self.callbacks:
callback_fn()
# reuse input bucket but allocate a fresh output shard
self.data[:, : self.offset].zero_()
self.offset = 0
self.callbacks.clear()
self.output_shard = torch.zeros_like(self.data[0])
def setup(self) -> None:
"""Setup the buffers if they are not allocated.
Using ``setup`` and ``teardown``, we can ensure that the bucket
        buffers are only allocated during the backward pass, hence leaving more
        memory for other parts of the training process, such as activation memory
        used during the forward pass.
"""
for tensor in [self.data, self.output_shard]:
if tensor.storage().size() == 0:
tensor.storage().resize_(tensor.size().numel())
def teardown(self) -> None:
"""Tear down the bucket by freeing the memory"""
assert self.offset == 0 and self.callbacks == [], "Incorrect call of teardown"
for tensor in [self.data, self.output_shard]:
tensor.storage().resize_(0)
class ReduceScatterBucketer:
"""
Helper for bucketing multiple reduce-scatter operations on small tensors
into larger reduce-scatter ops to improve communication efficiency.
Usage::
bucketer = ReduceScatterBucketer()
bucketer.reduce_scatter_async(
small_tensors, callback_fn=lambda result: print("small")
)
bucketer.reduce_scatter_async(
big_tensors, callback_fn=lambda result: print("big")
)
bucketer.reduce_scatter_async(
more_small_tensors, callback_fn=lambda result: print("small2")
)
bucketer.flush() # callbacks only guaranteed to be called after flush()
# Example output (note that it is out of order, due to bucketing):
# big
# small
# small2
Args:
bucket_cap_mb (int, Optional): bucket size for communicating. Buckets
are sub-divided based on world_size. Values <= 0 disable bucketing.
"""
def __init__(self, bucket_cap_mb: int = 25):
self.bucket_cap_mb = bucket_cap_mb
self.buckets: Dict[Tuple[torch.dtype, torch.device, "ProcessGroup"], Bucket] = {}
@torch.no_grad()
def reduce_scatter_async(
self,
input_list: List[Tensor],
group: "ProcessGroup",
callback_fn: Optional[Callable] = None,
) -> None:
"""
Reduce-scatter a list of tensors asynchronously, so smaller reductions
can be bucketed together. The given callback (``callback_fn``) will be
called with the reduced result at some later time. Call ``flush()`` to
force all queued ops and callbacks to be executed.
Note that large inputs will be reduced immediately, and this function
may also flush the relevant bucket to make room for ``input_list``.
Args:
input_list (List[Tensor]): list of tensors to reduce-scatter. List
should contain ``group.size()`` tensors and each tensor should
have identical shape, dtype and device.
group (ProcessGroup): process group for reduction
callback_fn (Callable, Optional): callback function to call after
the reduction executes. Function will be called with a single
argument corresponding to the reduced result.
"""
world_size = group.size()
assert (
len(input_list) == world_size
), f"reduce_scatter received {len(input_list)} inputs, expected group.size() ({world_size})"
first_input = input_list[0]
first_input_size = first_input.numel()
bucket_shard_size = self._get_shard_size(first_input.element_size(), world_size)
if first_input_size > bucket_shard_size:
# TODO: investigate how to avoid using torch.cat (because it seems to be slow for CPU tensors)
# input is too big to fit in the bucket, reduce-scatter directly
output = torch.zeros_like(input_list[0])
if hasattr(dist, "_reduce_scatter_base") and enable_nccl_base_collectives:
input_flattened = torch.cat(input_list)
dist._reduce_scatter_base(output, input_flattened, group=group)
else:
# fallback
dist.reduce_scatter(output, input_list, group=group)
if callback_fn is not None:
callback_fn(output)
return
bucket = self._get_bucket(first_input, group)
if first_input_size > bucket.data.size(1) - bucket.offset:
# not enough space remaining in bucket, flush it now
bucket.flush()
# copy data from input_list into bucket
stacked_input = torch.stack(input_list).view(world_size, first_input_size)
offset = bucket.offset
bucket.data[:, offset : offset + first_input_size].copy_(stacked_input)
bucket.offset += first_input_size
# callback will be given the reduced result
if callback_fn is not None:
result_view = bucket.output_shard[offset : offset + first_input_size].view_as(first_input)
bucket.callbacks.append(functools.partial(callback_fn, result_view))
@torch.no_grad()
def flush(self) -> None:
"""Reduce-scatter any partial buckets."""
for bucket in self.buckets.values():
bucket.flush()
@torch.no_grad()
def teardown(self) -> None:
"""Free buffers from all buckets."""
for bucket in self.buckets.values():
bucket.teardown()
@functools.lru_cache()
def _get_shard_size(self, element_size: int, num_shards: int) -> int:
if self.bucket_cap_mb <= 0: # Values <= 0 disable bucketing.
return 0
MB = 1024 * 1024
bucket_size = self.bucket_cap_mb * MB / element_size
return int(bucket_size // num_shards)
def _get_bucket(self, tensor: Tensor, group: "ProcessGroup") -> Bucket:
# TODO (Min): the `group` used here in the key is the object hash, not the content
# hash. That means if FSDP instances are initialized with different process groups,
# even when the group members are in fact the same, we end up creating different
# buckets here.
key = (tensor.dtype, tensor.device, group)
if key not in self.buckets:
# buckets are divided into world_size pieces, bucket.data shaped (world_size, shard_size)
world_size = group.size()
shard_size = self._get_shard_size(tensor.element_size(), world_size)
data = tensor.new_zeros((world_size, shard_size))
self.buckets[key] = Bucket(data, group)
self.buckets[key].setup()
return self.buckets[key]
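# Illustrative sketch, not part of the original module: the shard size computation is pure
# arithmetic, so it can be inspected without initializing torch.distributed.
def _example_bucket_shard_size() -> None:
    bucketer = ReduceScatterBucketer(bucket_cap_mb=25)
    # 25 MB worth of fp32 elements (4 bytes each), split across 8 shards.
    assert bucketer._get_shard_size(element_size=4, num_shards=8) == (25 * 1024 * 1024 // 4) // 8
    # Values <= 0 disable bucketing entirely.
    assert ReduceScatterBucketer(bucket_cap_mb=0)._get_shard_size(element_size=4, num_shards=8) == 0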
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import collections.abc as abc
from dataclasses import dataclass
from math import inf
from typing import Any, Callable, Dict, List, Optional
import torch
import torch.distributed as dist
@dataclass
class Workhandle:
handle: Any
callback: Optional[Callable] = None
def get_global_rank(group: Any, rank: int) -> int:
if group is dist.group.WORLD:
return rank
return dist.distributed_c10d._get_global_rank(group, rank)
# Credits: classy_vision/generic/distributed_util.py
def recursive_copy_to_device(value: Any, non_blocking: bool, device: torch.device) -> Any:
"""
Recursively searches lists, tuples, dicts and copies tensors to device if
possible. Non-tensor values are passed as-is in the result.
NOTE: These are all copies, so if there are two objects that reference
the same object, then after this call, there will be two different objects
referenced on the device.
"""
if isinstance(value, torch.Tensor):
return value.to(device, non_blocking=non_blocking)
if isinstance(value, (list, tuple)):
values = []
for val in value:
values.append(recursive_copy_to_device(val, non_blocking=non_blocking, device=device))
return values if isinstance(value, list) else tuple(values)
if isinstance(value, abc.Mapping):
device_val: Dict[str, Any] = {}
for key, val in value.items():
device_val[key] = recursive_copy_to_device(val, non_blocking=non_blocking, device=device)
return device_val
return value
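# Illustrative sketch, not part of the original module: nested containers are copied
# element by element while non-tensor leaves are passed through unchanged.
def _example_recursive_copy_to_device() -> None:
    nested = {"x": torch.ones(2), "y": [torch.zeros(1), "keep-me"]}
    copied = recursive_copy_to_device(nested, non_blocking=False, device=torch.device("cpu"))
    assert torch.equal(copied["x"], nested["x"])
    assert copied["y"][1] == "keep-me"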
def calc_grad_norm(parameters: List[torch.nn.Parameter], p: float) -> torch.Tensor:
r"""Calculate gradient norm of an iterable of parameters.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda par: par.grad is not None, parameters))
if len(parameters) == 0:
return torch.tensor(0.0)
p = float(p)
if p == inf:
local_norm = max(par.grad.detach().abs().max() for par in parameters) # type: ignore
else:
# Compute the norm in full precision no matter what
local_norm = torch.norm(torch.stack([torch.norm(par.grad.detach(), p, dtype=torch.float32) for par in parameters]), p).to(dtype=parameters[0].dtype) # type: ignore
return local_norm
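# Illustrative sketch, not part of the original module: parameters without gradients are
# filtered out, and p=inf returns the largest absolute gradient entry.
def _example_calc_grad_norm() -> None:
    a = torch.nn.Parameter(torch.ones(3))
    b = torch.nn.Parameter(torch.ones(3))  # b.grad is None, so b is ignored
    a.grad = torch.tensor([3.0, -4.0, 0.0])
    assert torch.isclose(calc_grad_norm([a, b], p=2), torch.tensor(5.0))
    assert calc_grad_norm([a, b], p=float("inf")) == 4.0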
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Useful functions for manipulating state_dicts."""
from typing import TYPE_CHECKING, Dict, List, Tuple, Type, Union
from torch import Tensor, nn
if TYPE_CHECKING:
from collections import OrderedDict # noqa: F401
def find_module_instances(module: nn.Module, search_class: Type[nn.Module]) -> List[Tuple[str, nn.Module]]:
"""
    Find all occurrences of a given search_class among the given Module's
children and return the corresponding paths in the same format as
state_dicts.
Usage::
net = nn.Sequential(
nn.Linear(1, 1),
nn.ModuleDict({"ln": nn.LayerNorm(1), "linear": nn.Linear(1, 1)}),
nn.LayerNorm(1)
)
>>> find_module_instances(net, nn.LayerNorm)
[('1.ln.', LayerNorm((1,), eps=1e-05, elementwise_affine=True)), ('2.', LayerNorm((1,), eps=1e-05, elementwise_affine=True))]
>>> find_module_instances(net, nn.Dropout)
[]
>>> find_module_instances(net, nn.Sequential)
[('', Sequential(
(0): Linear(in_features=1, out_features=1, bias=True)
(1): ModuleDict(
(ln): LayerNorm((1,), eps=1e-05, elementwise_affine=True)
(linear): Linear(in_features=1, out_features=1, bias=True)
)
(2): LayerNorm((1,), eps=1e-05, elementwise_affine=True)
))]
"""
paths = []
def add_paths_(module: nn.Module, prefix: str = "") -> None:
if isinstance(module, search_class):
paths.append((prefix, module))
for name, child in module.named_children():
add_paths_(child, prefix + name + ".")
add_paths_(module)
return paths
def replace_by_prefix_(
state_dict: Union[Dict[str, Tensor], "OrderedDict[str, Tensor]"], old_prefix: str, new_prefix: str
) -> None:
"""
Replace all keys that match a given old_prefix with a new_prefix (in-place).
Usage::
state_dict = {"layer.xyz": torch.tensor(1)}
replace_by_prefix_(state_dict, "layer.", "module.layer.")
assert state_dict == {"module.layer.xyz": torch.tensor(1)}
"""
if old_prefix == new_prefix:
raise ValueError("old_prefix and new_prefix must be distinct")
for key in list(state_dict.keys()):
if not key.startswith(old_prefix):
continue
new_key = new_prefix + key[len(old_prefix) :]
state_dict[new_key] = state_dict[key]
del state_dict[key]
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import re
from typing import List, Tuple
import torch
__all__: List[str] = ["torch_version"]
def torch_version(version: str = torch.__version__) -> Tuple[int, ...]:
numbering = re.search(r"^(\d+).(\d+).(\d+)([^\+]*)(\+\S*)?$", version)
if not numbering:
return tuple()
# Catch torch version if run against internal pre-releases, like `1.8.0a0fb`,
if numbering.group(4):
# Two options here:
# - either skip this version (minor number check is not relevant)
# - or check that our codebase is not broken by this ongoing development.
# Assuming that we're interested in the second use-case more than the first,
# return the pre-release or dev numbering
logging.warning(f"Pytorch pre-release version {version} - assuming intent to test it")
return tuple(int(numbering.group(n)) for n in range(1, 4))
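# Illustrative sketch, not part of the original module: how a few version strings map to
# tuples, including a local build suffix and an unparseable string.
def _example_torch_version_parsing() -> None:
    assert torch_version("1.9.0") == (1, 9, 0)
    assert torch_version("1.13.1+cu117") == (1, 13, 1)
    assert torch_version("not-a-version") == tuple()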
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pickle
from typing import Any
import torch
def pyobject_to_tensor(obj: Any, fixed_buffer_size: int = 0) -> torch.Tensor:
pickled = pickle.dumps(obj)
result: torch.Tensor = torch.ByteTensor(bytearray(pickled))
if fixed_buffer_size:
delta = fixed_buffer_size - len(result)
if delta < 0:
raise ValueError(
f"message too big to send, increase `fixed_buffer_size`? - {len(result)} > {fixed_buffer_size}"
)
elif delta > 0:
result = torch.cat((result, torch.zeros(delta, dtype=torch.uint8)))
return result
def tensor_to_pyobject(tensor: torch.Tensor) -> Any:
nparray = tensor.cpu().numpy()
return pickle.loads(nparray.tobytes())
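# Illustrative sketch, not part of the original module: round-tripping a python object
# through a byte tensor. The trailing zero padding added by ``fixed_buffer_size`` is
# ignored by pickle when decoding, which is what makes the fixed-size variant work.
def _example_pyobject_roundtrip() -> None:
    obj = {"step": 3, "loss": 0.5}
    assert tensor_to_pyobject(pyobject_to_tensor(obj)) == obj
    padded = pyobject_to_tensor(obj, fixed_buffer_size=1024)
    assert padded.numel() == 1024
    assert tensor_to_pyobject(padded) == obj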
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Tuple, Union, cast
import numpy as np
import torch
from torch.nn.utils.rnn import PackedSequence
"""Useful functions to deal with tensor types with other python container types."""
def apply_to_type(
type_fn: Callable, fn: Callable, container: Union[torch.Tensor, np.ndarray, Dict, List, Tuple, Set, NamedTuple]
) -> Any:
"""Recursively apply to all objects in different kinds of container types that matches a type function."""
def _apply(x: Union[torch.Tensor, np.ndarray, Dict, List, Tuple, Set]) -> Any:
if type_fn(x):
return fn(x)
elif isinstance(x, OrderedDict):
od = x.__class__()
for key, value in x.items():
od[key] = _apply(value)
return od
elif isinstance(x, PackedSequence):
_apply(x.data)
return x
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
f = getattr(x, "_fields", None)
if f is None:
return tuple(_apply(x) for x in x)
else:
assert isinstance(f, tuple), "This needs to be a namedtuple"
# convert the namedtuple to a dict and _apply().
x = cast(NamedTuple, x)
_dict: Dict[str, Any] = x._asdict()
_dict = {key: _apply(value) for key, value in _dict.items()}
return type(x)(**_dict) # make a copy of the namedtuple
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(container)
def apply_to_tensors(fn: Callable, container: Union[torch.Tensor, Dict, List, Tuple, Set]) -> Any:
"""Recursively apply to all tensor in different kinds of container types."""
return apply_to_type(torch.is_tensor, fn, container)
def to_np(tensor_or_container: Union[torch.Tensor, Dict, List, Tuple, Set]) -> Any:
"""Convert a tensor or a container to numpy."""
return apply_to_type(torch.is_tensor, lambda x: x.cpu().numpy(), tensor_or_container)
def from_np(ndarray_or_container: Union[np.ndarray, Dict, List, Tuple, Set]) -> Any:
"""Convert a ndarray or a container to tensor."""
return apply_to_type(lambda x: isinstance(x, np.ndarray), lambda x: torch.from_numpy(x), ndarray_or_container)
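# Illustrative sketch, not part of the original module: applying a function to every
# tensor inside a nested container while leaving the other values untouched.
def _example_apply_to_tensors() -> None:
    nested = {"w": torch.ones(2), "meta": ["tag", (torch.zeros(1), 7)]}
    doubled = apply_to_tensors(lambda t: t * 2, nested)
    assert torch.equal(doubled["w"], torch.full((2,), 2.0))
    assert doubled["meta"][0] == "tag" and doubled["meta"][1][1] == 7
    # to_np / from_np round-trip the tensors through numpy.
    assert torch.equal(from_np(to_np(nested))["w"], nested["w"])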
def pack_kwargs(*args: Any, **kwargs: Any) -> Tuple[Tuple[str, ...], Tuple[Any, ...]]:
"""
Turn argument list into separate key list and value list (unpack_kwargs does the opposite)
Usage::
kwarg_keys, flat_args = pack_kwargs(1, 2, a=3, b=4)
assert kwarg_keys == ("a", "b")
assert flat_args == (1, 2, 3, 4)
args, kwargs = unpack_kwargs(kwarg_keys, flat_args)
assert args == (1, 2)
assert kwargs == {"a": 3, "b": 4}
"""
kwarg_keys: List[str] = []
flat_args: List[Any] = list(args)
for k, v in kwargs.items():
kwarg_keys.append(k)
flat_args.append(v)
return tuple(kwarg_keys), tuple(flat_args)
def unpack_kwargs(kwarg_keys: Tuple[str, ...], flat_args: Tuple[Any, ...]) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
"""See pack_kwargs."""
assert len(kwarg_keys) <= len(flat_args), f"too many keys {len(kwarg_keys)} vs. {len(flat_args)}"
if len(kwarg_keys) == 0:
return flat_args, {}
args = flat_args[: -len(kwarg_keys)]
kwargs = {k: v for k, v in zip(kwarg_keys, flat_args[-len(kwarg_keys) :])}
return args, kwargs
def split_non_tensors(
mixed: Union[torch.Tensor, Tuple[Any, ...]]
) -> Tuple[Tuple[torch.Tensor, ...], Optional[Dict[str, List[Any]]]]:
"""
Split a tuple into a list of tensors and the rest with information
for later reconstruction.
When called with a tensor X, will return: (x,), None
Usage::
x = torch.Tensor([1])
y = torch.Tensor([2])
tensors, packed_non_tensors = split_non_tensors((x, y, None, 3))
assert tensors == (x, y)
assert packed_non_tensors == {
"is_tensor": [True, True, False, False],
"objects": [None, 3],
}
recon = unpack_non_tensors(tensors, packed_non_tensors)
assert recon == (x, y, None, 3)
"""
if isinstance(mixed, torch.Tensor):
return (mixed,), None
tensors: List[torch.Tensor] = []
packed_non_tensors: Dict[str, List[Any]] = {"is_tensor": [], "objects": []}
for o in mixed:
if isinstance(o, torch.Tensor):
packed_non_tensors["is_tensor"].append(True)
tensors.append(o)
else:
packed_non_tensors["is_tensor"].append(False)
packed_non_tensors["objects"].append(o)
return tuple(tensors), packed_non_tensors
def unpack_non_tensors(
tensors: Tuple[torch.Tensor, ...], packed_non_tensors: Optional[Dict[str, List[Any]]]
) -> Tuple[Any, ...]:
"""See split_non_tensors."""
if packed_non_tensors is None:
return tensors
assert isinstance(packed_non_tensors, dict), type(packed_non_tensors)
mixed: List[Any] = []
is_tensor_list = packed_non_tensors["is_tensor"]
objects = packed_non_tensors["objects"]
assert len(tensors) + len(objects) == len(is_tensor_list), (
f"len(tensors) {len(tensors)} len(objects) {len(objects)} " f"len(is_tensor_list) {len(is_tensor_list)}"
)
obj_i = tnsr_i = 0
for is_tensor in is_tensor_list:
if is_tensor:
mixed.append(tensors[tnsr_i])
tnsr_i += 1
else:
mixed.append(objects[obj_i])
obj_i += 1
return tuple(mixed)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from .version import *
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Useful functions for parallel training."""
from enum import Enum
import sys
from typing import TYPE_CHECKING, List, Optional, Sequence
import torch
import torch.distributed as dist
import torch.nn.functional as F
if TYPE_CHECKING:
# See comments in FSDP code for reason of this import.
from torch.distributed import ProcessGroup
def chunk_and_pad(tensor: torch.Tensor, num_chunks: int) -> List[torch.Tensor]:
"""Chunk a given Tensor into num_chunks parts and add any necessary padding."""
chunks = list(torch.flatten(tensor).chunk(num_chunks))
# torch.chunk may return fewer than num_chunks chunks, pad accordingly.
num_pad_for_partial_chunk = chunks[0].numel() - chunks[-1].numel()
if num_pad_for_partial_chunk > 0:
chunks[-1] = F.pad(chunks[-1], [0, num_pad_for_partial_chunk])
if len(chunks) < num_chunks:
chunks.extend([torch.zeros_like(chunks[0]) for _ in range(num_chunks - len(chunks))])
return chunks
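# Illustrative sketch, not part of the original module: the last partial chunk is padded
# and missing chunks are appended as zeros, so exactly num_chunks equal pieces come back.
def _example_chunk_and_pad() -> None:
    chunks = chunk_and_pad(torch.arange(10, dtype=torch.float32), num_chunks=4)
    assert len(chunks) == 4 and all(c.numel() == chunks[0].numel() for c in chunks)
    chunks = chunk_and_pad(torch.arange(10, dtype=torch.float32), num_chunks=6)
    assert len(chunks) == 6 and chunks[-1].sum() == 0  # an all-zero chunk was appended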
def validate_process_group(device: torch.device, process_group: "ProcessGroup") -> None:
"""Do a quick test in case user called FSDP without calling torch.cuda.set_device()
correctly. This can easily happen in cpu_offload case where the model resides on
the CPU.
"""
if not hasattr(process_group, "allgather"):
# Likely a dummy pg for unit test, skip checking.
return
world_size = process_group.size()
if "cuda" in str(device):
input_tensor = torch.ones(1).to(device)
output = list(torch.zeros(world_size).to(device).chunk(world_size))
dist.all_gather(output, input_tensor, group=process_group)
assert torch.cat(output).sum() == float(world_size), (
f"found {torch.cat(output).sum()} devices in process group but "
f"world_size={world_size}. Check torch.cuda.set_device is called properly"
)
def enable_pytorch_sync_bn(module: torch.nn.Module) -> None:
"""Call _specify_ddp_gpu_num for all pytorch SyncBN layers so that it
is happily running even without DDP. E.g. this is used by FSDP.
"""
for layer in module.modules():
if isinstance(layer, torch.nn.modules.SyncBatchNorm) and hasattr(layer, "_specify_ddp_gpu_num"):
# Number "1" below meant to be the number of GPUs for each DDP worker.
# (i.e. "device_ids" in DDP. As far as I see, the value is not actually
# used, but this call needs to be made to avoid an exception.
# This function is removed from pytorch since 1.9.
layer._specify_ddp_gpu_num(1) # type: ignore
class ProcessGroupName(str, Enum):
default = "default"
reduce_scatter = "reduce_scatter"
def get_process_group_cached(
name: ProcessGroupName = ProcessGroupName.default, ranks: Optional[Sequence[int]] = None
) -> "ProcessGroup":
"""
Singleton PyTorch distributed group cache. Inspired by the code from fairseq.
Just like torch.distributed.new_group, this method needs to be called on all ranks
at the same time when a new group is created. This is true for all ranks irrespective
of their group membership status.
For FSDP, it is important to use the same group between outer and inner FSDP instances,
otherwise, inner FSDP instances will not share the gradient reduction bucket buffer with
the root instance. This will result in increased GPU memory utilization.
Each separate process group also uses separate NCCL library instances, which will have
a significant effect on GPU memory use if too many process groups are created and used.
Setting NCCL_BUFFSIZE=102400 env variable is a useful technique to check if the NCCL
memory is causing GPU OOM. Note, the NCCL buffers are not allocated
through the PyTorch caching allocator, therefore, you may see GPU OOM even when
torch.cuda.reserved_memory() is still way below the total amount of GPU memory.
Extra process groups can also reduce training speed (observed on VISSL models).
Args:
name ProcessGroupName:
There are two process groups when reduce_scatter overlap is enabled. The "default" process group is the
default process group. The other group is "reduce_scatter" group.
Default: ProcessGroupName.default
ranks (Optional[List[int]]):
Ranks requested in the target group. None for all ranks.
Default: None
Returns:
(ProcessGroup):
Return the requested process group. Throws RuntimeError if torch.distributed module is not yet initialized.
"""
if not dist.is_initialized():
# Likely caused by initiating a dummy pg for unit test, skip checking.
if name == ProcessGroupName.reduce_scatter and "pytest" in sys.modules:
return None
else:
raise RuntimeError("torch.distributed is not yet initialized but process group is requested.")
# Init the cache if needed.
if not hasattr(get_process_group_cached, "_global_group_cache"):
get_process_group_cached._global_group_cache = {} # type: ignore
# Populate with default process group.
cache = get_process_group_cached._global_group_cache # type: ignore
default_pg = dist.new_group(ranks=ranks)
cache[None] = default_pg
cache[(ProcessGroupName.default, None)] = default_pg
cache[(ProcessGroupName.default, frozenset(list(range(dist.get_world_size()))))] = default_pg
# Lookup and fill the cache if needed.
cache = get_process_group_cached._global_group_cache # type: ignore
if ranks is not None:
# take care of ordering and duplicates in the ranks list. use tuple so that ranks
# can be used as a cache index.
ranks = tuple(sorted(list(set(ranks))))
if (name, ranks) not in cache:
cache[(name, ranks)] = dist.new_group(ranks=ranks)
return cache[(name, ranks)]
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
import collections.abc as abc
from enum import Enum
import logging
from typing import Any, Dict, List, Optional, Union
import warnings
import torch
from torch.cuda import FloatTensor # type: ignore
from torch.cuda.amp.common import amp_definitely_not_available
from torch.cuda.amp.grad_scaler import GradScaler as TorchGradScaler
import torch.distributed as dist
from torch.optim import Optimizer
from torch.optim.sgd import SGD
from fairscale.internal import torch_version
class _GeneralMultiDeviceReplicator(object):
"""
Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
This class adds the cpu option to the _MultiDeviceReplicator class in PyTorch grad_scaler.py.
https://pytorch.org/docs/stable/_modules/torch/cuda/amp/grad_scaler.html#GradScaler
"""
def __init__(self, master_tensor: torch.Tensor) -> None:
assert master_tensor.is_cuda or master_tensor.device.type == "xla" or master_tensor.device.type == "cpu"
self.master = master_tensor
self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
def get(self, device: torch.device) -> torch.Tensor:
retval = self._per_device_tensors.get(device, None)
if retval is None:
retval = self.master.to(device=device, non_blocking=True, copy=True)
self._per_device_tensors[device] = retval
return retval
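# Illustrative sketch, not part of the original module: copies are created lazily and
# cached per device, and a CPU master tensor is accepted (unlike the upstream helper).
def _example_replicator_cache() -> None:
    rep = _GeneralMultiDeviceReplicator(torch.tensor(2.0**16))
    first = rep.get(torch.device("cpu"))
    assert rep.get(torch.device("cpu")) is first  # second call hits the per-device cache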
# Defines default_factory for GradScaler's _per_optimizer_states defaultdict,
# as well as associated "enum" values. Prefers defining these at top level because
# - Lambdas can't be pickled, so we don't want to supply a lambda as the factory.
# - Defining READY, UNSCALED, STEPPED and _refresh_per_optimizer_state within GradScaler
# causes a circular reference, which we'd rather avoid.
class OptState(Enum):
READY = 0
UNSCALED = 1
STEPPED = 2
def _refresh_per_optimizer_state() -> Dict:
return {"stage": OptState.READY, "found_inf_per_device": {}}
class GradScaler(TorchGradScaler):
def _unscale_grads_(
self,
optimizer: Optimizer,
inv_scale: torch.Tensor,
found_inf: torch.Tensor,
allow_fp16: bool,
) -> Dict[torch.device, torch.Tensor]:
return super()._unscale_grads_(optimizer, inv_scale, found_inf, True)
class ShardedGradScaler(TorchGradScaler):
"""
A shard aware Grad Scaler which enables loss scaling with/without cpu_offload. This is a
slight modification of the pytorch grad scaler.
https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler
"""
def __init__(
self,
init_scale: float = 2.0**16,
growth_factor: float = 2.0,
backoff_factor: float = 0.5,
growth_interval: int = 2000,
enabled: bool = True,
process_group: Any = dist.group.WORLD,
):
super().__init__(
init_scale=init_scale,
growth_factor=growth_factor,
backoff_factor=backoff_factor,
growth_interval=growth_interval,
enabled=enabled,
)
if enabled and amp_definitely_not_available():
warnings.warn("torch.cuda.amp.GradScaler is enabled, but CUDA is not available. Disabling.")
self._enabled = False
else:
self._enabled = enabled
if self._enabled:
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
self.group = process_group
def scale(self, outputs: Union[torch.Tensor, List[torch.Tensor]]) -> Union[torch.Tensor, abc.Iterable]:
"""
Multiplies ('scales') a tensor or list of tensors by the scale factor.
Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
unmodified.
Args:
outputs (Tensor or iterable of Tensors): Outputs to scale.
"""
if not self._enabled:
return outputs
# Short-circuit for the common case.
if isinstance(outputs, torch.Tensor):
assert outputs.is_cuda or outputs.device.type == "xla" or outputs.device.type == "cpu"
if self._scale is None:
self._lazy_init_scale_growth_tracker(outputs.device) # type: ignore
assert self._scale is not None
return outputs * self._scale.to(device=outputs.device, non_blocking=True)
# Invoke the more complex machinery only if we're treating multiple outputs.
stash: List[_GeneralMultiDeviceReplicator] = [] # holds a reference that can be overwritten by apply_scale
def apply_scale(val: Union[torch.Tensor, abc.Iterable]) -> Union[torch.Tensor, abc.Iterable]:
if isinstance(val, torch.Tensor):
assert val.is_cuda or val.device.type == "xla" or val.device.type == "cpu"
if len(stash) == 0:
if self._scale is None:
self._lazy_init_scale_growth_tracker(val.device) # type: ignore
assert self._scale is not None
stash.append(_GeneralMultiDeviceReplicator(self._scale))
return val * stash[0].get(val.device)
elif isinstance(val, abc.Iterable):
iterable = map(apply_scale, val)
if isinstance(val, list) or isinstance(val, tuple):
return type(val)(iterable)
else:
return iterable
else:
raise ValueError("outputs must be a Tensor or an iterable of Tensors")
return apply_scale(outputs)
    # This function is required to enable a cpu based grad scaler. It is inspired by the corresponding CUDA
    # implementation, which can be found here
# https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/AmpKernels.cu#L88
def _foreach_non_finite_check_and_unscale_cpu_(
self, grads: List, found_inf: torch.Tensor, inv_scale: torch.Tensor
) -> None:
if len(grads) == 0:
return
assert inv_scale.numel() == 1, "inv_scale must be a 1-element tensor."
assert found_inf.numel() == 1, "found_inf must be a 1-element tensor."
expected_device = grads[0].device
for tensor in grads:
try:
assert tensor.device == expected_device, "grads must be on the same device"
except AssertionError:
logging.error("tensor device is %s and expected device is %s" % (tensor.device, expected_device))
            # The check for non_overlapping_and_dense doesn't exist in the python world,
            # as remarked here https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/AmpKernels.cu#L108
            # We assume the tensor is not MTA (multi tensor apply) safe and iterate through each item regardless of dtype.
            if torch.isinf(tensor).any().item() or torch.isnan(tensor).any().item():  # type: ignore
found_inf.data = torch.tensor([1.0])
break
else:
tensor.data *= inv_scale.item()
def _unscale_grads_( # type: ignore
self, optimizer: SGD, inv_scale: torch.Tensor, found_inf: torch.Tensor, allow_fp16: bool = True
) -> Dict[torch.device, torch.Tensor]:
per_device_inv_scale = _GeneralMultiDeviceReplicator(inv_scale)
per_device_found_inf = _GeneralMultiDeviceReplicator(found_inf)
# To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
# There could be hundreds of grads, so we'd like to iterate through them just once.
# However, we don't know their devices or dtypes in advance.
# https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
# Google says mypy struggles with defaultdicts type annotations.
per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated]
with torch.no_grad():
for group in optimizer.param_groups:
for param in group["params"]:
if param.grad is None:
continue
if (not allow_fp16) and param.grad.dtype == torch.float16:
raise ValueError("Attempting to unscale FP16 gradients.")
if param.grad.is_sparse:
# is_coalesced() == False means the sparse grad has values with duplicate indices.
# coalesce() deduplicates indices and adds all values that have the same index.
# For scaled fp16 values, there's a good chance coalescing will cause overflow,
# so we should check the coalesced _values().
if param.grad.dtype is torch.float16:
param.grad = param.grad.coalesce()
to_unscale = param.grad._values()
else:
to_unscale = param.grad
# TODO: is there a way to split by device and dtype without appending in the inner loop?
per_device_and_dtype_grads[to_unscale.device][to_unscale.dtype].append(to_unscale)
for device, per_dtype_grads in per_device_and_dtype_grads.items():
for grads in per_dtype_grads.values():
if grads[0].device.type == "cpu":
self._foreach_non_finite_check_and_unscale_cpu_(
grads,
per_device_found_inf.get(device),
per_device_inv_scale.get(device),
)
else:
torch._amp_foreach_non_finite_check_and_unscale_( # type: ignore
grads,
per_device_found_inf.get(device),
per_device_inv_scale.get(device),
)
return per_device_found_inf._per_device_tensors
def unscale_(self, optimizer: SGD) -> None: # type: ignore
if not self._enabled:
return
super()._check_scale_growth_tracker("unscale_") # type: ignore
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state["stage"] is OptState.UNSCALED:
raise RuntimeError("unscale_() has already been called on this optimizer since the last update().")
elif optimizer_state["stage"] is OptState.STEPPED:
raise RuntimeError("unscale_() is being called after step().")
# FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
assert self._scale is not None
inv_scale = self._scale.double().reciprocal().float()
found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=self._scale.device)
optimizer_state["found_inf_per_device"] = self._unscale_grads_(optimizer, inv_scale, found_inf, True)
optimizer_state["stage"] = OptState.UNSCALED
# Synchronize the detected inf across the ranks
optimizer_state = self._per_optimizer_states[id(optimizer)]
last_handle = None
for v in optimizer_state["found_inf_per_device"].values():
if v.device.type == "cpu":
v_on_cuda = v.cuda()
last_handle = dist.all_reduce(v_on_cuda, async_op=True, group=self.group)
v_on_cuda.cpu()
else:
last_handle = dist.all_reduce(v, async_op=True, group=self.group)
# Make sure that the calls are done before moving out.
# The calls are executed in sequence, waiting for the last one is enough
if last_handle is not None:
last_handle.wait()
def step(self, optimizer: SGD, *args, **kwargs) -> Optional[float]: # type: ignore
"""
:meth:`step` carries out the following two operations:
1. Internally invokes ``unscale_(optimizer)`` (unless :meth:`unscale_` was explicitly called for ``optimizer``
earlier in the iteration). As part of the :meth:`unscale_`, gradients are checked for infs/NaNs.
2. If no inf/NaN gradients are found, invokes ``optimizer.step()`` using the unscaled
gradients. Otherwise, ``optimizer.step()`` is skipped to avoid corrupting the params.
``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``.
Returns the return value of ``optimizer.step(*args, **kwargs)``.
Args:
optimizer (torch.optim.Optimizer): Optimizer that applies the gradients.
args: Any arguments.
kwargs: Any keyword arguments.
.. warning::
Closure use is not currently supported.
Note: This is an exact copy of the step function in grad_scaler.py. If this copy is deleted then the
unittest test_cpu_offload_and_cpu_grads fails. This is because the parent class step function calls
the parent class unscale_ function which does not handle torch.distributed.all_reduce on cpu.
"""
if not self._enabled:
return optimizer.step(*args, **kwargs)
if "closure" in kwargs:
raise RuntimeError("Closure use is not currently supported if GradScaler is enabled.")
self._check_scale_growth_tracker("step") # type: ignore
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state["stage"] is OptState.STEPPED:
raise RuntimeError("step() has already been called since the last update().")
retval = None
if hasattr(optimizer, "_step_supports_amp_scaling") and optimizer._step_supports_amp_scaling:
# This optimizer has customized scale-handling logic, so we can call optimizer.step() directly.
# The contract with custom optimizers is that their step() should accept an additional,
# optional grad_scaler kwarg. We append self to the kwargs so the custom optimizer has full information:
# it can query its own state, invoke unscale_ on itself, etc
retval = optimizer.step(*args, **dict(kwargs, grad_scaler=self))
optimizer_state["stage"] = OptState.STEPPED
return retval
if optimizer_state["stage"] is OptState.READY:
self.unscale_(optimizer)
assert len(optimizer_state["found_inf_per_device"]) > 0, "No inf checks were recorded for this optimizer."
retval = self._maybe_opt_step(optimizer, optimizer_state, *args, **kwargs) # type: ignore
optimizer_state["stage"] = OptState.STEPPED
return retval
    # This function is required to enable a cpu based grad scaler. It is inspired by the corresponding CUDA
    # implementation, which can be found here
# https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/AmpKernels.cu#L219
def _amp_update_scale_cpu_(self, found_inf): # type: ignore
"""
If found_inf is 1.0 (True), then scale is multiplied by backoff_factor and growth_tracker is set to zero.
Otherwise, scale is multiplied by the growth factor when the growth interval is reached.
"""
if found_inf.item() == 1.0:
self._scale *= self._backoff_factor # type: ignore
self._growth_tracker = 0
else:
successful = self._growth_tracker + 1
if successful == self._growth_interval: # type: ignore
self._scale *= self._growth_factor # type: ignore
self._growth_tracker = 0
else:
self._growth_tracker = successful
def update(self, new_scale: Optional[Union[float, FloatTensor]] = None) -> None:
"""
Updates the scale factor.
If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
the scale is multiplied by ``growth_factor`` to increase it.
Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
used directly, it's used to fill GradScaler's internal scale tensor. So if
``new_scale`` was a tensor, later in-place changes to that tensor will not further
affect the scale GradScaler uses internally.)
Args:
new_scale (float or :class:`torch.cuda.FloatTensor`, optional, default=None): New scale factor.
.. warning::
:meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
been invoked for all optimizers used this iteration.
"""
if not self._enabled:
return
_scale, _growth_tracker = self._check_scale_growth_tracker("update") # type: ignore
if new_scale is not None:
# Accept a new user-defined scale.
if isinstance(new_scale, float):
self._scale.fill_(new_scale)
else:
reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False."
assert isinstance(new_scale, torch.cuda.FloatTensor), reason # type: ignore[attr-defined]
assert new_scale.numel() == 1, reason
assert new_scale.requires_grad is False, reason
self._scale.copy_(new_scale)
else:
# Consume shared inf/nan data collected from optimizers to update the scale.
# If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
found_infs = [
found_inf.to(device=_scale.device, non_blocking=True)
for state in self._per_optimizer_states.values()
for found_inf in state["found_inf_per_device"].values()
]
assert len(found_infs) > 0, "No inf checks were recorded prior to update."
found_inf_combined = found_infs[0]
if len(found_infs) > 1:
for i in range(1, len(found_infs)):
found_inf_combined += found_infs[i]
if _scale.device.type == "cpu":
self._amp_update_scale_cpu_(found_inf_combined) # type: ignore
else:
if torch_version() >= (1, 9, 0):
torch._amp_update_scale_( # type: ignore
self._scale,
self._growth_tracker,
found_inf_combined,
self._growth_factor, # type: ignore
self._backoff_factor, # type: ignore
self._growth_interval, # type: ignore
)
elif torch_version() >= (1, 8, 0) and torch_version() < (1, 9, 0):
self._scale = torch._amp_update_scale( # type: ignore
self._growth_tracker,
_scale,
found_inf_combined,
self._growth_factor, # type: ignore
self._backoff_factor, # type: ignore
self._growth_interval, # type: ignore
)
# To prepare for next iteration, clear the data collected from optimizers this iteration.
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
:mod:`fairscale.optim` is a package implementing various torch optimization algorithms.
"""
import logging
from typing import List
from .adascale import AdaScale, AdaScaleWrapper
from .oss import OSS
try:
from .adam import Adam, Precision
except ImportError: # pragma: no cover
pass # pragma: no cover
try:
from .grad_scaler import GradScaler
except ImportError:
logging.warning("Torch AMP is not available on this platform")
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import copy
import io
from itertools import chain
import logging
from math import inf
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union
import torch
from torch.autograd import profiler
import torch.distributed as dist
from torch.nn import Parameter
from torch.optim import SGD, Optimizer
from fairscale.internal.params import calc_grad_norm, get_global_rank, recursive_copy_to_device
from fairscale.nn.misc import ParamBucket
__all__ = ["OSS"]
if TYPE_CHECKING: # pragma: no cover
from torch.optim.optimizer import _params_t
else:
_params_t = Any
_gpu_is_old: Optional[bool] = None
def _gpu_capabilities_older_than_50() -> bool:
"""Return True if the GPU's compute capability is older than SM50."""
global _gpu_is_old
if _gpu_is_old is None:
for i in range(torch.cuda.device_count()):
major, minor = torch.cuda.get_device_capability(f"cuda:{i}")
if major <= 5:
_gpu_is_old = True
if _gpu_is_old is None:
_gpu_is_old = False
return _gpu_is_old
def _broadcast_object(
obj: Any, src_rank: int, group: object = dist.group.WORLD, dist_device: torch.device = torch.device("cpu")
) -> Any:
"""
Either broadcast from master to the fleet (default),
or use the src setting as the original rank.
This is only needed for some older GPUs where dist.broadcast_object_list seems to hang. Also,
the hang behavior persists across processes once it happens, i.e. once dist.broadcast_object_list
has been called and hangs, subsequent calls with _broadcast_object also hang.
"""
if dist.get_rank() == src_rank:
# Emit data
buffer = io.BytesIO()
torch.save(obj, buffer)
data = bytearray(buffer.getbuffer())
length_tensor = torch.LongTensor([len(data)]).to(dist_device)
data_send_tensor = torch.ByteTensor(data).to(dist_device)
dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False)
dist.broadcast(data_send_tensor, src=src_rank, group=group, async_op=False)
else:
# Fetch from the source
length_tensor = torch.LongTensor([0]).to(dist_device)
dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False)
data_recv_tensor = torch.empty([int(length_tensor.item())], dtype=torch.uint8, device=dist_device)
dist.broadcast(data_recv_tensor, src=src_rank, group=group, async_op=False)
buffer = io.BytesIO(data_recv_tensor.cpu().numpy())
obj = torch.load(buffer, map_location=dist_device)
return obj
class OSS(Optimizer):
"""Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`
optimizer and shards its state as described by ZeRO_.
::
opt = OSS(params, optim=torch.optim.Adam, lr=0.01)
.. _ZeRO: https://arxiv.org/abs/1910.02054
We use a greedy algorithm to pack a number of parameters
at each rank. Each parameter belongs to a single rank and
is not divided among ranks.
After each rank has completed its parameter update, it broadcasts
the new version of its parameters to all other ranks to synchronize
the parameters for the next round of forward/backward computation.
Args:
params (list of tensors):
parameters to be optimized
Keyword Args:
optim (torch.nn.Optimizer):
optimizer to shard (default: SGD)
group (group):
torch.distributed group (default: group.WORLD)
broadcast_buffer_size (int):
(deprecated) formerly used to cap the size of the broadcast buffers; no longer used.
broadcast_fp16 (bool):
Compress the model shards in fp16 before sharing them in between ranks.
This is safe to use when PyTorch AMP is activated. Without torch AMP this will lead to a slight
degradation in terms of accuracy.
force_broadcast_object (bool):
If True, '_broadcast_object' will be used for rebuilding the sharded optimizer.
If False, whether to use '_broadcast_object' or 'dist.broadcast_object_list' will be determined by GPU capabilities.
This feature is needed since some newer GPUs still get some memory issues when applying dist.broadcast_object_list.
.. warning: the communication patterns that OSS uses depend on the "trainability" graph,
meaning that the parameters which `requires_grad` are handled differently. This is
not re-evaluated at every step; please call `refresh_trainable()` if your model changed
(for instance if layers were frozen or unfrozen).
If used with :class:`fairscale.nn.ShardedDDP` then an automatic change detection is possible,
via the `auto_refresh_trainable` parameter.
"""
#: The optimizer used for a given shard
optim: Optimizer
in_super_constructor: bool
def __init__(
self,
params: _params_t,
optim: Type[Optimizer] = SGD,
group: Optional[Any] = None,
broadcast_buffer_size: int = -1,
broadcast_fp16: bool = False,
force_broadcast_object: bool = False,
**default: Any,
):
# Hold all the model params in the root .param_groups
self.in_super_constructor = True
super().__init__(params, default)
self.in_super_constructor = False
# Partition information. lazy evaluation, computed when requested
self.__per_device_params: Dict[torch.device, List[List[Parameter]]] = OrderedDict() # device, rank, params
self.__param_rank: Dict[torch.Tensor, int] = {}
self._partition_parameters: List[List[dict]] = []
self.__param_to_index: Dict[int, int] = {}
self.__local_params: Optional[List[torch.Tensor]] = None
# Default empty values + immutables
self._optim_defaults = default
self._optim_constructor = optim
self.group = group if group is not None else dist.group.WORLD
self.world_size = dist.get_world_size(self.group)
self.backend = dist.get_backend(self.group)
self.rank = dist.get_rank(self.group)
self.global_rank = get_global_rank(self.group, self.rank)
self._local_to_global_rank = [get_global_rank(self.group, i) for i in range(self.world_size)]
self.broadcast_fp16 = broadcast_fp16
self.force_broadcast_object = force_broadcast_object
self.buckets: Dict[torch.device, Dict[int, ParamBucket]] = {}
self._all_states: List[Dict[str, Any]] = [] # Optional consolidated optimizer state
self._default_device = torch.device("cpu")
# Setup everything which is related to the parameters to be trained
# (partition and optimizer for the shard)
self.refresh_trainable()
# Partition helpers
def partition_parameters(self) -> List[List[dict]]:
"""Partitions parameters across distributed data parallel ranks.
Returns a list of param_groups (which is a list of dict) where each
element of the list contains the param_groups for a rank. Element 0
corresponds to rank 0, etc. We need all the ranks for the broadcast
inside step().
"""
if len(self._partition_parameters) == 0:
self._partition_parameters = [list() for _ in range(self.world_size)]
sizes = [0] * self.world_size
for param_group in self.param_groups:
param_lists: List[List] = [list() for _ in range(self.world_size)]
for param in param_group["params"]:
# Add this param to rank with smallest size.
rank = sizes.index(min(sizes))
param_lists[rank].append(param)
# We're partitioning the optimizer state,
# so trainable parameters are the ones which really count
if param.requires_grad:
sizes[rank] += param.numel()
else:
# Spread frozen params on a per-tensor basis
# Mostly useful for balance partitions for fine tuning for instance
# Not required strictly speaking
sizes[rank] += 1
for rank, params in enumerate(param_lists):
param_group_rank = copy.copy(param_group)
param_group_rank["params"] = params
self._partition_parameters[rank].append(param_group_rank)
return self._partition_parameters
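# Worked example of the greedy packing above (illustrative only, not part of the code path):
# with two ranks and trainable parameters of sizes [4, 3, 2, 1], each param goes to the rank
# with the smallest running total, i.e. 4 -> rank 0, 3 -> rank 1, 2 -> rank 1, 1 -> rank 0,
# leaving 5 elements' worth of optimizer state on each rank.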
# NOTE(msb) We accept **kwargs in order to support Optimizer sub-classes that take extra kwargs.
# For example, the apex library contains fused optimizers with a step that supports extra kwargs.
def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:
"""Performs a single optimization step (parameter update).
Arguments:
closure (callable): A closure that reevaluates the model and
returns the loss. Optional for most optimizers.
.. note: Any extra parameter is passed to the base optimizer as-is"""
# Sync oss param_groups attributes in case they've been updated by a scheduler.
OSS._sync_param_groups(self.param_groups, self.optim.param_groups)
# Catch a possible change of devices in between OSS construction and step()
with profiler.record_function("fairscale::oss::refresh_trainable"):
if self._default_device.type != self.param_groups[0]["params"][0].device.type:
logging.info("OSS detected that the parameter changed devices, re-allocating buffers")
self._clear_cache()
self.refresh_trainable()
# Run the optimizer step on this shard only:
with profiler.record_function("fairscale::oss::optim_step"):
if closure is not None:
loss = self.optim.step(closure=closure, **kwargs) # type: ignore
else:
loss = self.optim.step(**kwargs)
# Sync all the updated shards in between the ranks
self._broadcast_params()
# Sync hypothetical new results from the wrapped optimizer to the exposed param_groups
OSS._sync_param_groups(self.optim.param_groups, self.param_groups)
return loss
def clip_grad_norm(
self,
max_norm: Union[float, int],
norm_type: Union[float, int] = 2.0,
filter_params_fn: Callable[[Any], Any] = None,
) -> torch.Tensor:
"""
Clip all gradients at this point in time. The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Arguments:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
.. note: This is analogous to `torch.nn.utils.clip_grad_norm_` but handles the partitioning and multiple devices per rank
under the hood. The default torch util is not applicable here, because each rank only has a partial view of all the grads
in the model, so calling it in the OSS context would lead to different scaling being applied per subset of model parameters
.. warning: This needs to be called on all ranks, since synchronization primitives will be used
"""
# Compute the max norm for this shard's worth of gradients
max_norm = float(max_norm)
norm_type = float(norm_type)
with profiler.record_function("fairscale::oss::clip_grad_norm"):
# Option to filter parameters from the grad_norm calculation. This is useful for model parallelism.
# To avoid double counting, only consider parameters on rank zero + anything marked 'model_parallel'
# 'model_parallel' flag is set in Megatron-LM:
# https://github.com/NVIDIA/Megatron-LM/blob/19301985dd31c8b612095cbad15bd903e8ddd497/megatron/mpu/layers.py#L54
local_params = filter_params_fn(self._local_params) if filter_params_fn is not None else self._local_params
local_norm = calc_grad_norm(local_params, norm_type).to(self._default_device)
# Compute the norm on this grad set,
# then sync all the norms from all ranks
if norm_type == inf:
total_norm = local_norm
# all reduce over data parallel and model parallel workers
dist.all_reduce(total_norm, op=torch.distributed.ReduceOp.MAX, group=dist.group.WORLD)
else:
# local norm results can be combined with the remote ones if raised to the right power:
#   n_i = (sum over local grads a of |a|^p)^(1/p)
#   n_total = all_reduce(n_i^p)^(1/p) = (sum_i n_i^p)^(1/p) = (sum_i sum over local grads |a|^p)^(1/p)
# all reduce over data parallel and model parallel workers
total_norm = local_norm**norm_type
dist.all_reduce(total_norm)
total_norm = total_norm ** (1.0 / norm_type)
clip_coef = torch.tensor(max_norm, dtype=total_norm.dtype, device=total_norm.device) / (total_norm + 1e-6)
if clip_coef < 1:
for device, device_params in self._per_device_params.items():
for p in filter(lambda x: x.grad is not None, device_params[self.rank]):
p.grad.detach().mul_(clip_coef.to(device))
return total_norm
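# Worked example of the norm combination above (illustrative only): with norm_type=2 and two
# ranks whose shard-local grad norms are 3.0 and 4.0, each rank contributes 9.0 and 16.0 to
# the all_reduce, and every rank recovers the same global norm
# (9.0 + 16.0) ** 0.5 = 5.0 before computing the clipping coefficient.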
# State dict interfaces
def consolidate_state_dict(self, recipient_rank: int = 0) -> None:
"""Update the consolidated state_dict list, one per rank.
Arguments:
recipient_rank (int): on which rank to materialize the full state dict.
-1 is a special value, which means that all ranks should have the state
.. warning: This needs to be called on all replicas"""
# Sync lr and other attributes in case they have been updated
OSS._sync_param_groups(self.param_groups, self.optim.param_groups)
# Pull the sharded state from all the other replicas
# Store all the states in order, rank by rank
logging.debug("Pulling the sharded optimizer state from all replicas")
self._all_states = []
should_collect_state = self.rank == recipient_rank or recipient_rank == -1
should_send_state = self.rank != recipient_rank
# NCCL requires CUDA tensors for all communication primitives
dist_device = torch.device("cuda") if self.backend == dist.Backend.NCCL else self._default_device
for rank in range(self.world_size):
if rank == self.rank:
if should_collect_state:
logging.debug("Saving self state")
self._all_states.append(
recursive_copy_to_device(self.optim.state_dict(), non_blocking=True, device=torch.device("cpu"))
)
# Sync with other replicas
state_to_share = (
self.optim.state_dict()
if should_send_state
else torch.tensor([0], dtype=torch.uint8, device=dist_device)
)
if self.force_broadcast_object or _gpu_capabilities_older_than_50():
_broadcast_object(
state_to_share, src_rank=self.global_rank, group=self.group, dist_device=dist_device
)
else:
obj_list = [state_to_share]
dist.broadcast_object_list(
obj_list,
src=self.global_rank,
group=self.group,
)
else:
# Fetch the optim state from the other replicas
if self.force_broadcast_object or _gpu_capabilities_older_than_50():
replica_state = _broadcast_object(
torch.tensor([0], dtype=torch.uint8, device=dist_device),
src_rank=self._local_to_global_rank[rank],
group=self.group,
dist_device=dist_device,
)
else:
obj_list = [torch.tensor([0], dtype=torch.uint8, device=dist_device)]
dist.broadcast_object_list(
obj_list,
src=self._local_to_global_rank[rank],
group=self.group,
)
replica_state = obj_list[0]
if should_collect_state:
self._all_states.append(
recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device("cpu"))
)
logging.debug("State from rank %s received", rank)
def state_dict(self, all_ranks: bool = False) -> Dict[str, Any]:
"""Return the last known global optimizer state. The returned state is compatible with Pytorch, in that the
sharded properties are not exposed.
Arguments:
all_ranks (bool): materialize the state on all ranks. In that case, `.state_dict()` needs to be called on
all ranks
Returns:
a dict with two entries
* state - a dict holding current optimization state. Its content
differs between optimizer classes.
* param_groups - a dict containing all parameter groups
.. warning:
Returning the global state is limited to the replica which was responsible for the consolidation,
if `all_ranks` was not set to `True`. In that case, the state may also not be up to date,
depending on when `consolidate_state_dict` was last called.
"""
if not all_ranks and len(self._all_states) == 0:
raise RuntimeError(
"Optimizer state has not been consolidated on this rank. \
Please call `consolidate_state_dict()` on all ranks beforehand if you meant to save the global state"
)
if all_ranks:
# Consolidate the state on every rank
self.consolidate_state_dict(recipient_rank=-1)
# Unify the shard states and the state that pytorch would expect, given the model.
# Indexing needs several redirections, since each shard only knows a limited scope of the model
# - get the pytorch compliant parameter indexing
state_dict = super().state_dict()
# - go through the per-shard states, which are all indexed locally
for rank, s in enumerate(self._all_states):
# -- match the local indexing and the global partition, update the corresponding saved state globally
for local_pg, global_pg in zip(s["param_groups"], self.partition_parameters()[rank]):
local_index_to_param_id = {
i_param: id(global_pg["params"][i]) for i, i_param in enumerate(local_pg["params"])
}
for local_param_index in local_pg["params"]:
# Update the state, if any
if local_param_index in s["state"].keys():
global_id = self._param_to_index[local_index_to_param_id[local_param_index]]
state_dict["state"][global_id] = s["state"][local_param_index]
# Make sure that the parameters are sorted in the state, as expected for a pytorch dict
state_dict["state"] = dict(sorted(state_dict["state"].items()))
return state_dict
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""Restore the global parameter groups as well as the shard.
Arguments:
state_dict (dict): optimizer state. Should be an object returned
from a call to :meth:`state_dict`
"""
# Update the state, trusting the ordering in param_groups
# Apart from the removal of states not owned by this rank, the pytorch logic is kept
# (See torch.optim.optimizer)
id_map = {
old_id: p
for old_id, p in zip(
chain.from_iterable((g["params"] for g in state_dict["param_groups"])),
chain.from_iterable((g["params"] for g in self.param_groups)),
)
}
for key, value in state_dict["state"].items():
param = id_map[key]
# Populate the sharded optimizer state on the fly,
# remove the params that this rank does not own
if self._param_to_rank[param] != self.rank:
state_dict["state"][key] = {}
else:
self.optim.state[param] = recursive_copy_to_device(value, non_blocking=True, device=param.device)
super().load_state_dict(state_dict)
# Sync with the optimizer param groups
OSS._sync_param_groups(state_dict["param_groups"], self.param_groups)
OSS._sync_param_groups(self.param_groups, self.optim.param_groups)
def refresh_trainable(self) -> None:
"""Updates the partitioning and communication patterns if the trainability (`requires_grad`)
of some parameters changed.
"""
# Make sure that we capture the current default device
self._default_device = list(self._per_device_params.keys())[0]
# Create the optim which will work on the param shard
if not hasattr(self, "optim"):
self._clear_cache()
self.optim = self._optim_constructor(self.partition_parameters()[self.rank], **self._optim_defaults)
OSS._sync_param_groups(self.optim.param_groups, self.param_groups)
self._setup_flat_buffers()
def add_param_group(self, param_group: dict) -> None:
"""Add a param group to the :class:`Optimizer` s `param_groups`.
This can be useful when fine tuning a pre-trained network as frozen layers can be made
trainable and added to the :class:`Optimizer` as training progresses.
Arguments:
param_group (dict): Specifies what Tensors should be optimized along with group
specific optimization options
.. warning: This handles updating the shards on all partitions, but needs to be called on all ranks.
"""
super().add_param_group(param_group)
if not self.in_super_constructor:
# Force a re-partitioning
self._clear_cache()
# Update the partition
param_groups = self.partition_parameters()[self.rank]
if len(param_groups) == len(self.optim.param_groups) + 1:
self.optim.add_param_group(param_groups[-1])
# Update the bucketing strategy accordingly
self._setup_flat_buffers()
@property
def _local_params(self) -> List[torch.Tensor]:
"""Iterable which goes through the parameters that this rank owns"""
if self.__local_params is None:
self.__local_params = list(
chain(
*[
list(filter(lambda x: x.grad is not None, device_params[self.rank]))
for device_params in self._per_device_params.values()
]
)
)
# Make sure that the iterator is not consumed, only expose a copy
return self.__local_params
@property
def _param_to_index(self) -> Dict[int, int]:
"""Hash table in between parameter indices in the global optimizer scheme, and the actual params"""
if len(self.__param_to_index) == 0:
self.__param_to_index = {id(p): i for i, p in enumerate(chain(*(g["params"] for g in self.param_groups)))}
return self.__param_to_index
@property
def _per_device_params(self) -> Dict[torch.device, List[List[Parameter]]]:
"""Sorted list of all the params, first per device then per rank.
Within a list params are sorted per number of elements to allow for an easy bucketing.
"""
if len(self.__per_device_params) == 0:
# Go through all params, log them per device
# The ordering is important here, needs to be the same on all ranks
# so that subsequent broadcast calls match across ranks
for param_group in self.param_groups:
for param in param_group["params"]:
device = param.device
if self.__per_device_params.get(device) is None:
self.__per_device_params[device] = [[] for _ in range(self.world_size)]
self.__per_device_params[device][self._param_to_rank[param]] += [param]
# Sort param_lists by size
for device in self.__per_device_params.keys():
for rank_params in self.__per_device_params[device]:
rank_params.sort(key=lambda x: x.numel())
return self.__per_device_params
@property
def _param_to_rank(self) -> Dict[torch.Tensor, int]:
"""Map the params to the rank which owns them"""
if len(self.__param_rank) == 0:
for rank, param_groups in enumerate(self.partition_parameters()):
for param_group in param_groups:
for param in param_group["params"]:
self.__param_rank[param] = rank
logging.debug("FairScale OSS: Parameters dispatched to ranks %s " % list(self.__param_rank.values()))
return self.__param_rank
def _clear_cache(self) -> None:
self._partition_parameters.clear()
self.__per_device_params.clear()
self.__param_rank.clear()
self.__param_to_index.clear()
self.__local_params = None
@staticmethod
def _sync_param_groups(source: List[Dict[Any, Any]], destination: List[Dict[Any, Any]]) -> None:
"""Sync learning rate and other optimizer attributes (needed to support schedulers)."""
for source_group, destination_group in zip(source, destination):
# Sync everything but the parameters
for k in filter(lambda x: x != "params", source_group.keys()):
destination_group[k] = source_group[k]
@torch.no_grad()
def _broadcast_params(self) -> None:
"""Helper function to broadcast all the parameters from a given device"""
with profiler.record_function("fairscale::oss::refresh_trainable"):
# If NCCL broadcasts are done in an independent stream,
# make sure that prior compute work is complete
if torch.device("cuda").type == self._default_device.type:
for device in self._per_device_params.keys():
torch.cuda.synchronize(device=device)
work_handles = [] # Work handles are consumed within this scope, no callback
# Populate the fp16 shards
if self.broadcast_fp16:
for device in self.buckets.keys():
for dst_rank, bucket in self.buckets[device].items():
bucket.to(dtype=torch.float16, device=device, non_blocking=True, keep_param_alignment=False)
if torch.cuda.is_available():
torch.cuda.synchronize()
# Exchange all the shards with the other ranks
for device in self.buckets.keys():
for dst_rank, bucket in self.buckets[device].items():
work_handles.append(
dist.broadcast(
tensor=bucket.buffer,
src=self._local_to_global_rank[dst_rank],
group=self.group,
async_op=True,
)
)
_ = list(filter(lambda x: x.wait(), work_handles))
# Populate back the fp32 shards
if self.broadcast_fp16:
for device in self.buckets.keys():
for dst_rank, bucket in self.buckets[device].items():
bucket.to(dtype=torch.float32, device=device, non_blocking=True, keep_param_alignment=True)
def _setup_flat_buffers(self) -> None:
"""Make all params which are on the same device and tied to the same rank views of a single buffer.
This is used at construction time, and anytime parameter trainability is changed (frozen or unfrozen) and
`refresh_trainable()` is called.
"""
for device, per_rank_params in self._per_device_params.items():
# Only create the bucket container for this device if it does not exist yet
# (this method can be called twice, when trainability changes)
if device not in self.buckets.keys():
self.buckets[device] = {}
# Make parameters a view of the bucket
for dst_rank, params in enumerate(per_rank_params):
if len(params) > 0:
# Clone the non-trainable params, if in a bucket it will get destroyed
for param in filter(lambda x: not x.requires_grad, params):
param.data = param.data.detach().clone()
# Merge all the trainable params in a single bucket
trainable_params = list(filter(lambda x: x.requires_grad, params))
if trainable_params:
buffer_size = sum(map(lambda x: x.numel(), trainable_params))
bucket = ParamBucket(size=buffer_size, dtype=trainable_params[0].dtype, device=device)
for param in trainable_params:
bucket.add_param(param)
self.buckets[device][dst_rank] = bucket
# Clear the buffer keys which are not in use anymore (could be that the devices changed)
devices_in_use = list(self._per_device_params.keys())
devices_to_pop = list(filter(lambda x: x not in devices_in_use, self.buckets.keys()))
for d in devices_to_pop:
self.buckets.pop(d)
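# Illustrative sketch (not part of the library): minimal usage of OSS following the
# constructor call shown in the class docstring and the `step`/`consolidate_state_dict`
# APIs above. `_demo_oss` is a hypothetical helper; it assumes the caller has already
# initialized a process group (e.g. gloo with world_size=1) before running it.
def _demo_oss() -> None:
    model = torch.nn.Linear(8, 4)
    optimizer = OSS(model.parameters(), optim=SGD, lr=0.01)
    loss = model(torch.randn(2, 8)).sum()
    loss.backward()
    optimizer.step()
    # Gather the sharded optimizer state on rank 0 before asking for a full state_dict.
    optimizer.consolidate_state_dict(recipient_rank=0)
    if optimizer.rank == 0:
        _ = optimizer.state_dict()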
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, auto
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
import torch
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
try:
from fairscale import fused_adam_cuda # type: ignore
class Precision(Enum):
FULL_PRECISION = auto()
MIXED_PRECISION = auto()
MEMORY_EFFICIENT_MIXED_PRECISION = auto()
PURE_FP16 = auto()
class _MultiDeviceReplicator(object):
"""
Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
"""
def __init__(self, master_tensor: torch.Tensor):
assert master_tensor.is_cuda
self.master = master_tensor
self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
def get(self, device: torch.device) -> torch.Tensor:
retval = self._per_device_tensors.get(device, None)
if retval is None:
retval = self.master.to(device=device, non_blocking=True, copy=True)
self._per_device_tensors[device] = retval
return retval
class Adam(torch.optim.Optimizer):
state: dict
defaults: dict
"""
Implements Adam algorithm. Currently GPU-only.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Compared to the original version in Apex, the fairseq version casts grads
and params to FP32 internally to support ``--memory-efficient-fp16``.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
precision (Precision, optional): One of Precision.FULL_PRECISION,
Precision.MIXED_PRECISION, Precision.MEMORY_EFFICIENT_MIXED_PRECISION
or Precision.PURE_FP16. Inferred based on model parameter precision if
None. (default: None)
.. _Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params: _params_t,
lr: Optional[float] = 1e-3,
bias_correction: Optional[bool] = True,
betas: Optional[Tuple[float, float]] = (0.9, 0.999),
eps: Optional[float] = 1e-8,
eps_inside_sqrt: Optional[bool] = False,
weight_decay: Optional[float] = 0.0,
max_grad_norm: Optional[float] = 0.0,
amsgrad: Optional[bool] = False,
precision: Optional[Precision] = None,
):
parameters: List[Any] = list(params)
self.precision = precision
if self.precision is None:
self.precision = (
Precision.FULL_PRECISION if parameters[0].dtype == torch.float32 else Precision.MIXED_PRECISION
)
if self.precision is not Precision.FULL_PRECISION:
assert parameters[0].dtype == torch.float16
self.optim_type = torch.float16 if precision is Precision.PURE_FP16 else torch.float32
self._optim_scale = float(2**16) if precision is Precision.PURE_FP16 else 1.0
self._steps_since_optim_scale_change = 0
self._optim_scale_update_freq = 2000 # This is the value that GradScaler uses by default
self._overflow_buf = torch.cuda.IntTensor([0]) # type: ignore
if amsgrad:
raise RuntimeError("FusedAdam does not support the AMSGrad variant.")
defaults = {
"lr": lr,
"bias_correction": bias_correction,
"betas": betas,
"eps": eps,
"weight_decay": weight_decay,
"max_grad_norm": max_grad_norm,
}
super().__init__(parameters, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
self.fp32_param_groups: List[Any] = []
if self.mixed_precision:
self._build_fp32_params(parameters)
def _build_fp32_params(self, params: Any) -> None:
# create FP32 copy of parameters and grads
fp32_params = []
for p in params:
p32 = torch.nn.Parameter(p.data.float()).to(p.device)
p32.grad = torch.zeros_like(p32.data)
fp32_params.append(p32)
params = fp32_params
self.fp32_param_groups = []
param_groups = list(params)
if not isinstance(param_groups[0], dict):
param_groups = [{"params": param_groups}]
for param_group in param_groups:
params = param_group["params"]
if isinstance(params, torch.Tensor):
param_group["params"] = [params]
else:
param_group["params"] = list(params)
for name, default in self.defaults.items():
param_group.setdefault(name, default)
params = param_group["params"]
param_set = set()
for group in self.param_groups:
param_set.update(set(group["params"]))
self.fp32_param_groups.append(param_group)
@property
def supports_memory_efficient_fp16(self) -> bool:
return True
@property
def _step_supports_amp_scaling(self) -> bool:
return False
@property
def mixed_precision(self) -> bool:
return self.precision is Precision.MIXED_PRECISION
def state_dict(self) -> Dict[str, Any]:
d = super().state_dict()
d["optim_scale"] = self._optim_scale
return d
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
super().load_state_dict(state_dict)
self._optim_scale = state_dict["optim_scale"]
# TODO: Optimizer state gets cast to FP16 and back to FP32 for
# mixed-precision and memory-efficient mixed-precision. Eventually
# we want to fix this, as some precision may be lost
for group in self.param_groups:
for p in group["params"]:
self.state[p]["exp_avg"] = self.state[p]["exp_avg"].type(self.optim_type)
self.state[p]["exp_avg_sq"] = self.state[p]["exp_avg_sq"].type(self.optim_type)
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
.. note: unlike the original Apex FusedAdam, this ``step`` does not take explicit
``grads``, ``output_params`` or ``scale`` arguments; only ``closure`` is accepted.
"""
loss = None
if closure is not None:
loss = closure()
for i in range(len(self.param_groups)):
group = self.param_groups[i]
bias_correction = 1 if group["bias_correction"] else 0
tensorlists: Dict[torch.device, List[List[torch.Tensor]]] = dict()
for j in range(len(group["params"])):
p = group["params"][j]
# note: p.grad should not ever be set for correct
# operation of mixed precision optimizer that sometimes
# sends None gradients
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"FusedAdam does not support sparse gradients, " "please consider SparseAdam instead"
)
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p, dtype=self.optim_type)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p, dtype=self.optim_type)
exp_avg = state["exp_avg"]
exp_avg_sq = state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
out_p = p.data if self.mixed_precision else torch.tensor([])
param = self.fp32_param_groups[i]["params"][j] if self.mixed_precision else p
scale = 1.0
if self.mixed_precision:
pl = [param.data, exp_avg, exp_avg_sq, grad, out_p]
if p.device not in tensorlists:
tensorlists[p.device] = [[], [], [], [], []]
for tl, t in zip(tensorlists[p.device], pl):
tl.append(t)
else:
pl = [param.data, exp_avg, exp_avg_sq, grad]
if p.device not in tensorlists:
tensorlists[p.device] = [[], [], [], []]
for tl, t in zip(tensorlists[p.device], pl):
tl.append(t)
found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=list(tensorlists.keys())[0])
per_device_found_inf = _MultiDeviceReplicator(found_inf)
for tensordevice, tensorlist in tensorlists.items():
with torch.cuda.device(tensordevice):
fused_adam_cuda.adam(
2048 * 32,
self._overflow_buf,
tensorlist,
group["lr"],
beta1,
beta2,
group["eps"],
scale,
self._optim_scale,
per_device_found_inf.get(tensordevice),
state["step"],
self.eps_mode,
bias_correction,
group["weight_decay"],
)
if sum(v.item() for v in per_device_found_inf._per_device_tensors.values()):
self._steps_since_optim_scale_change = 0
self._optim_scale /= 2
if self._optim_scale < 1.0:
raise RuntimeError("Optimizer state scale < 1. This may mean that gradients are exploding")
for group in self.param_groups:
for p in group["params"]:
self.state[p]["exp_avg"] = torch.zeros_like(p, dtype=self.optim_type)
self.state[p]["exp_avg_sq"] = torch.zeros_like(p, dtype=self.optim_type)
else:
self._steps_since_optim_scale_change += 1
if self._steps_since_optim_scale_change == self._optim_scale_update_freq:
self._steps_since_optim_scale_change = 0
if self._optim_scale < 2**16:
self._optim_scale *= 2
return loss
except ImportError:
pass
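# Illustrative sketch (not part of the library): minimal, guarded usage of the fused Adam
# defined above. `_demo_fused_adam` is a hypothetical helper name; the body only runs when
# the `fused_adam_cuda` extension imported successfully and a GPU is present.
def _demo_fused_adam() -> None:
    if "Adam" not in globals() or not torch.cuda.is_available():
        return
    model = torch.nn.Linear(8, 4).cuda().half()
    optimizer = Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-8)
    loss = model(torch.randn(2, 8, device="cuda", dtype=torch.float16)).float().sum()
    loss.backward()
    optimizer.step()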
|
import logging
from typing import List, Tuple
import torch
import torch.nn as nn
class LayerInfo:
"""
A class to record the layer attributes.
"""
def __init__(self, name: str, layer: nn.Module, scale: float = 1.0, scale_layer: bool = False) -> None:
"""
layer_name: name of the layer, e.g. fc1, conv1, relu1
layer: the layer module, e.g. Linear, Conv2d, ReLU
scaling_factor: user configurable scaling factor for the layer, defaults to 1.0
found_inf_or_nan: a boolean indicating if any parameter of the layer's gradient contains an inf/nan
growth_tracker: tracks the number of steps since the last time the scale was increased
scale_layer: a boolean indicating if the layer should be scaled or not
"""
self.layer_name = name
self.layer = layer
self.scaling_factor = scale
self.found_inf_or_nan = False
self.growth_tracker = 0
self.scale_layer = scale_layer
class GradientHelper:
"""
A helper class to create instances of backward hooks. The hooks are registered in the
scale method of LayerwiseGradientScaler.
"""
def __init__(self, name: str, inputs_multiplier: float, outputs_multiplier: float):
self.layer_name = name
self.inputs_multiplier = inputs_multiplier
self.outputs_multiplier = outputs_multiplier
def scale_gradients(self, m: nn.Module, inputs: Tuple, outputs: Tuple) -> Tuple[torch.Tensor]:
"""
Backward hook that is attached to the layers to scale the gradients.
"""
scaled_up_grads = list()
for idx in range(len(inputs)):
if inputs[idx] is not None:
if self.inputs_multiplier != 1.0 or self.outputs_multiplier != 1.0:
logging.debug(
"layer = %s \t scale = %s \t scale_down = %s"
% (self.layer_name, self.inputs_multiplier, self.outputs_multiplier)
)
scaled_up_grads.append(inputs[idx].mul(self.inputs_multiplier * self.outputs_multiplier))
else:
logging.debug("next layer is None")
scaled_up_grads.append(inputs[idx])
return tuple(scaled_up_grads) # type: ignore
class LayerwiseGradientScaler:
"""
LayerwiseGradientScaler enables using distinct scaling factors for each layer
of the network.
Example:
# Create a convolutional network
class ConvNet(nn.Module):
def __init__(self):
...
def forward(self, x):
...
# Create an instance of the model
model = ConvNet()
optimizer = torch.optim.SGD(model.parameters())
# specify the layers to scale and their scaling factor
layer_scale_dict = {"conv1": 2**10, "conv2": 2**8, "fc1": 2**10, "fc2": 2**9}
scaler = LayerwiseGradientScaler(model, layer_scale_dict)
for epoch in range(num_epochs):
    for inputs, targets in dataloader:
        optimizer.zero_grad()
        # scale the gradients
        scaler.scale()
        # enables mixed precision training
        with autocast():
            predictions = model(inputs)
            loss = loss_function(predictions, targets)
        loss.backward()
        # unscale the gradients
        scaler.unscale()
        # a step is taken if there are no inf/nan in the gradients
        # and the scaling factor for each layer is updated
        scaler.step(optimizer)
Args:
model : instance of a Model class, such as ConvNet above
layer_scale_dict (dict) : dictionary with key = layer_name and value = scaling_factor
growth_factor (float) : per layer scaling factor multiplier
backoff_factor (float) : per layer scaling factor multiplier when an inf/nan is found
growth_interval (int) : number of steps after which scale is multiplied by growth_factor
min_scale (float) : smallest allowed scaling factor
max_scale (float) : largest allowed scaling factor
"""
def __init__( # type: ignore
self,
model,
layer_scale_dict: dict,
growth_factor: float = 2.0,
backoff_factor: float = 0.5,
growth_interval: int = 10000,
min_scale: float = torch.finfo(torch.float32).tiny, # type: ignore
max_scale: float = torch.finfo(torch.float32).max, # type: ignore
) -> None:
self._model = model
self._layer_scale_dict: dict = layer_scale_dict
self._growth_factor: float = growth_factor
self._backoff_factor: float = backoff_factor
self._growth_interval: int = growth_interval
self._apply_layerwise_scaling: bool = True if len(layer_scale_dict.keys()) > 0 else False
self._min_scale = min_scale
self._max_scale = max_scale
self._handles: List = []
self.layer_info: List = []
if self._apply_layerwise_scaling:
assert self._growth_factor > 1.0, "The growth factor must be > 1.0."
assert self._backoff_factor < 1.0, "The backoff factor must be < 1.0."
self.layer_info = self._build_layer_info()
def _build_layer_info(self) -> List:
"""
Helper function to create a list of LayerInfo instances.
"""
layer_info_list = list()
for name, layer in self._model.named_modules():
if name != "":
if name not in self._layer_scale_dict.keys():
logging.debug("name = %s, layer = %s, scaling_factor = %s" % (name, layer, 1.0))
layer_info_list.append(LayerInfo(name, layer, 1.0))
else:
logging.debug(
"name = %s, layer = %s, scaling_factor = %s" % (name, layer, self._layer_scale_dict[name])
)
layer_info_list.append(LayerInfo(name, layer, self._layer_scale_dict[name], True))
return layer_info_list
def scale(self) -> None:
"""
For each layer, compute an inputs multiplier (the preceding layer's scaling factor, or 1.0
for the first layer) and an outputs multiplier (the reciprocal of the layer's own scaling
factor); their product is applied to the layer's grad inputs by a full backward hook
registered here. The handle returned from registering the backward hook is appended to a
list of handles. New hooks are created and registered at every step, and the handles are
flushed out in the unscale function.
"""
if not self._apply_layerwise_scaling:
return
for idx in range(len(self.layer_info)):
elt = self.layer_info[idx]
layer_name, layer = elt.layer_name, elt.layer
inputs_multiplier = 1.0
if idx > 0:
inputs_multiplier = self.layer_info[idx - 1].scaling_factor
outputs_multiplier = 1.0 / elt.scaling_factor
helper = GradientHelper(layer_name, inputs_multiplier, outputs_multiplier)
layer_handle = layer.register_full_backward_hook(helper.scale_gradients)
self._handles.append(layer_handle)
logging.debug("name = %s \t scale = %s" % (layer_name, elt.scaling_factor))
def _get_layers_with_finite_values(self) -> List[LayerInfo]:
layers_with_finite_values: List = []
for item in self.layer_info:
if not item.found_inf_or_nan:
layers_with_finite_values.append(item)
return layers_with_finite_values
def unscale(self) -> None:
"""
For each layer, check if any of the layer's parameter gradients contain an inf/nan.
If there is no inf/nan in the gradients, then the gradients of that layer are
multiplied by the reciprocal of the scaling factor for that layer.
Finally, all handles recorded while registering the hooks are deleted.
"""
if not self._apply_layerwise_scaling:
return
layers_with_finite_values = self._get_layers_with_finite_values()
for item in layers_with_finite_values:
for param_name, param in item.layer.named_parameters():
if hasattr(param, "grad") and param.grad is not None:
logging.debug("%s scaling down %s by %s" % (item.layer_name, param_name, 1.0 / item.scaling_factor))
param.grad.mul_(1.0 / item.scaling_factor)
while len(self._handles) > 0:
elt = self._handles.pop()
elt.remove()
def _check_for_inf_or_nan(self) -> None:
"""
For each layer, check if any of the parameters with a gradient attribute
contain an inf/nan. If any of the parameters' gradient contain an inf/nan,
then that layer's found_inf_or_nan attribute is set to True and all
remaining parameters for that layer are skipped.
"""
for elt in self.layer_info:
elt.found_inf_or_nan = False
for _, param in elt.layer.named_parameters():
if hasattr(param, "grad") and param.grad is not None:
if torch.isinf(param.grad).any().item() or torch.isnan(param.grad).any().item(): # type: ignore
elt.found_inf_or_nan = True
break # skip all remaining named parameters
def step(self, optimizer) -> None: # type: ignore
"""
If there is no inf/nan in the gradients of any layer, then the optimizer
takes a step; otherwise it does not. The scaling factor of each layer is then updated.
"""
# using layerwise gradient scaling
if self._apply_layerwise_scaling:
self._check_for_inf_or_nan()
inf_nan_found = any(elt.found_inf_or_nan for elt in self.layer_info)
if not inf_nan_found:
optimizer.step()
self._update_scale()
# not using layerwise gradient scaling
else:
optimizer.step()
def _update_scale(self) -> None:
"""
For each layer, if an inf/nan is found, then multiply the scaling factor
of that layer by the backoff factor and set the growth tracker of that
layer to 0. Else, increment the growth tracker of the layer. If growth
tracker equals the growth interval, then multiply the scaling factor of
the layer by the growth factor and reset the layer's growth tracker to 0.
Finally, clip the scaling factor to the range
[self._min_scale, self._max_scale]. The min/max scaling
factor values are user configurable.
"""
if not self._apply_layerwise_scaling:
return
for layer in self.layer_info:
if layer.found_inf_or_nan:
if layer.scale_layer:
layer.scaling_factor = max(
self._min_scale,
min(self._backoff_factor * layer.scaling_factor, self._max_scale),
)
layer.growth_tracker = 0
else:
layer.growth_tracker += 1
if layer.scale_layer and layer.growth_tracker == self._growth_interval:
layer.scaling_factor = max(
self._min_scale,
min(self._growth_factor * layer.scaling_factor, self._max_scale),
)
layer.growth_tracker = 0
def get_layer_info(self) -> List[LayerInfo]:
"""
Returns a list of LayerInfo instances of the model.
"""
return self.layer_info
def get_backward_hooks(self) -> List:
"""
Returns a list of tuples. Each tuple contains the layer name and the
hook attached to it.
"""
layer_name_and_hooks = list()
for name, layer in self._model.named_modules():
if name != "":
layer_name_and_hooks.append((name, layer._get_backward_hooks()))
return layer_name_and_hooks
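# Illustrative sketch (not part of the library): a minimal end-to-end use of
# LayerwiseGradientScaler following the docstring example above. `_TinyNet` and
# `_demo_layerwise_scaling` are hypothetical names used only for this sketch.
class _TinyNet(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.fc1 = nn.Linear(8, 8)
        self.fc2 = nn.Linear(8, 2)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.fc2(torch.relu(self.fc1(x)))
def _demo_layerwise_scaling() -> None:
    model = _TinyNet()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scaler = LayerwiseGradientScaler(model, {"fc1": 2**10, "fc2": 2**9})
    optimizer.zero_grad()
    scaler.scale()  # register the per-layer backward hooks for this step
    loss = model(torch.randn(4, 8)).sum()
    loss.backward()
    scaler.unscale()  # divide each layer's grads by its scaling factor and drop the hooks
    scaler.step(optimizer)  # steps only if no inf/nan was found, then updates the factors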
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2020 Petuum, Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of Petuum, Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type
import numpy as np
import torch
from torch.autograd import Variable
import torch.distributed as dist
from torch.optim import SGD, Optimizer
if TYPE_CHECKING: # pragma: no cover
from torch.optim.optimizer import _params_t
else:
_params_t = Any
class AdaScale(Optimizer):
"""
Implements the AdaScale_ algorithm for scaling the learning rate for
distributed and large batch size training. Can be used in combination with
``torch.nn.parallel.DistributedDataParallel`` and ``torch.optim.SGD``.
.. _AdaScale: https://proceedings.icml.cc/static/paper_files/icml/2020/4682-Supplemental.pdf
This class subclasses `Optimizer` so that `torch.optim.lr_scheduler` can
work with it. In other words, AdaScale is intended to be a complete wrapper of an
torch Optimizer.
Note that, AdaScale does *not* help increase per-GPU batch size.
There are several ways to integrate AdaScale with your training loop.
We show two examples below.
Example 1: using PyTorch's `lr_scheduler` classes.
.. code-block:: python
optim = AdaScale(SGD(model.parameters(), lr=0.001))
model = DistributedDataParallel(model)
scheduler = LambdaLR(optim, lr_lambda=...)
last_epoch = 0
done = False
step = 0
while not done:
for batch in dataset:
optim.zero_grad()
logits = model()
loss = criterion(logits, ...)
loss.backward()
step += optim.gain()
optim.step()
epoch = step // len(dataset)
if epoch > last_epoch:
scheduler.step()
last_epoch = epoch
if epoch >= MAX_EPOCHS:
done = True
Example 2: using a custom `update_lr()` function that updates the learning
rate based on the current step count per epoch.
.. code-block:: python
optim = AdaScale(SGD(model.parameters(), lr=0.001))
model = DistributedDataParallel(model)
step = 0
while step < max_steps:
for batch in ...:
optim.zero_grad()
logits = model()
loss = criterion()
loss.backward()
step += optim.gain()
optim.step()
update_lr(step)
Args:
optimizer (torch.optim.Optimizer):
Optimizer to apply AdaScale to.
world_size (int):
Number of workers (world size) used for distributed training.
If None, defaults to ``dist.get_world_size()``.
scale (float):
Scaling factor of the batch size relative to scale equals 1, e.g. using a 10x
larger batch size (summed across all ranks with gradient accumulation)
means a scale of 10.
If None, defaults to ``world_size * num_gradients_to_accumulate``.
smoothing (float):
Smoothing factor for moving average.
If None, it defaults to ``max(1 - (world_size * num_gradients_to_accumulate)/1000, 0)``.
Note, for very high scale training, a higher smoothing value might be needed,
especially at the beginning of training. Therefore, if your scale is close to or larger
than 1000, try experimenting with a smoothing value > 0 if the final accuracy is poor.
num_gradients_to_accumulate (int):
Number of passes that we accumulate gradients locally
between each optimizer step. This can be changed during
training as long as the train loop changes gradient accumulation
accordingly.
The loss in each pass can be either scaled or unscaled. See `is_scaled_loss` below.
Default to 1, which does not accumulate gradients.
is_scaled_loss (bool):
If True, assume that the loss is scaled by `num_gradients_to_accumulate`.
If False, the loss is not scaled.
Default: True.
debias_ewma (bool):
(experimental) Use a debiased exponential moving average
for smoothing the mu and sigma variables. If False,
the method from the paper's Appendix B.3 is used.
Default: True, which is what has been validated so far.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
world_size: Optional[int] = None,
scale: Optional[float] = None,
smoothing: Optional[float] = None,
num_gradients_to_accumulate: int = 1,
is_scaled_loss: bool = True,
debias_ewma: bool = True,
):
# Init hook_handles list, otherwise, a partial init'ed object may fail in ``__del__``.
self._hook_handles: List[Any] = []
# Init other fields.
self._optimizer = optimizer
self._local_grad_sqr: Optional[torch.Tensor] = None
self._world_size: int = (
world_size if world_size is not None else dist.get_world_size() if dist.is_initialized() else 1
)
self._num_backward_calls = 0
self._last_final_backward_call = 0
self._num_grads_to_accum = num_gradients_to_accumulate
self._debias_ewma = debias_ewma
self._is_scaled_loss = is_scaled_loss
# Proxy the param_groups so that `torch.optim.lr_scheduler` can work.
self.param_groups = self._optimizer.param_groups
self.set_num_gradients_to_accumulate(num_gradients_to_accumulate, update_smoothing=True)
# The previous function call sets smoothing to its default value.
# Override that here if smoothing was passed as an argument.
if smoothing is not None:
self._smoothing = smoothing
if self._world_size * self._num_grads_to_accum <= 1:
# gain will be NaN since we will be dividing by zero in paper's B.3 where (S-1) == 0.
raise RuntimeError("AdaScale does not support a single worker without grad accumulation.")
# Per-param-group sqr & var states (sigma^2 & mu^2 in the paper).
self._optimizer.state.setdefault(
"adascale",
{
"grad_sqr_avg": np.ones(len(optimizer.param_groups)),
"grad_var_avg": np.zeros(len(optimizer.param_groups)),
},
)
self._scale = 1.0 # Assign to inform mypy about the typing of this variable.
self.set_scale(self._world_size * self._num_grads_to_accum if scale is None else scale)
# Safer to register hooks after all init actions are done.
self._hook()
def _hook(self) -> None:
"""Internal function to register the gradient hooks.
Note, don't assume every parameter will generate a gradient (i.e. triggering the hook)
in every backward pass, which is the reason that the ``find_unused_parameters`` flag
exists on the DDP class in ``torch.nn.parallel``.
"""
assert self._hook_handles == [], "Must run unhook first"
for idx, param_group in enumerate(self._optimizer.param_groups):
for param in param_group["params"]:
h = param.register_hook(functools.partial(self._backward_hook, idx))
self._hook_handles.append(h)
def __del__(self) -> None:
"""Unhook in case caller forgets to call unhook.
This however may not "work" since there would be a circular reference
between the hook objects and this object. In that case, neither will
get GC'ed. Call unhook() explicitly if you really want to delete
AdaScale from memory.
"""
self.unhook()
def unhook(self) -> None:
"""Unregister hook handles.
This is public because the caller may need to call this to ensure all GPU
memory is released. Otherwise, the hooks may prevent parameters from being
released from the GPU memory pool.
Internally, we use this to support ``add_param_group()`` API.
"""
for h in self._hook_handles:
h.remove()
self._hook_handles = []
@property
def _state(self) -> Dict[str, np.ndarray]:
"""
Return the states of AdaScale.
"""
return self._optimizer.state["adascale"]
@property
def scale(self) -> float:
"""
The scaling factor of the current batch size, relative to the baseline
batch size, which could be a DDP training. For example, if the
baseline batch size is 32 on 2 GPUs, but using a scaled-up batch size
of 80 on 4 GPUs, then the scaling factor is 80 * 4 / 32 / 2 = 5.
This API is exposed mainly for logging purposes. Note, this is different
from ``self.gain()``.
Returns:
(float):
The current scaling factor.
"""
return self._scale
@property
def smoothing(self) -> float:
"""
The smoothing constant used in exponentially-weighted moving average
tracking the gradient norm mean and variance within AdaScale.
This API is exposed since the value is computed internally and the caller may
want to obtain and log it.
Returns:
(float):
The current smoothing value.
"""
return self._smoothing
def set_scale(self, scale: float, update_estimate: bool = True) -> None:
"""
Set the scaling factor of the current batch size. It is up to the
application to invoke this function to make sure that AdaScale's
scaling factor matches the actual batch size used during training.
Args:
scale (float):
New scaling factor to be applied to AdaScale.
update_estimate (bool):
Whether to update the scale-dependent estimate of gradient
variance; this is highly recommended. (default: True)
"""
assert self._local_grad_sqr is None, "Don't change scale in backward phase"
assert scale >= 1, "Scale must be at least 1"
if update_estimate and hasattr(self, "_scale"):
assert self._scale >= 1, "bug: old scale isn't valid"
# Rescale grad_var_avg to account for the change in scale
if self._debias_ewma and "grad_var_avg_biased" in self._state:
self._state["grad_var_avg_biased"] *= self._scale / scale
elif "grad_var_avg_total" in self._state: # _debias_ewma==False
self._state["grad_var_avg_total"] *= self._scale / scale
self._state["grad_var_avg"] *= self._scale / scale
self._scale = scale
def _grad_sqr_avg(self, pg_idx: Optional[int] = None) -> float:
"""
Current estimate of the squared l2-norm of the true gradient
(sigma squared in the AdaScale paper).
Args:
pg_idx (Optional[int]):
Optional index for a parameter group.
Returns:
(float):
Estimate of squared l2-norm.
"""
if pg_idx is not None:
return self._state["grad_sqr_avg"][pg_idx]
else:
return float(np.sum(self._state["grad_sqr_avg"]))
def _grad_var_avg(self, pg_idx: Optional[int] = None) -> float:
"""
Current estimate of the trace of the covariance of the true gradient
(mu squared in the AdaScale paper).
Args:
pg_idx (Optional[int]):
Optional index for a parameter group.
Returns:
(float):
Estimate of trace of the covariance.
"""
if pg_idx is not None:
return self._state["grad_var_avg"][pg_idx]
else:
return float(np.sum(self._state["grad_var_avg"]))
def gain(self, pg_idx: Optional[int] = None) -> float:
"""
Current estimate of the AdaScale gain ratio (r_t in the paper).
Args:
pg_idx (int):
Optional index of a parameter group.
Default None: returns "averaged" gain for all groups.
Returns:
(float):
Estimate of gain ratio.
"""
var = self._grad_var_avg(pg_idx)
sqr = self._grad_sqr_avg(pg_idx)
gain = (var + sqr) / (var / self.scale + sqr)
return gain
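# Worked example of the gain formula above (illustrative only): with grad_var_avg = 4.0,
# grad_sqr_avg = 1.0 and scale = 8, the gain is (4.0 + 1.0) / (4.0 / 8 + 1.0) = 5.0 / 1.5,
# about 3.33, i.e. one large-batch step is counted as roughly 3.33 baseline steps by a
# training loop that does `step += optim.gain()`.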
def _update_avg(self, name: str, value: np.ndarray, factor: float) -> None:
if self._debias_ewma:
# This function computes and stores the moving average of a vector
# using a smoothing factor.
biased = self._state.get(name + "_biased", np.zeros(value.shape[0]))
unbias = self._state.get(name + "_unbias", np.zeros(value.shape[0]))
biased = factor * biased + (1.0 - factor) * value
unbias = factor * unbias + (1.0 - factor)
self._state[name + "_biased"] = biased
self._state[name + "_unbias"] = unbias
self._state[name] = biased / unbias
else:
# Moving average procedure described in Appendix B.3
# For iterations t < 1 / (1 - smoothing) define grad_var_avg
# and grad_sqr_avg as mean of the past samples. After that
# start using running average.
#
# Note: we only keep a single _count for all parameter groups.
# Ideally, it should be a vector, in case a PG is added
# after some iterations are done. But then the if condition
# below would need to be an np.where. I leave this corner
# case as a future exercise.
count = self._state.get(name + "_count", np.zeros(1))
count[0] += 1
self._state[name + "_count"] = count
if count < 1 / (1 - self._smoothing):
total = self._state.get(name + "_total", None)
if total is None:
total = value
else:
total += value
self._state[name + "_total"] = total
self._state[name] = total / count
else:
self._state[name] = factor * self._state[name] + (1.0 - factor) * value
def _gather_flat_grad(self) -> torch.Tensor:
"""
Helper function for gathering all gradients into a single vector.
Duplicated from torch.optim.lbfgs.
"""
def _to_flat_view(p: torch.Tensor) -> torch.Tensor:
"""
Local helper function for _gather_flat_grad.
Returns a flattened view of the input tensor.
"""
if p.grad is None:
return p.new(p.numel()).zero_() # type: ignore
elif p.grad.is_sparse: # type: ignore
return p.grad.to_dense().view(-1)
else:
return p.grad.view(-1)
views = [_to_flat_view(p) for param_group in self._optimizer.param_groups for p in param_group["params"]]
return torch.cat(views, 0)
def _compute_intra_grad_corr_mean(self) -> torch.Tensor:
"""
Helper function for computing average intra correlation among gradients on different GPUs.
This should be called under `model.no_sync()` context.
"""
assert self._world_size > 1, "Only for distributed training"
flat_grad = self._gather_flat_grad()
corr_mean = torch.tensor(0.0).cuda()
if dist.get_rank() == 0:
size = flat_grad.numel()
gathered_tensors = [torch.zeros(size, device=0) for _ in range(self._world_size)]
dist.gather(flat_grad, gather_list=gathered_tensors, dst=0)
# the following requires torch 1.10+
corr = torch.stack(gathered_tensors).corrcoef() # type: ignore
# pick out the upper triangular part of the correlation matrix
corr = corr[torch.triu(torch.ones_like(corr), diagonal=1) == 1]
corr_mean = corr.mean()
else:
dist.gather(flat_grad, gather_list=None, dst=0)
dist.broadcast(corr_mean, src=0)
return corr_mean
def _backward_hook(self, pg_idx: int, grad: torch.Tensor) -> None:
# This method should be invoked once for each parameter during the
# backward pass, before gradients are synchronized between workers.
# Store the local gradient square sums in a vector.
# This vector is also used for error checking. Whenever it is not None,
# it means that we are in backward pass.
if self._local_grad_sqr is None:
self._local_grad_sqr = torch.zeros(
len(self._optimizer.param_groups),
device=grad.device,
requires_grad=False,
)
self._local_grad_sqr[pg_idx] += grad.pow(2).sum()
# Now, ensure we queue a callback at the end of the callback queue.
# This will fire after all gradient callbacks are done (esp. those
# queued by DDP).
self._final_callback_queued = False
Variable._execution_engine.queue_callback(self._queue_callback)
def _queue_callback(self) -> None:
# This method should be invoked after the entire backward pass. We want
# to make sure self._final_callback is invoked once, only after all
# gradients have been synchronized between each worker. However, the
# synchronization code in DistributedDataParallel is also done in a
# callback, which might not yet be executed. Therefore, we enqueue
# self._final_callback from this method, which should ensure it is
# invoked after the gradient synchronization callback.
if self._final_callback_queued:
return
self._final_callback_queued = True
Variable._execution_engine.queue_callback(self._final_callback)
def _final_callback(self) -> None:
# This method should be invoked once for each backward pass, after
# gradients have been synchronized between each worker, unless we
# are in gradient accumulation mode, where grads are not all_reduced
# between the GPUs.
self._final_callback_queued = False
assert isinstance(self._local_grad_sqr, torch.Tensor)
# Keep track of number of backward calls for gradient accumulation.
# TODO (min): this may not work with activation checkpointing when
# multiple backward calls happen in a big backward.
self._num_backward_calls += 1
# TODO (min, mike): We need to have a way to check that training loop & DDP
# is doing the right thing where the gradient is reduced
# in this backward pass.
# Longer term, we may compute the gain and then inform
# the training loop when it is a good time to step().
assert (
self._num_backward_calls - self._last_final_backward_call
) <= self._num_grads_to_accum, (
f"bug: {self._num_backward_calls} - {self._last_final_backward_call} should <= {self._num_grads_to_accum}"
)
if (self._num_backward_calls - self._last_final_backward_call) % self._num_grads_to_accum != 0:
assert self._local_grad_sqr is not None, "We should still be in backward phase"
return
# Since self._local_grad_sqr is FP32, sum shouldn't overflow.
# This vector has length of # of param_groups, so it is small, but we
# use async to hide the all_reduce latency, esp when # of nodes is large.
work = None
if self._world_size > 1:
work = dist.all_reduce(self._local_grad_sqr, async_op=True) # SUM
# Compute the sums of squares for reduced gradients.
# Divide by _num_grads_to_accum since the gradients are accumulated.
total_grad_sqr = np.array(
[sum(param.grad.pow(2).sum().item() for param in group["params"]) for group in self._optimizer.param_groups]
)
# Wait for all_reduce to be done and move it to cpu & np.
if work:
work.wait()
local_grad_sqr = self._local_grad_sqr.cpu().numpy()
if self._num_grads_to_accum > 1:
# Handle scaling for gradient accumulation.
if self._is_scaled_loss:
# If loss is scaled down, we need to scale the local gradients back by a factor of _num_grads_to_accum squared;
# total_grad_sqr is already scaled by _num_grads_to_accum squared.
local_grad_sqr *= self._num_grads_to_accum**2
else:
# If loss is not scaled, local gradients are correct, but we need to scale the total_grad_sqr down to account for gradient accumulation.
total_grad_sqr /= self._num_grads_to_accum**2
# See appendix B.3 of the paper.
# Modified to handle cases where scale != world_size
#
# local_grad_sqr is \sum_{i=1}^{c N} \norm{g_t_i}^2
# where N is world size and c is num_grads_to_accum
# total_grad_sqr is \norm{\bar{g}_t}^2
S = self._scale
cN = self._world_size * self._num_grads_to_accum
grad_var = local_grad_sqr * (S / cN) / (cN - 1) - total_grad_sqr * S / (cN - 1)
grad_sqr = total_grad_sqr - grad_var / S
grad_var = np.maximum(grad_var, 1e-6)
grad_sqr = np.maximum(grad_sqr, 0.0)
self._update_avg("grad_sqr_avg", grad_sqr, self.smoothing)
self._update_avg("grad_var_avg", grad_var, self.smoothing)
self._last_final_backward_call = self._num_backward_calls
# Indicating backward is done.
self._local_grad_sqr = None
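# A rough numerical sketch of the estimator above (made-up values, with S = cN = 4):
# if the per-rank squared norms sum to local_grad_sqr = 8.0 and the reduced
# gradient has total_grad_sqr = 1.5, then
#   grad_var = 8.0 * (4 / 4) / 3 - 1.5 * 4 / 3 = 8/3 - 2 = 2/3
#   grad_sqr = 1.5 - (2/3) / 4 = 4/3
# which later yields gain = (2/3 + 4/3) / ((2/3) / 4 + 4/3) = 2.0 / 1.5 ~= 1.33.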
def step(self, *args: Any, **kwargs: Any) -> Optional[float]:
"""
Run one optimizer step using AdaScale. Essentially just invokes
``optimizer.step(*args, **kwargs)`` with a scaled learning rate.
.. note::
It is possible that this function becomes a performance
bottleneck if you have frequent updates. To avoid that,
making bigger steps and reducing the update frequency is
generally better for performance.
Args:
args (Any):
Positional arguments passed to ``optimizer.step``.
kwargs (Any):
Keyword arguments passed to ``optimizer.step``.
Returns:
(Tensor):
The loss tensor if a closure is used to re-evaluate the model.
"""
assert self._local_grad_sqr is None, "Don't step without finishing backward phase"
# Save the original LR and set the new, gain-scaled LR.
original_lr = []
for idx, param_group in enumerate(self._optimizer.param_groups):
original_lr.append(param_group["lr"])
param_group["lr"] *= self.gain(pg_idx=idx)
# Step it.
res = self._optimizer.step(*args, **kwargs)
# Restore the original LR.
for lr, param_group in zip(original_lr, self._optimizer.param_groups):
param_group["lr"] = lr
return res
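# A minimal usage sketch of the step/backward interplay (assuming `model`, `loss_fn`,
# a `loader` of (data, target) batches and `adascale = AdaScale(SGD(model.parameters(), lr=...))`
# exist; illustrative only, not part of this class):
#
#   for data, target in loader:
#       adascale.zero_grad()
#       loss = loss_fn(model(data), target)
#       loss.backward()          # AdaScale's hooks collect gradient statistics here
#       adascale.step()          # LR is multiplied by the gain, then restored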
def add_param_group(self, pg: Dict) -> None:
"""Support adding parameter groups
We need to re-size some of the state and re-register the backward hooks.
"""
assert self._local_grad_sqr is None, "Can't add parameter group during backward"
self._optimizer.add_param_group(pg)
# Update the hooks.
self.unhook()
self._hook()
# Extend the states.
for name in self._state.keys():
assert name.startswith("grad_sqr_avg") or name.startswith("grad_var_avg"), name
if name.endswith("_count"):
# This is the "_count" variable, should be a 1D int.
assert self._state[name].shape == (1,), self._state[name].shape
continue
# Must be an np array; extend it with the right value and check the shape.
val = 1 if name == "grad_sqr_avg" else 0
self._state[name] = np.append(self._state[name], val) # type: ignore
assert self._state[name].shape == (len(self._optimizer.param_groups),)
def zero_grad(self) -> None:
"""Proxy function to optimizer, because some training loops need this."""
assert self._local_grad_sqr is None, "Don't zero_grad in backward"
return self._optimizer.zero_grad()
def state_dict(self) -> Dict:
"""Proxy function to optimizer, checkpointing needs this.
.. note::
Do NOT checkpoint in the middle of gradient accumulation since
associated AdaScale internal states are not saved in the checkpoint.
"""
assert self._local_grad_sqr is None, "Don't checkpoint in backward"
return self._optimizer.state_dict()
def load_state_dict(self, data: Dict) -> None:
"""Proxy function to optimizer, checkpointing needs this.
.. note::
Do NOT checkpoint in the middle of gradient accumulation since
associated AdaScale internal states are not saved in the checkpoint.
"""
assert self._local_grad_sqr is None, "Don't load checkpoint in backward"
return self._optimizer.load_state_dict(data)
def set_num_gradients_to_accumulate(
self,
num_gradients_to_accumulate: int,
update_smoothing: bool = True,
) -> None:
"""Set the number of gradients to accumulate to a new value.
This is experimental. This could be called while training so that
we can gradually increase the steps between updates. Almost always,
`set_scale` needs to be called to update the scale as well.
TODO (min): need a way to determine how much to increase the step size?
TODO (min): having both `set_scale` and `set_num_gradients_to_accumulate`
is hard to use and error-prone. I think it is better
to specify a `base_scale`. But more discussion is
needed here.
Args:
num_gradients_to_accumulate (int):
Number of gradients to accumulate (calls to backward) between
each optimizer step
update_smoothing (bool):
Whether to update smoothing factor or not. Default: True.
"""
assert self._local_grad_sqr is None, "Don't change num_grad_to_accum in backward"
assert num_gradients_to_accumulate >= 1, f"Invalid value {num_gradients_to_accumulate}"
self._num_grads_to_accum = num_gradients_to_accumulate
if update_smoothing:
# Set smoothing based on effective world_size rather than scale here,
# since world_size determines the number of samples being averaged over
# at every update.
#
# When effective world size is large enough, smoothing is probably
# not needed, so the smoothing factor is 0.
self._smoothing = max(1 - self._world_size * self._num_grads_to_accum / 1000, 0)
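# For example (illustrative numbers), with 8 workers and 4 gradient accumulation
# steps the effective world size is 32, so smoothing = max(1 - 32 / 1000, 0) = 0.968;
# with 2000 effective workers the formula bottoms out at 0 and no smoothing is applied.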
def scale_grad_by_num_grads_to_accum(self) -> None:
"""Scale the gradient down by the number of gradients to accumulate.
This should be called after the gradient accumulation is done and the unscaled loss is used.
"""
assert self._local_grad_sqr is None, "Only call this after backward"
assert self._num_grads_to_accum > 1, "Must be accumulating gradients"
assert not self._is_scaled_loss, "Must use unscaled loss"
for group in self._optimizer.param_groups:
for param in group["params"]:
param.grad.div_(self._num_grads_to_accum)
def __getattr__(self, name: str) -> Any:
"""Forward missing attributes to wrapped optimizer."""
try:
return super().__getattr__(name) # defer to Optimizer logic
except AttributeError:
return getattr(self._optimizer, name) # fallback to wrapped optim
class AdaScaleWrapper(AdaScale):
"""
A thin wrapper for AdaScale so that the constructor resembles a
standard optimizer. This allows it to work with other Optimizer
Wrappers, like `OSS`.
.. warning::
OSS(AdaScaleWrapper) (i.e. OSS wrapping AdaScale) results in each
rank's AdaScale operating on a different set of parameters. The
ranks will get different gain values and it is unclear how to adjust
the effective step size in that case. We have not validated the
effectiveness or benefit of this configuration.
OTOH, AdaScale(OSS) (i.e. AdaScale wrapping OSS) is recommended
and is numerically identical to AdaScale without OSS. Since
AdaScale doesn't incur per-parameter state, the memory benefit
of OSS is still the same.
Args:
params (list of tensors):
parameters to be optimized
optim_cls (class subtyping torch.optim.Optimizer):
an optimizer class to be wrapped.
additional_optim_args (argument dict):
keyword arguments for the `optim_cls` class above.
The remaining arguments are in sync with the `AdaScale` class above.
"""
def __init__(
self,
params: _params_t,
world_size: Optional[int] = None,
scale: Optional[float] = None,
smoothing: Optional[float] = None,
num_gradients_to_accumulate: int = 1,
debias_ewma: bool = True,
optim_cls: Type[Optimizer] = SGD,
**additional_optim_args: Any,
):
optim_obj = optim_cls(params, **additional_optim_args)
super().__init__(optim_obj, world_size, scale, smoothing, num_gradients_to_accumulate, debias_ewma)
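# A rough composition sketch (illustrative only; `params` and `world_size` are
# placeholders and `OSS` refers to fairscale's optimizer-state-sharding wrapper):
#
#   from fairscale.optim import OSS
#
#   def build_sharded_adascale(params, world_size):
#       # AdaScale(OSS) is the composition recommended in the docstring above:
#       # OSS shards the SGD state and AdaScale wraps the sharded optimizer.
#       sharded_sgd = OSS(params, optim=SGD, lr=0.1, momentum=0.9)
#       return AdaScale(sharded_sgd, world_size=world_size)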
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
#
#
# We need to have __init__.py in tests dir due to a pytest issue.
#
# if you have:
# tests/
# aa/test_name.py
# bb/test_name.py
#
# running `pytest tests` will give an error like "import file mismatch"
# because it can't distinguish between the file in `aa` and `bb` with
# the same file name. Adding an __init__.py file fixes it.
#
# However, `pytest tests/__init__.py` triggers running unrelated
# tests. So we just don't include any __init__.py in the test
# file lists.
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from fairscale.experimental.tooling.layer_memory_tracker import (
LayerwiseMemoryTracker,
ProcessGroupTracker,
find_best_reset_points,
)
from fairscale.fair_dev.testing.testing import GPT2, dist_init, skip_if_no_cuda, skip_if_single_gpu, temp_files_ctx
from fairscale.nn import FullyShardedDataParallel
@skip_if_no_cuda()
def test_memory_tracking_traces():
"""
Minimal test case to check that we can collect memory traces
outside of the context of distributed training (DDP or FSDP)
"""
# Create a model with a hierarchy of modules
torch.manual_seed(0)
model = nn.Sequential(
nn.Sequential(
nn.Conv2d(3, 64, kernel_size=(3, 3), padding=(1, 1), bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=False),
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
),
nn.Flatten(start_dim=1),
nn.Sequential(nn.Linear(64, 2), nn.ReLU(inplace=True)),
).cuda()
# Track a fake forward / backward
tracker = LayerwiseMemoryTracker()
tracker.monitor(model)
x = torch.randn(size=(2, 3, 224, 224)).cuda()
target = torch.LongTensor([0, 1]).cuda()
criterion = nn.CrossEntropyLoss()
criterion(model(x), target).backward()
# Verify that only leaf modules are tracked and that the order
# of the traces is consistent with backward/forward
tracked_names = [t.module_name for t in tracker.memory_traces]
expected_names = ["0.0", "0.1", "0.2", "0.3", "1", "2.0", "2.1"]
assert set(expected_names) == set(tracked_names)
assert tracked_names == (expected_names + expected_names[::-1])
# Verify that memory tracking for ReLU is sound
assert (
2 * 64 * 224 * 224 * 4 == tracker.forward_traces[2].event.memory_activations
), "ReLU(inplace=False) should allocate activations"
assert 0 == tracker.forward_traces[6].event.memory_activations, "ReLU(inplace=True) should NOT allocate activations"
# Verify that overall memory tracking is sound
summary = tracker.summary
assert summary.total_forward_allocations >= summary.total_activation_allocations
# Verify that the identification of top memory activation producer works:
# these are the first layers, all allocating (2, 64, 224, 224) feature maps
top_act_producers = summary.top_forward_activation_producers[:3]
assert "0.0" == top_act_producers[0].module_name
assert "0.1" == top_act_producers[1].module_name
assert "0.2" == top_act_producers[2].module_name
assert 3 * 3 * 64 * 3 * 4 == top_act_producers[0].module_params
assert 64 * 2 * 4 == top_act_producers[1].module_params
assert 0 == top_act_producers[2].module_params
for trace in top_act_producers:
assert 2 * 64 * 224 * 224 * 4 == trace.event.memory_activations
@skip_if_no_cuda
def test_memory_tracking_nlp_model():
"""
Check that we can collect memory traces of a realistic model
outside of the context of distributed training (DDP or FSDP)
"""
BATCH_SIZE = 10
INPUT_DIM = 16
model = GPT2(
embed_dim=256, num_heads=2, num_layers=6, num_positions=INPUT_DIM * INPUT_DIM, num_vocab=512, num_classes=2
).cuda()
tracker = LayerwiseMemoryTracker()
tracker.monitor(model)
input_tensor = torch.randint(10, (BATCH_SIZE, INPUT_DIM)).cuda()
output = model(input_tensor)
output.sum().backward()
assert len(tracker.memory_traces) > 0, "failed to collect memory traces"
assert len(tracker.forward_traces) > 0, "failed to collect forward memory traces"
assert len(tracker.backward_traces) > 0, "failed to collect backward memory traces"
assert tracker.summary.total_activation_allocations == 12462080
@skip_if_single_gpu
def test_memory_tracking_ddp():
"""
Check that we can collect memory traces of a simplistic model
in the context of DDP distributed training
"""
with temp_files_ctx(num=2) as sync_files:
world_size = 2
mp.spawn(
_layer_memory_tracking_ddp_worker,
(sync_files, world_size),
nprocs=world_size,
)
def _layer_memory_tracking_ddp_worker(gpu_id: int, sync_files: Tuple[str, str], world_size: int):
dist_init(world_size=world_size, rank=gpu_id, filename=sync_files[0], filename_rpc=sync_files[1])
torch.backends.cudnn.deterministic = True
# Create different inputs on each GPU
batch_size = 16
torch.manual_seed(gpu_id)
fake_inputs = torch.randn(size=(batch_size, 10)).cuda(gpu_id)
fake_targets = torch.randn(size=(batch_size, 10)).cuda(gpu_id)
fake_criterion = nn.MSELoss()
# Create a simple model
torch.manual_seed(0)
torch.cuda.manual_seed(0)
model = nn.Sequential(
nn.Linear(10, 32),
nn.ReLU(),
nn.Linear(32, 32),
nn.ReLU(),
nn.Linear(32, 10),
)
model = model.cuda(gpu_id)
ddp_model = DistributedDataParallel(model, device_ids=[gpu_id])
# Track the model on a forward / backward pass
tracker = LayerwiseMemoryTracker()
tracker.monitor(ddp_model)
fake_criterion(ddp_model(fake_inputs), fake_targets).backward()
tracker.stop()
# Check the overall structure of the collected traces
forward_names = [f"module.{i}" for i in range(5)]
backward_names = [f"module.{i}" for i in reversed(range(5))]
trace_names = [t.module_name for t in tracker.memory_traces]
assert trace_names == (forward_names + backward_names)
@skip_if_single_gpu
def test_memory_tracking_fsdp():
"""
Check that we can collect memory traces of a simplistic model
in the context of FSDP distributed training
"""
with temp_files_ctx(num=2) as sync_files:
world_size = 2
mp.spawn(
_layer_memory_tracking_fsdp_worker,
(sync_files, world_size),
nprocs=world_size,
)
def _layer_memory_tracking_fsdp_worker(gpu_id: int, sync_files: Tuple[str, str], world_size: int):
dist_init(world_size=world_size, rank=gpu_id, filename=sync_files[0], filename_rpc=sync_files[1])
torch.backends.cudnn.deterministic = True
# Create different inputs on each GPU
batch_size = 16
torch.manual_seed(gpu_id)
fake_inputs = torch.randn(size=(batch_size, 10)).cuda(gpu_id)
fake_targets = torch.randn(size=(batch_size, 10)).cuda(gpu_id)
fake_criterion = nn.MSELoss()
# Create a global group and a tracker around it
group = dist.new_group()
group = ProcessGroupTracker(group)
# Create a simple model
torch.manual_seed(0)
torch.cuda.manual_seed(0)
model = nn.Sequential(
nn.Linear(10, 10).cuda(gpu_id),
nn.ReLU(),
FullyShardedDataParallel(
nn.Linear(10, 10).cuda(gpu_id),
flatten_parameters=False,
process_group=group,
),
nn.ReLU(),
FullyShardedDataParallel(
nn.Linear(10, 10).cuda(gpu_id),
flatten_parameters=True,
process_group=group,
),
)
model = model.cuda(gpu_id)
dist_model = FullyShardedDataParallel(model, flatten_parameters=False, process_group=group)
# Track the model on a forward / backward pass
tracker = LayerwiseMemoryTracker()
tracker.monitor(dist_model)
fake_criterion(dist_model(fake_inputs), fake_targets).backward()
tracker.stop()
# Check results of all gathers tracking (feature specific to FSDP)
all_gathered_traces = [
(t.module_name, t.all_gathered, t.cumul_all_gathered) for t in tracker.memory_traces if t.all_gathered > 0
]
assert all_gathered_traces == [
("_fsdp_wrapped_module._fpw_module.0", 440, 440),
("_fsdp_wrapped_module._fpw_module.2._fsdp_wrapped_module._fpw_module", 440, 880),
("_fsdp_wrapped_module._fpw_module.4._fsdp_wrapped_module._fpw_module", 440, 880),
("_fsdp_wrapped_module._fpw_module.4._fsdp_wrapped_module._fpw_module", 440, 0),
("_fsdp_wrapped_module._fpw_module.2._fsdp_wrapped_module._fpw_module", 440, 0),
], all_gathered_traces
def test_find_best_reset_points():
"""
Verify that the reset points are correctly computed
"""
activations = [10, 8, 8, 9, 7, 7, 5, 4, 4]
# Check boundary condition: no checkpoints
memory, split_points = find_best_reset_points(activations, num_checkpoints=0)
assert memory == sum(activations)
# Check boundary condition: checkpoints everywhere
memory, split_points = find_best_reset_points(activations, num_checkpoints=len(activations))
assert memory == max(activations)
# Check one checkpoint allocation
memory, split_points = find_best_reset_points(activations, num_checkpoints=1)
assert memory == 35
assert split_points == [4]
assert sum(activations[: split_points[0]]) == 35
assert sum(activations[split_points[0] :]) == 27
# Check multiple checkpoint allocation
memory, split_points = find_best_reset_points(activations, num_checkpoints=2)
assert memory == 24
delimiters = [0] + split_points + [len(activations)]
splits_memory = [sum(activations[i:j]) for i, j in zip(delimiters[:-1], delimiters[1:])]
assert max(splits_memory) == memory
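# For reference, the two-checkpoint case above corresponds to splitting the
# activation list into contiguous segments such as [10, 8], [8, 9, 7], [7, 5, 4, 4]
# with per-segment sums 18, 24 and 20, so the returned peak memory is 24.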
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
import os
import pytest
import torch
from fairscale.experimental.nn import MEVO
from fairscale.experimental.nn.mevo import BaselineSoftmaxNllLoss, get_data
from fairscale.fair_dev.testing.testing import skip_if_no_cuda
@pytest.fixture(scope="session", params=[torch.float16, torch.float32])
def input_data(request):
shape = ((2, 3), (3, 4))
return get_data(shape, dtype=request.param)
_dense_out = {} # type: ignore
_dense_grad = {} # type: ignore
@skip_if_no_cuda
def test_mevo_eval():
"""Test eval mode without target tensor"""
weight = torch.nn.Linear(3, 4).cuda().weight
input = torch.rand(1, 5, 3).cuda()
k = MEVO(weight)
k.eval()
out = k(input, None)
assert out.shape == (1, 5, 4)
# Note: for lmcl_scale, an overly large value (like 64) on small-shape inputs
# will cause inf/nan in MEVO. Larger scale values are only needed for large-shape inputs.
@skip_if_no_cuda
@pytest.mark.parametrize("lmcl_scale", [None, 8])
def test_mevo(lmcl_scale):
"""Test the MEVO kernel in a single process (no DDP/FSDP)."""
# Set seed and reset peak mem so that peak measure below is correct.
torch.random.manual_seed(os.getpid())
torch.cuda.reset_peak_memory_stats()
shape = ((5, 3), (3, 7))
# Turn on large data for local testing.
large = False
if large:
shape = ((1 * 2048, 4096), (4096, 256008))
print("\nshapes are", shape)
input, weight, target = get_data(shape, dtype=torch.float16)
k = MEVO(weight, tile_factor=16, scale=lmcl_scale)
o = k(input, target)
o.backward()
print("MEVO loss", o, o.shape)
del o
cur_mem = round(torch.cuda.memory_allocated() / 1024 / 1024)
mem = round(torch.cuda.max_memory_allocated() / 1024 / 1024)
print("cur and peak mem for tiled fwd+bwd =", cur_mem, mem)
assert input.shape == input.grad.shape
input_data = input.data.cpu()
input_grad1 = input.grad.cpu()
del input
cur_mem = round(torch.cuda.memory_allocated() / 1024 / 1024)
mem = round(torch.cuda.max_memory_allocated() / 1024 / 1024)
print("after moving input and its grad, cur and peak mem for tiled fwd+bwd =", cur_mem, mem)
print("MEVO grad norm and grad", weight.grad.norm(), weight.grad)
g1 = weight.grad.clone()
weight.grad = None
input = input_data.cuda().requires_grad_(True)
refk = BaselineSoftmaxNllLoss(weight, scale=lmcl_scale)
o = refk(input, target)
o.backward()
print("Reference loss", o, o.shape)
del o
print("Reference grad norm and grad", weight.grad.norm(), weight.grad)
g2 = weight.grad.clone()
input_grad2 = input.grad.cpu()
# Print the diff. We use .cuda() since in torch 1.7 and 1.8, min() and max() are not
# implemented for cpu float16. The diff should in general be below 0.01 in magnitude.
diff = g1 - g2
print("weight grad diff", diff.cuda().min(), diff.cuda().max())
diff = input_grad1 - input_grad2
print("input grad diff", diff.cuda().min(), diff.cuda().max())
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
import tempfile
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from fairscale.experimental.nn import SyncBatchNorm
from fairscale.nn.checkpoint import checkpoint_wrapper
pytestmark = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def pg_worker(rank, world_size, init_file, func, *args):
dist.init_process_group(dist.Backend.NCCL, init_method="file://" + init_file, rank=rank, world_size=world_size)
func(*args)
dist.destroy_process_group()
def pg_test(world_size=torch.cuda.device_count()):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
mp.spawn(pg_worker, args=(world_size, tempfile.mkstemp()[1], func, *kwargs.values()), nprocs=world_size)
globals()["test_" + func.__name__] = wrapper
return func
return decorator
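# Note on the decorator above: the undecorated function (e.g. `parity3d_bn`) is what
# each spawned process runs inside a NCCL process group, while the wrapper registered
# in globals() under "test_<name>" is what pytest actually collects and invokes.
# Functions whose "# @pg_test()" line is commented out below are therefore not collected.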
def check_parity(torch_bn, fs_bn, x):
yh = torch.randn_like(x)
torch_x = x.detach()
torch_x.requires_grad = True
torch_y = torch_bn(torch_x)
torch_y.backward(yh)
fs_x = x.detach()
fs_x.requires_grad = True
fs_y = fs_bn(fs_x)
fs_y.backward(yh)
torch.testing.assert_allclose(torch_y, fs_y)
torch.testing.assert_allclose(torch_bn.running_mean, fs_bn.running_mean)
torch.testing.assert_allclose(torch_bn.running_var, fs_bn.running_var)
torch.testing.assert_allclose(torch_bn.weight, fs_bn.weight)
torch.testing.assert_allclose(torch_bn.bias, fs_bn.bias)
torch.testing.assert_allclose(torch_bn.weight.grad, fs_bn.weight.grad)
torch.testing.assert_allclose(torch_bn.bias.grad, fs_bn.bias.grad)
torch.testing.assert_allclose(torch_x.grad, fs_x.grad)
def check_parity_ddp(torch_bn, fs_bn, x):
yh = torch.randn_like(x)
rank = dist.get_rank()
torch_ddp = DDP(torch_bn, device_ids=[rank])
torch_bn = torch_ddp.module
torch_x = x.detach()
torch_x.requires_grad = True
torch_y = torch_ddp(torch_x)
torch_y.backward(yh)
fs_ddp = DDP(fs_bn, device_ids=[rank])
fs_bn = fs_ddp.module
fs_x = x.detach()
fs_x.requires_grad = True
fs_y = fs_ddp(fs_x)
fs_y.backward(yh)
torch.testing.assert_allclose(torch_y, fs_y)
torch.testing.assert_allclose(torch_x.grad, fs_x.grad)
if isinstance(torch_bn, nn.Sequential):
torch_bn = torch_bn[0]
fs_bn = fs_bn[0]
torch.testing.assert_allclose(torch_bn.running_mean, fs_bn.running_mean)
torch.testing.assert_allclose(torch_bn.running_var, fs_bn.running_var)
torch.testing.assert_allclose(torch_bn.weight, fs_bn.weight)
torch.testing.assert_allclose(torch_bn.bias, fs_bn.bias)
torch.testing.assert_allclose(torch_bn.weight.grad, fs_bn.weight.grad)
torch.testing.assert_allclose(torch_bn.bias.grad, fs_bn.bias.grad)
@pg_test(world_size=1)
def parity3d_bn():
rank = dist.get_rank()
torch.cuda.set_device(rank)
torch.manual_seed(rank)
x = torch.randn(4, 3, 4, 4, 4).cuda()
torch_bn = torch.nn.BatchNorm3d(3).cuda()
fs_bn = SyncBatchNorm(3).cuda()
check_parity(torch_bn, fs_bn, x)
@pytest.mark.skip("broken at head")
def test_parity3d_checkpoint_syncbn():
assert 1 == 2
# @pg_test()
def parity3d_checkpoint_syncbn():
rank = dist.get_rank()
torch.cuda.set_device(rank)
torch.manual_seed(rank)
x = torch.randn(4, 3, 4, 4, 4).cuda() * rank
torch_bn = torch.nn.SyncBatchNorm(3).cuda()
fs_bn = SyncBatchNorm(3).cuda()
fs_bn = checkpoint_wrapper(fs_bn)
check_parity_ddp(torch_bn, fs_bn, x)
@pytest.mark.skip("broken at head")
def test_parity3d_checkpoint_syncbn_twice():
assert 1 == 2
# @pg_test()
def parity3d_checkpoint_syncbn_twice():
rank = dist.get_rank()
torch.cuda.set_device(rank)
torch.manual_seed(rank)
x = torch.randn(4, 3, 4, 4, 4).cuda() * rank
torch_bn = torch.nn.SyncBatchNorm(3)
torch_bn = nn.Sequential(torch_bn, torch_bn).cuda()
fs_bn = SyncBatchNorm(3)
fs_bn = nn.Sequential(fs_bn, fs_bn).cuda()
fs_bn = checkpoint_wrapper(fs_bn)
check_parity_ddp(torch_bn, fs_bn, x)
@pg_test()
def parity3d_syncbn():
rank = dist.get_rank()
torch.cuda.set_device(rank)
torch.manual_seed(rank)
x = torch.randn(4, 3, 4, 4, 4).cuda() * rank
torch_bn = torch.nn.SyncBatchNorm(3).cuda()
fs_bn = SyncBatchNorm(3).cuda()
check_parity_ddp(torch_bn, fs_bn, x)
@pg_test(world_size=1)
def parity2d_bn():
rank = dist.get_rank()
torch.cuda.set_device(rank)
torch.manual_seed(rank)
x = torch.randn(4, 3, 4, 4).cuda()
torch_bn = torch.nn.BatchNorm2d(3).cuda()
fs_bn = SyncBatchNorm(3).cuda()
check_parity(torch_bn, fs_bn, x)
@pg_test()
def parity2d_syncbn():
rank = dist.get_rank()
torch.cuda.set_device(rank)
torch.manual_seed(rank)
x = torch.randn(4, 3, 4, 4).cuda() * rank
torch_bn = torch.nn.SyncBatchNorm(3).cuda()
fs_bn = SyncBatchNorm(3).cuda()
check_parity_ddp(torch_bn, fs_bn, x)
@pg_test(world_size=1)
def parity1d_bn():
rank = dist.get_rank()
torch.cuda.set_device(rank)
torch.manual_seed(rank)
x = torch.randn(4, 3, 4).cuda()
torch_bn = torch.nn.BatchNorm1d(3).cuda()
fs_bn = SyncBatchNorm(3).cuda()
check_parity(torch_bn, fs_bn, x)
@pg_test()
def parity1d_syncbn():
rank = dist.get_rank()
torch.cuda.set_device(rank)
torch.manual_seed(rank)
x = torch.randn(4, 3, 4).cuda()
torch_bn = torch.nn.SyncBatchNorm(3).cuda()
fs_bn = SyncBatchNorm(3).cuda()
check_parity_ddp(torch_bn, fs_bn, x)
@pg_test()
def memory_allocated():
rank = dist.get_rank()
torch.cuda.set_device(rank)
x = torch.randn(50, 2048, 7, 7).to(rank)
torch_bn = torch.nn.SyncBatchNorm(2048).cuda()
torch_bn = DDP(torch_bn, device_ids=[rank])
fs_bn = SyncBatchNorm(2048).cuda()
fs_bn = DDP(fs_bn, device_ids=[rank])
torch_x = x.detach()
torch_x.requires_grad = True
fs_x = x.detach()
fs_x.requires_grad = True
torch.cuda.empty_cache()
mem_at_start = torch.cuda.memory_stats()["allocated_bytes.all.current"]
torch_y = torch_bn(torch_x)
torch.cuda.empty_cache()
mem_after_torch = torch.cuda.memory_stats()["allocated_bytes.all.current"]
fs_y = fs_bn(fs_x)
torch.cuda.empty_cache()
mem_final = torch.cuda.memory_stats()["allocated_bytes.all.current"]
torch_used = mem_after_torch - mem_at_start
fs_used = mem_final - mem_after_torch
assert fs_used < (torch_used * 1.01), f"{fs_used} < {torch_used * 1.01}"
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Testing MultiProcessPipe Module
"""
import functools
import tempfile
from typing import Any, Dict, List, NamedTuple, Tuple
import pytest
import torch
import torch.distributed.autograd as dist_autograd
from torch.distributed.nn import RemoteModule
from torch.distributed.optim import DistributedOptimizer
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.nn as nn
from fairscale.experimental.nn.distributed_pipeline import DistributedLoss, DistributedPipeline, PipelineModulesGraph
from fairscale.fair_dev.testing.testing import skip_due_to_flakyness, skip_if_single_gpu
from fairscale.internal import torch_version
pytestmark = pytest.mark.skipif(
not torch.cuda.is_available() or torch_version() < (1, 9, 0),
reason="CPU tests fail right now and all tests require torch version >= 1.9.0.",
)
CPU_DEVICES = ["worker0/cpu", "worker1/cpu"]
GPU_DEVICES = ["worker0/cuda:0", "worker1/cuda:1"]
if torch.cuda.is_available():
DEVICES = [CPU_DEVICES, GPU_DEVICES]
else:
DEVICES = [CPU_DEVICES]
def rpc_worker(rank, world_size, init_file, func, *args):
options = rpc.TensorPipeRpcBackendOptions(init_method="file://" + init_file)
for i in range(world_size):
options.set_device_map("worker" + str(i), {rank: i})
rpc.init_rpc(
"worker" + str(rank),
rank=rank,
world_size=world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=options,
)
if rank == 0:
func(*args)
rpc.shutdown()
class RemoteModuleParams(NamedTuple):
module_cls: nn.Module
args: Tuple
kwargs: Dict[str, Any]
def create_sequence_pipeline(
layers: List[RemoteModuleParams], balance: List[int], devices: List[str], **kwargs: Any
) -> DistributedPipeline:
"""A simple helper function to create a pipeline from list of pipeline-modules that run sequentially.
Args:
layers: list of modules. They should not already be assigned a remote device.
balance: a list of integers describing how the layers should be partitioned. The sum of
the numbers in 'balance' should be equal to the number of layers.
devices: specification of remote device for each partition. Should be of the same length
as 'balance'.
"""
remote_modules: List[RemoteModule] = []
index = 0
for num_layers, remote_device in zip(balance, devices):
next_index = index + num_layers
for li in range(index, next_index):
remote_modules.append(RemoteModule(remote_device, **layers[li]._asdict()))
index = next_index
graph = PipelineModulesGraph()
graph.add_sequence(remote_modules, [0])
return DistributedPipeline(graph, **kwargs)
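# For example, calling the helper above with three layers, balance=[2, 1] and
# devices=["worker0/cuda:0", "worker1/cuda:1"] places the first two layers on
# worker0's GPU and the last one on worker1's GPU, then chains them into a single
# sequential DistributedPipeline graph (illustrative values, not used by the tests).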
def rpc_test(world_size=1):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
mp.spawn(rpc_worker, args=(world_size, tempfile.mkstemp()[1], func, *kwargs.values()), nprocs=world_size)
globals()["test_" + func.__name__] = wrapper
return func
return decorator
@rpc_test()
@pytest.mark.parametrize("devices", DEVICES)
def create(devices):
model = [RemoteModuleParams(nn.Linear, (4, 4), {})]
pipe = create_sequence_pipeline(model, balance=[1], chunks=1, devices=devices[:1])
@rpc_test()
@skip_if_single_gpu
def create_multiple_layers():
model = [RemoteModuleParams(nn.Linear, (4, 4), {}), RemoteModuleParams(nn.ReLU, (), {})]
pipe = create_sequence_pipeline(model, balance=[1, 1], chunks=1, devices=["worker0/cpu", "worker0/cpu"])
@rpc_test(world_size=2)
@pytest.mark.parametrize("devices", DEVICES)
@skip_if_single_gpu
@skip_due_to_flakyness
def create_multiple_workers(devices):
model = [RemoteModuleParams(nn.Linear, (4, 4), {}), RemoteModuleParams(nn.ReLU, (), {})]
pipe = create_sequence_pipeline(model, balance=[1, 1], chunks=1, devices=devices[:2])
@rpc_test(world_size=2)
@pytest.mark.parametrize("devices", DEVICES)
@skip_if_single_gpu
def parameter_rrefs(devices):
model = [RemoteModuleParams(nn.Linear, (4, 4), {}), RemoteModuleParams(nn.ReLU, (), {})]
pipe = create_sequence_pipeline(model, balance=[1, 1], chunks=1, devices=devices[:2])
parameter_rrefs = pipe.parameter_rrefs()
assert len(parameter_rrefs) == 2
@rpc_test(world_size=1)
@pytest.mark.parametrize("devices", DEVICES)
def forward(devices):
yh = torch.tensor([1.0, 0.0])
x = torch.tensor([1.0, -1.0])
model = [RemoteModuleParams(nn.ReLU, (), {})]
pipe = create_sequence_pipeline(model, balance=[1], chunks=1, devices=devices[:1])
y = pipe(x).to_here().cpu()
assert torch.equal(y, yh), f"{y} != {yh}"
@rpc_test(world_size=1)
@pytest.mark.parametrize("devices", DEVICES)
def forward_chunks(devices):
yh = torch.tensor([1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 4.0, 0.0])
x = torch.tensor([1.0, -1.0, 2.0, -2.0, 3.0, -3.0, 4.0, -4.0])
model = [RemoteModuleParams(nn.ReLU, (), {})]
pipe = create_sequence_pipeline(model, balance=[1], chunks=4, devices=devices[:1])
y = pipe(x).to_here().cpu()
assert torch.equal(y, yh), f"{y} != {yh}"
@rpc_test(world_size=2)
@pytest.mark.parametrize("devices", DEVICES)
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
@skip_if_single_gpu
def forward_multi(devices, checkpoint):
device = devices[0].split("/")[1]
torch.random.manual_seed(3)
torch.cuda.manual_seed_all(3)
x = torch.randn(8, 4).to(device)
x.requires_grad = True # TODO(msb) remove this limitation
model = [RemoteModuleParams(nn.Linear, (4, 4), {}), RemoteModuleParams(nn.ReLU, (), {})]
pipe = create_sequence_pipeline(model, balance=[1, 1], chunks=4, devices=devices[:2], checkpoint=checkpoint)
y = pipe(x).to_here()
expected_sum = torch.tensor(5.0615)
assert y.shape == torch.Size([8, 4])
assert y.requires_grad is True
assert torch.allclose(y.sum(), expected_sum), f"{y.sum()} != {expected_sum}"
@rpc_test(world_size=2)
@pytest.mark.parametrize("devices", DEVICES)
@skip_if_single_gpu
def backward(devices):
device = devices[0].split("/")[1]
torch.random.manual_seed(3)
criterion = DistributedLoss(torch.nn.MSELoss)
x = torch.randn(8, 4).to(device)
model = [RemoteModuleParams(nn.Linear, (4, 4), {}), RemoteModuleParams(nn.ReLU, (), {})]
pipe = create_sequence_pipeline(model, balance=[1, 1], chunks=4, devices=devices[:2])
with dist_autograd.context() as context_id:
y = pipe(x)
loss = criterion(y, rpc.RRef(x))
loss.backward(context_id)
grads = dist_autograd.get_gradients(context_id)
assert len(grads) == 2
@rpc_test(world_size=2)
@pytest.mark.parametrize("devices", DEVICES)
@skip_if_single_gpu
def update(devices):
device = devices[0].split("/")[1]
torch.random.manual_seed(3)
criterion = DistributedLoss(torch.nn.MSELoss)
x = torch.randn(8, 4).to(device)
model = [RemoteModuleParams(nn.Linear, (4, 4), {}), RemoteModuleParams(nn.ReLU, (), {})]
pipe = create_sequence_pipeline(model, balance=[1, 1], chunks=4, devices=devices[:2])
opt = DistributedOptimizer(
torch.optim.SGD,
pipe.parameter_rrefs(),
lr=0.05,
)
losses = []
for i in range(2):
with dist_autograd.context() as context_id:
y = pipe(x)
loss = criterion(y, rpc.RRef(x))
losses.append(loss)
loss.backward(context_id)
opt.step(context_id)
losses = [l.to_here() for l in losses]
assert losses[0] > losses[1], f"{losses[0]} !> {losses[1]}"
class ConcatenateTensors(nn.Module):
def forward(self, *inputs):
return torch.cat(inputs, dim=1)
class SplitTensors(nn.Module):
def forward(self, input):
return torch.split(input, (input.shape[1] + 1) // 2, dim=1)
def extract_partitions(graph: PipelineModulesGraph, pipeline: DistributedPipeline) -> List[List[int]]:
return [list(map(graph.nodes.index, p.nodes)) for p in pipeline.partitions]
@rpc_test(world_size=2)
@pytest.mark.parametrize("devices", DEVICES)
@skip_if_single_gpu
def multi_input_multi_output_layers(devices):
device = devices[0].split("/")[1]
torch.random.manual_seed(3)
criterion = DistributedLoss(torch.nn.MSELoss)
x = torch.randn(8, 4).to(device)
# input -> linear_layer_1 -> split -> (linear_layer_2_1, linear_layer_2_2) -> concatenate
linear_layer_1 = RemoteModule(devices[0], nn.Linear, (4, 4), {})
split = RemoteModule(devices[0], SplitTensors, (), {})
linear_layers_2 = [
RemoteModule(devices[0], nn.Linear, (2, 2), {}),
RemoteModule(devices[1], nn.Linear, (2, 2), {}),
]
concatenate = RemoteModule(devices[1], ConcatenateTensors, ())
graph = PipelineModulesGraph()
graph.add_sequence([linear_layer_1, split], [0], 2)
for i, l in enumerate(linear_layers_2):
graph.add_layer(l, [(split, i)])
graph.add_layer(concatenate, linear_layers_2)
pipe = DistributedPipeline(graph, chunks=4)
assert [[0, 1], [2], [3], [4]] == extract_partitions(graph, pipe)
parameter_rrefs = pipe.parameter_rrefs()
assert len(parameter_rrefs) == 6
opt = DistributedOptimizer(
torch.optim.SGD,
parameter_rrefs,
lr=0.05,
)
losses = []
for i in range(2):
with dist_autograd.context() as context_id:
y = pipe(x)
loss = criterion(y, rpc.RRef(x))
losses.append(loss)
loss.backward(context_id)
opt.step(context_id)
losses = [l.to_here() for l in losses]
assert losses[0] > losses[1], f"{losses[0]} !> {losses[1]}"
# A test for automatically extracting the same graph as in the multi_input_multi_output_layers test above
class ShardedLinearLayer(nn.Module):
def __init__(self, input_device, shard_devices, output_device):
super().__init__()
self.split = RemoteModule(input_device, SplitTensors, (), {})
self.linear_layers_2 = nn.ModuleList(
[
RemoteModule(shard_devices[0], nn.Linear, (2, 2), {}),
RemoteModule(shard_devices[1], nn.Linear, (2, 2), {}),
]
)
self.concatenate = RemoteModule(output_device, ConcatenateTensors, ())
def forward(self, input):
shards = self.split(input)
shards = [self.linear_layers_2[i](shards[i]) for i in range(2)]
return self.concatenate(*shards)
@rpc_test(world_size=2)
@pytest.mark.parametrize("devices", DEVICES)
@skip_if_single_gpu
def auto_graph_extract(devices):
from fairscale.experimental.nn.distributed_pipeline.trace import make_graph
device = devices[0].split("/")[1]
torch.random.manual_seed(3)
criterion = DistributedLoss(torch.nn.MSELoss)
x = torch.randn(8, 4).to(device)
# create model
model = nn.Sequential(
RemoteModule(devices[0], nn.Linear, (4, 4), {}),
ShardedLinearLayer(devices[0], devices, devices[1]),
RemoteModule(devices[0], nn.Linear, (4, 4), {}),
)
graph = make_graph(model)
pipe = DistributedPipeline(graph, chunks=4)
partitions = extract_partitions(graph, pipe)
assert [[0, 1], [2], [3], [4], [5]] == partitions, f"partitions={partitions}"
parameter_rrefs = pipe.parameter_rrefs()
assert len(parameter_rrefs) == 8
opt = DistributedOptimizer(
torch.optim.SGD,
parameter_rrefs,
lr=0.05,
)
losses = []
for i in range(2):
with dist_autograd.context() as context_id:
y = pipe(x)
loss = criterion(y, rpc.RRef(x))
losses.append(loss)
loss.backward(context_id)
opt.step(context_id)
losses = [l.to_here() for l in losses]
assert losses[0] > losses[1], f"{losses[0]} !> {losses[1]}"
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Testing Offload Module
"""
import contextlib
import copy
import numpy as np
import pytest
import torch
from fairscale.experimental.nn.offload import OffloadModel
from fairscale.fair_dev.testing.testing import skip_if_no_cuda
from fairscale.internal import torch_version
if torch_version() >= (1, 8, 0):
from fairscale.experimental.nn.auto_shard import shard_model
def _init():
torch.cuda.set_device(0)
torch.manual_seed(0)
np.random.seed(0)
device = torch.device("cuda")
offload_device = torch.device("cpu")
return device, offload_device
@skip_if_no_cuda
def test_single_run():
device, offload_device = _init()
model = _get_model()
peak_mem = {}
for checkpoint_activation in [True, False]:
offload_model = OffloadModel(
model=model,
device=device,
offload_device=offload_device,
num_slices=2,
checkpoint_activation=checkpoint_activation,
)
offload_optimizer = torch.optim.SGD(offload_model.parameters(), lr=0.001)
input = torch.ones(1000, 2).to(device)
labels = torch.ones(1000, 2).to(device)
offload_model.train()
pred = offload_model(input)
loss_fn = torch.nn.MSELoss(reduction="sum")
loss = loss_fn(pred, labels)
loss.backward()
offload_optimizer.step()
key = "ca_" + str(checkpoint_activation)
peak_mem[key] = torch.cuda.memory_stats(0)["allocated_bytes.all.peak"]
print(
"Peak allocated bytes on cuda:0 for checkpoint_activation "
+ str(checkpoint_activation)
+ ": {:2f}".format(peak_mem[key])
)
# TODO(anj-s): We need a better requirement since this fails on CircleCI right now.
assert peak_mem["ca_True"] <= peak_mem["ca_False"]
def _get_model(num_inputs=2, num_hidden=20, num_layers=10, num_outputs=2):
model = torch.nn.Sequential(
torch.nn.Linear(num_inputs, num_hidden),
*([torch.nn.Linear(num_hidden, num_hidden) for _ in range(num_layers)]),
torch.nn.Linear(num_hidden, num_outputs),
)
return model
def _check_parity(rmodel, omodel, ropt, oopt, rloss, oloss):
for oparams, rparams in zip(omodel.parameters(), rmodel.parameters()):
assert torch.allclose(oparams, rparams, atol=1e-2), f"Model params are different {oparams} {rparams}"
for o_pg, reg_pg in zip(oopt.param_groups, ropt.param_groups):
for o_param, reg_param in zip(o_pg["params"], reg_pg["params"]):
assert torch.allclose(
o_param, reg_param, atol=1e-2
), f"Model parameters differ between Offload and Vanilla {[o_param]} {reg_param}"
for o_buf, reg_buf in zip(omodel.buffers(), rmodel.buffers()):
assert torch.allclose(o_buf, reg_buf, atol=1e-2), "Model buffers differ in between Offload and Vanilla."
def _get_fp16_context(use_fp16=False):
if use_fp16:
return torch.cuda.amp.autocast()
else:
return contextlib.nullcontext()
def _train(model, optimizer, use_fp16, device):
inputs = torch.ones(32, 2).to(device)
labels = torch.ones(32, 2).to(device)
loss_fn = torch.nn.MSELoss(reduction="sum")
model.train()
with _get_fp16_context(use_fp16):
pred = model(inputs)
loss = loss_fn(pred, labels)
loss.backward()
optimizer.step()
return model, optimizer, loss
def _train_reg_model(model, device, offload_device, use_fp16=False):
reg_model = copy.deepcopy(model)
reg_model = reg_model.cuda()
reg_optimizer = torch.optim.SGD(reg_model.parameters(), lr=0.001)
return _train(reg_model, reg_optimizer, use_fp16, device)
def _train_offload_model(
model, device, offload_device, use_fp16=False, checkpoint_activation=False, num_microbatches=1
):
omodel = copy.deepcopy(model)
offload_model = OffloadModel(
model=omodel,
device=device,
offload_device=offload_device,
num_slices=2,
checkpoint_activation=checkpoint_activation,
num_microbatches=num_microbatches,
)
offload_optimizer = torch.optim.SGD(offload_model.parameters(), lr=0.001)
return _train(offload_model, offload_optimizer, use_fp16, device)
@skip_if_no_cuda
@pytest.mark.parametrize("use_fp16", [True, False])
@pytest.mark.parametrize("checkpoint_activation", [True, False])
@pytest.mark.parametrize("num_microbatches", [1, 5])
@pytest.mark.parametrize("use_auto_shard", [True, False])
def test_correctness(use_fp16, checkpoint_activation, num_microbatches, use_auto_shard):
pytest.skip("skip this test until the issue #900 is resolved.")
if use_auto_shard and torch_version() < (1, 8, 0):
pytest.skip("auto_shard requires torch version >= 1.8.0")
if (use_fp16 or checkpoint_activation) and not hasattr(torch.cuda.amp, "custom_fwd"):
pytest.skip(f"AMP APIs are not supported in torch version {torch.__version__}")
if not checkpoint_activation and num_microbatches > 1:
pytest.skip("We only support microbatches with activation offloading.")
device, offload_device = _init()
model = _get_model()
if use_auto_shard:
offload_model = shard_model(model)
else:
offload_model = model
rmodel, ropt, rloss = _train_reg_model(model, device, offload_device)
omodel, oopt, oloss = _train_offload_model(
offload_model,
device,
offload_device,
use_fp16=use_fp16,
checkpoint_activation=checkpoint_activation,
num_microbatches=num_microbatches,
)
_check_parity(rmodel.cpu(), omodel.cpu(), ropt, oopt, rloss, oloss)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Testing Auto Shard functionality on models that are not nn.Sequential.
"""
import math
import sys
import pytest
import torch
import torch.nn
import torch.nn as nn
from fairscale.internal import torch_version
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
self.d_model = d_model
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[: x.size(0)]
return self.dropout(x)
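# The buffer built above follows the standard sinusoidal encoding from
# "Attention Is All You Need": PE(pos, 2i) = sin(pos / 10000^(2i / d_model)) and
# PE(pos, 2i + 1) = cos(pos / 10000^(2i / d_model)), added to the input embeddings.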
class TransformerModel(nn.Module):
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
super(TransformerModel, self).__init__()
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = torch.nn.TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = torch.nn.TransformerEncoder(encoder_layers, nlayers)
self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.decoder = nn.Linear(ninp, ntoken)
self.init_weights()
def generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, float(0.0))
return mask
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, *args):
src = args[0]
src_mask = args[1]
src = self.encoder(src) * math.sqrt(self.ninp)
src = self.pos_encoder(src)
output = self.transformer_encoder(src, src_mask)
output = self.decoder(output)
return output
bptt = 35
ntokens = 28783 # the size of vocabulary
emsize = 200 # embedding dimension
nhid = 200 # the dimension of the feedforward network model in nn.TransformerEncoder
nlayers = 1 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder
nhead = 2 # the number of heads in the multiheadattention models
dropout = 0.2 # the dropout value
def test_single_run():
if sys.version_info.major == 3 and sys.version_info.minor > 10:
pytest.skip("torch.fx doesn't seem to work 3.11 yet")
if torch_version() < (1, 8, 0):
pytest.skip("requires torch version >= 1.8.0")
from fairscale.experimental.nn.auto_shard import shard_model
model = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout)
sharded_model = shard_model(model)
assert len(sharded_model) == 2, "Length of sharded model is incorrect."
expected_param_nums = [5998600, 5785383]
for i, model in enumerate(sharded_model):
param_count = {}
for name, module in model.named_modules():
if "." in name:
continue
param_count[name] = sum([x.numel() for x in module.parameters()])
assert expected_param_nums[i] == param_count[""]
src_mask = torch.randn((35, 35), dtype=torch.float32)
src = torch.randint(1, ntokens, (35, 20))
input = [src, src_mask]
for model in sharded_model:
if type(input) == list:
input = model(*input)
else:
input = model(input)
assert input.size() == torch.Size([35, 20, 28783])
class Branch(torch.nn.Module):
def __init__(self, features: int):
super().__init__()
self.left = nn.Linear(in_features=features, out_features=features)
self.right = nn.Linear(in_features=features, out_features=features)
def forward(self, x):
if x.sum() > 1000:
return self.left(x)
else:
return self.right(x)
class BranchedNetwork(torch.nn.Module):
def __init__(self, features: int):
super().__init__()
self.net = torch.nn.ModuleList([Branch(features) for _ in range(10)])
def forward(self, x):
for module in self.net:
x = module(x)
return x
def test_dynamic_conditionals_auto_wrapped():
if torch_version() < (1, 8, 0):
pytest.skip("requires torch version >= 1.8.0")
from fairscale.experimental.nn.auto_shard import shard_model
features = 10
model = BranchedNetwork(features)
sharded_model = shard_model(model, 3)
assert len(sharded_model) == 3
input_ = torch.randn(3, features)
model_output = model(input_)
sharded_model_output = input_
for shard in sharded_model:
sharded_model_output = shard(sharded_model_output)
assert torch.allclose(model_output, sharded_model_output)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
import os
import tempfile
from typing import Any, Dict, List, Tuple, Type
import unittest
import pytest
import torch
from torch import nn
import torch.distributed
import torch.nn.functional as F
import fairscale.experimental.nn.data_parallel.gossip as gossip
from fairscale.fair_dev.testing.testing import skip_if_single_gpu, spawn_for_all_world_sizes
# Enforce CUBLAS reproducibility, see https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
def get_gpus_for_rank(world_size: int) -> List[List[int]]:
"""This will return a list, each element of which contains a list of GPUs
to be used by the respective process.
Examples (results are shown for a machine with 2 GPUs):
>>> get_gpus_for_rank(2) # [[0], [1]]
>>> get_gpus_for_rank(4) # [[0], [0], [1], [1]]
>>> get_gpus_for_rank(1) # [[0]]
Args:
world_size (int): denotes number of subsets to split the available GPUs into
"""
visible_devices = list(range(torch.cuda.device_count()))
num_visible_devices = torch.cuda.device_count()
if num_visible_devices >= world_size:
gpus_for_rank = [[i] for i in range(world_size)]
else:
visible_devices_repeated = [
[device]
for device in visible_devices
for _ in range((world_size + num_visible_devices - 1) // num_visible_devices)
]
gpus_for_rank = visible_devices_repeated[:world_size]
return gpus_for_rank
def step_model(model: nn.Module, input: torch.Tensor, target: torch.Tensor) -> None:
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
def update_parameters(optimizer: torch.optim.Optimizer) -> None:
optimizer.step()
optimizer.zero_grad()
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x: Any) -> torch.Tensor: # type: ignore
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class LargeNet(Net):
def __init__(self) -> None:
super(LargeNet, self).__init__()
self.fc2 = nn.Linear(10, 5000000, bias=False)
self.fc3 = nn.Linear(5000000, 4, bias=False)
def find_memory_used_by_model(model_class: Type[nn.Module], device: torch.device) -> int:
torch.cuda.synchronize(device)
torch.cuda.reset_peak_memory_stats(device)
initial_memory = torch.cuda.max_memory_allocated(device)
_ = model_class().to(device)
torch.cuda.synchronize(device)
final_memory = torch.cuda.max_memory_allocated(device)
model_memory = final_memory - initial_memory
# print(model_memory)
return model_memory
def _prepare_single_device_module(
rank,
world_size,
tempfile,
devices: List[torch.device],
slowmo_init_dict: Dict[Any, Any],
global_batch_size: int,
) -> Tuple[nn.Module, gossip.SlowMoDistributedDataParallel, torch.Tensor, torch.Tensor]:
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(
"nccl",
init_method=f"file://{tempfile}",
rank=rank,
world_size=world_size,
)
model = Net()
slowmo_model = gossip.SlowMoDistributedDataParallel(
copy.deepcopy(model).to(devices[0]),
comm_device=devices[0],
process_rank=rank,
process_world_size=world_size,
**slowmo_init_dict,
)
model.to(devices[0])
input = torch.randn(global_batch_size, 2).to(devices[0])
target = torch.randn(global_batch_size, 4).to(devices[0])
return model, slowmo_model, input, target
def run_test_slowmo_with_slowmo_freq_1(
rank: int, world_size: int, tempfile: str, _filename_rpc: str, slowmo_init_dict: Dict[Any, Any]
) -> None:
"""
Note: we pass down `device_ids` all the way to SlowMoDistributedDataParallel
as part of the test. Below you find tests that either use a list of
integers, a list of `torch.device` instances, or an empty list.
The `devices` argument is used to control placement of the model and
must always be specified as a list of `torch.device` instances.
"""
int_devices = get_gpus_for_rank(world_size)[rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
torch.cuda.set_device(devices[0])
local_batch_size = len(devices)
global_batch_size = world_size * local_batch_size
model, slowmo_model, input, target = _prepare_single_device_module(
rank, world_size, tempfile, devices, slowmo_init_dict, global_batch_size
)
model_optimizer = torch.optim.SGD(
model.parameters(),
lr=slowmo_model.slowmo_lr,
momentum=slowmo_model.slowmo_momentum,
)
slowmo_model_optimizer = torch.optim.SGD(slowmo_model.module.parameters(), lr=1, momentum=0)
slowmo_model._init_global_momentum_buffers(slowmo_model_optimizer)
# check two model parameters over 3 iterations
for iteration in range(3):
# single cpu/gpu training
step_model(model, input, target)
# SlowMo training, SlowMo scatters subsets of input_cpu to nodes/GPUs
step_model(
slowmo_model,
input[rank * local_batch_size : (rank + 1) * local_batch_size],
target[rank * local_batch_size : (rank + 1) * local_batch_size],
)
# Update weights and run a second iteration to shake out errors
update_parameters(model_optimizer)
update_parameters(slowmo_model_optimizer)
slowmo_model.perform_slowmo(slowmo_model_optimizer)
for a, b in zip(model.parameters(), slowmo_model.module.parameters()):
assert torch.allclose(a, b)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
def run_test_localsgd_with_freq_ge_2(
rank: int, world_size: int, tempfile: str, _filename_rpc: str, slowmo_init_dict: Dict[Any, Any], *_, **__
) -> None:
int_devices = get_gpus_for_rank(world_size)[rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
torch.cuda.set_device(devices[0])
local_batch_size = len(devices)
global_batch_size = world_size * local_batch_size
model, slowmo_model, input, target = _prepare_single_device_module(
rank, world_size, tempfile, devices, slowmo_init_dict, global_batch_size
)
assert not slowmo_model.slowmo
model_optimizer = torch.optim.SGD(model.parameters(), lr=1, momentum=0)
slowmo_model_optimizer = torch.optim.SGD(slowmo_model.module.parameters(), lr=1, momentum=0)
# check that the model parameters match over 6 iterations
for iteration in range(6):
# single cpu/gpu training
step_model(
model,
input[rank * local_batch_size : (rank + 1) * local_batch_size],
target[rank * local_batch_size : (rank + 1) * local_batch_size],
)
# SlowMo training, SlowMo scatters subsets of input_cpu to nodes/GPUs
step_model(
slowmo_model,
input[rank * local_batch_size : (rank + 1) * local_batch_size],
target[rank * local_batch_size : (rank + 1) * local_batch_size],
)
# Update weights and run a second iteration to shake out errors
update_parameters(model_optimizer)
update_parameters(slowmo_model_optimizer)
# This block simulates the behaviour of localsgd by doing an allreduce on
# parameters of the regular model
if (iteration + 1) % slowmo_model.localsgd_frequency == 0:
for param in model.parameters():
torch.distributed.all_reduce(param)
with torch.no_grad():
param /= world_size # type: ignore
slowmo_model.perform_slowmo(slowmo_model_optimizer)
for a, b in zip(model.parameters(), slowmo_model.module.parameters()):
assert torch.allclose(a, b)
# Shuffle the input so that distributed input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
def run_test_slowmo_with_slowmo_freq_ge_2(
rank: int, world_size: int, tempfile: str, _filename_rpc: str, slowmo_init_dict: Dict[Any, Any], *_, **__
) -> None:
"""
Note: we pass down `device_ids` all the way to SlowMoDistributedDataParallel
as part of the test. Below you will find tests that either use a list of
integers, a list of `torch.device` instances, or an empty list.
The `devices` argument is used to control placement of the model and
must always be specified as a list of `torch.device` instances.
"""
int_devices = get_gpus_for_rank(world_size)[rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
torch.cuda.set_device(devices[0])
local_batch_size = len(devices)
global_batch_size = world_size * local_batch_size
model, slowmo_model, input, target = _prepare_single_device_module(
rank, world_size, tempfile, devices, slowmo_init_dict, global_batch_size
)
base_lr, base_momentum = 1, 0
model_optimizer = torch.optim.SGD(model.parameters(), lr=base_lr, momentum=base_momentum)
model_slow_momentum_optimizer = torch.optim.SGD(
model.parameters(),
lr=slowmo_model.slowmo_lr,
momentum=slowmo_model.slowmo_momentum,
)
slowmo_model_optimizer = torch.optim.SGD(slowmo_model.module.parameters(), lr=base_lr, momentum=base_momentum)
slowmo_model._init_global_momentum_buffers(slowmo_model_optimizer)
old_parameters = [copy.deepcopy(params) for params in model.parameters()]
# check two model parameters over 6 iterations
for iteration in range(6):
# single cpu/gpu training
step_model(model, input, target)
# SlowMo training, SlowMo scatters subsets of input_cpu to nodes/GPUs
step_model(
slowmo_model,
input[rank * local_batch_size : (rank + 1) * local_batch_size],
target[rank * local_batch_size : (rank + 1) * local_batch_size],
)
# Update weights and run a second iteration to shake out errors
update_parameters(model_optimizer)
update_parameters(slowmo_model_optimizer)
slowmo_model.perform_slowmo(slowmo_model_optimizer)
# This block simulates the behaviour of slow momentum by applying it manually
# to the regular model
if (iteration + 1) % slowmo_init_dict["slowmo_frequency"] == 0:
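# The drift since the last sync acts as a (negative) pseudo-gradient: the params are reset to
# their old values, the slow-momentum SGD step applies the outer update, and the snapshot of
# old parameters is then refreshed for the next sync.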
for params, old_params in zip(model.parameters(), old_parameters):
params.grad = -(params - old_params)
with torch.no_grad():
params.copy_(old_params)
update_parameters(model_slow_momentum_optimizer)
for params, old_params in zip(model.parameters(), old_parameters):
with torch.no_grad():
old_params.copy_(params)
for a, b in zip(model.parameters(), slowmo_model.module.parameters()):
assert torch.allclose(a, b, atol=1e-6), f"{a} = {b}"
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
def run_test_memory_usage_localsgd_with_slowmo(
rank: int,
world_size: int,
tempfile: str,
slowmo_init_dict: Dict[Any, Any],
use_gossip_data_parallel: bool = False,
*_,
**__,
) -> int:
int_devices = get_gpus_for_rank(world_size)[rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
torch.cuda.set_device(devices[0])
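# Capture the CUDA allocator's peak usage before training so the function can return
# the extra memory used on top of this baseline.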
torch.cuda.reset_peak_memory_stats(devices[0])
initial_max_memory = torch.cuda.max_memory_allocated(devices[0])
local_batch_size = len(devices)
global_batch_size = world_size * local_batch_size
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(
"nccl",
init_method=f"file://{tempfile}",
rank=rank,
world_size=world_size,
)
if use_gossip_data_parallel:
model: nn.Module = gossip.SlowMoDistributedDataParallel(
LargeNet().to(devices[0]),
comm_device=devices[0],
process_rank=rank,
process_world_size=world_size,
**slowmo_init_dict,
)
else:
model = LargeNet().to(devices[0])
input = torch.randn(global_batch_size, 2).to(devices[0])
target = torch.randn(global_batch_size, 4).to(devices[0])
model_optimizer = torch.optim.SGD(model.parameters(), lr=1, momentum=0.5)
# run 3 training iterations while tracking peak memory
for iteration in range(3):
step_model(
model,
input[rank * local_batch_size : (rank + 1) * local_batch_size],
target[rank * local_batch_size : (rank + 1) * local_batch_size],
)
update_parameters(model_optimizer)
if hasattr(model, "perform_slowmo"):
model.perform_slowmo(model_optimizer) # type: ignore
# Shuffle the input so that distributed input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
torch.cuda.synchronize(devices[0])
final_max_memory = torch.cuda.max_memory_allocated(devices[0])
# print(f"{initial_max_memory}, {final_max_memory}")
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
return final_max_memory - initial_max_memory
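# Each entry below pairs a SlowMoDistributedDataParallel configuration with the reference
# test function that compares it against manually simulated single-process training.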
_SLOWMO_TEST_SETTINGS = [
{
"slowmo_settings": {
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.LOCALSGD,
"localsgd_frequency": 1,
"nprocs_per_node": 1,
"slowmo_momentum": 0.0,
},
"test_function": run_test_slowmo_with_slowmo_freq_1,
"test_name": "nccl_backend_device_ids_torch_device_list",
},
{
"slowmo_settings": {
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.LOCALSGD,
"localsgd_frequency": 100, # Localsgd has to be disabled since it would fail in the 1 node case. TODO: Need to allow it to run without failing in SlowMoDistributedDataParallel in the one node case
"nprocs_per_node": 2,
"slowmo_momentum": 0.0,
},
"test_function": run_test_slowmo_with_slowmo_freq_1,
"test_name": "nccl_backend_2_proc_1_node",
},
{
"slowmo_settings": {
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.LOCALSGD,
"localsgd_frequency": 1,
"nprocs_per_node": 1,
"slowmo_momentum": 0.5,
"slowmo_frequency": 1,
"slowmo_memory_efficient": True,
},
"test_function": run_test_slowmo_with_slowmo_freq_1,
"test_name": "localsgd_slowmo_freq_1",
},
{
"slowmo_settings": {
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.SGP,
"nprocs_per_node": 1,
"slowmo_momentum": 0.5,
"slowmo_frequency": 1,
"slowmo_memory_efficient": False,
},
"test_function": run_test_slowmo_with_slowmo_freq_1,
"test_name": "sgp_slowmo_freq_1",
},
{
"slowmo_settings": {
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.LOCALSGD,
"localsgd_frequency": 1,
"nprocs_per_node": 1,
"slowmo_momentum": 0.5,
"slowmo_frequency": 2,
"slowmo_memory_efficient": True,
},
"test_function": run_test_slowmo_with_slowmo_freq_ge_2,
"test_name": "localsgd_slowmo",
},
{
"slowmo_settings": {
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.LOCALSGD,
"localsgd_frequency": 1,
"nprocs_per_node": 1,
"slowmo_momentum": 0.5,
"slowmo_frequency": 2,
"slowmo_memory_efficient": False,
},
"test_function": run_test_slowmo_with_slowmo_freq_ge_2,
"test_name": "localsgd_slowmo_no_sharding",
},
{
"slowmo_settings": {
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.SGP,
"nprocs_per_node": 1,
"slowmo_momentum": 0.5,
"slowmo_frequency": 2,
"slowmo_memory_efficient": True,
},
"test_function": run_test_slowmo_with_slowmo_freq_ge_2,
"test_name": "sgp_slowmo",
},
{
"slowmo_settings": {
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.SGP,
"nprocs_per_node": 1,
"slowmo_momentum": 0.5,
"slowmo_frequency": 2,
"slowmo_memory_efficient": False,
},
"test_function": run_test_slowmo_with_slowmo_freq_ge_2,
"test_name": "sgp_slowmo_no_sharding",
},
{
"slowmo_settings": {
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.LOCALSGD,
"localsgd_frequency": 1,
"nprocs_per_node": 1,
"slowmo_momentum": 0.5,
"slowmo_frequency": 2,
"slowmo_num_shards": 1,
"slowmo_memory_efficient": True,
},
"test_function": run_test_slowmo_with_slowmo_freq_ge_2,
"test_name": "slowmo_small_worldsize",
},
{
"slowmo_settings": {
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.LOCALSGD,
"localsgd_frequency": 2,
"nprocs_per_node": 1,
"slowmo_momentum": 0.0,
},
"test_name": "localsgd_freq2",
"test_function": run_test_localsgd_with_freq_ge_2,
},
]
@pytest.mark.skipif(not torch.distributed.is_nccl_available(), reason="This test requires NCCL")
@skip_if_single_gpu
@pytest.mark.parametrize("test_settings", _SLOWMO_TEST_SETTINGS)
def test_settings(test_settings) -> None:
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
print("Testing ", test_settings["test_function"], " with settings ", test_settings["test_name"])
spawn_for_all_world_sizes(
test_settings["test_function"],
world_sizes=[world_size],
args=(test_settings["slowmo_settings"],),
deterministic=True,
)
# @requires_nccl()
# @skip_if_lt_x_gpu(4)
# def test_nccl_backend_2_proc_2_node():
# # 2 device, 2 node
# # 4 device, 1 node
# # 1 device, 4 node
# # can change world size to 4
# # will need to change world_size to 4 for this
# world_size = 4
# temp_file_name = tempfile.mkstemp()[1]
# slowmo_settings = {
# "slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.LOCALSGD,
# "localsgd_frequency": 1,
# "rank": rank,
# "world_size": world_size,
# "nprocs_per_node": 2,
# "local_node_group": process_group,
# "master_group": process_group,
# "slowmo_momentum": 0.0,
# }
# mp.spawn(
# run_test_slowmo_with_process_group,
# args=(world_size, temp_file_name, process_group, slowmo_settings),
# nprocs=world_size,
# join=True,
# )
def run_max_memory_used_localsgd_slowmo_memory_efficient(rank, world_size, tempfile_1, tempfile_2) -> None:
int_devices = get_gpus_for_rank(world_size)[rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
# Memory usage when running optimization locally on a single GPU
max_memory_local = run_test_memory_usage_localsgd_with_slowmo(
rank,
world_size,
tempfile_1,
{"localsgd_frequency": 1},
use_gossip_data_parallel=False,
)
# Memory usage when running optimization using LocalSGD-SlowMo
max_memory_localsgd_slowmo = run_test_memory_usage_localsgd_with_slowmo(
rank,
world_size,
tempfile_2,
{
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.LOCALSGD,
"localsgd_frequency": 1,
"nprocs_per_node": 1,
"slowmo_momentum": 0.5,
"slowmo_frequency": 1,
"slowmo_memory_efficient": True,
},
use_gossip_data_parallel=True,
)
model_memory_usage = find_memory_used_by_model(LargeNet, devices[0])
extra_memory_used_by_localsgd_slowmo = max_memory_localsgd_slowmo - max_memory_local
extra_memory_used_by_slowmo = (
model_memory_usage  # This is expected in 2-GPU experiments and confirmed in the test below
)
extra_memory_used_by_localsgd = extra_memory_used_by_localsgd_slowmo - extra_memory_used_by_slowmo
# Extra memory used by localsgd should be close to 0 for large models, because we discard the gradients before
# the localsgd step, which should free up enough memory for the averaging itself.
# TODO: the above is a hypothesis; verify it later, once we know how much memory is typically used by activations.
# This try-catch block is to prevent a flaky test failure in which model_memory_usage is 0
try:
# Just setting a number below to match what I found here. This test needs to be revised
assert extra_memory_used_by_localsgd / model_memory_usage < 0.3
except ZeroDivisionError:
if rank == 0:
print("Skipping flaky test due to 0 memory error")
@pytest.mark.skipif(not torch.distributed.is_nccl_available(), reason="This test requires NCCL")
@skip_if_single_gpu
def test_max_memory_used_localsgd_slowmo_memory_efficient() -> None:
world_size = 2
spawn_for_all_world_sizes(
run_max_memory_used_localsgd_slowmo_memory_efficient,
world_sizes=[world_size],
args=(),
deterministic=True,
)
def run_max_memory_used_slowmo_memory_efficient(rank: int, world_size: int, tempfile_1: str, tempfile_2: str):
int_devices = get_gpus_for_rank(world_size)[rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
max_memory_local = run_test_memory_usage_localsgd_with_slowmo(
rank,
world_size,
tempfile_1,
{"localsgd_frequency": 1},
use_gossip_data_parallel=False,
)
max_memory_slowmo = run_test_memory_usage_localsgd_with_slowmo(
rank,
world_size,
tempfile_2,
{
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.LOCALSGD,
"localsgd_frequency": 100, # This is so that localsgd does not occur
"nprocs_per_node": 1,
"slowmo_momentum": 0.5,
"slowmo_frequency": 1,
"slowmo_memory_efficient": True,
},
use_gossip_data_parallel=True,
)
extra_memory_used_by_slowmo = max_memory_slowmo - max_memory_local
model_memory_usage = find_memory_used_by_model(LargeNet, devices[0])
# This try-catch block is to prevent a flaky test failure in which model_memory_usage is 0
try:
# Just setting a number below to match what I found here. This test needs to be revised
assert extra_memory_used_by_slowmo / model_memory_usage == pytest.approx(1.0, 0.1)
except (ZeroDivisionError, AssertionError):
if rank == 0:
print("Skipping flaky test due to memory error")
@pytest.mark.skipif(not torch.distributed.is_nccl_available(), reason="This test requires NCCL")
@skip_if_single_gpu
def test_max_memory_used_slowmo_memory_efficient() -> None:
world_size = 2
spawn_for_all_world_sizes(
run_max_memory_used_slowmo_memory_efficient,
world_sizes=[world_size],
args=(),
deterministic=True,
)
def run_max_memory_used_slowmo_no_sharding(rank, world_size, tempfile_1, tempfile_2):
int_devices = get_gpus_for_rank(world_size)[rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
max_memory_local = run_test_memory_usage_localsgd_with_slowmo(
rank,
world_size,
tempfile_1,
{"localsgd_frequency": 1},
use_gossip_data_parallel=False,
)
max_memory_slowmo = run_test_memory_usage_localsgd_with_slowmo(
rank,
world_size,
tempfile_2,
{
"slowmo_base_algorithm": gossip.SlowMoBaseAlgorithm.LOCALSGD,
"localsgd_frequency": 100, # This is so that localsgd does not occur
"nprocs_per_node": 1,
"slowmo_momentum": 0.5,
"slowmo_frequency": 1,
"slowmo_memory_efficient": False,
},
use_gossip_data_parallel=True,
)
extra_memory_used_by_slowmo = max_memory_slowmo - max_memory_local
model_memory_usage = find_memory_used_by_model(LargeNet, devices[0])
# This try-catch block is to prevent a flaky test failure in which model_memory_usage is 0
try:
# Just setting a number below to match what I found here. This test needs to be revised
assert extra_memory_used_by_slowmo / model_memory_usage == pytest.approx(2.0, 0.1)
except (ZeroDivisionError, AssertionError):
if rank == 0:
print("Skipping flaky test due to memory error")
@pytest.mark.skipif(not torch.distributed.is_nccl_available(), reason="This test requires NCCL")
@skip_if_single_gpu
def test_max_memory_used_slowmo_no_sharding() -> None:
world_size = 2
spawn_for_all_world_sizes(
run_max_memory_used_slowmo_no_sharding,
world_sizes=[world_size],
args=(),
deterministic=True,
)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader, Dataset
from fairscale.experimental.nn.ampnet_pipe.pipe import AMPnetPipe
from fairscale.fair_dev.testing.testing import get_worker_map, torch_spawn
class MySGD(Optimizer):
r"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate (required)
"""
def __init__(self, params, lr=0.01):
defaults = dict(lr=lr)
super(MySGD, self).__init__(params, defaults)
def __setstate__(self, state):
super(MySGD, self).__setstate__(state)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
d_p = p.grad.data
p.data.add_(d_p, alpha=-group["lr"])
return loss
class AMPnetDelegate(object):
def __init__(self, vocab_size=100, iteration_per_batch=1000):
self.iteration_per_batch = iteration_per_batch
self.vocab_size = vocab_size
def transform_input(self, cur_batch):
return cur_batch["input"]
def transform_target(self, cur_batch):
return cur_batch["target"]
def log_loss(self, cur_batch, loss, count):
pass
def transform_output_before_loss(self, output_tensor):
return output_tensor
def check_and_save_weights(self, num_gradients):
pass
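# Small synthetic dataset yielding random (input, target) tensor pairs so the pipeline tests
# can run without real data.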
class FakeDataset(Dataset):
def __init__(
self,
input_dim=10,
output_dim=10,
total_samples=100,
):
self.input_dim = input_dim
self.output_dim = output_dim
self.total_samples = total_samples
self.input_samples = torch.rand(self.total_samples, self.input_dim, self.output_dim)
self.target_samples = torch.rand(self.total_samples, self.input_dim, self.output_dim)
def __getitem__(self, index):
return {
"input": self.input_samples[index, :, :],
"target": self.target_samples[index, :, :],
}
def __len__(self):
return self.total_samples
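# Spawn two workers and run AMPnetPipe.interleave() over a 2-stage pipeline with an even layer balance.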
@torch_spawn([2])
def async_event_loop_interleave_simple():
model = nn.Sequential(nn.Linear(10, 10), nn.ReLU(inplace=False), nn.Linear(10, 10), nn.ReLU(inplace=False))
pipe = AMPnetPipe(
module=model,
balance=[2, 2],
worker_map=get_worker_map(),
chunks=10,
checkpoint="never",
)
fake_dataset = FakeDataset()
fake_dataloader = DataLoader(fake_dataset, batch_size=4, shuffle=True, num_workers=0)
loss = nn.MSELoss()
opt = MySGD(model.parameters(), lr=0.01)
transform_and_log = AMPnetDelegate()
pipe.interleave(fake_dataloader, loss, opt, transform_and_log)
@torch_spawn([4])
def async_event_loop_interleave_hard():
model = nn.Sequential(nn.Linear(10, 10), nn.Linear(10, 10), nn.Linear(10, 10), nn.Linear(10, 10))
pipe = AMPnetPipe(
module=model,
balance=[1, 1, 1, 1],
worker_map=get_worker_map(),
chunks=10,
checkpoint="never",
)
fake_dataset = FakeDataset()
fake_dataloader = DataLoader(fake_dataset, batch_size=4, shuffle=True, num_workers=0)
loss = nn.MSELoss()
opt = MySGD(model.parameters(), lr=0.01)
transform_and_log = AMPnetDelegate()
pipe.interleave(fake_dataloader, loss, opt, transform_and_log)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Testing scaler
"""
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from fairscale.experimental.optim.dynamic_loss_scaler import DynamicLossScaler
class ManualLinearRegression(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 1)
def forward(self, x):
return self.linear(x)
device = "cuda" if torch.cuda.is_available() else "cpu"
def _init_dataset():
np.random.seed(42)
x = np.random.rand(100, 1)
y = 1 + 2 * x + 0.1 * np.random.randn(100, 1)
# Shuffles the indices
idx = np.arange(100)
np.random.shuffle(idx)
# Generates train sets
x_train, y_train = x[idx], y[idx]
x_train_tensor = torch.tensor([x_train]).float().to(device)
y_train_tensor = torch.tensor([y_train]).float().to(device)
return x_train_tensor, y_train_tensor
def _train_with_dls(x, y):
scaler = DynamicLossScaler()
torch.manual_seed(42)
lr = 1e-1
n_epochs = 1000
loss_fn = nn.MSELoss(reduction="mean")
model = ManualLinearRegression().to(device)
optimizer = optim.SGD(model.parameters(), lr=lr)
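# Typical dynamic-loss-scaling loop: scale the loss before backward(), let the scaler step the
# optimizer, then update the scale factor.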
for epoch in range(n_epochs):
optimizer.zero_grad()
model.train()
yhat = model(x)
loss = loss_fn(y, yhat)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
return model
def test_dls_without_overflow():
x, y = _init_dataset()
model = _train_with_dls(x, y)
for name, param in model.named_parameters():
if param.requires_grad:
print(name, param.data)
if name == "linear.weight":
assert abs(param.data.item() - 2) <= 0.05
if name == "linear.bias":
assert abs(param.data.item() - 1) <= 0.03
# TODO(tmarkstrum): add test case covering check_overflow function
# TODO(tmarkstrum): add test case covering the state_dict, FP16
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import os
from pathlib import Path
import shutil
import sys
import pytest
import torch
from torch import nn
from fairscale.experimental.wgit.sha1_store import SHA1_Store
from fairscale.fair_dev.testing.testing import objects_are_equal
# Get the absolute path of the parent at the beginning, before any os.chdir(),
# so that we can properly clean it up from any CWD.
TESTING_STORE_DIR = Path("sha1_store_testing").resolve()
# Used to filter metadata json keys.
SHA1_KEY_STR_LEN = 40
@pytest.fixture(scope="function")
def sha1_store(request):
"""A fixture for setup and teardown.
This only runs once per test function. So don't make this too slow.
Tests must be written in a way that either all of the tests run
in the order they appear in this file or a specific test is
run separately by the user. Either way, the tests should work.
"""
# Attach a teardown function.
def teardown():
os.chdir(TESTING_STORE_DIR.joinpath("..").resolve())
if TESTING_STORE_DIR.exists():
shutil.rmtree(TESTING_STORE_DIR)
request.addfinalizer(teardown)
# Teardown in case last run didn't clean it up.
teardown()
# Get an empty sha1 store.
sha1_store = SHA1_Store(TESTING_STORE_DIR, init=True)
return sha1_store
@pytest.mark.parametrize("compress", [True, False])
def test_sha1_add_file(sha1_store, compress):
os.chdir(TESTING_STORE_DIR)
# Create random checkpoints
size_list = [25e5, 27e5, 30e5, 35e5, 40e5]
chkpts = [
"checkpoint_1a.pt",
"checkpoint_1b.pt",
"checkpoint_1c.pt",
"checkpoint_2.pt",
"checkpoint_3.pt",
]
for file, size in zip(chkpts, size_list):
torch.save(nn.Linear(1, int(size)).state_dict(), file)
# Add those 5 random files.
for c in chkpts:
sha1_store.add(c, compress)
# Add the same fixed data twice.
module = nn.Linear(100, 100, bias=False)
module.weight.data = torch.zeros(100, 100)
zeros_file = "zeros.pt"
torch.save(module.state_dict(), zeros_file)
sha1_store.add(zeros_file, compress)
sha1_store.add(zeros_file, compress)
# Assert the ref counts are 1,1,1,1,1 and 2
with sha1_store._readonly_json_ctx:
json_dict = sha1_store._json_dict
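# Expected SHA1 of the all-zeros checkpoint that was added twice above, hence ref_count == 2.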
key = "3c06179202606573a4982d91c2829a1a675362b3"
assert key in json_dict.keys() and json_dict[key]["ref_count"] == 2, json_dict
json_dict = dict(filter(lambda item: len(item[0]) == SHA1_KEY_STR_LEN, json_dict.items()))
assert sorted(map(lambda x: x["ref_count"], json_dict.values())) == [1, 1, 1, 1, 1, 2], json_dict
@pytest.mark.parametrize("compress", [True, False])
def test_sha1_add_state_dict(sha1_store, compress):
os.chdir(TESTING_STORE_DIR)
# add once
for i in range(3):
sha1_store.add(nn.Linear(100, 100).state_dict(), compress)
# add twice
for i in range(3):
sd = nn.Linear(80, 80).state_dict()
sha1_store.add(sd, compress)
sha1_store.add(sd, compress)
with sha1_store._readonly_json_ctx:
json_dict = sha1_store._json_dict
json_dict = dict(filter(lambda item: len(item[0]) == SHA1_KEY_STR_LEN, json_dict.items()))
assert sorted(map(lambda x: x["ref_count"], json_dict.values())) == [1, 1, 1, 2, 2, 2], json_dict
@pytest.mark.parametrize("compress", [True, False])
def test_sha1_add_tensor(sha1_store, compress):
os.chdir(TESTING_STORE_DIR)
sha1_store.add(torch.Tensor([1.0, 5.5, 3.4]).repeat(100), compress)
with sha1_store._readonly_json_ctx:
json_dict = sha1_store._json_dict
key = "81cb2a3f823cfb78da8dd390e29e685720974cc7"
assert key in json_dict.keys() and json_dict[key]["ref_count"] == 1, json_dict
@pytest.mark.parametrize("compress", [True, False])
def test_sha1_get(sha1_store, compress):
"""Testing the get() API: normal and exception cases."""
if sys.version_info.major == 3 and sys.version_info.minor > 10:
pytest.skip("pgzip package doesn't work with 3.11's gzip module")
os.chdir(TESTING_STORE_DIR)
# Add a file, a state dict and a tensor.
file = "test_get.pt"
torch.save(nn.Linear(100, 100).state_dict(), file)
state_dict = nn.Sequential(nn.Linear(10, 10), nn.Linear(10, 20)).state_dict()
tensor = torch.ones(20, 30)
# Check that we can get them back.
file_sha1 = sha1_store.add(file, compress)
sd = sha1_store.get(file_sha1)
assert objects_are_equal(sd, torch.load(file))
sd_sha1 = sha1_store.add(state_dict, compress)
sd = sha1_store.get(sd_sha1)
assert objects_are_equal(sd, state_dict)
tensor_sha1 = sha1_store.add(tensor, compress)
tensor_got = sha1_store.get(tensor_sha1)
assert objects_are_equal(tensor_got, tensor)
# Make sure an invalid sha1 causes an exception.
with pytest.raises(ValueError):
sha1_store.get(tensor_sha1[:-1])
@pytest.mark.parametrize("compress", [True, False])
def test_sha1_delete(sha1_store, compress):
"""Testing the delete() API: with ref counting behavior."""
os.chdir(TESTING_STORE_DIR)
# Add once and delete, second delete should throw an exception.
tensor = torch.ones(30, 50)
sha1 = sha1_store.add(tensor, compress)
sha1_store.delete(sha1)
with pytest.raises(ValueError):
sha1_store.delete(sha1)
# Add multiple times and delete should match that.
state_dict = nn.Sequential(nn.Linear(10, 10), nn.Linear(10, 20)).state_dict()
sha1 = sha1_store.add(state_dict, compress)
for i in range(3):
new_sha1 = sha1_store.add(state_dict, compress)
assert sha1 == new_sha1, f"{sha1} vs. {new_sha1}"
for i in range(4):
sha1_store.delete(sha1)
with pytest.raises(ValueError):
sha1_store.delete(sha1)
@pytest.mark.parametrize("compress", [True, False])
def test_sha1_size_info_and_names(sha1_store, compress):
"""Testing the size_info() and names() APIs."""
os.chdir(TESTING_STORE_DIR)
# Add once & check.
tensor = torch.ones(300, 500)
sha1 = sha1_store.add(tensor, compress=compress, name="name1")
orig, dedup, gzip = sha1_store.size_info(sha1)
assert orig == dedup, "no dedup should happen"
if not compress:
assert orig == gzip, "no compression should happen"
else:
assert orig > gzip, "compression should be smaller"
assert (orig, dedup, gzip) == sha1_store.size_info(), "store and entry sizes should match"
names = sha1_store.names(sha1)
assert names == {"name1": 1}, names
# Add second time & check.
sha1 = sha1_store.add(tensor, compress=compress, name="name2")
orig2, dedup2, gzip2 = sha1_store.size_info(sha1)
assert orig2 == orig * 2 == dedup2 * 2, "dedup not correct"
assert gzip == gzip2, "compression shouldn't change"
names = sha1_store.names(sha1)
assert names == {"name1": 1, "name2": 1}, names
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import shutil
import pytest
from fairscale.experimental.wgit.pygit import PyGit
@pytest.fixture
def repo_data():
test_dirs = Path("temp_wgit_testing/.wgit")
file1, file2 = "test_file1", "test_file2"
out_dict = {
"test_path": Path.cwd().joinpath(test_dirs),
"file1": file1,
"file2": file2,
}
return out_dict
@pytest.fixture
def pygit_repo_wrap(repo_data):
path = Path.cwd().joinpath(repo_data["test_path"])
pygit_repo_wrap = PyGit(path, gitignore=[repo_data["file1"], repo_data["file2"]])
return pygit_repo_wrap
def test_setup(repo_data):
curr_dir = Path.cwd()
test_dir = curr_dir.joinpath(repo_data["test_path"])
# Initialize the repo for the first time
pygit_repo = PyGit(test_dir, gitignore=["test_file1", "test_file2"])
# create sample files
test_dir.joinpath("test_file_1.pt").touch()
test_dir.joinpath("test_file_2.pt").touch()
assert test_dir.stem == str(pygit_repo.path.parent.stem)
def test_pygit_add(pygit_repo_wrap):
"""Tests the add functionality of the PyGit class"""
assert str(pygit_repo_wrap.path.parent.stem) == ".wgit"
repo = pygit_repo_wrap.repo
# File status 128 in pygit2 signifies file has Not been added yet
assert repo.status()[".gitignore"] == 128
assert repo.status()["test_file_1.pt"] == 128
assert repo.status()["test_file_2.pt"] == 128
pygit_repo_wrap.add()
# File status 1 in pygit2 signifies file has been added to git repo
assert repo.status()[".gitignore"] == 1
assert repo.status()["test_file_1.pt"] == 1
assert repo.status()["test_file_2.pt"] == 1
def test_pygit_commit(pygit_repo_wrap):
"""Tests the add functionality of the PyGit class"""
assert str(pygit_repo_wrap.path.parent.stem) == ".wgit"
repo = pygit_repo_wrap.repo
# File status 1 in pygit2 signifies file has been added
assert repo.status()[".gitignore"] == 1
assert repo.status()["test_file_1.pt"] == 1
assert repo.status()["test_file_2.pt"] == 1
pygit_repo_wrap.commit("random_message")
# File status {} in pygit2 implies commit has been made
assert repo.status() == {}
def test_tear_down(repo_data):
# clean up: delete the .wgit directory created during this Test
# Making sure the current directory is ./temp_wgit_testing before removing test dir
if (repo_data["test_path"].parent.stem == "temp_wgit_testing") and (repo_data["test_path"].stem == ".wgit"):
shutil.rmtree(repo_data["test_path"].parent)
else:
raise Exception("Exception in testing directory tear down!")
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from fairscale.experimental.wgit.signal_sparsity import SignalSparsity, random_sparse_mask
from fairscale.fair_dev.testing.testing import objects_are_equal
# Our own tolerance
ATOL = 1e-6
RTOL = 1e-5
# enable this for debugging.
# torch.set_printoptions(precision=20)
@pytest.mark.parametrize(
"dense, k, dim",
[
(torch.linspace(0.01, 0.06, 40).reshape(5, 8), 40, None), # top-40, dim=None
(torch.linspace(0.1, 0.6, 30).reshape(5, 6), 5, 0), # top-5, dim=0
(torch.linspace(-0.1, 0.6, 35).reshape(7, 5), 5, 1), # top-5, dim=1
(torch.arange(60).float().reshape(10, 6), 60, None), # top-60, dim=None
(torch.arange(60).float().reshape(10, 6), 10, 0), # top-10, dim=0
(torch.arange(60).float().reshape(10, 6), 6, 1), # top-6, dim=1
(torch.arange(60).float().reshape(2, 5, 6), 5, 1), # top-5, dim=1
],
)
def test_sst_dst_to_perfect_dense_reconstruction(dense, k, dim):
"""Tests whether perfect reconstruction of input dense tensor is generated when top-k matches the numel
across some dimension dim for both SST and DST.
"""
sparser = SignalSparsity(sst_top_k_element=k, sst_top_k_dim=dim, dst_top_k_element=k, dst_top_k_dim=dim)
sst = sparser.dense_to_sst(dense)
dst = sparser.dense_sst_to_dst(dense, sst)
dense_recons = sparser.sst_dst_to_dense(sst, dst)
objects_are_equal(dense, dense_recons, raise_exception=True, rtol=RTOL, atol=ATOL)
def get_valid_conf_arg_list():
"""Returns a map object of keyword arguments (as dicts) to be used as parameters for test_validate_conf."""
def kwargs(vals_list):
"""Maps the values in input vals_list to the keys in arg_key_list"""
arg_key_list = [
"sst_top_k_element",
"sst_top_k_percent",
"sst_top_k_dim",
"dst_top_k_element",
"dst_top_k_percent",
"dst_top_k_dim",
]
return dict(zip(arg_key_list, vals_list))
# A ValueError should be raised when either:
# 1. both the sst (or dst) percent and element arguments are provided (i.e. not None), or
# 2. top_k_percent or top_k_element is outside its valid range (element > 0, 0 < percent <= 100).
element = 10
percent = 50
dim = 0
args_list = [
[element, percent, dim, element, None, dim], # case 1.
[element, None, dim, element, percent, dim],
[0, None, dim, None, None, dim], # case 2.
[None, 0, dim, None, None, dim],
[element, None, dim, 0, None, dim],
[element, None, dim, None, 0, dim],
]
return map(kwargs, args_list)
@pytest.mark.parametrize("kwargs", get_valid_conf_arg_list())
def test_validate_conf(kwargs):
"""Validate value error is raised with each kwargs returned by get_valid_conf_arg_list"""
pytest.raises(ValueError, SignalSparsity, **kwargs)
@pytest.mark.parametrize(
"tensor, dim",
[
(torch.arange(20).reshape(4, 5), None),
(torch.arange(20).reshape(4, 5), 0),
(torch.arange(20).reshape(4, 5), 1),
(torch.arange(80).reshape(4, 5, 4), None),
(torch.arange(80).reshape(4, 5, 4), 0),
(torch.arange(80).reshape(4, 5, 4), 1),
(torch.arange(80).reshape(4, 5, 4), 2),
],
)
def test_dense_to_sst_perfect_recons(tensor, dim):
"""Tests the dense_to_sst method whether it simply performs an FFT transformation
when top_k_percent is set at 100.
"""
sparser_2d = SignalSparsity(sst_top_k_percent=100, sst_top_k_dim=dim, dst_top_k_percent=100)
if dim is None:
fft_tensor = torch.fft.fft(tensor.flatten()).reshape(tensor.shape)
else:
fft_tensor = torch.fft.fft(tensor, dim=dim)
assert all((sparser_2d.dense_to_sst(tensor) == fft_tensor).flatten())
#
# Below are fixed input/output tests.
#
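# Shorthand used in the fixtures below: SST is the sparse tensor kept in the FFT (signal) domain,
# DST is the sparse delta tensor in the dense domain, and RT is the tensor reconstructed from SST and DST.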
def get_test_params():
"""Helper function to create and return a list of tuples of the form:
(dense, expected_sst, expected_dst, expected_reconstructed_tensor (RT), dim, percent, top_k_element)
to be used as parameters for tests.
"""
# Input tensor 0.
# We use `sin()` below to make sure the top-2 values are not index sort
# sensitive. With just `arange()`, we get a linear line and the resulting
# FFT has many identical second-to-the-largest values. That makes top-2 potentially
# non-deterministic and implementation dependent.
tensor_4x3_None = torch.arange(12).sin().reshape(4, 3).float()
# Values are: [[ 0.00000000000000000000, 0.84147095680236816406, 0.90929740667343139648],
# [ 0.14112000167369842529, -0.75680249929428100586, -0.95892429351806640625],
# [-0.27941548824310302734, 0.65698659420013427734, 0.98935824632644653320],
# [ 0.41211849451065063477, -0.54402112960815429688, -0.99999022483825683594]]
# SST: with dim=None, top-2
expd_sst_4x3_None = torch.tensor(
[
[0.0000 + 0.0000j, 0.0000 + 0.0000j, -1.3618 - 5.7650j],
[0.0000 + 0.0000j, 0.0000 + 0.0000j, 0.0000 + 0.0000j],
[0.0000 + 0.0000j, 0.0000 + 0.0000j, 0.0000 + 0.0000j],
[0.0000 + 0.0000j, -1.3618 + 5.7650j, 0.0000 + 0.0000j],
],
dtype=torch.complex64,
)
# DST: with dim=None, top-2
expd_dst_4x3_None = torch.tensor(
[
[0.22696666419506072998, 0.00000000000000000000, 0.00000000000000000000],
[0.00000000000000000000, 0.00000000000000000000, 0.00000000000000000000],
[0.00000000000000000000, 0.00000000000000000000, 0.00000000000000000000],
[0.18515183031558990479, 0.00000000000000000000, 0.00000000000000000000],
]
)
# RT: expected_reconstructed_tensor with dim=None and top-2 for both sst and dst
expd_rt_4x3_None = torch.tensor(
[
[0.00000000000000000000, 0.71862268447875976562, 0.94558942317962646484],
[0.22696666419506072998, -0.71862268447875976562, -0.94558942317962646484],
[-0.22696666419506072998, 0.71862268447875976562, 0.94558942317962646484],
[0.41211849451065063477, -0.71862268447875976562, -0.94558942317962646484],
]
)
# Input tensor 1.
tensor_4x3_0 = torch.arange(50, 62).sin().reshape(4, 3) / 100
# Values are: [[-0.00262374849990010262, 0.00670229177922010422, 0.00986627582460641861],
# [ 0.00395925156772136688, -0.00558789074420928955, -0.00999755132943391800],
# [-0.00521551026031374931, 0.00436164764687418938, 0.00992872659116983414],
# [ 0.00636737979948520660, -0.00304810609668493271, -0.00966117810457944870]]
# SST: with dim=0, top-1, (use top-1 because top-2 and top-3 would include some identical values)
expd_sst_4x3_0 = torch.tensor(
[
[0.0000 + 0.0j, 0.0000 + 0.0j, 0.0000 + 0.0j],
[0.0000 + 0.0j, 0.0000 + 0.0j, 0.0000 + 0.0j],
[-1.81658901274204254150e-02 + 0.0j, 1.96999348700046539307e-02 + 0.0j, 3.94537299871444702148e-02 + 0.0j],
[0.0000 + 0.0j, 0.0000 + 0.0j, 0.0000 + 0.0j],
],
dtype=torch.complex64,
)
# DST: with dim=0, top-2
expd_dst_4x3_0 = torch.tensor(
[
[0.00191772403195500374, 0.00000000000000000000, 0.00000000000000000000],
[0.00000000000000000000, 0.00000000000000000000, 0.00000000000000000000],
[0.00000000000000000000, 0.00000000000000000000, 0.00000000000000000000],
[0.00000000000000000000, 0.00187687762081623077, 0.00020225439220666885],
]
)
# RT: expected_reconstructed_tensor with dim=0 and top-2 for both sst and dst
expd_rt_4x3_0 = torch.tensor(
[
[-0.00262374849990010262, 0.00492498371750116348, 0.00986343249678611755],
[0.00454147253185510635, -0.00492498371750116348, -0.00986343249678611755],
[-0.00454147253185510635, 0.00492498371750116348, 0.00986343249678611755],
[0.00454147253185510635, -0.00304810609668493271, -0.00966117810457944870],
]
)
# Input tensor 2.
tensor_3x5_1 = torch.Tensor([0, 2, 3, 1, 6, 5, 7, 4, 8, 11, 9, 10, 0, 2, 5]).reshape(3, 5)
# SST: with dim=1, top-3, because the FFT always has a symmetric output after the top-1
expd_sst_3x5_1 = torch.tensor(
[
[
12.00000000000000000000 + 0.00000000000000000000j,
0,
-5.23606777191162109375 + 4.25325393676757812500j,
-5.23606777191162109375 - 4.25325393676757812500j,
0,
],
[
35.00000000000000000000 + 0.00000000000000000000j,
0,
-5.85410213470458984375 - 1.45308518409729003906j,
-5.85410213470458984375 + 1.45308518409729003906j,
0,
],
[
26.00000000000000000000 + 0.00000000000000000000j,
12.01722049713134765625 - 3.57971239089965820312j,
0,
0,
12.01722049713134765625 + 3.57971239089965820312j,
],
],
dtype=torch.complex64,
)
# DST: with dim=1, top-2
expd_dst_3x5_1 = torch.tensor(
[
[
0.00000000000000000000,
-1.09442710876464843750,
0.00000000000000000000,
0.86524754762649536133,
0.90557289123535156250,
],
[
0.00000000000000000000,
-2.23606777191162109375,
-1.72360706329345703125,
0.00000000000000000000,
2.44721317291259765625,
],
[
0.00000000000000000000,
1.95278644561767578125,
-2.15278673171997070312,
1.53049504756927490234,
0.00000000000000000000,
],
],
)
# RT: expected_reconstructed_tensor with dim=1 and top-2 for both sst and dst
expd_rt_3x5_1 = torch.tensor(
[
[
0.30557289719581604004,
2.00000000000000000000,
3.37082076072692871094,
1.00000000000000000000,
6.00000000000000000000,
],
[
4.65835905075073242188,
7.00000000000000000000,
4.00000000000000000000,
6.82917928695678710938,
11.00000000000000000000,
],
[
10.00688838958740234375,
10.00000000000000000000,
0.00000000000000000000,
2.00000000000000000000,
5.32360696792602539062,
],
]
)
# Input tensor 3.
tensor_3x2x2 = torch.arange(12).cos().reshape(3, 2, 2).float()
# Values are: [[[ 1.00000000000000000000, 0.54030233621597290039],
# [-0.41614684462547302246, -0.98999249935150146484]],
# [[-0.65364360809326171875, 0.28366219997406005859],
# [ 0.96017026901245117188, 0.75390225648880004883]],
# [[-0.14550003409385681152, -0.91113024950027465820],
# [-0.83907151222229003906, 0.00442569795995950699]]]
# SST: with dim=1, top-1
expd_sst_3x2x2_1 = torch.tensor(
[
[[0, 0], [1.41614687442779541016 + 0.0j, 1.53029489517211914062 + 0.0j]],
[[0, 1.03756451606750488281 + 0.0j], [-1.61381387710571289062 + 0.0j, 0]],
[[-0.98457157611846923828 + 0.0j, 0], [0, -0.91555595397949218750 + 0.0j]],
],
dtype=torch.complex64,
)
# DST: with dim=1, top-1
expd_dst_3x2x2_1 = torch.tensor(
[
[[0.00000000000000000000, -0.22484511137008666992], [0.29192659258842468262, 0.00000000000000000000]],
[[0.15326333045959472656, -0.23512005805969238281], [0.00000000000000000000, 0.00000000000000000000]],
[[0.34678575396537780762, -0.45335227251052856445], [0.00000000000000000000, 0.00000000000000000000]],
]
)
# RT: expected_reconstructed_tensor with dim=1 and top-1 for both sst and dst
expd_rt_3x2x2_1 = torch.tensor(
[
[[0.70807343721389770508, 0.54030233621597290039], [-0.41614684462547302246, -0.76514744758605957031]],
[[-0.65364360809326171875, 0.28366219997406005859], [0.80690693855285644531, 0.51878225803375244141]],
[[-0.14550003409385681152, -0.91113024950027465820], [-0.49228578805923461914, 0.45777797698974609375]],
]
)
return [
# input, expected sst, dst, rt, sst_dim, percent, top_k.
(tensor_4x3_None, expd_sst_4x3_None, expd_dst_4x3_None, expd_rt_4x3_None, None, 2 / 12 * 100, 2),
(tensor_4x3_0, expd_sst_4x3_0, expd_dst_4x3_0, expd_rt_4x3_0, 0, 1 / 3 * 100, 1),
(tensor_3x5_1, expd_sst_3x5_1, expd_dst_3x5_1, expd_rt_3x5_1, 1, 3 / 5 * 100, 3),
(tensor_3x2x2, expd_sst_3x2x2_1, expd_dst_3x2x2_1, expd_rt_3x2x2_1, 1, 1 / 2 * 100, 1),
]
@pytest.mark.parametrize("tensor, expd_sst, unused1, unused2, dim, unused3, k", get_test_params())
def test_dense_to_sst(tensor, expd_sst, unused1, unused2, dim, unused3, k):
"""Tests for fixed input dense tensor and fixed expected output SST tensor."""
sparser_2d = SignalSparsity(sst_top_k_element=k, sst_top_k_dim=dim, dst_top_k_percent=100)
sst = sparser_2d.dense_to_sst(tensor)
objects_are_equal(sst, expd_sst, raise_exception=True, rtol=RTOL, atol=ATOL)
@pytest.mark.parametrize("tensor, unused1, unused2, unused3, dim, percent, k", get_test_params())
def test_percent_element(tensor, unused1, unused2, unused3, dim, percent, k):
"""Tests whether comparative values for top_k_element and top_k_percent returns same outputs"""
sparser_2d = SignalSparsity(sst_top_k_percent=None, sst_top_k_element=k, sst_top_k_dim=dim, dst_top_k_percent=100)
sst_element = sparser_2d.dense_to_sst(tensor)
sparser_2d = SignalSparsity(
sst_top_k_percent=percent, sst_top_k_element=None, sst_top_k_dim=dim, dst_top_k_percent=100
)
sst_percent = sparser_2d.dense_to_sst(tensor)
objects_are_equal(sst_element, sst_percent, raise_exception=True, rtol=RTOL, atol=ATOL)
@pytest.mark.parametrize("tensor, sst, expd_dst, unused1, dim, unused2, k", get_test_params())
def test_dense_sst_to_dst(tensor, sst, expd_dst, unused1, dim, unused2, k):
"""Tests fixed expected output DST tensor with fixed input dense and SST tensors."""
sparser_2d = SignalSparsity(sst_top_k_element=k, sst_top_k_dim=dim, dst_top_k_element=k, dst_top_k_dim=dim)
dst = sparser_2d.dense_sst_to_dst(tensor, sst)
objects_are_equal(dst, expd_dst, raise_exception=True, rtol=RTOL, atol=ATOL)
@pytest.mark.parametrize("unused1, sst, dst, expd_rt, dim, unused2, unused3", get_test_params())
def test_sst_dst_to_dense(unused1, sst, dst, expd_rt, dim, unused2, unused3):
"""Tests the correct expected reconstruction from frozen sst and dst tensors."""
sparser = SignalSparsity(sst_top_k_element=1, sst_top_k_dim=dim, dst_top_k_element=1, dst_top_k_dim=dim)
dense_recons = sparser.sst_dst_to_dense(sst, dst)
objects_are_equal(dense_recons, expd_rt, raise_exception=True, rtol=RTOL, atol=ATOL)
@pytest.mark.parametrize("tensor, expd_sst, expd_dst, expd_rt, dim, unused, k", get_test_params())
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_lossy_compress(tensor, expd_sst, expd_dst, expd_rt, dim, unused, k, device):
"""Tests the lossy_compress method against expected sst, dst and reconstruced tensor."""
if device == "cuda" and not torch.cuda.is_available():
pytest.skip("no GPU")
sparser = SignalSparsity(sst_top_k_element=k, sst_top_k_dim=dim, dst_top_k_element=k, dst_top_k_dim=dim)
lossy_dense, sst, dst = sparser.lossy_compress(tensor.to(device))
objects_are_equal(sst.to(device), expd_sst.to(device), raise_exception=True, rtol=RTOL, atol=ATOL)
objects_are_equal(dst.to(device), expd_dst.to(device), raise_exception=True, rtol=RTOL, atol=ATOL)
objects_are_equal(lossy_dense.to(device), expd_rt.to(device), raise_exception=True, rtol=RTOL, atol=ATOL)
@pytest.mark.parametrize(
"tensor, dim, top_k_percent",
[
(torch.linspace(0.01, 0.06, 40).reshape(5, 8), 0, 100),
(torch.linspace(-0.01, 0.06, 42).reshape(7, 6), 0, 100),
(torch.linspace(-10, 15, 36).reshape(6, 6), 1, 100),
],
)
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_lossy_compress_sparsity_0(tensor, dim, top_k_percent, device):
"""Tests whether lossy_compress method simply returns dense tensor when sparsity is 0."""
if device == "cuda" and not torch.cuda.is_available():
pytest.skip("no GPU")
sparser = SignalSparsity(
sst_top_k_percent=top_k_percent, sst_top_k_dim=dim, dst_top_k_percent=top_k_percent, dst_top_k_dim=dim
)
lossy_dense, sst, dst = sparser.lossy_compress(tensor.to(device))
objects_are_equal(lossy_dense.to(device), tensor.to(device), raise_exception=True, rtol=RTOL, atol=ATOL)
objects_are_equal(sst, None, raise_exception=True, rtol=RTOL, atol=ATOL)
objects_are_equal(dst.to(device), tensor.to(device), raise_exception=True, rtol=RTOL, atol=ATOL)
def test_sst_disabled():
"""Tests the case where SST is disabled."""
dense = torch.tensor([0.5000, 0.6000, 0.7000, 0.8000])
result = torch.tensor([0.0, 0.0, 0.7000, 0.8000])
sparser = SignalSparsity(dst_top_k_element=2, dst_top_k_dim=0)
rt, sst, dst = sparser.lossy_compress(dense)
objects_are_equal(rt, result, raise_exception=True, rtol=RTOL, atol=ATOL)
objects_are_equal(dst, result, raise_exception=True, rtol=RTOL, atol=ATOL)
assert sst is None
def test_dst_disabled():
"""Tests the case where DST is disabled."""
dense = torch.tensor([0.5000, 0.6000, 0.7000, 0.8000, 0.9000])
result_rt = torch.tensor([0.6000, 0.7618, 0.7000, 0.6382, 0.8000])
result_sst = torch.tensor(
[
3.50000000000000000000 + 0.00000000000000000000j,
0.00000000000000000000 + 0.00000000000000000000j,
-0.25000002980232238770 + 0.08122986555099487305j,
-0.25000002980232238770 - 0.08122986555099487305j,
0.00000000000000000000 + 0.00000000000000000000j,
]
)
sparser = SignalSparsity(sst_top_k_element=3, sst_top_k_dim=0)
rt, sst, dst = sparser.lossy_compress(dense)
objects_are_equal(rt, result_rt, raise_exception=True, rtol=RTOL, atol=ATOL)
objects_are_equal(sst, result_sst, raise_exception=True, rtol=RTOL, atol=ATOL)
assert dst is None
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_random_sparse_mask(device):
"""Tests random_sparse_mask API."""
if device == "cuda" and not torch.cuda.is_available():
pytest.skip("no GPU")
dense = torch.tensor([0.5000, 0.6000, 0.7000, 0.8000, 0.9000]).to(device)
mask = random_sparse_mask(dense, 20, 0)
assert mask.sum() == 1
for d in [0, 1]:
dense = torch.rand(100, 100).to(device)
mask = random_sparse_mask(dense, 1, d)
assert objects_are_equal(mask.sum(dim=d), torch.ones(100).to(device), raise_exception=True)
assert mask.sum() == 100
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
from pathlib import Path
import random
import shutil
import pytest
import torch
from torch import nn
from fairscale.experimental.wgit.repo import Repo, RepoStatus
@pytest.fixture
def create_test_dir():
curr_dir = Path.cwd()
parent_dir = "experimental"
test_dir = curr_dir.joinpath(parent_dir, "wgit_testing/")
# creates a testing directory within ./experimental
try:
os.makedirs(test_dir)
except FileExistsError:
shutil.rmtree(test_dir)
os.makedirs(test_dir)
os.chdir(test_dir)
# create random checkpoints
size_list = [30e5, 35e5, 40e5, 40e5]
for i, size in enumerate(size_list):
sd = {}
sd["model"] = nn.Linear(1, int(size)).state_dict()
sd["step"] = 100
torch.save(sd, f"checkpoint_{i}.pt")
return test_dir
@pytest.fixture
def repo():
repo = Repo(Path.cwd(), init=True)
return repo
def test_setup(create_test_dir):
assert str(create_test_dir.stem) == "wgit_testing"
def test_api_init(capsys, repo):
repo = Repo(Path.cwd(), init=True)
assert Path(".wgit/sha1_store").is_dir()
assert Path(".wgit/.gitignore").is_file()
assert Path(".wgit/.git").exists()
assert Path(".wgit/.gitignore").exists()
@pytest.mark.parametrize("per_tensor", [True, False])
@pytest.mark.parametrize("gzip", [True, False])
def test_api_add(capsys, repo, per_tensor, gzip):
fnum = random.randint(0, 2)
chkpt0 = f"checkpoint_{fnum}.pt"
repo.add(chkpt0, per_tensor=per_tensor, gzip=gzip)
if per_tensor:
# TODO (Min): test per_tensor add more.
return
sha1_hash = repo._sha1_store._get_sha1_hash(chkpt0)
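# add() writes a per-file metadata JSON under .wgit mirroring the file's relative path;
# read it back to check the recorded SHA1.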
metadata_path = repo._rel_file_path(Path(chkpt0))
with open(os.path.join(".wgit", metadata_path), "r") as f:
json_data = json.load(f)
sha1_dir_0 = f"{sha1_hash[:2]}/" + f"{sha1_hash[2:]}"
# The sha1 hashes are different because add() internally uses a different pickle method.
assert json_data["SHA1"] != sha1_hash
def test_api_commit(capsys, repo):
commit_msg = "epoch_1"
repo.commit(message=commit_msg)
with open(".wgit/.git/logs/HEAD") as f:
line = f.readlines()
assert line[0].rstrip().split()[-1] == commit_msg
@pytest.mark.parametrize("per_tensor", [True, False])
def test_api_status(capsys, repo, per_tensor):
# delete the repo and initialize a new one:
shutil.rmtree(".wgit")
repo = Repo(Path.cwd(), init=True)
# check status before any file is added
out = repo.status()
assert out == {"": RepoStatus.CLEAN}
# check status after a file is added but not committed
chkpt0 = f"checkpoint_{random.randint(0, 1)}.pt"
repo.add(chkpt0, per_tensor=per_tensor)
out = repo.status()
key_list = list(repo._get_metdata_files().keys())
assert out == {key_list[0]: RepoStatus.CHANGES_ADDED_NOT_COMMITED}
# check status after commit
repo.commit("e1")
out = repo.status()
assert out == {key_list[0]: RepoStatus.CLEAN}
# check status after a new change has been made to the file
torch.save(nn.Linear(1, int(15e5)).state_dict(), chkpt0)
out = repo.status()
assert out == {key_list[0]: RepoStatus.CHANGES_NOT_ADDED}
# add the new changes made to weigit
repo.add(chkpt0, per_tensor=per_tensor)
out = repo.status()
assert out == {key_list[0]: RepoStatus.CHANGES_ADDED_NOT_COMMITED}
# check status after a new different file is added to be tracked by weigit
chkpt3 = "checkpoint_3.pt"
repo.add(chkpt3, per_tensor=per_tensor)
key_list = list(repo._get_metdata_files().keys())
out = repo.status()
assert out == {
key_list[0]: RepoStatus.CHANGES_ADDED_NOT_COMMITED,
key_list[1]: RepoStatus.CHANGES_ADDED_NOT_COMMITED,
}
# check status after the new file is committed to be tracked by weigit
repo.commit("e2")
out = repo.status()
assert out == {key_list[0]: RepoStatus.CLEAN, key_list[1]: RepoStatus.CLEAN}
def test_api_log(capsys, repo):
repo.log("testfile.pt")
captured = capsys.readouterr()
assert captured.out == "wgit log of the file: testfile.pt\n"
assert captured.err == ""
def test_api_checkout(repo):
try:
repo.checkout("sha1")
except NotImplementedError:
assert True
def teardown_module(module):
# clean up: delete the .wgit directory created during this Test
# Making sure the current directory is ./experimental before removing test dir
if (Path.cwd().parent.name == "experimental") and (Path.cwd().name == "wgit_testing"):
os.chdir(Path.cwd().parent)
shutil.rmtree("./wgit_testing/")
else:
raise Exception("Exception in testing directory tear down!")
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
from pathlib import Path
import shutil
import pytest
import torch
from torch import nn
import fairscale.experimental.wgit.cli as cli
from fairscale.experimental.wgit.sha1_store import SHA1_Store
@pytest.fixture(scope="module")
def create_test_dir():
"""This setup function runs once per test of this module and
it creates a repo, in the process, testing the init function.
"""
curr_dir = Path.cwd()
parent_dir = "experimental"
test_dir = curr_dir.joinpath(parent_dir, "wgit_testing/")
# creates a testing directory within ./experimental
try:
os.makedirs(test_dir)
except FileExistsError:
shutil.rmtree(test_dir)
os.makedirs(test_dir)
os.chdir(test_dir)
# create random checkpoints
size_list = [30e5, 35e5, 40e5]
for i, size in enumerate(size_list):
torch.save(nn.Linear(1, int(size)).state_dict(), f"checkpoint_{i}.pt")
# Test init.
cli.main(["init"])
assert str(test_dir.stem) == "wgit_testing"
return test_dir
def test_cli_init(create_test_dir, capsys):
# Check if the json and other files have been created by the init
assert Path(".wgit/sha1_store").is_dir()
assert Path(".wgit/.gitignore").is_file()
assert Path(".wgit/.git").exists()
def test_cli_add(create_test_dir, capsys):
chkpt0 = "checkpoint_0.pt"
cli.main(["add", "--no_per_tensor", chkpt0])
sha1_store = SHA1_Store(
Path.cwd().joinpath(".wgit", "sha1_store"),
init=False,
)
sha1_hash = sha1_store._get_sha1_hash(chkpt0)
with open(os.path.join(".wgit", "wgit_testing/checkpoint_0.pt"), "r") as f:
json_data = json.load(f)
sha1_dir_0 = f"{sha1_hash[:2]}/" + f"{sha1_hash[2:]}"
# The sha1 hashes are different because add() internally uses a different pickle method.
assert json_data["SHA1"] != sha1_hash
def test_cli_commit(capsys):
commit_msg = "epoch_1"
cli.main(["commit", "-m", f"{commit_msg}"])
with open(".wgit/.git/logs/HEAD") as f:
line = f.readlines()
assert line[0].rstrip().split()[-1] == commit_msg
def test_cli_status(capsys):
cli.main(["status"])
captured = capsys.readouterr()
assert captured.out == "{'wgit_testing/checkpoint_0.pt': <RepoStatus.CLEAN: 1>}\n"
assert captured.err == ""
def test_cli_log(capsys):
cli.main(["log"])
captured = capsys.readouterr()
assert captured.out == "wgit log\n"
assert captured.err == ""
def test_cli_checkout(capsys):
try:
cli.main(["checkout", "sha1"])
except NotImplementedError:
assert True
def teardown_module(module):
# clean up: delete the .wgit directory created during this Test
# Making sure the current directory is ./experimental before removing test dir
if (Path.cwd().parent.name == "experimental") and (Path.cwd().name == "wgit_testing"):
os.chdir(Path.cwd().parent)
shutil.rmtree("./wgit_testing/")
else:
raise Exception("Exception in testing directory tear down!")
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import time
import pytest
import torch
from fairscale.experimental.wgit.signal_sparsity_profiling import EnergyConcentrationProfile as ECP
from fairscale.fair_dev.testing.testing import objects_are_equal, skip_if_no_cuda
# Our own tolerance
ATOL = 1e-6
RTOL = 1e-5
# enable this for debugging.
# torch.set_printoptions(precision=20)
@skip_if_no_cuda
def test_nonblocking():
"""Tests cpu runs ahead of the GPU in the measuring process."""
big = torch.rand(10, 1000, 1000).cuda()
ecp = ECP(dim=2, top_k_percents=[1, 5, 10, 50, 90])
start = time.time()
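# measure()/measure_fft() should only enqueue asynchronous CUDA work, so the CPU-side elapsed time
# here is expected to be much smaller than the time measured after torch.cuda.synchronize().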
out = ecp.measure(big)
out_fft = ecp.measure_fft(big)
cpu_time = time.time() - start
torch.cuda.synchronize()
gpu_time = time.time() - start
assert cpu_time * 5 < gpu_time, f"GPU time should dominate {cpu_time} vs. {gpu_time}"
for o in [out, out_fft]:
# validate the output
p = [x.item() for x in o]
for n, n1 in zip(p, p[1:]):
assert n <= n1 and n >= 0 and n <= 100, f"n={n} n1={n1}"
def get_ones():
"""Return test data with ones tensor"""
return (
0,
[1, 5, 10, 100],
torch.ones(100),
[torch.tensor(0.01), torch.tensor(0.05), torch.tensor(0.1), torch.tensor(1.0)],
)
def get_dim_0():
"""Test case for dim=0 for 2D input."""
return (
0,
[1, 3, 33, 66, 90],
torch.tensor([0.1, 0.2, 0.1, 0.45]).repeat(100, 1),
[torch.tensor(0.01), torch.tensor(0.03), torch.tensor(0.33), torch.tensor(0.66), torch.tensor(0.9)],
)
@pytest.mark.parametrize(
"dim, percents, in_tensor, out_tensors",
[
get_ones(),
get_dim_0(),
],
)
def test_expected_output(dim, percents, in_tensor, out_tensors):
"""Test with a few expected input & outputs."""
ecp = ECP(dim, percents)
out = ecp.measure(in_tensor)
objects_are_equal(out, out_tensors, raise_exception=True, rtol=RTOL, atol=ATOL)
out_fft = ecp.measure_fft(torch.fft.ifft(in_tensor, dim=dim))
objects_are_equal(out_fft, out_tensors, raise_exception=True, rtol=RTOL, atol=ATOL)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from fairscale.nn.misc import ParamBucket
def test_param_values_conserved():
param = torch.rand((2, 3))
bucket = ParamBucket(10, param.dtype, param.device)
param_ = param.clone()
bucket.add_param(param_)
assert torch.allclose(param, param_)
def test_max_size():
param = torch.rand((20, 30))
bucket = ParamBucket(5, param.dtype, param.device)
with pytest.raises(AssertionError):
bucket.add_param(param)
def test_double_check_int():
param = torch.rand((5, 6))
bucket = ParamBucket(300, param.dtype, param.device)
bucket.add_param(param)
with pytest.raises(AssertionError):
bucket.add_param(param)
def test_type_change():
size = (5, 6)
param = torch.rand(size, requires_grad=True)
param_ = param.clone()
bucket = ParamBucket(30, param.dtype, param.device)
bucket.add_param(param)
# Move the bucket to fp16 and back
bucket.to(dtype=torch.float16, device=param.device)
assert bucket.buffer.dtype == torch.float16
bucket.to(dtype=torch.float32, device=param.device, keep_param_alignment=True)
assert bucket.buffer.dtype == torch.float32
# Same with the reference tensor
param_.to(dtype=torch.float16)
param_.to(dtype=torch.float32)
torch.allclose(param, param_)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from fairscale.nn.misc import GradBucket
def test_grad_values_conserved():
with torch.no_grad(): # remove a warning
param = torch.rand((2, 3), requires_grad=True)
param.grad = torch.rand(2, 3)
bucket = GradBucket(10, param.dtype, param.device, -1)
param_ = param.clone()
bucket.add_grad(param_)
torch.allclose(param.grad, param_.grad)
def test_memory_leak():
with torch.no_grad(): # remove a warning
param = torch.rand((2, 3), requires_grad=True)
param.grad = torch.rand(2, 3)
bucket = GradBucket(300, param.dtype, param.device, -1)
bucket.add_grad(param)
bucket.shrink()
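        # After shrink(), the buffer storage should hold exactly the 6 elements of
        # the single 2x3 grad that was added, i.e. no over-allocated memory is kept.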
storage = bucket.buffer.storage()
# See https://github.com/pytorch/pytorch/pull/59671/
if hasattr(storage, "nbytes"):
assert storage.nbytes() == 6 * bucket.buffer.element_size()
else:
assert len(storage) == 6
def test_max_size():
with torch.no_grad(): # remove a warning
param = torch.rand((20, 30), requires_grad=True)
param.grad = torch.rand(20, 30)
bucket = GradBucket(5, param.dtype, param.device, -1)
with pytest.raises(AssertionError):
bucket.add_grad(param)
def test_collapse():
with torch.no_grad(): # remove a warning
size = (5, 6)
param = torch.rand(size, requires_grad=True)
param.grad = torch.rand(size)
bucket = GradBucket(300, param.dtype, param.device, -1)
bucket.add_grad(param)
bucket.shrink()
bucket.collapse()
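        # collapse() should free the buffer and drop the param's grad;
        # rebuild() should re-attach a zero-filled grad.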
assert bucket.buffer.numel() == 0
assert param.grad is None
bucket.rebuild()
assert param.grad is not None
torch.allclose(param.grad, torch.zeros(size))
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
""" Test FlattenParamsWrapper on CPU and GPU (FP32 & FP16 on GPU). """
from collections import OrderedDict
import unittest
import torch
from fairscale.fair_dev.testing.testing import objects_are_equal
from fairscale.nn import FlattenParamsWrapper
class TestFlattenParams(unittest.TestCase):
"""Base test class and used for CPU case."""
def _get_module_init_fns(self):
return [
self._get_basic_linear_module,
self._get_shared_params_transformer,
self._get_2_flatten_group_linear_module,
self._get_2_flatten_group_linear_module_with_names,
]
def _get_empty_module(self, seed=0):
torch.manual_seed(seed) # keep everything deterministic
class Test(torch.nn.Module):
def forward(self, x):
return x + 1
module = Test()
def get_input(device, dtype):
torch.manual_seed(1) # keep everything deterministic
return torch.rand(1).to(device=device, dtype=dtype)
module.get_input = get_input
module.param_list = None # No param_list to FPW.
module.flat_param_names = None # No flat_param_names to FPW.
return module
def _get_transformer(self, seed=0):
torch.manual_seed(seed) # keep everything deterministic
module = torch.nn.Transformer(
d_model=32,
num_encoder_layers=2,
num_decoder_layers=2,
dim_feedforward=128,
dropout=0.1,
)
module.register_buffer("dummy_buffer", torch.tensor(1.0))
def get_input(device, dtype):
torch.manual_seed(1) # keep everything deterministic
src = torch.rand(20, 8, 32).to(device=device, dtype=dtype) # T x B x C
tgt = torch.rand(10, 8, 32).to(device=device, dtype=dtype) # T x B x C
return (src, tgt)
module.get_input = get_input
module.param_list = None # No param_list to FPW.
module.flat_param_names = None # No flat_param_names to FPW.
return module
def _get_shared_params_transformer(self, seed=0):
module = self._get_transformer(seed=seed)
# share the FFNs
for enc_layer, dec_layer in zip(module.encoder.layers, module.decoder.layers):
dec_layer.linear1.weight = enc_layer.linear1.weight
dec_layer.linear2.weight = enc_layer.linear2.weight
return module
def _get_basic_linear_module(self, seed=0):
module = torch.nn.Sequential(
torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.Linear(8, 8)),
torch.nn.Sequential(torch.nn.Linear(8, 16)),
torch.nn.Linear(16, 4),
)
def get_input(device, dtype):
torch.manual_seed(1) # keep everything deterministic
return (torch.rand(8, 4).to(device=device, dtype=dtype),)
module.get_input = get_input
module.param_list = None # No param_list to FPW.
module.flat_param_names = None # No flat_param_names to FPW.
return module
def _get_2_flatten_group_linear_module(self, seed=0):
module = torch.nn.Sequential(
torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.Linear(8, 16)),
torch.nn.Linear(16, 4),
)
def get_input(device, dtype):
torch.manual_seed(1) # keep everything deterministic
return (torch.rand(8, 4).to(device=device, dtype=dtype),)
module.get_input = get_input
assert len(module) == 2, "next line assumes a len==2 sequential module"
module.param_list = [list(module[0].parameters()), list(module[1].parameters())]
module.flat_param_names = None # No flat_param_names to FPW.
return module
def _get_2_flatten_group_linear_module_with_names(self, seed=0):
module = torch.nn.Sequential(
torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.Linear(8, 16)),
torch.nn.Linear(16, 4),
)
def get_input(device, dtype):
torch.manual_seed(1) # keep everything deterministic
return (torch.rand(8, 4).to(device=device, dtype=dtype),)
module.get_input = get_input
assert len(module) == 2, "next line assumes a len==2 sequential module"
module.param_list = [list(module[0].parameters()), list(module[1].parameters())]
module.flat_param_names = ["layer1", "layer2"]
return module
def _compute_output(self, module):
device = next(module.parameters()).device
dtype = next(module.parameters()).dtype
input = module.get_input(device, dtype)
return module(*input)
def _get_pnorm_after_step(self, module):
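        # Run a single SGD step and return the norm of the stacked per-parameter
        # norms; used to compare flattened vs. unflattened training behavior.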
optim = torch.optim.SGD(module.parameters(), lr=0.01)
loss = self._compute_output(module).sum()
loss.backward()
optim.step()
return torch.norm(torch.stack([p.detach().norm() for p in module.parameters()]))
def _test_num_params(self, module):
"""Make sure numel of params are the same after flatten."""
ref_num_params = sum(p.numel() for p in module.parameters())
flat_module = FlattenParamsWrapper(module)
flat_num_params = sum(p.numel() for p in flat_module.parameters())
assert ref_num_params == flat_num_params
assert flat_num_params == flat_module.flat_param.numel()
def _test_output(self, module):
ref_output = self._compute_output(module)
flat_module = FlattenParamsWrapper(module)
flat_output = self._compute_output(flat_module)
assert objects_are_equal(ref_output, flat_output)
def test_partial_flattening(self):
"""Testing some parameters are flatten, with others left non-flatten."""
module = self._get_transformer()
num_params = sum(p.numel() for p in module.parameters())
params_to_flatten = list(module.encoder.layers[1].parameters()) + list(module.decoder.layers[0].parameters())
num_params_to_flatten = sum(p.numel() for p in params_to_flatten)
module = FlattenParamsWrapper(module, param_list=params_to_flatten)
assert module.flat_param.numel() == num_params_to_flatten
assert sum(p.numel() for p in module.parameters()) == num_params
# flattened parameters are removed
assert len(list(module.encoder.layers[1].parameters())) == 0
assert len(list(module.decoder.layers[0].parameters())) == 0
# non-flattened parameters remain
assert len(list(module.encoder.layers[0].parameters())) > 0
assert len(list(module.decoder.layers[1].parameters())) > 0
# test that changing the module dtype works properly
orig_dtype = params_to_flatten[0].dtype
new_dtype = torch.float32 if orig_dtype == torch.float16 else torch.float16
assert module.flat_param.dtype == orig_dtype
assert all(p.dtype == orig_dtype for p in module.encoder.layers[0].parameters())
module = module.to(dtype=new_dtype)
assert module.flat_param.dtype == new_dtype
assert all(p.dtype == new_dtype for p in module.encoder.layers[0].parameters())
def test_two_flattening_group(self):
"""Testing 2 flatten groups."""
module = self._get_transformer()
num_params = sum(p.numel() for p in module.parameters())
params_to_flatten1 = list(module.encoder.layers[1].parameters()) + list(module.decoder.layers[0].parameters())
params_to_flatten2 = list(module.encoder.layers[0].parameters()) + list(module.decoder.layers[1].parameters())
num_params_to_flatten1 = sum(p.numel() for p in params_to_flatten1)
num_params_to_flatten2 = sum(p.numel() for p in params_to_flatten2)
module = FlattenParamsWrapper(module, param_list=[params_to_flatten1, params_to_flatten2])
assert module.flat_params[0].numel() == num_params_to_flatten1
assert module.flat_params[1].numel() == num_params_to_flatten2
assert sum(p.numel() for p in module.parameters()) == num_params
def test_flatten_nothing(self):
"""Testing nothing is flatten case."""
module = self._get_transformer()
ref_out = self._compute_output(module)
ref_state_dict = module.state_dict()
for k, v in ref_state_dict.items():
ref_state_dict[k] = v.clone()
module = FlattenParamsWrapper(module, param_list=[[]])
fpw_state_dict = module.state_dict()
assert ref_state_dict.keys() == fpw_state_dict.keys()
for k, v in ref_state_dict.items():
torch.testing.assert_allclose(v, fpw_state_dict[k])
fpw_out = self._compute_output(module)
torch.testing.assert_allclose(ref_out, fpw_out)
def test_empty_module(self):
"""Test module without any param."""
module = self._get_empty_module()
in_data = torch.rand(1)
ref_out = module(in_data)
module = FlattenParamsWrapper(module)
assert len(list(module.parameters())) == 0
assert len(module.state_dict()) == 0
fpw_out = module(in_data)
torch.testing.assert_allclose(ref_out, fpw_out)
def test_num_params(self):
module = self._get_transformer()
self._test_num_params(module)
def test_shared_params_num_params(self):
module = self._get_shared_params_transformer()
self._test_num_params(module)
def test_output(self):
module = self._get_transformer()
self._test_output(module)
def test_shared_params_output(self):
module = self._get_shared_params_transformer()
self._test_output(module)
def test_shared_params_pnorm_after_step(self):
# incorrect parameter sharing is likely to cause problems after an
# optimization step
module = self._get_shared_params_transformer()
ref_pnorm_after_step = self._get_pnorm_after_step(module)
module = self._get_shared_params_transformer() # recreate
flat_module = FlattenParamsWrapper(module)
flat_pnorm_after_step = self._get_pnorm_after_step(flat_module)
torch.testing.assert_allclose(ref_pnorm_after_step, flat_pnorm_after_step)
def test_state_dict_equality(self):
"""Test that unflattened state dict matches original (unwrapped) one."""
modules_to_test = [init_fn() for init_fn in self._get_module_init_fns()]
for module in modules_to_test:
ref_state_dict = module.state_dict()
flat_module = FlattenParamsWrapper(module)
flat_state_dict = flat_module.state_dict()
assert (
ref_state_dict.keys() == flat_state_dict.keys()
), f"{ref_state_dict.keys()} != {flat_state_dict.keys()}"
assert objects_are_equal(ref_state_dict, flat_state_dict), f"{ref_state_dict} != {flat_state_dict}"
def test_load_state_dict(self):
"""Test that original (unwrapped) state_dict can be loaded in wrapped module."""
for module_init_fn in self._get_module_init_fns():
module = module_init_fn()
ref_state_dict = module.state_dict()
ref_output = self._compute_output(module)
module = module_init_fn(seed=1234)
flat_module = FlattenParamsWrapper(
module, param_list=module.param_list, flat_param_names=module.flat_param_names
)
# This should work without the unflatten_params context manager
flat_module.load_state_dict(ref_state_dict)
flat_output = self._compute_output(flat_module)
assert objects_are_equal(ref_output, flat_output)
# And it should work with the context manager too
with flat_module.unflatten_params():
flat_module.load_state_dict(ref_state_dict)
flat_output = self._compute_output(flat_module)
assert objects_are_equal(ref_output, flat_output)
def test_flat_state_dict(self):
"""Test that flat state dict can be reloaded and produces the same results."""
for module_init_fn in self._get_module_init_fns():
orig_module = module_init_fn()
flat_module = FlattenParamsWrapper(
orig_module, param_list=orig_module.param_list, flat_param_names=orig_module.flat_param_names
)
ref_output = self._compute_output(flat_module)
flat_state_dict = flat_module.flat_state_dict()
orig_module = module_init_fn(seed=1234)
new_module = FlattenParamsWrapper(
orig_module, param_list=orig_module.param_list, flat_param_names=orig_module.flat_param_names
)
new_module.load_state_dict(flat_state_dict)
new_output = self._compute_output(new_module)
assert objects_are_equal(ref_output, new_output)
def test_unflatten_params(self):
"""Testing using external flatten params tensors as module's params' backing data."""
for module_init_fn in self._get_module_init_fns():
orig_module = module_init_fn()
module = FlattenParamsWrapper(
orig_module, param_list=orig_module.param_list, flat_param_names=orig_module.flat_param_names
)
            # Keep the set of buffer keys to be used for verification below.
buffers = {k.replace("_fpw_module.", "") for k, _ in module.named_buffers()}
def clone_state_dict():
"""Return a copy of the module's current state via state_dict() API."""
return OrderedDict((k, v.clone()) for k, v in module.state_dict().items())
ref_flat_params = [fp.clone() for fp in module.flat_params]
# Get the current state as a reference.
with module.unflatten_params():
ref_state_dict = clone_state_dict()
for ref_fp in ref_flat_params:
assert not torch.all(ref_fp == 0.0) # Should not all be 0s.
# get new_state_dict with supplied new_flat_params.
new_flat_params = [torch.full_like(fp, fill_value=42.0) for fp in module.flat_params]
with module.unflatten_params(flat_params=new_flat_params):
new_state_dict = clone_state_dict()
# confirm that unflatten_params reflects values from new_flat_param
assert new_state_dict.keys() == ref_state_dict.keys()
for k, v in new_state_dict.items():
if k in buffers: # buffers are not changed
torch.testing.assert_allclose(v, ref_state_dict[k])
else: # params reflect new_flat_param value
torch.testing.assert_allclose(v, torch.ones_like(v) * 42.0)
# after context manager exits, we go back to previous (reference) state
assert len(module.flat_params) == len(ref_flat_params)
for i in range(len(module.flat_params)):
torch.testing.assert_allclose(module.flat_params[i], ref_flat_params[i])
# get another copy of state from the module (without external backing data)
with module.unflatten_params():
ref_state_dict2 = clone_state_dict()
# Verify it is still the same.
assert objects_are_equal(ref_state_dict, ref_state_dict2)
# if we load the new_state_dict, then the flat param should match new_flat_param
module.load_state_dict(new_state_dict)
assert len(module.flat_params) == len(new_flat_params)
for i in range(len(module.flat_params)):
torch.testing.assert_allclose(module.flat_params[i], new_flat_params[i])
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestFlattenParamsCUDA(TestFlattenParams):
def _get_transformer(self, seed=0):
module = super()._get_transformer(seed=seed)
return module.cuda()
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestFlattenParamsCUDAHalf(TestFlattenParams):
def _get_transformer(self, seed=0):
module = super()._get_transformer(seed=seed)
return module.cuda().half()
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with uneven parameter shards. """
import tempfile
import pytest
import torch
from torch import Tensor
import torch.multiprocessing as mp
from torch.nn import Linear, Sequential
from torch.optim import SGD
from fairscale.fair_dev.testing.testing import dist_init, skip_if_single_gpu, teardown
from fairscale.internal import torch_version
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from fairscale.nn.data_parallel.fully_sharded_data_parallel import TrainingState
def _test_func(rank, world_size, model, fsdp_config, tempfile_name, unused, test_case):
result = dist_init(rank, world_size, tempfile_name, unused)
assert result, "Dist init failed"
my_lr = 0.1
device = torch.device("cuda")
if fsdp_config.get("mixed_precision", False):
dtype = torch.float16
fsdp_config["fp32_reduce_scatter"] = True
else:
dtype = torch.float32
if test_case["assert_ref_out"]:
with torch.no_grad():
# Compute one iteration local output.
fp32_weight = model.weight.T.clone().to(device)
weight = fp32_weight.to(dtype)
v = torch.Tensor(test_case["inputs"][0][rank]).to(device, dtype)
ref_forward_output_my_rank = torch.matmul(v, weight)
# Compute one iteration global weight update.
v = torch.Tensor(test_case["inputs"][0][:world_size]).to(device, dtype)
grad = v.float().sum(0).repeat(weight.shape[0], 1).div(world_size)
ref_weight_out = fp32_weight - grad.T * my_lr
assert ref_weight_out.dtype == torch.float32
model.to(device) # not dtype, since FSDP will manage mixed precision internally
assert isinstance(fsdp_config, dict), str(fsdp_config)
model = FSDP(model, **fsdp_config)
optim = SGD(model.parameters(), lr=my_lr)
inputs = test_case["inputs"]
assert len(inputs) == 1 or not test_case["assert_ref_out"]
assert len(inputs[0]) >= world_size
for in_data in inputs:
in_data = Tensor(in_data[rank]).to(device, dtype)
out = model(in_data)
out.float().sum().backward()
optim.step()
optim.zero_grad()
if test_case["assert_ref_out"]:
with model.summon_full_params():
weight_out = model.module.weight.data.T.clone()
# make sure we can do more fwd/bwd
loss = model(in_data)
loss.sum().backward()
if test_case["assert_ref_out"]:
torch.testing.assert_allclose(ref_forward_output_my_rank, out)
torch.testing.assert_allclose(ref_weight_out, weight_out)
model.assert_state(TrainingState.IDLE)
teardown()
@skip_if_single_gpu
@pytest.mark.parametrize("test_case", [{"inputs": [torch.rand(8, 3)], "assert_ref_out": True}])
@pytest.mark.parametrize(
"fsdp_config",
[{}, {"flatten_parameters": False}, {"mixed_precision": True}],
)
@pytest.mark.parametrize("world_size", list(range(2, 9)))
def test_one_iteration(world_size, test_case, fsdp_config):
"""Test FSDP with uneven divide of parameter shards."""
if torch_version() < (1, 6, 0):
pytest.skip("older pytorch doesn't support reduce_scatter")
if world_size > torch.cuda.device_count():
pytest.skip("Not enough GPUs.")
temp_file_name = tempfile.mkstemp()[1]
unused = tempfile.mkstemp()[1]
# TODO (Min): we may want to extend this to a simple 2 layer model so that it covers
# more cases in FSDP. Also, assert_ref_out can be extended to multiple
# iterations. This could be a good bootcamp task. I should file a github
# issue once we merge.
model = Linear(3, 3, bias=False)
mp.spawn(
_test_func,
args=(world_size, model, fsdp_config, temp_file_name, unused, test_case),
nprocs=world_size,
join=True,
)
@skip_if_single_gpu
@pytest.mark.parametrize("test_case", [{"inputs": [torch.rand(8, 3), torch.rand(8, 3)], "assert_ref_out": False}])
@pytest.mark.parametrize("fsdp_config", [{}, {"flatten_parameters": False}])
@pytest.mark.parametrize("world_size", list(range(2, 9)))
def test_smaller_than_world_size(world_size, test_case, fsdp_config):
"""Test FSDP with uneven divide of parameter shards."""
if torch_version() < (1, 6, 0):
pytest.skip("older pytorch doesn't support reduce_scatter in gloo backend")
if world_size > torch.cuda.device_count():
pytest.skip("Not enough GPUs.")
temp_file_name = tempfile.mkstemp()[1]
unused = tempfile.mkstemp()[1]
model = Sequential(
Linear(3, 3, bias=False),
Linear(3, 4, bias=False),
Linear(4, 5, bias=False),
Linear(5, 4, bias=False),
Linear(4, 3, bias=False),
Linear(3, 1, bias=False),
Linear(1, 1, bias=False), # param here is smaller than world_size if unflattened.
)
mp.spawn(
_test_func,
args=(world_size, model, fsdp_config, temp_file_name, unused, test_case),
nprocs=world_size,
join=True,
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with different input types. """
import tempfile
import pytest
import torch
from torch.nn import Linear, Module
from torch.optim import SGD
from fairscale.fair_dev.testing.testing import dist_init, rmf, skip_if_no_cuda, teardown
from fairscale.internal import torch_version
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from fairscale.nn.data_parallel import TrainingState
# A fixture to get tempfiles and ensure they are cleaned up.
@pytest.fixture()
def temp_files():
num = 2 # dist_init needs 2 files
files = [tempfile.mkstemp()[1] for _ in range(num)]
yield tuple(files)
# temp files could have been removed, so we use rmf.
for name in files:
rmf(name)
# We only test on GPU since mixed precision only works on GPU.
@skip_if_no_cuda
@pytest.mark.parametrize(
"fsdp_config",
[{}, {"mixed_precision": True}],
)
@pytest.mark.parametrize("input_cls", [dict, list])
def test_input_type(temp_files, fsdp_config, input_cls):
"""Test FSDP with input being a list or a dict, only single GPU."""
if torch_version() < (1, 7, 0):
# This test runs multiple test cases in a single process. On 1.6.0 it
# throw an error like this:
# RuntimeError: Container is already initialized! Cannot initialize it twice!
pytest.skip("older pytorch doesn't work well with single process dist_init multiple times")
result = dist_init(rank=0, world_size=1, filename=temp_files[0], filename_rpc=temp_files[1])
assert result, "Dist init failed"
assert isinstance(fsdp_config, dict), str(fsdp_config)
class Model(Module):
def __init__(self):
super().__init__()
self.layer = Linear(4, 4)
def forward(self, input):
if isinstance(input, list):
input = input[0]
else:
assert isinstance(input, dict), input
input = input["in"]
return self.layer(input)
model = FSDP(Model(), **fsdp_config).cuda()
optim = SGD(model.parameters(), lr=0.1)
for _ in range(5):
in_data = torch.rand(64, 4).cuda()
in_data.requires_grad = True
if input_cls is list:
in_data = [in_data]
else:
assert input_cls is dict
in_data = {"in": in_data}
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
model.assert_state(TrainingState.IDLE)
teardown()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with different multiple forward of the same module. """
import tempfile
import pytest
import torch
import torch.multiprocessing as mp
from torch.nn import Linear, Module
from torch.optim import SGD
from fairscale.fair_dev.testing.testing import dist_init, skip_if_single_gpu, teardown
from fairscale.internal import torch_version
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from fairscale.nn.data_parallel import TrainingState
def _test_func(rank, world_size, fsdp_config, tempfile_name, unused):
result = dist_init(rank, world_size, tempfile_name, unused)
assert result, "Dist init failed"
assert isinstance(fsdp_config, dict), str(fsdp_config)
class Model(Module):
def __init__(self):
super().__init__()
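            # The inner module is itself FSDP-wrapped and is called twice per
            # forward pass, so a single iteration exercises repeated use of the
            # same wrapped submodule.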
self.inner = FSDP(Linear(4, 4), **fsdp_config)
self.outer = Linear(4, 5)
def forward(self, x):
# Forward twice.
i = self.inner(x)
j = self.inner(x)
return self.outer(i + j)
model = FSDP(Model(), **fsdp_config).cuda()
optim = SGD(model.parameters(), lr=0.1)
for _ in range(3):
in_data = torch.rand(64, 4).cuda()
in_data.requires_grad = True
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
model.assert_state(TrainingState.IDLE)
teardown()
# We use strings for precision and flatten instead of bool to
# make the pytest output more readable.
@skip_if_single_gpu
@pytest.mark.parametrize("precision", ["full", "mixed"])
@pytest.mark.parametrize("flatten", ["flatten", "no_flatten"])
def test1(precision, flatten):
if torch_version() < (1, 6, 0):
pytest.skip("older pytorch doesn't support reduce_scatter")
temp_file_name = tempfile.mkstemp()[1]
unused = tempfile.mkstemp()[1]
fsdp_config = {}
fsdp_config["mixed_precision"] = precision == "mixed"
fsdp_config["flatten_parameters"] = flatten == "flatten"
# Some bugs only show up when we are in world_size > 1 due to sharding changing
# the tensor dimensions.
world_size = 2
mp.spawn(
_test_func,
args=(world_size, fsdp_config, temp_file_name, unused),
nprocs=world_size,
join=True,
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
""" Test FSDP with an submodule that is FSDP(checkpoint_wrapper()) or checkpoint_wrapper(FSDP()). """
import contextlib
import pytest
import torch
from torch import nn
import torch.distributed
import torch.multiprocessing as mp
from fairscale.fair_dev.testing.testing import dist_init, skip_if_single_gpu, teardown, temp_files_ctx
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
@skip_if_single_gpu
@pytest.mark.parametrize("flatten", ["flat", "nonflat"])
@pytest.mark.parametrize("mixed_precision", ["fp16", "fp32"])
@pytest.mark.parametrize("amp_context", ["autocast", "noautocast"])
@pytest.mark.parametrize("half_input", ["halfin", "fullin"])
@pytest.mark.parametrize("fsdp_wrap_ckpt", ["F->C", "C->F"])
def test_train_and_eval_with_checkpointing(flatten, mixed_precision, amp_context, half_input, fsdp_wrap_ckpt):
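    # Convert the human-readable parametrize strings into booleans.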
flatten = flatten == "flat"
mixed_precision = mixed_precision == "fp16"
amp_context = amp_context == "autocast"
half_input = half_input == "halfin"
fsdp_wrap_ckpt = fsdp_wrap_ckpt == "F->C"
    # Expecting a known bug in 4 out of 32 cases.
if fsdp_wrap_ckpt and mixed_precision and not flatten:
pytest.skip("known bug")
world_size = 2
with temp_files_ctx(2) as (temp_file_name, unused):
mp.spawn(
_test_func,
args=(
world_size,
temp_file_name,
unused,
flatten,
mixed_precision,
amp_context,
half_input,
fsdp_wrap_ckpt,
),
nprocs=world_size,
join=True,
)
def _test_func(
rank, world_size, tempfile_name, unused, flatten, mixed_precision, amp_context, half_input, fsdp_wrap_ckpt
):
result = dist_init(rank, world_size, tempfile_name, unused)
assert result, "Dist init failed"
# Keep initialization deterministic.
torch.manual_seed(0)
model = FSDP(
SimpleModuleWithCheckpointing(flatten, mixed_precision, fsdp_wrap_ckpt).cuda(),
flatten_parameters=flatten,
mixed_precision=mixed_precision,
)
optim = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Collect parameter sizes to ensure these stay consistent through the steps below.
expected_param_shapes = {name: tuple(param.shape) for name, param in model.named_parameters()}
# For clarity, this is what `expected_param_shapes` should look like depending on world size:
if not flatten:
assert expected_param_shapes == {
"ffn.0.weight": (5,),
"ffn.0.bias": (2,),
"ffn.1.weight": (5,),
"ffn.1.bias": (2,),
"ffn.2.weight": (5,),
"ffn.2.bias": (2,),
}
else:
assert expected_param_shapes == {
"_fsdp_wrapped_module.flat_param_0": (12,),
"_fsdp_wrapped_module._fpw_module.ffn.1._fsdp_wrapped_module.flat_param_0": (6,),
}, expected_param_shapes
torch.manual_seed(1 + rank)
# Train for a step.
_train_step(model, optim, expected_param_shapes, amp_context, mixed_precision, half_input)
# Now do an eval step.
_eval_step(model, optim, expected_param_shapes, amp_context, mixed_precision, half_input)
# And finally do another train step.
_train_step(model, optim, expected_param_shapes, amp_context, mixed_precision, half_input)
teardown()
def _train_step(model, optim, expected_param_shapes, amp_context, mixed_precision, half_input):
# Prepare for training step.
optim.zero_grad()
model.train()
# Create input and run forward pass.
input = torch.randn(2, 3).cuda()
# Make it FP16 when it is OK to do so.
if (amp_context and half_input) or (mixed_precision and half_input):
input = input.half()
context = contextlib.suppress()
if amp_context:
context = torch.cuda.amp.autocast(True)
with context:
loss = model(input).sum()
_check_params(model, expected_param_shapes)
# Run backward pass.
loss.backward()
_check_params(model, expected_param_shapes)
# Finally, take a step.
optim.step()
_check_params(model, expected_param_shapes)
def _eval_step(model, optim, expected_param_shapes, amp_context, mixed_precision, half_input):
optim.zero_grad()
model.eval()
with torch.no_grad():
input = torch.randn(2, 3).cuda()
if (amp_context and half_input) or (mixed_precision and half_input):
input = input.half()
context = contextlib.suppress()
if amp_context:
context = torch.cuda.amp.autocast(True)
with context:
model(input).sum()
_check_params(model, expected_param_shapes)
def _check_params(model, expected_param_shapes):
current_param_shapes = {name: tuple(param.shape) for name, param in model.named_parameters()}
assert set(current_param_shapes.keys()) == set(expected_param_shapes.keys())
for key, current_shape in current_param_shapes.items():
expected_shape = expected_param_shapes[key]
assert (
current_shape == expected_shape
), f"Parameter {key} should have shape {expected_shape}, but found shape {current_shape}"
class SimpleModuleWithCheckpointing(nn.Module):
def __init__(self, flatten, mixed_precision, fsdp_wrap_ckpt):
super().__init__()
if fsdp_wrap_ckpt:
middle_module = FSDP(
checkpoint_wrapper(nn.Linear(3, 3)), flatten_parameters=flatten, mixed_precision=mixed_precision
)
else:
middle_module = checkpoint_wrapper(
FSDP(nn.Linear(3, 3), flatten_parameters=flatten, mixed_precision=mixed_precision)
)
self.ffn = nn.Sequential(nn.Linear(3, 3), middle_module, nn.Linear(3, 3))
def forward(self, x):
return self.ffn(x)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
import gc
import unittest
from parameterized import parameterized
import pytest
import torch
from fairscale.internal.version import torch_version
from .test_fsdp import CONFIG_OPTIONS, DistributedTest, rename_test, spawn_and_init
def get_cuda_mem():
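    # Flush pending CUDA work and collect Python garbage so that
    # memory_allocated() gives a stable reading.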
torch.cuda.synchronize()
gc.collect()
return torch.cuda.memory_allocated()
@pytest.mark.skipif(torch_version() < (1, 8, 0), reason="pytorch version >= 1.8.0 required")
class TestMemory(DistributedTest):
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_memory(self, config):
spawn_and_init(functools.partial(self._test_memory, config))
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_memory_volatile(self, config):
spawn_and_init(functools.partial(self._test_memory, config, volatile=True))
@classmethod
def _test_memory(self, config, rank, group, volatile=False):
model = self.get_wrapped_model(group, cuda_first=False, config=config)
self._train_for_several_steps(model, 1, autocast=model.mixed_precision)
mems = [get_cuda_mem()]
with model.summon_full_params(volatile=volatile):
mems.append(get_cuda_mem())
assert mems[1] >= mems[0]
state_dict = model.state_dict()
mems.append(get_cuda_mem())
assert mems[2] >= mems[1]
mems.append(get_cuda_mem())
assert mems[3] <= mems[2]
del state_dict
mems.append(get_cuda_mem())
        # Any difference here indicates a memory leak. If mems[4] >
        # mems[0], we are not cleaning up params properly in
        # summon_full_params. If mems[4] < mems[0], there is a
        # memory leak in _train_for_several_steps.
assert mems[4] == mems[0], f"memory leak detected, {mems[4]} != {mems[0]}"
class TestPersistence(DistributedTest):
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_non_volatile(self, config):
spawn_and_init(functools.partial(self._test_persistence, config))
@classmethod
def _test_persistence(self, config, rank, group, volatile=False):
model = self.get_wrapped_model(group, cuda_first=False, config=config)
with model.summon_full_params(volatile=False):
model.module.embed_tokens.weight.data.fill_(42)
with model.summon_full_params():
# non-volatile changes are persisted
assert torch.all(model.module.embed_tokens.weight.data == 42.0)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from fairscale.fair_dev.testing.testing import skip_if_single_gpu, temp_files_ctx
from fairscale.nn import enable_wrap, wrap
from fairscale.nn.data_parallel import FullyShardedDataParallel
class FFN(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(10, 10)
self.fc2 = nn.Linear(10, 10)
self.relu = nn.ReLU()
def forward(self, x):
return self.fc2(self.relu(self.fc1(x)))
def main(rank, sync_file):
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend="nccl",
init_method=f"file://{sync_file}",
world_size=2,
rank=rank,
)
ffn = FFN().cuda().half()
with enable_wrap(wrapper_cls=FullyShardedDataParallel):
model = wrap(
ffn,
process_group=torch.distributed.new_group(),
flatten_parameters=True,
compute_dtype=torch.float16,
)
model = model.train()
# We test this behavior because it might be used by pipelining.
# However, we don't check if the speed (compute/comm overlapping)
# and memory (necessary all-gather & free) are optimal.
losses = []
for _ in range(3):
x = torch.rand((10, 10)).cuda().half()
out = model(x)
loss = out.sum()
losses.append(loss)
# Only the last bwd can be outside of no_sync context.
with model.no_sync():
losses[0].backward()
losses[1].backward()
losses[2].backward()
@skip_if_single_gpu
def test_fwd_fwd_bwd_bwd():
with temp_files_ctx(num=1) as temp_files:
torch.multiprocessing.spawn(
fn=main,
nprocs=2,
args=(temp_files[0],),
join=True,
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
import itertools
from math import inf
import pickle
import sys
from typing import Dict
import unittest
from unittest import mock
from parameterized import parameterized
import pytest
import torch
from torch import nn
import torch.distributed
from fairscale.fair_dev.testing.testing import (
DeviceAndTypeCheckModule,
DummyProcessGroup,
dist_init,
get_cycles_per_ms,
objects_are_equal,
skip_a_test_if_in_CI,
spawn_for_all_world_sizes,
)
from fairscale.internal import torch_version
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
from fairscale.nn.data_parallel import FullyShardedDataParallel, TrainingState
if torch_version() >= (1, 8, 0):
from fairscale.optim.grad_scaler import ShardedGradScaler
# How to use remote-pdb: https://gist.github.com/sshleifer/9d43351957179c13606e015b072927d4
# All helper functions called by spawn must be either @classmethod or @staticmethod.
class DistributedTest(unittest.TestCase):
def setUp(self):
if torch_version() < (1, 6, 0):
raise unittest.SkipTest("Need pytorch version >= 1.6 due to lack of reduce_scatter")
if not torch.cuda.is_available():
raise unittest.SkipTest("CUDA not available, skipping test")
if sys.platform == "win32":
raise unittest.SkipTest("NCCL doesn't support Windows, skipping test")
if torch.cuda.device_count() < 2:
raise unittest.SkipTest("distributed tests require 2+ GPUs, skipping")
@staticmethod
def _train_for_several_steps(model, num_steps, autocast, lr=0.01, norm_type=None):
model_device = next(model.parameters()).device
        # Use SGD with momentum instead of Adam, since Adam is scale invariant
        # and that makes it a poor choice for these tests.
optim = torch.optim.SGD(params=model.parameters(), lr=lr, momentum=0.9)
scaler = ShardedGradScaler()
for _ in range(num_steps):
optim.zero_grad()
with torch.cuda.amp.autocast(enabled=autocast):
# Inputs always cuda regardless of move_grads_cpu, or model.device
input = model.module.get_input(torch.device("cuda"))
output = model(*input)
loss = model.module.get_loss(input, output).to(model_device)
loss = scaler.scale(loss)
assert loss.dtype == torch.float32
model.module.run_backward(loss)
if norm_type is not None:
clip_norm = 0.3
if isinstance(model, FullyShardedDataParallel):
model.clip_grad_norm_(clip_norm, norm_type)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), clip_norm, norm_type)
scaler.step(optim)
scaler.update()
if hasattr(model, "assert_idle"):
model.assert_idle()
if isinstance(model, FullyShardedDataParallel):
model.assert_state(TrainingState.IDLE)
return loss.detach()
@staticmethod
def get_wrapped_model(group, cuda_first=False, config={}, **model_kwargs) -> FullyShardedDataParallel:
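        # `cuda_first` controls whether the module is moved to the GPU before or
        # after being wrapped with FullyShardedDataParallel.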
if cuda_first:
model = FullyShardedDataParallel(TransformerWithSharedParams(group, **model_kwargs).cuda(), group, **config)
else:
model = FullyShardedDataParallel(TransformerWithSharedParams(group, **model_kwargs), group, **config).cuda()
return model
@classmethod
def _test_identical_outputs(
cls,
model_init_fn,
config,
rank,
group,
num_steps=2,
use_cuda=True,
lr=0.01,
ref_ddp_fn=None,
norm_type=2,
):
if config.get("mixed_precision", False):
autocast = True
# Force the compute dtype to be torch.float32 so that we get
# identical results as PyTorch DDP when using autocast. Note that
# this will cause the all-gather to happen in FP32, which is slower
# than necessary in most cases.
config["compute_dtype"] = torch.float32
else:
autocast = False
# Establish reference behavior with PyTorch DDP (+ optionally autocast).
model = model_init_fn(group=group, wrapper_config=None).cuda()
if ref_ddp_fn is None:
model = nn.parallel.DistributedDataParallel(
model, device_ids=[rank], output_device=rank, process_group=group
)
else:
model = ref_ddp_fn(model, group)
ref_loss = cls._train_for_several_steps(model, num_steps, autocast, lr=lr, norm_type=norm_type)
ref_state_dict = model.module.state_dict()
if config.get("cpu_offload", False):
for k in ref_state_dict.keys():
ref_state_dict[k] = ref_state_dict[k].cpu()
# Confirm we get the same behavior using FullyShardedDataParallel.
model = FullyShardedDataParallel(model_init_fn(group=group, wrapper_config=config), group, **config)
if use_cuda:
model = model.cuda()
else:
assert next(model.parameters()).device == torch.device("cpu")
shard_loss = cls._train_for_several_steps(model, num_steps, autocast, lr=lr, norm_type=norm_type)
if config.get("cpu_offload", False):
# In pytorch 1.10, assert_allclose below checks for tensor device match. Therefore,
# we need to move the CPU tensor to CUDA in case we are doing cpu_offload.
shard_loss = shard_loss.cuda()
shard_state_dict = model.state_dict()
if config.get("state_dict_on_rank_0_only", False):
if torch.distributed.get_rank() != 0:
assert shard_state_dict == {}
# rank 0 shard_state_dict test covered in the following test.
# return is needed here, because with state_dict_on_rank_0_only=True, the following assert will fail on rank!=0
return
try:
torch.testing.assert_allclose(ref_loss, shard_loss)
assert objects_are_equal(ref_state_dict, shard_state_dict, raise_exception=True)
except (AssertionError, RuntimeError) as e:
raise Exception(f"FullyShardedDataParallel didn't match PyTorch DDP using config: {config}\n\n {e}")
if config.get("flatten_parameters", True):
metadata = model.local_metadata_dict()
assert isinstance(metadata, dict)
class TestMixedPrecision(DistributedTest):
def test_all_fp32(self):
self._spawn_test_case(
{"mixed_precision": False},
False, # autocast enabled
torch.float32, # expected_input_dtype
torch.float32, # expected_param_dtype
torch.float32, # expected_loss_dtype
torch.float32, # expected_reduce_dtype
)
def test_mixed_precision(self):
self._spawn_test_case(
{"mixed_precision": True},
False, # autocast enabled
torch.float16, # expected_input_dtype
torch.float16, # expected_param_dtype
torch.float16, # expected_loss_dtype
torch.float16, # expected_reduce_dtype
)
def test_mixed_precision_autocast(self):
"""If autocast enabled, loss should be fp32."""
self._spawn_test_case(
{"mixed_precision": True},
True, # autocast enabled
torch.float16, # expected_input_dtype
torch.float16, # expected_param_dtype
torch.float32, # expected_loss_dtype
torch.float16, # expected_reduce_dtype
)
def test_mixed_precision_autocast_buffer_type_fp32(self):
"""If autocast enabled, loss should be fp32."""
self._spawn_test_case(
{"mixed_precision": True, "buffer_dtype": torch.float32},
True, # autocast enabled
torch.float16, # expected_input_dtype
torch.float16, # expected_param_dtype
torch.float32, # expected_loss_dtype
torch.float16, # expected_reduce_dtype
expected_buffer_type=torch.float32,
)
def test_mixed_precision_autocast_fp32_compute(self):
self._spawn_test_case(
{"mixed_precision": True, "compute_dtype": torch.float32},
True, # autocast enabled
torch.float16, # expected_input_dtype
torch.float32, # expected_param_dtype
torch.float32, # expected_loss_dtype
torch.float32, # expected_reduce_dtype
expected_buffer_type=torch.float32,
)
def test_fp32_reduce_scatter(self):
self._spawn_test_case(
{"mixed_precision": True, "fp32_reduce_scatter": True},
False, # autocast enabled
torch.float16, # expected_input_dtype
torch.float16, # expected_param_dtype
torch.float16, # expected_loss_dtype
torch.float32, # expected_reduce_dtype
expected_buffer_type=torch.float16,
)
def test_fp32_reduce_scatter_autocast(self):
self._spawn_test_case(
{"mixed_precision": True, "fp32_reduce_scatter": True},
True, # autocast enabled
torch.float16, # expected_input_dtype
torch.float16, # expected_param_dtype
torch.float32, # expected_loss_dtype
torch.float32, # expected_reduce_dtype
)
def _spawn_test_case(
self,
cfg,
autocast_enabled,
in_dtype,
p_dtype,
loss_dtype,
reduce_dtype,
expected_buffer_type=None,
world_size=2,
):
"""Call test_dtypes inside of torch.multiprocessing.spawn"""
fn = functools.partial(
self._test_dtypes,
cfg,
autocast_enabled,
in_dtype,
p_dtype,
loss_dtype,
reduce_dtype,
expected_buffer_type=expected_buffer_type,
)
spawn_and_init(fn, world_sizes=[world_size])
@staticmethod
def _test_dtypes(
cfg: Dict,
autocast,
in_dtype,
p_dtype,
loss_dtype,
reduce_dtype,
rank,
group,
expected_buffer_type=None,
):
# Patch torch.distributed.reduce_scatter to check the dtype of the reduction
orig_reduce_scatter = torch.distributed.reduce_scatter
model: nn.Module = DeviceAndTypeCheckModule(
expected_input_dtype=in_dtype,
expected_param_dtype=p_dtype,
expected_loss_dtype=loss_dtype,
expected_buffer_dtype=expected_buffer_type,
)
def _reduce_scatter(output, input_list, **kwargs):
for tensor in input_list:
model._check("reduce_scatter.dtype", tensor.dtype, expected=reduce_dtype)
return orig_reduce_scatter(output, input_list, **kwargs)
with mock.patch("torch.distributed.reduce_scatter", new=_reduce_scatter):
model = FullyShardedDataParallel(model, group, **cfg).cuda()
device = next(model.parameters()).device
x = torch.rand(2, 5).to(device)
with torch.cuda.amp.autocast(enabled=autocast):
loss = model(x)
loss.backward()
keys = ["reshard_after_forward", "mixed_precision", "flatten_parameters"]
CONFIG_OPTIONS = [[dict(zip(keys, config))] for config in itertools.product([True, False], repeat=len(keys))]
def rename_test(testcase_func, param_num, param):
return "%s_%s" % (
testcase_func.__name__,
parameterized.to_safe_name(str(param.args)),
)
class TestComparisonToPyTorchDDP(DistributedTest):
"""
Compare losses and parameter values after several updates when using
PyTorch DDP vs. FullyShardedDataParallel.
"""
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_nested_wrapped_model(self, config):
test_fn = functools.partial(self._test_identical_outputs, NestedWrappedModule, config)
spawn_and_init(test_fn)
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_nested_all_wrapped_model(self, config):
model_fn = functools.partial(NestedWrappedModule, wrap_everything=True)
test_fn = functools.partial(self._test_identical_outputs, model_fn, config)
spawn_and_init(test_fn)
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_nested_all_wrapped_model_checkpoint(self, config):
model_fn = functools.partial(NestedWrappedModule, wrap_everything=True, checkpoint=True)
test_fn = functools.partial(self._test_identical_outputs, model_fn, config)
spawn_and_init(test_fn)
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_transformer_parameterized(self, config):
# Test every combination of these options:
spawn_and_init(functools.partial(self._test_identical_outputs, TransformerWithSharedParams, config))
# testing moving params to cpu while using full and mixed precision
@parameterized.expand([(True,), (False,)], name_func=rename_test)
def test_cpu_offload_and_cpu_grads(self, mixed_precision):
config = {"mixed_precision": mixed_precision, "cpu_offload": True}
test_fn = functools.partial(
self._test_identical_outputs, TransformerWithSharedParams, config, use_cuda=False, lr=0.01
)
spawn_and_init(test_fn)
# testing full and mixed precision on the gpu
@parameterized.expand([(True,), (False,)], name_func=rename_test)
def test_no_cpu_offload_with_sharded_grad_scaler(self, mixed_precision):
config = {"mixed_precision": mixed_precision, "move_params_to_cpu": False}
test_fn = functools.partial(
self._test_identical_outputs, TransformerWithSharedParams, config, use_cuda=True, lr=0.01
)
spawn_and_init(test_fn)
def test_cpu_offload_and_cuda_grads_breaks(self):
# If grads are on gpu, but model and optimizer are on cpu, backward breaks.
config = {"mixed_precision": True, "cpu_offload": True, "move_grads_to_cpu": False}
with self.assertRaises(Exception): # RuntimeError inside spawn
test_fn = functools.partial(
self._test_identical_outputs, TransformerWithSharedParams, config, use_cuda=False
)
spawn_and_init(test_fn)
def test_delayed_optim_step(self):
# We use a model with a long CUDA delay right before the optimizer step.
# This tests our streams logic, and that we don't start the FP32 -> FP16
# transfer until after the optimization step completes.
config = {"mixed_precision": True}
model_fn = functools.partial(NestedWrappedModuleWithDelay, delay_after_loss_ms=250)
test_fn = functools.partial(self._test_identical_outputs, model_fn, config)
spawn_and_init(test_fn)
def test_delayed_reduce_scatter(self):
# We insert a delay in the torch.distributed.reduce_scatter op, so that
# the post_backward_stream takes much longer than the backward pass.
# This tests that we properly block at the end of the backward pass for
# the reductions to finish.
config = {"mixed_precision": True}
model_fn = functools.partial(NestedWrappedModuleWithDelay, delay_before_reduction_ms=250)
test_fn = functools.partial(self._test_identical_outputs, model_fn, config)
spawn_and_init(test_fn)
@parameterized.expand([[True], [False]], name_func=rename_test)
def test_state_dict_on_rank_0_only(self, state_dict_on_rank_0_only):
config = {"state_dict_on_rank_0_only": state_dict_on_rank_0_only}
model_fn = functools.partial(TransformerWithSharedParams)
test_fn = functools.partial(self._test_identical_outputs, model_fn, config)
spawn_and_init(test_fn)
@parameterized.expand([[{"checkpoint_act": False}], [{"checkpoint_act": True}]], name_func=rename_test)
def test_mixture_of_experts(self, moe_config):
fsdp_config = {"mixed_precision": True}
test_fn = functools.partial(
self._test_identical_outputs,
functools.partial(MixtureOfExperts, **moe_config),
fsdp_config,
# MixtureOfExperts implements custom reduce logic, so the reference
# behavior should use that logic instead of PyTorch DDP.
ref_ddp_fn=self._dummy_ddp_fn,
norm_type=None,
)
spawn_and_init(test_fn)
@parameterized.expand([[{"checkpoint_act": False}], [{"checkpoint_act": True}]], name_func=rename_test)
def test_mixture_of_experts_with_delay_before_free(self, moe_config):
fsdp_config = {"mixed_precision": True}
test_fn = functools.partial(
self._test_identical_outputs,
functools.partial(MixtureOfExperts, delay_before_free_ms=250, **moe_config),
fsdp_config,
# MixtureOfExperts implements custom reduce logic, so the reference
# behavior should use that logic instead of PyTorch DDP.
ref_ddp_fn=self._dummy_ddp_fn,
norm_type=None,
)
spawn_and_init(test_fn)
def test_mixture_of_experts_grad_clip_breaks(self):
config = {"mixed_precision": True}
test_fn = functools.partial(
self._test_identical_outputs,
MixtureOfExperts,
config,
ref_ddp_fn=self._dummy_ddp_fn,
norm_type=2,
)
with self.assertRaises(Exception):
spawn_and_init(test_fn)
@classmethod
def _dummy_ddp_fn(self, model, group):
return DummyDDP(model)
@parameterized.expand([[1], [inf]], name_func=rename_test)
def test_clip_norm_transformer(self, norm_type):
config = {"mixed_precision": True}
test_fn = functools.partial(
self._test_identical_outputs,
TransformerWithSharedParams,
config,
norm_type=norm_type,
)
spawn_and_init(test_fn)
class TestParamInit(DistributedTest):
def test_param_change_after_init(self):
test_fn = functools.partial(self._test_param_change_after_init, config={"mixed_precision": True})
spawn_and_init(test_fn)
@classmethod
def _test_param_change_after_init(self, rank, group, config):
# Establish reference behavior.
model = self.get_wrapped_model(group, cuda_first=False, config=config)
model.eval() # no dropout for this test
input = model.module.get_input(torch.device("cuda"))
ref_output = model(*input)
# Change the weights in place.
model = self.get_wrapped_model(group, cuda_first=False, config=config)
model.eval() # no dropout for this test
first_param = next(model.parameters())
nn.init.normal_(first_param.data)
new_output = model(*input)
assert not objects_are_equal(ref_output, new_output), "new_output did not reflect change to param after init"
class TestReduceScatterProcessGroup(DistributedTest):
def test_reduce_scatter_process_group_size(self):
"""Ensure that reduce_scatter_process_group same size with the world size."""
test_fn = functools.partial(self._test_reduce_scatter_process_group_size, config={})
spawn_and_init(test_fn, world_sizes=[2])
@classmethod
def _test_reduce_scatter_process_group_size(self, rank, group, config):
model = self._get_model(group, config)
assert model.process_group_reduce_scatter.size() == model.world_size
@classmethod
def _get_model(self, group, config):
with torch.no_grad(): # required for multiprocessing
model = NestedWrappedModule(group, wrapper_config=config)
return FullyShardedDataParallel(model, group, **config)
class TestSerialization(DistributedTest):
@parameterized.expand([[False, False], [True, False], [True, True], [False, True]], name_func=rename_test)
def test_pickle(self, mixed_precision, cpu_offload):
"""Ensure that wrapped modules can be pickled/unpickled."""
skip_a_test_if_in_CI()
config = {"mixed_precision": mixed_precision, "cpu_offload": cpu_offload}
test_fn = functools.partial(self._test_pickle, config=config)
spawn_and_init(test_fn, world_sizes=[2])
@parameterized.expand([[False, False], [True, False], [True, True], [False, True]], name_func=rename_test)
def test_multiprocessing(self, mixed_precision, cpu_offload):
"""Ensure that wrapped modules can be sent via multiprocessing."""
skip_a_test_if_in_CI()
config = {"mixed_precision": mixed_precision, "cpu_offload": cpu_offload}
test_fn = functools.partial(self._test_multiprocessing, config=config)
spawn_and_init(test_fn, world_sizes=[2])
@classmethod
def _test_pickle(self, rank, group, config):
model = self._get_model(group, config)
model = pickle.loads(pickle.dumps(model))
if not config["cpu_offload"]:
model = model.cuda()
self._one_step(model, group)
@classmethod
def _test_multiprocessing(self, rank, group, config):
mp = torch.multiprocessing.Pool(1)
dummy_group = DummyProcessGroup(rank=group.rank(), size=group.size())
config["process_group_reduce_scatter"] = DummyProcessGroup(rank=group.rank(), size=group.size())
model = mp.apply(self._get_model, (dummy_group, config))
if not config["cpu_offload"]:
model = model.cuda()
self._one_step(model, group)
@classmethod
def _get_model(self, group, config):
with torch.no_grad(): # required for multiprocessing
model = NestedWrappedModule(group, wrapper_config=config)
return FullyShardedDataParallel(model, group, **config)
@classmethod
def _one_step(self, model, group):
# reset the process group (required after unpickling)
for m in model.modules():
if isinstance(m, FullyShardedDataParallel):
m.process_group = group
m.process_group_reduce_scatter = torch.distributed.new_group()
optim = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
input = model.module.get_input(torch.device("cuda"))
output = model(*input)
loss = model.module.get_loss(input, output)
model.module.run_backward(loss)
optim.step()
@pytest.mark.skipif(torch_version() < (1, 8, 0), reason="pytorch version >= 1.8.0 required")
class TestHooks(DistributedTest):
# Feel free to modify these tests as the implementation changes.
    # They are meant to make sure that backward hooks are registered and used.
@parameterized.expand([[True], [False]])
def test_output_backward_hooks(self, cuda_first):
fn = functools.partial(self._test_output_backward_hooks, cuda_first=cuda_first)
spawn_and_init(fn)
def test_backward_hooks_after_save(self):
fn = functools.partial(self._test_backward_hooks_after_save, cuda_first=False)
spawn_and_init(fn)
@classmethod
def _test_backward_hooks_after_save(self, rank, group, cuda_first=False):
model = self.get_wrapped_model(group, cuda_first=cuda_first)
self._train_for_several_steps(model, 2, model.mixed_precision)
state_1 = model.local_state_dict()
model.load_local_state_dict(state_1)
self._test_output_backward_hooks(rank, group, cuda_first=cuda_first, model=model)
@classmethod
def _test_output_backward_hooks(self, rank, group, cuda_first=False, model=None):
if model is None:
model = self.get_wrapped_model(group, cuda_first=cuda_first)
optim = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
optim.zero_grad()
# Inputs always cuda regardless of move_grads_cpu, or model.device
input = model.module.get_input(torch.device("cuda"))
output = model(*input)
assert len(output._backward_hooks) == 1 # this is pre-bwd hook
loss = model.module.get_loss(input, output).cuda()
loss.backward()
assert len(output._backward_hooks) == 1 # It doesn't get removed
optim.step()
assert len(output._backward_hooks) == 1
@parameterized.expand([[True], [False]])
def test_register_functions_called(self, cuda_first):
fn = functools.partial(self._test_register_functions_called, cuda_first=cuda_first)
spawn_and_init(fn)
@classmethod
def _test_register_functions_called(self, rank, group, cuda_first=False):
"""Tests that _register_{pre|post}_backward_hooks called during forward."""
model = self.get_wrapped_model(group, cuda_first=cuda_first)
input = model.module.get_input(torch.device("cuda"))
model._register_post_backward_hooks = mock.MagicMock(return_value=None)
model._register_pre_backward_hooks = mock.MagicMock(return_value=None)
assert not model._register_post_backward_hooks.called
assert not model._register_pre_backward_hooks.called
model(*input)
assert model._register_post_backward_hooks.called
assert model._register_pre_backward_hooks.called
@pytest.mark.skipif(torch_version() < (1, 8, 0), reason="pytorch version >= 1.8.0 required")
class TestNoGrad(DistributedTest):
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_transformer_parameterized(self, config):
test_fn = functools.partial(self._test_transformer, config=config)
spawn_and_init(test_fn)
@classmethod
def _test_transformer(self, rank, group, config):
autocast = config["mixed_precision"]
# Train model for a step
model = self.get_wrapped_model(group, cuda_first=False, config=config)
self._train_for_several_steps(model, 1, autocast)
model.eval() # no dropout for this test
# Eval in standard mode (i.e., without no_grad)
input = model.module.get_input(torch.device("cuda"))
ref_output = model(*input)
# Eval with no_grad and compare
with torch.no_grad():
no_grad_output = model(*input)
assert objects_are_equal(ref_output, no_grad_output, raise_exception=True)
@pytest.mark.skipif(torch_version() < (1, 8, 0), reason="pytorch version >= 1.8.0 required")
class TestModuleProperties(DistributedTest):
@parameterized.expand([[{"flatten_parameters": False}], [{"flatten_parameters": True}]], name_func=rename_test)
def test_named_parameters(self, config):
test_fn = functools.partial(self._test_named_params, config=config)
spawn_and_init(test_fn)
@classmethod
def _test_named_params(self, rank, group, config):
# Get the named parameters before wrapping.
before_wrap_model = TransformerWithSharedParams(group)
before_wrap_params = before_wrap_model.named_parameters()
# Train the model for 1 step.
model = self.get_wrapped_model(group, cuda_first=False, config=config)
self._train_for_several_steps(model, 1, autocast=False)
# Get the named parameters after wrapping to compare.
after_wrap_params = model.named_parameters()
if not config["flatten_parameters"]:
for before_nm, after_nm in zip(before_wrap_params, after_wrap_params):
assert before_nm[0] == after_nm[0]
else:
named_params_flat = [p for p in after_wrap_params][0][0]
assert "flat_param_0" in named_params_flat
# Compare name and size under the `summon_full_params` context.
with model.summon_full_params():
after_wrap_params = model.named_parameters()
for before_nm, after_nm_original in zip(before_wrap_params, after_wrap_params):
assert before_nm[0] == after_nm_original[0]
torch.testing.assert_allclose(before_nm[1].shape, after_nm_original[1].cpu().shape)
class TestResetParameters(DistributedTest):
def test_reset_parameters(self):
"""Ensure that reduce_scatter_process_group same size with the world size."""
test_fn = functools.partial(self._test_reset, config={})
spawn_and_init(test_fn, world_sizes=[2])
@classmethod
def _test_reset(self, rank, group, config):
model = self._get_model(group, config)
with model.summon_full_params():
model.reset_parameters()
@classmethod
def _get_model(self, group, config):
with torch.no_grad(): # required for multiprocessing
model = nn.Linear(10, 10)
return FullyShardedDataParallel(model, group, allow_reset_parameters=True, **config)
class TransformerWithSharedParams(nn.Module):
def __init__(self, group, *unused_args, d_vocab=23, d_model=16, add_bn=True, **unused_kwargs):
super().__init__()
self.rank = group.rank()
self.world_size = group.size()
torch.manual_seed(0) # keep everything deterministic
assert d_vocab >= 12 # we use torch.arange(12) as input
self.embed_tokens = nn.Embedding(d_vocab, d_model)
self.transformer = nn.Transformer(
d_model=d_model,
num_encoder_layers=2,
num_decoder_layers=2,
dim_feedforward=8,
dropout=0.1,
)
self.output_proj = nn.Linear(d_model, d_vocab)
# share the embedding and output projection weights
self.output_proj.weight = self.embed_tokens.weight
self.register_buffer("vocab_bias", self.embed_tokens.weight.new_ones((d_model,)))
self.register_buffer("long_buffer", torch.zeros_like(self.vocab_bias, dtype=torch.long))
self.bs = 2
self.bn = torch.nn.BatchNorm1d(self.bs) if add_bn else torch.nn.Identity()
def get_input(self, device):
torch.manual_seed(1 + self.rank) # keep everything deterministic
src = torch.arange(12, device=device).view(6, self.bs) # T x B
tgt = torch.arange(self.bs * 4, device=device).view(4, self.bs) # T x B
return (src, tgt)
def forward(self, src_ids, tgt_ids):
src = self.embed_tokens(src_ids)
src = src + self.vocab_bias + self.long_buffer.type_as(src)
tgt = self.embed_tokens(tgt_ids)
tgt = self.bn(tgt)
x = self.transformer(src, tgt)
return self.output_proj(x)
def get_loss(self, input, output):
_, tgt = input
return nn.functional.cross_entropy(output.view(-1, output.size(-1)), tgt.view(-1), reduction="sum")
def run_backward(self, loss):
loss.backward()
class NestedWrappedModule(nn.Module):
def __init__(self, group, wrapper_config, wrap_everything=False, checkpoint=False):
super().__init__()
self.rank = group.rank()
self.world_size = group.size()
self.wrapper_config = wrapper_config
def _maybe_wrap(layer):
if wrapper_config is not None:
return FullyShardedDataParallel(layer, group, **wrapper_config)
return layer
torch.manual_seed(0) # keep everything deterministic
self.module = nn.Sequential(
nn.Linear(8, 4),
_maybe_wrap(
nn.Sequential(
_maybe_wrap(nn.Linear(4, 16)),
nn.Linear(16, 16),
)
),
_maybe_wrap(nn.Linear(16, 4)),
nn.Linear(4, 8),
)
        # Wrapping all modules triggers a corner case where the root FSDP instance does not
        # hold any parameters of its own. Test it with checkpoint_wrapper as well, to validate
        # that the final backward callback is queued correctly when the root FSDP has no
        # params and every layer is wrapped as FSDP(checkpoint(module)).
if wrap_everything:
if checkpoint:
self.module = nn.Sequential(
_maybe_wrap(checkpoint_wrapper(nn.Linear(8, 4))),
_maybe_wrap(checkpoint_wrapper(nn.Linear(4, 16))),
_maybe_wrap(checkpoint_wrapper(nn.Linear(16, 4))),
_maybe_wrap(checkpoint_wrapper(nn.Linear(4, 8))),
)
else:
self.module = nn.Sequential(
_maybe_wrap(nn.Linear(8, 4)),
_maybe_wrap(nn.Linear(4, 16)),
_maybe_wrap(nn.Linear(16, 4)),
_maybe_wrap(nn.Linear(4, 8)),
)
def get_input(self, device):
torch.manual_seed(1 + self.rank) # keep everything deterministic
return (torch.rand(4, 8, device=device),)
def forward(self, x):
return self.module(x)
def get_loss(self, input, output):
loss = output.sum()
return loss
def run_backward(self, loss):
loss.backward()
class DummyDDP(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
class MixtureOfExperts(NestedWrappedModule):
def __init__(self, group, wrapper_config, checkpoint_act=False, delay_before_free_ms=0, expert_group=None):
super().__init__(group, wrapper_config)
self.group = group
self.delay_before_free_ms = delay_before_free_ms
# "expert" params are different on each rank
torch.manual_seed(42 + group.rank())
d_expert = 23
d_shared = 12
d_input = 8
expert = nn.Linear(d_expert, d_shared)
self.num_expert_params = sum([p.numel() for p in expert.parameters()])
for p in expert.parameters():
p.expert = True
# everything else is shared
torch.manual_seed(0)
shared = nn.Linear(d_shared, d_expert)
if checkpoint_act:
expert = checkpoint_wrapper(expert)
shared = checkpoint_wrapper(shared)
if wrapper_config is not None:
# we create a process group of size >= 1 for the expert params
# we also need to pass that group as the reduce_scatter group.
expert_group = expert_group or torch.distributed.new_group([group.rank()])
expert = FullyShardedDataParallel(
expert, process_group=expert_group, process_group_reduce_scatter=expert_group, **wrapper_config
)
shared = FullyShardedDataParallel(shared, group, **wrapper_config)
self.module = nn.Sequential(nn.Linear(d_input, d_shared), shared, expert, nn.Linear(d_shared, d_input))
def forward(self, x):
if self.delay_before_free_ms > 0:
expert = self.module[2]
if isinstance(expert, FullyShardedDataParallel):
orig_free_full_params = self.module[2]._free_full_params
def _free_full_params_with_delay(*args):
torch.cuda._sleep(int(self.delay_before_free_ms * get_cycles_per_ms()))
return orig_free_full_params(*args)
assert hasattr(expert, "_free_full_params")
with mock.patch.object(expert, "_free_full_params", _free_full_params_with_delay):
return self.module(x)
return self.module(x)
def run_backward(self, loss):
loss.backward()
# manually reduce gradients if not wrapped in FullyShardedDataParallel
if self.wrapper_config is None:
with torch.no_grad():
for p in self.parameters():
if hasattr(p, "expert"):
continue # these params don't need grad reduction
p.grad.data.div_(self.world_size)
torch.distributed.all_reduce(p.grad.data, group=self.group)
class ModuleWithDelay(nn.Module):
def __init__(self, module, delay_after_loss_ms=0, delay_before_reduction_ms=0):
super().__init__()
self.delay_after_loss_ms = delay_after_loss_ms
self.delay_before_reduction_ms = delay_before_reduction_ms
self.module = module
def get_input(self, device):
return self.module.get_input(device)
def forward(self, x):
return self.module(x)
def get_loss(self, input, output):
loss = self.module.get_loss(input, output)
if self.delay_after_loss_ms > 0:
torch.cuda._sleep(int(self.delay_after_loss_ms * get_cycles_per_ms()))
return loss
def run_backward(self, loss):
orig_reduce_scatter = torch.distributed.reduce_scatter
def _delayed_reduce_scatter(*args, **kwargs):
if self.delay_before_reduction_ms > 0:
torch.cuda._sleep(int(self.delay_before_reduction_ms * get_cycles_per_ms()))
return orig_reduce_scatter(*args, **kwargs)
with mock.patch("torch.distributed.reduce_scatter", _delayed_reduce_scatter):
self.module.run_backward(loss)
class NestedWrappedModuleWithDelay(ModuleWithDelay):
def __init__(self, group, wrapper_config, **kwargs):
super().__init__(NestedWrappedModule(group, wrapper_config), **kwargs)
def spawn_and_init(fn, args=None, **spawn_kwargs):
if args is None:
args = ()
run_fn = functools.partial(init_and_run, fn, args)
spawn_for_all_world_sizes(run_fn, **spawn_kwargs)
def init_and_run(fn, args, rank, world_size, filename, filename_rpc):
dist_init(rank, world_size, filename, filename_rpc)
group = torch.distributed.new_group()
fn(rank, group, *args)
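# Illustrative sketch (an assumption, not invoked by the test suite): `spawn_and_init`
# spawns one process per requested world size via `spawn_for_all_world_sizes`, and
# `init_and_run` initializes the default process group before handing
# (rank, group, *args) to the test function. The `_example_*` names below are
# hypothetical and exist only for this sketch.
def _example_check_world_size(rank, group):
    # The group created by `init_and_run` spans all ranks, so its size equals the world size.
    assert group.size() == torch.distributed.get_world_size()
def _example_spawn_sketch():
    spawn_and_init(_example_check_world_size, world_sizes=[2])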
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
import unittest
from parameterized import parameterized
import pytest
import torch.nn as nn
from fairscale.internal import torch_version
from .test_fsdp import (
CONFIG_OPTIONS,
DistributedTest,
NestedWrappedModule,
TransformerWithSharedParams,
rename_test,
spawn_and_init,
)
@pytest.mark.skipif(torch_version() < (1, 8, 0), reason="pytorch version >= 1.8.0 required")
class TestApply(DistributedTest):
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_transformer_weight_init(self, config):
model_init_fn = functools.partial(model_init_and_apply_custom_weight_init, TransformerWithSharedParams)
test_fn = functools.partial(self._test_identical_outputs, model_init_fn, config, lr=0.01)
spawn_and_init(test_fn)
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_nested_wrapped_weight_init(self, config):
model_init_fn = functools.partial(model_init_and_apply_custom_weight_init, NestedWrappedModule)
test_fn = functools.partial(self._test_identical_outputs, model_init_fn, config, lr=0.01)
spawn_and_init(test_fn)
def model_init_and_apply_custom_weight_init(model_init_fn, *args, **kwargs):
model = model_init_fn(*args, **kwargs)
model.apply(init_bert_params_)
return model
def init_bert_params_(module):
"""
Initialize the weights specific to the BERT Model.
"""
def normal_(data):
# with FSDP, module params will be on CUDA, so we cast them back to CPU
# so that the RNG is consistent with and without FSDP
data.copy_(data.cpu().normal_(mean=0.0, std=0.02))
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, nn.MultiheadAttention):
normal_(module.in_proj_weight.data)
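# Illustrative sketch (an assumption, not part of the original tests): applying the
# BERT-style init to a plain nn.Linear and checking that the bias is zeroed in place.
# `_example_apply_bert_init` is a hypothetical helper added only for this sketch.
def _example_apply_bert_init():
    layer = nn.Linear(4, 4)
    init_bert_params_(layer)
    assert layer.bias.abs().sum().item() == 0.0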
if __name__ == "__main__":
unittest.main()
|